xref: /openbmc/linux/drivers/gpu/drm/amd/amdkfd/kfd_crat.c (revision 70a59dd8)
/*
 * Copyright 2015-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include "kfd_crat.h"
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"

/* GPU Processor ID base for dGPUs for which a VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31]=1.
 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
 * used in the CRAT.
 */
static uint32_t gpu_processor_id_low = 0x80001000;

/* Return the next available gpu_processor_id and increment it for the next
 * GPU
 *	@total_cu_count - Total CUs present in the GPU including ones
 *			  masked off
 */
static inline unsigned int get_and_inc_gpu_processor_id(
				unsigned int total_cu_count)
{
	int current_id = gpu_processor_id_low;

	gpu_processor_id_low += total_cu_count;
	return current_id;
}
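
/* Illustrative example (not part of the original source): with the base
 * above, a first GPU reporting 64 total CUs receives processor_id_low
 * 0x80001000 and advances the counter to 0x80001040, so the next GPU's
 * CU/SIMD IDs start at 0x80001040 and the per-GPU ID ranges never overlap.
 */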

/* Static table to describe GPU Cache information */
struct kfd_gpu_cache_info {
	uint32_t	cache_size;
	uint32_t	cache_level;
	uint32_t	flags;
	/* Indicates how many Compute Units share this cache
	 * Value = 1 indicates the cache is not shared
	 */
	uint32_t	num_cu_shared;
};

static struct kfd_gpu_cache_info kaveri_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank */
		.cache_size = 8,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},

	/* TODO: Add L2 Cache information */
};


static struct kfd_gpu_cache_info carrizo_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.cache_size = 8,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 4,
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank. */
		.cache_size = 4,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 4,
	},

	/* TODO: Add L2 Cache information */
};

/* NOTE: In the future, if more information is added to struct
 * kfd_gpu_cache_info, the following ASICs may need separate tables.
 */
#define hawaii_cache_info kaveri_cache_info
#define tonga_cache_info carrizo_cache_info
#define fiji_cache_info  carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
#define polaris12_cache_info carrizo_cache_info
#define vegam_cache_info carrizo_cache_info
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
#define renoir_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info

static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.cpu_cores_count = cu->num_cpu_cores;
	dev->node_props.cpu_core_id_base = cu->processor_id_low;
	if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;

	pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
			cu->processor_id_low);
}

static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.simd_id_base = cu->processor_id_low;
	dev->node_props.simd_count = cu->num_simd_cores;
	dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
	dev->node_props.max_waves_per_simd = cu->max_waves_simd;
	dev->node_props.wave_front_size = cu->wave_front_size;
	dev->node_props.array_count = cu->array_count;
	dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
	dev->node_props.simd_per_cu = cu->num_simd_per_cu;
	dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
	if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
		dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
	pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
}

/* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
				struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
			cu->proximity_domain, cu->hsa_capability);
	list_for_each_entry(dev, device_list, list) {
		if (cu->proximity_domain == dev->proximity_domain) {
			if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
				kfd_populated_cu_info_cpu(dev, cu);

			if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
				kfd_populated_cu_info_gpu(dev, cu);
			break;
		}
	}

	return 0;
}

static struct kfd_mem_properties *
find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
		struct kfd_topology_device *dev)
{
	struct kfd_mem_properties *props;

	list_for_each_entry(props, &dev->mem_props, list) {
		if (props->heap_type == heap_type
				&& props->flags == flags
				&& props->width == width)
			return props;
	}

	return NULL;
}

/* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
				struct list_head *device_list)
{
	struct kfd_mem_properties *props;
	struct kfd_topology_device *dev;
	uint32_t heap_type;
	uint64_t size_in_bytes;
	uint32_t flags = 0;
	uint32_t width;

	pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
			mem->proximity_domain);
	list_for_each_entry(dev, device_list, list) {
		if (mem->proximity_domain == dev->proximity_domain) {
			/* We're on a GPU node */
			if (dev->node_props.cpu_cores_count == 0) {
				/* APU */
				if (mem->visibility_type == 0)
					heap_type =
						HSA_MEM_HEAP_TYPE_FB_PRIVATE;
				/* dGPU */
				else
					heap_type = mem->visibility_type;
			} else
				heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;

			if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
				flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
			if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
				flags |= HSA_MEM_FLAGS_NON_VOLATILE;

			size_in_bytes =
				((uint64_t)mem->length_high << 32) +
							mem->length_low;
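			/* Worked example (illustrative, not from the
			 * original source): length_high = 0x1 and
			 * length_low = 0x80000000 recombine to
			 * 0x1_8000_0000, i.e. a 6 GiB bank.
			 */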
			width = mem->width;

			/* Multiple banks of the same type are aggregated into
			 * one. User mode doesn't care about multiple physical
			 * memory segments. It's managed as a single virtual
			 * heap for user mode.
			 */
			props = find_subtype_mem(heap_type, flags, width, dev);
			if (props) {
				props->size_in_bytes += size_in_bytes;
				break;
			}

			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->heap_type = heap_type;
			props->flags = flags;
			props->size_in_bytes = size_in_bytes;
			props->width = width;

			dev->node_props.mem_banks_count++;
			list_add_tail(&props->list, &dev->mem_props);

			break;
		}
	}

	return 0;
}

/* kfd_parse_subtype_cache - parse cache subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
			struct list_head *device_list)
{
	struct kfd_cache_properties *props;
	struct kfd_topology_device *dev;
	uint32_t id;
	uint32_t total_num_of_cu;

	id = cache->processor_id_low;

	pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
	list_for_each_entry(dev, device_list, list) {
		total_num_of_cu = (dev->node_props.array_count *
					dev->node_props.cu_per_simd_array);

		/* Cache information in CRAT doesn't carry proximity_domain
		 * information, as a cache is associated with a CPU core or
		 * a GPU Compute Unit. So map the cache using the CPU core ID
		 * or the SIMD (GPU) ID.
		 * TODO: This works because currently we can safely assume
		 *  that Compute Units are parsed before caches are parsed.
		 *  In the future, remove this dependency.
		 */
		if ((id >= dev->node_props.cpu_core_id_base &&
			id <= dev->node_props.cpu_core_id_base +
				dev->node_props.cpu_cores_count) ||
			(id >= dev->node_props.simd_id_base &&
			id < dev->node_props.simd_id_base +
				total_num_of_cu)) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->processor_id_low = id;
			props->cache_level = cache->cache_level;
			props->cache_size = cache->cache_size;
			props->cacheline_size = cache->cache_line_size;
			props->cachelines_per_tag = cache->lines_per_tag;
			props->cache_assoc = cache->associativity;
			props->cache_latency = cache->cache_latency;
			memcpy(props->sibling_map, cache->sibling_map,
					sizeof(props->sibling_map));

			if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_DATA;
			if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
			if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_CPU;
			if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_HSACU;

			dev->cache_count++;
			dev->node_props.caches_count++;
			list_add_tail(&props->list, &dev->cache_props);

			break;
		}
	}

	return 0;
}
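
/* Illustrative example (not from the original source): a CPU node with
 * cpu_core_id_base = 0x10 and 8 cores claims cache entries whose
 * processor_id_low lies in [0x10, 0x18] (the CPU range check above is
 * inclusive); a GPU node with simd_id_base = 0x80001000 and 64 CUs
 * claims IDs in [0x80001000, 0x80001040).
 */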

/* kfd_parse_subtype_iolink - parse iolink subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
					struct list_head *device_list)
{
	struct kfd_iolink_properties *props = NULL, *props2;
	struct kfd_topology_device *dev, *to_dev;
	uint32_t id_from;
	uint32_t id_to;

	id_from = iolink->proximity_domain_from;
	id_to = iolink->proximity_domain_to;

	pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
			id_from, id_to);
	list_for_each_entry(dev, device_list, list) {
		if (id_from == dev->proximity_domain) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->node_from = id_from;
			props->node_to = id_to;
			props->ver_maj = iolink->version_major;
			props->ver_min = iolink->version_minor;
			props->iolink_type = iolink->io_interface_type;

			if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
				props->weight = 20;
			else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
				props->weight = 15 * iolink->num_hops_xgmi;
			else
				props->weight = node_distance(id_from, id_to);

			props->min_latency = iolink->minimum_latency;
			props->max_latency = iolink->maximum_latency;
			props->min_bandwidth = iolink->minimum_bandwidth_mbs;
			props->max_bandwidth = iolink->maximum_bandwidth_mbs;
			props->rec_transfer_size =
					iolink->recommended_transfer_size;

			dev->io_link_count++;
			dev->node_props.io_links_count++;
			list_add_tail(&props->list, &dev->io_link_props);
			break;
		}
	}

	/* CPU topology is created before GPUs are detected, so CPU->GPU
	 * links are not built at that time. If a PCIe type is discovered, it
	 * means a GPU is detected and we are adding GPU->CPU to the topology.
	 * At this time, also add the corresponding CPU->GPU link if the GPU
	 * has a large BAR.
	 * For xGMI, only one direction of the link is present in the CRAT
	 * table; add the corresponding reverse-direction link now.
	 */
	if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
		to_dev = kfd_topology_device_by_proximity_domain(id_to);
		if (!to_dev)
			return -ENODEV;
		/* same everything but the other direction */
		props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
		if (!props2)
			return -ENOMEM;
		props2->node_from = id_to;
		props2->node_to = id_from;
		props2->kobj = NULL;
		to_dev->io_link_count++;
		to_dev->node_props.io_links_count++;
		list_add_tail(&props2->list, &to_dev->io_link_props);
	}

	return 0;
}
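
/* Illustrative weights under the scheme above (not from the original
 * source): a PCIe link always gets weight 20, a 2-hop xGMI link gets
 * 15 * 2 = 30, and CPU-to-CPU links fall back to the NUMA
 * node_distance() of the two domains.
 */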

/* kfd_parse_subtype - parse subtypes and attach them to the correct topology
 * device present in the device_list
 *	@sub_type_hdr - subtype section of crat_image
 *	@device_list - list of topology devices present in this crat_image
 */
static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
				struct list_head *device_list)
{
	struct crat_subtype_computeunit *cu;
	struct crat_subtype_memory *mem;
	struct crat_subtype_cache *cache;
	struct crat_subtype_iolink *iolink;
	int ret = 0;

	switch (sub_type_hdr->type) {
	case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
		cu = (struct crat_subtype_computeunit *)sub_type_hdr;
		ret = kfd_parse_subtype_cu(cu, device_list);
		break;
	case CRAT_SUBTYPE_MEMORY_AFFINITY:
		mem = (struct crat_subtype_memory *)sub_type_hdr;
		ret = kfd_parse_subtype_mem(mem, device_list);
		break;
	case CRAT_SUBTYPE_CACHE_AFFINITY:
		cache = (struct crat_subtype_cache *)sub_type_hdr;
		ret = kfd_parse_subtype_cache(cache, device_list);
		break;
	case CRAT_SUBTYPE_TLB_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found TLB entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_IOLINK_AFFINITY:
		iolink = (struct crat_subtype_iolink *)sub_type_hdr;
		ret = kfd_parse_subtype_iolink(iolink, device_list);
		break;
	default:
		pr_warn("Unknown subtype %d in CRAT\n",
				sub_type_hdr->type);
	}

	return ret;
}

/* kfd_parse_crat_table - parse the CRAT table. For each node present in the
 * CRAT, create a kfd_topology_device and add it to device_list. Also parse
 * the CRAT subtypes and attach them to the appropriate kfd_topology_device.
 *	@crat_image - input image containing CRAT
 *	@device_list - [OUT] list of kfd_topology_device generated after
 *		       parsing crat_image
 *	@proximity_domain - Proximity domain of the first device in the table
 *
 *	Return - 0 if successful else -ve value
 */
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
			 uint32_t proximity_domain)
{
	struct kfd_topology_device *top_dev = NULL;
	struct crat_subtype_generic *sub_type_hdr;
	uint16_t node_id;
	int ret = 0;
	struct crat_header *crat_table = (struct crat_header *)crat_image;
	uint16_t num_nodes;
	uint32_t image_len;

	if (!crat_image)
		return -EINVAL;

	if (!list_empty(device_list)) {
		pr_warn("Error: device list should be empty\n");
		return -EINVAL;
	}

	num_nodes = crat_table->num_domains;
	image_len = crat_table->length;

	pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);

	for (node_id = 0; node_id < num_nodes; node_id++) {
		top_dev = kfd_create_topology_device(device_list);
		if (!top_dev)
			break;
		top_dev->proximity_domain = proximity_domain++;
	}

	if (!top_dev) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
	memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
			CRAT_OEMTABLEID_LENGTH);
	top_dev->oem_revision = crat_table->oem_revision;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
	while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
			((char *)crat_image) + image_len) {
		if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
			ret = kfd_parse_subtype(sub_type_hdr, device_list);
			if (ret)
				break;
		}

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);
	}

err:
	if (ret)
		kfd_release_topology_device_list(device_list);

	return ret;
}
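
/* Usage sketch (illustrative only; the actual caller lives in
 * kfd_topology.c and differs in detail):
 *
 *	struct list_head temp_list;
 *	void *crat_image = NULL;
 *	size_t image_size = 0;
 *
 *	INIT_LIST_HEAD(&temp_list);
 *	if (!kfd_create_crat_image_acpi(&crat_image, &image_size))
 *		kfd_parse_crat_table(crat_image, &temp_list, 0);
 *	...
 *	kfd_destroy_crat_image(crat_image);
 */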

/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_pcache(struct crat_subtype_cache *pcache,
				struct kfd_gpu_cache_info *pcache_info,
				struct kfd_cu_info *cu_info,
				int mem_available,
				int cu_bitmask,
				int cache_type, unsigned int cu_processor_id,
				int cu_block)
{
	unsigned int cu_sibling_map_mask;
	int first_active_cu;

	/* First check if enough memory is available */
	if (sizeof(struct crat_subtype_cache) > mem_available)
		return -ENOMEM;

	cu_sibling_map_mask = cu_bitmask;
	cu_sibling_map_mask >>= cu_block;
	cu_sibling_map_mask &=
		((1 << pcache_info[cache_type].num_cu_shared) - 1);
	first_active_cu = ffs(cu_sibling_map_mask);

	/* A CU can be inactive. In case of a shared cache, find the first
	 * active CU; in case of a non-shared cache, check whether the CU is
	 * inactive and, if so, skip it.
	 */
	if (first_active_cu) {
		memset(pcache, 0, sizeof(struct crat_subtype_cache));
		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
		pcache->length = sizeof(struct crat_subtype_cache);
		pcache->flags = pcache_info[cache_type].flags;
		pcache->processor_id_low = cu_processor_id
					 + (first_active_cu - 1);
		pcache->cache_level = pcache_info[cache_type].cache_level;
		pcache->cache_size = pcache_info[cache_type].cache_size;

		/* Sibling map is w.r.t processor_id_low, so shift out
		 * inactive CUs
		 */
		cu_sibling_map_mask =
			cu_sibling_map_mask >> (first_active_cu - 1);

		pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
		pcache->sibling_map[1] =
				(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
		pcache->sibling_map[2] =
				(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
		pcache->sibling_map[3] =
				(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
		return 0;
	}
	return 1;
}
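
/* Worked example (illustrative, not from the original source): with
 * num_cu_shared = 4, cu_block = 4 and cu_bitmask = 0xE0, the mask
 * becomes (0xE0 >> 4) & 0xF = 0xE, ffs() reports bit 2, so
 * processor_id_low = cu_processor_id + 1 and the shifted sibling map
 * byte is 0xE >> 1 = 0x07.
 */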

/* kfd_fill_gpu_cache_info - Fill GPU cache info using the kfd_gpu_cache_info
 * tables
 *
 *	@kdev - [IN] GPU device
 *	@gpu_processor_id - [IN] GPU processor ID to which these caches
 *			    associate
 *	@available_size - [IN] Amount of memory available in pcache
 *	@cu_info - [IN] Compute Unit info obtained from KGD
 *	@pcache - [OUT] memory into which cache data is to be filled in.
 *	@size_filled - [OUT] amount of data used up in pcache.
 *	@num_of_entries - [OUT] number of caches added
 */
static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
			int gpu_processor_id,
			int available_size,
			struct kfd_cu_info *cu_info,
			struct crat_subtype_cache *pcache,
			int *size_filled,
			int *num_of_entries)
{
	struct kfd_gpu_cache_info *pcache_info;
	int num_of_cache_types = 0;
	int i, j, k;
	int ct = 0;
	int mem_available = available_size;
	unsigned int cu_processor_id;
	int ret;

	switch (kdev->device_info->asic_family) {
	case CHIP_KAVERI:
		pcache_info = kaveri_cache_info;
		num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
		break;
	case CHIP_HAWAII:
		pcache_info = hawaii_cache_info;
		num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
		break;
	case CHIP_CARRIZO:
		pcache_info = carrizo_cache_info;
		num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
		break;
	case CHIP_TONGA:
		pcache_info = tonga_cache_info;
		num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
		break;
	case CHIP_FIJI:
		pcache_info = fiji_cache_info;
		num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
		break;
	case CHIP_POLARIS10:
		pcache_info = polaris10_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
		break;
	case CHIP_POLARIS11:
		pcache_info = polaris11_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
		break;
	case CHIP_POLARIS12:
		pcache_info = polaris12_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
		break;
	case CHIP_VEGAM:
		pcache_info = vegam_cache_info;
		num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		pcache_info = vega10_cache_info;
		num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
		break;
	case CHIP_RAVEN:
		pcache_info = raven_cache_info;
		num_of_cache_types = ARRAY_SIZE(raven_cache_info);
		break;
	case CHIP_RENOIR:
		pcache_info = renoir_cache_info;
		num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		pcache_info = navi10_cache_info;
		num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
		break;
	default:
		return -EINVAL;
	}

	*size_filled = 0;
	*num_of_entries = 0;

	/* For each type of cache listed in the kfd_gpu_cache_info table,
	 * go through all available Compute Units.
	 * The [i, j, k] loop:
	 *	if kfd_gpu_cache_info.num_cu_shared == 1, walks every
	 *	available CU;
	 *	if kfd_gpu_cache_info.num_cu_shared != 1, considers only
	 *	one CU from each shared unit.
	 */

	for (ct = 0; ct < num_of_cache_types; ct++) {
		cu_processor_id = gpu_processor_id;
		for (i = 0; i < cu_info->num_shader_engines; i++) {
			for (j = 0; j < cu_info->num_shader_arrays_per_engine;
				j++) {
				for (k = 0; k < cu_info->num_cu_per_sh;
					k += pcache_info[ct].num_cu_shared) {

					ret = fill_in_pcache(pcache,
						pcache_info,
						cu_info,
						mem_available,
						cu_info->cu_bitmap[i % 4][j + i / 4],
						ct,
						cu_processor_id,
						k);

					if (ret < 0)
						break;

					if (!ret) {
						pcache++;
						(*num_of_entries)++;
						mem_available -=
							sizeof(*pcache);
						(*size_filled) +=
							sizeof(*pcache);
					}

					/* Move to next CU block */
					cu_processor_id +=
						pcache_info[ct].num_cu_shared;
				}
			}
		}
	}

	pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);

	return 0;
}
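
/* Illustrative sizing (not from the original source): a GPU with
 * 4 shader engines, 1 shader array per engine and 16 CUs per array
 * emits up to 4 * 1 * 16 = 64 entries for a cache type with
 * num_cu_shared = 1, but only up to 4 * 1 * (16 / 4) = 16 entries
 * when num_cu_shared = 4.
 */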

static bool kfd_ignore_crat(void)
{
	bool ret;

	if (ignore_crat)
		return true;

#ifndef KFD_SUPPORT_IOMMU_V2
	ret = true;
#else
	ret = false;
#endif

	return ret;
}

/*
 * kfd_create_crat_image_acpi - Allocates memory for the CRAT image and
 * copies the CRAT from ACPI (if available).
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 *	@crat_image: CRAT read from ACPI. If no CRAT in ACPI then
 *		     crat_image will be NULL
 *	@size: [OUT] size of crat_image
 *
 *	Return 0 if successful else return error code
 */
int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
{
	struct acpi_table_header *crat_table;
	acpi_status status;
	void *pcrat_image;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	/* Fetch the CRAT table from ACPI */
	status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
	if (status == AE_NOT_FOUND) {
		pr_warn("CRAT table not found\n");
		return -ENODATA;
	} else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("CRAT table error: %s\n", err);
		return -EINVAL;
	}

	if (kfd_ignore_crat()) {
		pr_info("CRAT table disabled by module option\n");
		return -ENODATA;
	}

	pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
	if (!pcrat_image)
		return -ENOMEM;
	memcpy(pcrat_image, crat_table, crat_table->length);

	*crat_image = pcrat_image;
	*size = crat_table->length;

	return 0;
}

/* Memory required to create the Virtual CRAT.
 * Since there is no easy way to predict the amount of memory required, the
 * following amount is allocated for the GPU Virtual CRAT. This is
 * expected to cover all known conditions. But to be safe, an additional
 * check is put in the code to ensure we don't overwrite beyond it.
 */
#define VCRAT_SIZE_FOR_GPU	(4 * PAGE_SIZE)
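
/* Illustrative only: with the common 4 KiB PAGE_SIZE this reserves
 * 16 KiB per GPU VCRAT; every fill helper below decrements avail_size
 * and bails out with -ENOMEM before that budget can be exceeded.
 */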

/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
 *
 *	@numa_node_id: CPU NUMA node id
 *	@avail_size: Available size in the memory
 *	@proximity_domain: CRAT proximity domain assigned to this node
 *	@sub_type_hdr: Memory into which compute info will be filled in
 *
 *	Return 0 if successful else return -ve value
 */
static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
				int proximity_domain,
				struct crat_subtype_computeunit *sub_type_hdr)
{
	const struct cpumask *cpumask;

	*avail_size -= sizeof(struct crat_subtype_computeunit);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	cpumask = cpumask_of_node(numa_node_id);

	/* Fill in CU data */
	sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
	sub_type_hdr->proximity_domain = proximity_domain;
	sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
	if (sub_type_hdr->processor_id_low == -1)
		return -EINVAL;

	sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);

	return 0;
}

/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
 *
 *	@numa_node_id: CPU NUMA node id
 *	@avail_size: Available size in the memory
 *	@proximity_domain: CRAT proximity domain assigned to this node
 *	@sub_type_hdr: Memory into which memory info will be filled in
 *
 *	Return 0 if successful else return -ve value
 */
static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
			int proximity_domain,
			struct crat_subtype_memory *sub_type_hdr)
{
	uint64_t mem_in_bytes = 0;
	pg_data_t *pgdat;
	int zone_type;

	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill in Memory Subunit data */

	/* Unlike si_meminfo, si_meminfo_node is not exported. So
	 * the following lines are duplicated from the si_meminfo_node
	 * function.
	 */
	pgdat = NODE_DATA(numa_node_id);
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
	mem_in_bytes <<= PAGE_SHIFT;

	sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
	sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
	sub_type_hdr->proximity_domain = proximity_domain;

	return 0;
}
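
/* Worked example (illustrative, not from the original source): a node
 * whose zones together manage 1,048,576 pages at a 4 KiB PAGE_SIZE
 * reports 4 GiB, stored as length_low = 0x0 and length_high = 0x1.
 */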

#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
				uint32_t *num_entries,
				struct crat_subtype_iolink *sub_type_hdr)
{
	int nid;
	struct cpuinfo_x86 *c = &cpu_data(0);
	uint8_t link_type;

	if (c->x86_vendor == X86_VENDOR_AMD)
		link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
	else
		link_type = CRAT_IOLINK_TYPE_QPI_1_1;

	*num_entries = 0;

	/* Create IO links from this node to other CPU nodes */
	for_each_online_node(nid) {
		if (nid == numa_node_id) /* node itself */
			continue;

		*avail_size -= sizeof(struct crat_subtype_iolink);
		if (*avail_size < 0)
			return -ENOMEM;

		memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

		/* Fill in subtype header data */
		sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
		sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
		sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

		/* Fill in IO link data */
		sub_type_hdr->proximity_domain_from = numa_node_id;
		sub_type_hdr->proximity_domain_to = nid;
		sub_type_hdr->io_interface_type = link_type;

		(*num_entries)++;
		sub_type_hdr++;
	}

	return 0;
}
#endif
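
/* Illustrative only: on a fully connected 4-node x86 system, each NUMA
 * node emits 3 CPU-to-CPU IO link entries (one per peer), so the
 * virtual CRAT carries 12 such entries in total.
 */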

/* kfd_create_vcrat_image_cpu - Create a Virtual CRAT for the CPU
 *
 *	@pcrat_image: Fill in VCRAT for CPU
 *	@size:	[IN] allocated size of crat_image.
 *		[OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct acpi_table_header *acpi_table;
	acpi_status status;
	struct crat_subtype_generic *sub_type_hdr;
	int avail_size = *size;
	int numa_node_id;
#ifdef CONFIG_X86_64
	uint32_t entries = 0;
#endif
	int ret = 0;

	if (!pcrat_image)
		return -EINVAL;

	/* Fill in the CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));
	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	crat_table->length = sizeof(struct crat_header);

	status = acpi_get_table("DSDT", 0, &acpi_table);
	if (status != AE_OK)
		pr_warn("DSDT table not found for OEM information\n");
	else {
		crat_table->oem_revision = acpi_table->revision;
		memcpy(crat_table->oem_id, acpi_table->oem_id,
				CRAT_OEMID_LENGTH);
		memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
				CRAT_OEMTABLEID_LENGTH);
	}
	crat_table->total_entries = 0;
	crat_table->num_domains = 0;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);

	for_each_online_node(numa_node_id) {
		if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
			continue;

		/* Fill in Subtype: Compute Unit */
		ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_computeunit *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

		/* Fill in Subtype: Memory */
		ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_memory *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

		/* Fill in Subtype: IO Link */
#ifdef CONFIG_X86_64
		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
				&entries,
				(struct crat_subtype_iolink *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += (sub_type_hdr->length * entries);
		crat_table->total_entries += entries;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length * entries);
#else
		pr_info("IO link not available for non x86 platforms\n");
#endif

		crat_table->num_domains++;
	}

	/* TODO: Add a cache subtype for the CPU.
	 * Currently, CPU cache information is available in the function
	 * detect_cache_attributes(cpu) defined in the file
	 * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
	 * exported, so to get the same information the code would need to
	 * be duplicated.
	 */

	*size = crat_table->length;
	pr_info("Virtual CRAT table created for CPU\n");

	return 0;
}

static int kfd_fill_gpu_memory_affinity(int *avail_size,
		struct kfd_dev *kdev, uint8_t type, uint64_t size,
		struct crat_subtype_memory *sub_type_hdr,
		uint32_t proximity_domain,
		const struct kfd_local_mem_info *local_mem_info)
{
	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;

	sub_type_hdr->proximity_domain = proximity_domain;

	pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
			type, size);

	sub_type_hdr->length_low = lower_32_bits(size);
	sub_type_hdr->length_high = upper_32_bits(size);

	sub_type_hdr->width = local_mem_info->vram_width;
	sub_type_hdr->visibility_type = type;

	return 0;
}

/* kfd_fill_gpu_direct_io_link_to_cpu - Fill in the direct io link from the
 * GPU to its NUMA node
 *	@avail_size: Available size in the memory
 *	@kdev - [IN] GPU device
 *	@sub_type_hdr: Memory into which io link info will be filled in
 *	@proximity_domain - proximity domain of the GPU node
 *
 *	Return 0 if successful else return -ve value
 */
static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
			struct kfd_dev *kdev,
			struct crat_subtype_iolink *sub_type_hdr,
			uint32_t proximity_domain)
{
	*avail_size -= sizeof(struct crat_subtype_iolink);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
	if (kfd_dev_is_large_bar(kdev))
		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

	/* Fill in IOLINK subtype.
	 * TODO: Fill in other fields of the iolink subtype
	 */
	sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
	sub_type_hdr->proximity_domain_from = proximity_domain;
#ifdef CONFIG_NUMA
	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
		sub_type_hdr->proximity_domain_to = 0;
	else
		sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
#else
	sub_type_hdr->proximity_domain_to = 0;
#endif
	return 0;
}

static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
			struct kfd_dev *kdev,
			struct kfd_dev *peer_kdev,
			struct crat_subtype_iolink *sub_type_hdr,
			uint32_t proximity_domain_from,
			uint32_t proximity_domain_to)
{
	*avail_size -= sizeof(struct crat_subtype_iolink);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
			       CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

	sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
	sub_type_hdr->proximity_domain_from = proximity_domain_from;
	sub_type_hdr->proximity_domain_to = proximity_domain_to;
	sub_type_hdr->num_hops_xgmi =
		amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
	return 0;
}

/* kfd_create_vcrat_image_gpu - Create a Virtual CRAT for the GPU
 *
 *	@pcrat_image: Fill in VCRAT for GPU
 *	@size:	[IN] allocated size of crat_image.
 *		[OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_gpu(void *pcrat_image,
				      size_t *size, struct kfd_dev *kdev,
				      uint32_t proximity_domain)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct crat_subtype_generic *sub_type_hdr;
	struct kfd_local_mem_info local_mem_info;
	struct kfd_topology_device *peer_dev;
	struct crat_subtype_computeunit *cu;
	struct kfd_cu_info cu_info;
	int avail_size = *size;
	uint32_t total_num_of_cu;
	int num_of_cache_entries = 0;
	int cache_mem_filled = 0;
	uint32_t nid = 0;
	int ret = 0;

	if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
		return -EINVAL;

	/* Fill the CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));

	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	/* Change length as we add more subtypes */
	crat_table->length = sizeof(struct crat_header);
	crat_table->num_domains = 1;
	crat_table->total_entries = 0;

	/* Fill in Subtype: Compute Unit
	 * First fill in the sub type header and then the sub type data
	 */
	avail_size -= sizeof(struct crat_subtype_computeunit);
	if (avail_size < 0)
		return -ENOMEM;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill CU subtype data */
	cu = (struct crat_subtype_computeunit *)sub_type_hdr;
	cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
	cu->proximity_domain = proximity_domain;

	amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
	cu->num_simd_per_cu = cu_info.simd_per_cu;
	cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
	cu->max_waves_simd = cu_info.max_waves_per_simd;

	cu->wave_front_size = cu_info.wave_front_size;
	cu->array_count = cu_info.num_shader_arrays_per_engine *
		cu_info.num_shader_engines;
	total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
	cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
	cu->num_cu_per_array = cu_info.num_cu_per_sh;
	cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
	cu->num_banks = cu_info.num_shader_engines;
	cu->lds_size_in_kb = cu_info.lds_size;

	cu->hsa_capability = 0;

	/* Check if this node supports IOMMU. During parsing this flag will
	 * translate to HSA_CAP_ATS_PRESENT
	 */
	if (!kfd_iommu_check_device(kdev))
		cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;

	crat_table->length += sub_type_hdr->length;
	crat_table->total_entries++;

	/* Fill in Subtype: Memory. Only on systems with large BAR (no
	 * private FB), report memory as public. On other systems
	 * report the total FB size (public+private) as a single
	 * private heap.
	 */
	amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

	if (debug_largebar)
		local_mem_info.local_mem_size_private = 0;

	if (local_mem_info.local_mem_size_private == 0)
		ret = kfd_fill_gpu_memory_affinity(&avail_size,
				kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
				local_mem_info.local_mem_size_public,
				(struct crat_subtype_memory *)sub_type_hdr,
				proximity_domain,
				&local_mem_info);
	else
		ret = kfd_fill_gpu_memory_affinity(&avail_size,
				kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
				local_mem_info.local_mem_size_public +
				local_mem_info.local_mem_size_private,
				(struct crat_subtype_memory *)sub_type_hdr,
				proximity_domain,
				&local_mem_info);
	if (ret < 0)
		return ret;

	crat_table->length += sizeof(struct crat_subtype_memory);
	crat_table->total_entries++;

	/* TODO: Fill in cache information. This information is NOT readily
	 * available in KGD
	 */
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
		sub_type_hdr->length);
	ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
				avail_size,
				&cu_info,
				(struct crat_subtype_cache *)sub_type_hdr,
				&cache_mem_filled,
				&num_of_cache_entries);

	if (ret < 0)
		return ret;

	crat_table->length += cache_mem_filled;
	crat_table->total_entries += num_of_cache_entries;
	avail_size -= cache_mem_filled;

	/* Fill in Subtype: IO_LINKS
	 * Only direct links are added here, i.e. the link from the GPU
	 * to its NUMA node. Indirect links are added by userspace.
	 */
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
		cache_mem_filled);
	ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
		(struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);

	if (ret < 0)
		return ret;

	crat_table->length += sub_type_hdr->length;
	crat_table->total_entries++;


	/* Fill in Subtype: IO_LINKS
	 * Direct links from this GPU to other GPUs through xGMI.
	 * Loop over the GPUs that have already been processed (those with a
	 * lower proximity_domain value) and add a link for every GPU in the
	 * same hive (from this GPU to the other GPU). The reverse iolink
	 * (from the other GPU to this GPU) is added in
	 * kfd_parse_subtype_iolink.
	 */
	if (kdev->hive_id) {
		for (nid = 0; nid < proximity_domain; ++nid) {
			peer_dev = kfd_topology_device_by_proximity_domain(nid);
			if (!peer_dev)
				continue;
			if (!peer_dev->gpu)
				continue;
			if (peer_dev->gpu->hive_id != kdev->hive_id)
				continue;
			sub_type_hdr = (typeof(sub_type_hdr))(
				(char *)sub_type_hdr +
				sizeof(struct crat_subtype_iolink));
			ret = kfd_fill_gpu_xgmi_link_to_gpu(
				&avail_size, kdev, peer_dev->gpu,
				(struct crat_subtype_iolink *)sub_type_hdr,
				proximity_domain, nid);
			if (ret < 0)
				return ret;
			crat_table->length += sub_type_hdr->length;
			crat_table->total_entries++;
		}
	}
	*size = crat_table->length;
	pr_info("Virtual CRAT table created for GPU\n");

	return ret;
}

/* kfd_create_crat_image_virtual - Allocates memory for a CRAT image and
 *		creates a Virtual CRAT (VCRAT) image
 *
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 *	@crat_image: VCRAT image created because ACPI does not have a
 *		     CRAT for this device
 *	@size: [OUT] size of virtual crat_image
 *	@flags:	COMPUTE_UNIT_CPU - Create VCRAT for CPU device
 *		COMPUTE_UNIT_GPU - Create VCRAT for GPU
 *		(COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
 *			-- this option is not currently implemented.
 *			The assumption is that all AMD APUs will have CRAT
 *	@kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
 *
 *	Return 0 if successful else return -ve value
 */
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
				  int flags, struct kfd_dev *kdev,
				  uint32_t proximity_domain)
{
	void *pcrat_image = NULL;
	int ret = 0, num_nodes;
	size_t dyn_size;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	/* Allocate the CPU Virtual CRAT size based on the number of online
	 * nodes. Allocate VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT image.
	 * This should cover all current conditions. A check is in place so
	 * that we do not write beyond the allocated size for GPUs.
	 */
	switch (flags) {
	case COMPUTE_UNIT_CPU:
		num_nodes = num_online_nodes();
		dyn_size = sizeof(struct crat_header) +
			num_nodes * (sizeof(struct crat_subtype_computeunit) +
			sizeof(struct crat_subtype_memory) +
			(num_nodes - 1) * sizeof(struct crat_subtype_iolink));
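		/* Worked example (illustrative): for num_nodes = 2 this is
		 * one crat_header plus 2 * (one compute unit subtype + one
		 * memory subtype + 1 iolink subtype), since each node links
		 * to (num_nodes - 1) = 1 peer.
		 */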
		pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
		if (!pcrat_image)
			return -ENOMEM;
		*size = dyn_size;
		pr_debug("CRAT size is %zu\n", dyn_size);
		ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
		break;
	case COMPUTE_UNIT_GPU:
		if (!kdev)
			return -EINVAL;
		pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
		if (!pcrat_image)
			return -ENOMEM;
		*size = VCRAT_SIZE_FOR_GPU;
		ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
						 proximity_domain);
		break;
	case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
		/* TODO: */
		ret = -EINVAL;
		pr_err("VCRAT not implemented for APU\n");
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		*crat_image = pcrat_image;
	else
		kvfree(pcrat_image);

	return ret;
}


/* kfd_destroy_crat_image
 *
 *	@crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
 *
 */
void kfd_destroy_crat_image(void *crat_image)
{
	kvfree(crat_image);
}