1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * LoongArch cacheinfo support
4  *
5  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
6  */
7 #include <linux/cacheinfo.h>
8 #include <asm/bootinfo.h>
9 #include <asm/cpu-info.h>
10 
/*
 * Fill in one cacheinfo leaf from the cpuinfo_loongarch cache descriptor
 * named by @cache (dcache/icache/vcache/scache/tcache), then advance the
 * @leaf cursor to the next entry.
 *
 * Relies on 'c' (struct cpuinfo_loongarch *) and 'nodes_per_package'
 * being in scope at the expansion site.  For caches above level 2 the
 * reported size is scaled by nodes_per_package — presumably because
 * those levels are shared package-wide (see cache_leaves_are_shared).
 *
 * Macro arguments are parenthesized to stay hygienic if a call site
 * ever passes a compound expression (c_level is already passed as
 * 'level++' today and is expanded exactly once).
 */
#define populate_cache(cache, leaf, c_level, c_type)		\
do {								\
	(leaf)->type = (c_type);				\
	(leaf)->level = (c_level);				\
	(leaf)->coherency_line_size = c->cache.linesz;		\
	(leaf)->number_of_sets = c->cache.sets;			\
	(leaf)->ways_of_associativity = c->cache.ways;		\
	(leaf)->size = c->cache.linesz * c->cache.sets *	\
		c->cache.ways;					\
	if ((leaf)->level > 2)					\
		(leaf)->size *= nodes_per_package;		\
	(leaf)++;						\
} while (0)
25 
26 int init_cache_level(unsigned int cpu)
27 {
28 	struct cpuinfo_loongarch *c = &current_cpu_data;
29 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
30 	int levels = 0, leaves = 0;
31 
32 	/*
33 	 * If Dcache is not set, we assume the cache structures
34 	 * are not properly initialized.
35 	 */
36 	if (c->dcache.waysize)
37 		levels += 1;
38 	else
39 		return -ENOENT;
40 
41 
42 	leaves += (c->icache.waysize) ? 2 : 1;
43 
44 	if (c->vcache.waysize) {
45 		levels++;
46 		leaves++;
47 	}
48 
49 	if (c->scache.waysize) {
50 		levels++;
51 		leaves++;
52 	}
53 
54 	if (c->tcache.waysize) {
55 		levels++;
56 		leaves++;
57 	}
58 
59 	this_cpu_ci->num_levels = levels;
60 	this_cpu_ci->num_leaves = leaves;
61 	return 0;
62 }
63 
64 static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
65 					   struct cacheinfo *sib_leaf)
66 {
67 	return !((this_leaf->level == 1) || (this_leaf->level == 2));
68 }
69 
70 static void cache_cpumap_setup(unsigned int cpu)
71 {
72 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
73 	struct cacheinfo *this_leaf, *sib_leaf;
74 	unsigned int index;
75 
76 	for (index = 0; index < this_cpu_ci->num_leaves; index++) {
77 		unsigned int i;
78 
79 		this_leaf = this_cpu_ci->info_list + index;
80 		/* skip if shared_cpu_map is already populated */
81 		if (!cpumask_empty(&this_leaf->shared_cpu_map))
82 			continue;
83 
84 		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
85 		for_each_online_cpu(i) {
86 			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
87 
88 			if (i == cpu || !sib_cpu_ci->info_list)
89 				continue;/* skip if itself or no cacheinfo */
90 			sib_leaf = sib_cpu_ci->info_list + index;
91 			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
92 				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
93 				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
94 			}
95 		}
96 	}
97 }
98 
99 int populate_cache_leaves(unsigned int cpu)
100 {
101 	int level = 1, nodes_per_package = 1;
102 	struct cpuinfo_loongarch *c = &current_cpu_data;
103 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
104 	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
105 
106 	if (loongson_sysconf.nr_nodes > 1)
107 		nodes_per_package = loongson_sysconf.cores_per_package
108 					/ loongson_sysconf.cores_per_node;
109 
110 	if (c->icache.waysize) {
111 		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
112 		populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
113 	} else {
114 		populate_cache(dcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
115 	}
116 
117 	if (c->vcache.waysize)
118 		populate_cache(vcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
119 
120 	if (c->scache.waysize)
121 		populate_cache(scache, this_leaf, level++, CACHE_TYPE_UNIFIED);
122 
123 	if (c->tcache.waysize)
124 		populate_cache(tcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
125 
126 	cache_cpumap_setup(cpu);
127 	this_cpu_ci->cpu_map_populated = true;
128 
129 	return 0;
130 }
131