// SPDX-License-Identifier: GPL-2.0-only
/*
 * LoongArch cacheinfo support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/cacheinfo.h>
#include <asm/cpu-info.h>

/* Populates leaf and increments to next leaf */
#define populate_cache(cache, leaf, c_level, c_type)		\
do {								\
	leaf->type = c_type;					\
	leaf->level = c_level;					\
	leaf->coherency_line_size = c->cache.linesz;		\
	leaf->number_of_sets = c->cache.sets;			\
	leaf->ways_of_associativity = c->cache.ways;		\
	leaf->size = c->cache.linesz * c->cache.sets *		\
		c->cache.ways;					\
	leaf++;							\
} while (0)
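/*
 * Note that populate_cache() relies on a local `struct cpuinfo_loongarch *c`
 * being in scope at the expansion site. For illustration (hypothetical
 * numbers): a leaf describing a cache with a 64-byte line, 256 sets and
 * 4 ways is reported with size 64 * 256 * 4 = 64 KiB, after which `leaf`
 * points at the next info_list entry.
 */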

int init_cache_level(unsigned int cpu)
{
	struct cpuinfo_loongarch *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	int levels = 0, leaves = 0;

	/*
	 * If Dcache is not set, we assume the cache structures
	 * are not properly initialized.
	 */
	if (c->dcache.waysize)
		levels += 1;
	else
		return -ENOENT;

	/* A split L1 (I + D) contributes two leaves, a unified L1 just one */
	leaves += (c->icache.waysize) ? 2 : 1;

	if (c->vcache.waysize) {
		levels++;
		leaves++;
	}

	if (c->scache.waysize) {
		levels++;
		leaves++;
	}

	if (c->tcache.waysize) {
		levels++;
		leaves++;
	}

	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}
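
/*
 * Example (hypothetical configuration): split L1 I/D caches plus a
 * VCache (L2) and an SCache (L3) but no TCache yields levels = 3 and
 * leaves = 4; populate_cache_leaves() below must fill exactly that
 * many leaves, in the same order they are counted here.
 */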

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/* Level 1 and level 2 leaves are per-core; higher levels are shared */
	return !((this_leaf->level == 1) || (this_leaf->level == 2));
}
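
/*
 * Caveat: this heuristic marks every leaf above level 2 as shared by
 * all online CPUs, which over-shares the last-level cache on a
 * multi-package system. A stricter variant could additionally compare
 * the two CPUs' nodes in cache_cpumap_setup() (e.g. with cpu_to_node())
 * before linking the leaves; that is only a sketch, not what this code
 * does.
 */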

static void cache_cpumap_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;

	for (index = 0; index < this_cpu_ci->num_leaves; index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}
}
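
/*
 * The shared_cpu_map computed above is what the generic cacheinfo core
 * exposes under /sys/devices/system/cpu/cpuN/cache/indexM/shared_cpu_map.
 */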

int populate_cache_leaves(unsigned int cpu)
{
	int level = 1;
	struct cpuinfo_loongarch *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;

	if (c->icache.waysize) {
		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
		populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
	} else {
		populate_cache(dcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
	}
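	/*
	 * Note: levels are assigned in presence order, so if e.g. the
	 * VCache is absent the SCache below gets level 2 and would then
	 * be treated as core-private by cache_leaves_are_shared() above.
	 */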

	if (c->vcache.waysize)
		populate_cache(vcache, this_leaf, level++, CACHE_TYPE_UNIFIED);

	if (c->scache.waysize)
		populate_cache(scache, this_leaf, level++, CACHE_TYPE_UNIFIED);

	if (c->tcache.waysize)
		populate_cache(tcache, this_leaf, level++, CACHE_TYPE_UNIFIED);

	cache_cpumap_setup(cpu);
	this_cpu_ci->cpu_map_populated = true;

	return 0;
}