/* xref: /openbmc/linux/tools/perf/util/block-range.c (revision 70fbe057) */
#include "block-range.h"
#include "annotate.h"
#include <assert.h>
#include <stdlib.h>

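/*
 * Global store of all basic-block ranges seen so far: an rbtree ordered by
 * address plus a count of the blocks block_range__create() has completed.
 */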
struct {
	struct rb_root root;
	u64 blocks;
} block_ranges;

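/*
 * Sanity check: ranges in the tree must be sorted by address, must not
 * overlap, and each must satisfy start <= end.
 */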
static void block_range__debug(void)
{
	/*
	 * XXX still paranoid for now; see if we can make this depend on
	 * DEBUG=1 builds.
	 */
#if 1
	struct rb_node *rb;
	u64 old = 0; /* NULL isn't executable */

	for (rb = rb_first(&block_ranges.root); rb; rb = rb_next(rb)) {
		struct block_range *entry = rb_entry(rb, struct block_range, node);

		assert(old < entry->start);
		assert(entry->start <= entry->end); /* single instruction block; jump to a jump */

		old = entry->end;
	}
#endif
}

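/*
 * Find the range that contains @addr; returns NULL when @addr falls into a
 * hole between (or outside of) the known ranges.
 */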
struct block_range *block_range__find(u64 addr)
{
	struct rb_node **p = &block_ranges.root.rb_node;
	struct rb_node *parent = NULL;
	struct block_range *entry;

	while (*p != NULL) {
		parent = *p;
		entry = rb_entry(parent, struct block_range, node);

		if (addr < entry->start)
			p = &parent->rb_left;
		else if (addr > entry->end)
			p = &parent->rb_right;
		else
			return entry;
	}

	return NULL;
}

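/*
 * Link @left as the immediate in-order predecessor of @node, i.e. into the
 * rightmost empty slot of @node's left subtree. The caller still has to
 * rb_insert_color() the new node afterwards.
 */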
static inline void rb_link_left_of_node(struct rb_node *left, struct rb_node *node)
{
	struct rb_node **p = &node->rb_left;
	while (*p) {
		node = *p;
		p = &node->rb_right;
	}
	rb_link_node(left, node, p);
}

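/*
 * Mirror of the above: link @right as the immediate in-order successor of
 * @node, into the leftmost empty slot of @node's right subtree.
 */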
static inline void rb_link_right_of_node(struct rb_node *right, struct rb_node *node)
{
	struct rb_node **p = &node->rb_right;
	while (*p) {
		node = *p;
		p = &node->rb_left;
	}
	rb_link_node(right, node, p);
}

/**
 * block_range__create
 * @start: branch target starting this basic block
 * @end:   branch ending this basic block
 *
 * Create all the required block ranges to precisely span the given range.
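 *
 * Return: an iterator whose start and end members bracket [@start..@end];
 * if a memory allocation fails part-way, either member can be left NULL,
 * so callers should check the iterator before dereferencing it.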
 */
struct block_range_iter block_range__create(u64 start, u64 end)
{
	struct rb_node **p = &block_ranges.root.rb_node;
	struct rb_node *n, *parent = NULL;
	struct block_range *next, *entry = NULL;
	struct block_range_iter iter = { NULL, NULL };

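	/*
	 * Find the range containing @start or, failing that, the link where a
	 * node covering @start would be inserted.
	 */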
	while (*p != NULL) {
		parent = *p;
		entry = rb_entry(parent, struct block_range, node);

		if (start < entry->start)
			p = &parent->rb_left;
		else if (start > entry->end)
			p = &parent->rb_right;
		else
			break;
	}

	/*
	 * Didn't find anything; there's a hole at @start, however @end might
	 * be inside/behind the next range.
	 */
	if (!*p) {
		if (!entry) /* tree empty */
			goto do_whole;

		/*
		 * If the node we found ends before @start, advance to the next
		 * range.
		 */
		n = parent;
		if (entry->end < start) {
			n = rb_next(n);
			if (!n)
				goto do_whole;
		}
		next = rb_entry(n, struct block_range, node);

		if (next->start <= end) { /* add head: [start...][n->start...] */
			struct block_range *head = malloc(sizeof(struct block_range));
			if (!head)
				return iter;

			*head = (struct block_range){
				.start		= start,
				.end		= next->start - 1,
				.is_target	= 1,
				.is_branch	= 0,
			};

			rb_link_left_of_node(&head->node, &next->node);
			rb_insert_color(&head->node, &block_ranges.root);
			block_range__debug();

			iter.start = head;
			goto do_tail;
		}

do_whole:
		/*
		 * The whole [start..end] range is non-overlapping.
		 */
		entry = malloc(sizeof(struct block_range));
		if (!entry)
			return iter;

		*entry = (struct block_range){
			.start		= start,
			.end		= end,
			.is_target	= 1,
			.is_branch	= 1,
		};

		rb_link_node(&entry->node, parent, p);
		rb_insert_color(&entry->node, &block_ranges.root);
		block_range__debug();

		iter.start = entry;
		iter.end   = entry;
		goto done;
	}

	/*
	 * We found a range that overlapped with ours, split if needed.
	 */
	if (entry->start < start) { /* split: [e->start...][start...] */
		struct block_range *head = malloc(sizeof(struct block_range));
		if (!head)
			return iter;

		*head = (struct block_range){
			.start		= entry->start,
			.end		= start - 1,
			.is_target	= entry->is_target,
			.is_branch	= 0,

			.coverage	= entry->coverage,
			.entry		= entry->entry,
		};

		entry->start		= start;
		entry->is_target	= 1;
		entry->entry		= 0;

		rb_link_left_of_node(&head->node, &entry->node);
		rb_insert_color(&head->node, &block_ranges.root);
		block_range__debug();

	} else if (entry->start == start)
		entry->is_target = 1;

	iter.start = entry;

do_tail:
	/*
	 * At this point we've got: @iter.start = [@start...] but @end can still be
	 * inside or beyond it.
	 */
	entry = iter.start;
	for (;;) {
		/*
		 * If @end is inside @entry, split.
		 */
		if (end < entry->end) { /* split: [...end][...e->end] */
			struct block_range *tail = malloc(sizeof(struct block_range));
			if (!tail)
				return iter;

			*tail = (struct block_range){
				.start		= end + 1,
				.end		= entry->end,
				.is_target	= 0,
				.is_branch	= entry->is_branch,

				.coverage	= entry->coverage,
				.taken		= entry->taken,
				.pred		= entry->pred,
			};

			entry->end		= end;
			entry->is_branch	= 1;
			entry->taken		= 0;
			entry->pred		= 0;

			rb_link_right_of_node(&tail->node, &entry->node);
			rb_insert_color(&tail->node, &block_ranges.root);
			block_range__debug();

			iter.end = entry;
			goto done;
		}

		/*
		 * If @end matches @entry, done
		 */
		if (end == entry->end) {
			entry->is_branch = 1;
			iter.end = entry;
			goto done;
		}

		next = block_range__next(entry);
		if (!next)
			goto add_tail;

		/*
		 * If @end is beyond @entry but not inside @next, add tail.
		 */
		if (end < next->start) { /* add tail: [...e->end][...end] */
			struct block_range *tail;
add_tail:
			tail = malloc(sizeof(struct block_range));
			if (!tail)
				return iter;

			*tail = (struct block_range){
				.start		= entry->end + 1,
				.end		= end,
				.is_target	= 0,
				.is_branch	= 1,
			};

			rb_link_right_of_node(&tail->node, &entry->node);
			rb_insert_color(&tail->node, &block_ranges.root);
			block_range__debug();

			iter.end = tail;
			goto done;
		}

		/*
		 * If there is a hole between @entry and @next, fill it.
		 */
		if (entry->end + 1 != next->start) {
			struct block_range *hole = malloc(sizeof(struct block_range));
			if (!hole)
				return iter;

			*hole = (struct block_range){
				.start		= entry->end + 1,
				.end		= next->start - 1,
				.is_target	= 0,
				.is_branch	= 0,
			};

			rb_link_left_of_node(&hole->node, &next->node);
			rb_insert_color(&hole->node, &block_ranges.root);
			block_range__debug();
		}

		entry = next;
	}

done:
	assert(iter.start->start == start && iter.start->is_target);
	assert(iter.end->end == end && iter.end->is_branch);

	block_ranges.blocks++;

	return iter;
}
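
/*
 * Example usage (a sketch, not lifted from an actual caller): walk every
 * range the returned iterator spans and account one sample against each.
 * If an allocation failed part-way, one of the iterator members is NULL and
 * we bail out. Only names visible from this file are used: the iterator's
 * start/end members, the per-range coverage counter and block_range__next().
 *
 *	struct block_range_iter iter = block_range__create(from, to);
 *	struct block_range *entry;
 *
 *	if (!iter.start || !iter.end)
 *		return;
 *
 *	for (entry = iter.start; entry; entry = block_range__next(entry)) {
 *		entry->coverage++;
 *		if (entry == iter.end)
 *			break;
 *	}
 */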


/*
 * Compute coverage as:
 *
 *    br->coverage / br->sym->max_coverage
 *
 * This ensures each symbol has a 100% spot, to reflect that each symbol has
 * a most covered section.
 *
 * Returns [0-1] for coverage and -1 if we had no data whatsoever or the
 * symbol does not exist.
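 *
 * Example: a block with coverage 5 inside a symbol whose hottest block has
 * max_coverage 20 reports 5 / 20 = 0.25.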
 */
double block_range__coverage(struct block_range *br)
{
	struct symbol *sym;

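	/*
	 * No range covers this address: report 0 if we collected any block
	 * data at all, -1 if there is nothing to compare against.
	 */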
	if (!br) {
		if (block_ranges.blocks)
			return 0;

		return -1;
	}

	sym = br->sym;
	if (!sym)
		return -1;

	return (double)br->coverage / symbol__annotation(sym)->max_coverage;
}
329