xref: /openbmc/linux/tools/perf/util/ordered-events.c (revision 82df5b73)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <linux/list.h>
5 #include <linux/compiler.h>
6 #include <linux/string.h>
7 #include "ordered-events.h"
8 #include "session.h"
9 #include "asm/bug.h"
10 #include "debug.h"
11 #include "ui/progress.h"
12 
/*
 * Debug printout helpers: pr_N() emits at verbosity level 'n' within the
 * 'ordered-events' debug class; pr() is the common level-1 shorthand.
 */
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
17 
/*
 * Insert 'new' into oe->events, keeping the list sorted by timestamp.
 *
 * Instead of scanning from the head every time, the search starts at
 * oe->last (the most recently queued event) and walks forward or
 * backward from there, since consecutive events tend to have nearby
 * timestamps.
 */
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	/* Empty queue: 'new' is both the only and the newest event. */
	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		/* Walk forward until we find an event stamped later than 'new'. */
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				/* Hit the tail: 'new' carries the max timestamp. */
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		/* Insert just before the first later-stamped event. */
		list_add_tail(&new->list, &last->list);
	} else {
		/* Walk backward until we find an event not newer than 'new'. */
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				/* Reached the head: 'new' is the oldest event. */
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		/* Insert just after the found event. */
		list_add(&new->list, &last->list);
	}
}
63 
64 static union perf_event *__dup_event(struct ordered_events *oe,
65 				     union perf_event *event)
66 {
67 	union perf_event *new_event = NULL;
68 
69 	if (oe->cur_alloc_size < oe->max_alloc_size) {
70 		new_event = memdup(event, event->header.size);
71 		if (new_event)
72 			oe->cur_alloc_size += event->header.size;
73 	}
74 
75 	return new_event;
76 }
77 
78 static union perf_event *dup_event(struct ordered_events *oe,
79 				   union perf_event *event)
80 {
81 	return oe->copy_on_queue ? __dup_event(oe, event) : event;
82 }
83 
84 static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
85 {
86 	if (event) {
87 		oe->cur_alloc_size -= event->header.size;
88 		free(event);
89 	}
90 }
91 
92 static void free_dup_event(struct ordered_events *oe, union perf_event *event)
93 {
94 	if (oe->copy_on_queue)
95 		__free_dup_event(oe, event);
96 }
97 
/* Number of ordered_event slots that fit in one 64K allocation buffer. */
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
/*
 * Allocate an ordered_event wrapper for @event (duplicating the event
 * itself first when copy_on_queue is set). Returns NULL when either
 * the duplication or the wrapper allocation hits the budget.
 */
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;
	size_t size;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	/*
	 * We maintain the following scheme of buffers for ordered
	 * event allocation:
	 *
	 *   to_free list -> buffer1 (64K)
	 *                   buffer2 (64K)
	 *                   ...
	 *
	 * Each buffer keeps an array of ordered events objects:
	 *    buffer -> event[0]
	 *              event[1]
	 *              ...
	 *
	 * Each allocated ordered event is linked to one of
	 * following lists:
	 *   - time ordered list 'events'
	 *   - list of currently removed events 'cache'
	 *
	 * Allocation of the ordered event uses the following order
	 * to get the memory:
	 *   - use recently removed object from 'cache' list
	 *   - use available object in current allocation buffer
	 *   - allocate new buffer if the current buffer is full
	 *
	 * Removal of ordered event object moves it from events to
	 * the cache list.
	 */
	size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

	if (!list_empty(cache)) {
		/* Reuse a previously delivered slot from the cache. */
		new = list_entry(cache->next, struct ordered_event, list);
		list_del_init(&new->list);
	} else if (oe->buffer) {
		/* Take the next free slot; drop the buffer once it fills up. */
		new = &oe->buffer->event[oe->buffer_idx];
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
		/* Start a fresh buffer, tracked on to_free for teardown. */
		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/* Slot 0 is handed out now, so the next free index is 1. */
		oe->buffer_idx = 1;
		new = &oe->buffer->event[0];
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		/*
		 * NOTE(review): new_event is not freed on this path in the
		 * original; the caller retries after a HALF flush, but the
		 * duplicated event from this attempt appears leaked when
		 * copy_on_queue is set — verify against upstream.
		 */
		return NULL;
	}

	new->event = new_event;
	return new;
}
170 
171 static struct ordered_event *
172 ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
173 		    union perf_event *event)
174 {
175 	struct ordered_event *new;
176 
177 	new = alloc_event(oe, event);
178 	if (new) {
179 		new->timestamp = timestamp;
180 		queue_event(oe, new);
181 	}
182 
183 	return new;
184 }
185 
186 void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
187 {
188 	list_move(&event->list, &oe->cache);
189 	oe->nr_events--;
190 	free_dup_event(oe, event->event);
191 	event->event = NULL;
192 }
193 
194 int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
195 			  u64 timestamp, u64 file_offset)
196 {
197 	struct ordered_event *oevent;
198 
199 	if (!timestamp || timestamp == ~0ULL)
200 		return -ETIME;
201 
202 	if (timestamp < oe->last_flush) {
203 		pr_oe_time(timestamp,      "out of order event\n");
204 		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
205 			   oe->last_flush_type);
206 
207 		oe->nr_unordered_events++;
208 	}
209 
210 	oevent = ordered_events__new_event(oe, timestamp, event);
211 	if (!oevent) {
212 		ordered_events__flush(oe, OE_FLUSH__HALF);
213 		oevent = ordered_events__new_event(oe, timestamp, event);
214 	}
215 
216 	if (!oevent)
217 		return -ENOMEM;
218 
219 	oevent->file_offset = file_offset;
220 	return 0;
221 }
222 
/*
 * Deliver every queued event with timestamp <= oe->next_flush, moving
 * each delivered wrapper to the cache list. Stops early when the
 * session is marked done or when the deliver callback fails.
 *
 * NOTE(review): the early returns (session_done(), deliver error) skip
 * ui_progress__finish() even when the progress bar was initialized —
 * presumably benign, but worth confirming against the UI code.
 */
static int do_flush(struct ordered_events *oe, bool show_progress)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	struct ui_progress prog;
	int ret;

	/* A zero limit means there is nothing to flush yet. */
	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		/* List is time ordered: the first event past the limit ends the pass. */
		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	/*
	 * Keep oe->last coherent: clear it when the queue drained
	 * completely, or repoint it at the new tail if the previous
	 * last event was itself flushed (last_ts <= limit).
	 */
	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}
265 
/*
 * Compute the flush limit (oe->next_flush) according to @how, then run
 * do_flush(). @timestamp is only meaningful for OE_FLUSH__TIME.
 */
static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
				   u64 timestamp)
{
	/* Debug labels; presumably indexed by enum oe_flush — keep in sync. */
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
		"TOP  ",
		"TIME ",
	};
	int err;
	bool show_progress = false;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		/* Final flush may be long-running: show progress. */
		show_progress = true;
		__fallthrough;
	case OE_FLUSH__TOP:
		/* Flush everything currently queued. */
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		/* Flush up to the midpoint of the queued time range. */
		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__TIME:
		oe->next_flush = timestamp;
		show_progress = false;
		break;

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		/* ROUND reuses next_flush from the previous round (updated below). */
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = do_flush(oe, show_progress);

	if (!err) {
		/* Arm the next ROUND flush to cover everything seen so far. */
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}
338 
339 int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
340 {
341 	return __ordered_events__flush(oe, how, 0);
342 }
343 
344 int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
345 {
346 	return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
347 }
348 
349 u64 ordered_events__first_time(struct ordered_events *oe)
350 {
351 	struct ordered_event *event;
352 
353 	if (list_empty(&oe->events))
354 		return 0;
355 
356 	event = list_first_entry(&oe->events, struct ordered_event, list);
357 	return event->timestamp;
358 }
359 
360 void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
361 			  void *data)
362 {
363 	INIT_LIST_HEAD(&oe->events);
364 	INIT_LIST_HEAD(&oe->cache);
365 	INIT_LIST_HEAD(&oe->to_free);
366 	oe->max_alloc_size = (u64) -1;
367 	oe->cur_alloc_size = 0;
368 	oe->deliver	   = deliver;
369 	oe->data	   = data;
370 }
371 
372 static void
373 ordered_events_buffer__free(struct ordered_events_buffer *buffer,
374 			    unsigned int max, struct ordered_events *oe)
375 {
376 	if (oe->copy_on_queue) {
377 		unsigned int i;
378 
379 		for (i = 0; i < max; i++)
380 			__free_dup_event(oe, buffer->event[i].event);
381 	}
382 
383 	free(buffer);
384 }
385 
/*
 * Tear down all allocation buffers tracked on oe->to_free. The current
 * (partially filled) buffer only has buffer_idx slots populated, so it
 * is handled separately from the fully populated ones.
 */
void ordered_events__free(struct ordered_events *oe)
{
	struct ordered_events_buffer *buffer, *tmp;

	if (list_empty(&oe->to_free))
		return;

	/*
	 * Current buffer might not have all the events allocated
	 * yet, we need to free only allocated ones ...
	 */
	if (oe->buffer) {
		list_del_init(&oe->buffer->list);
		ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
	}

	/* ... and continue with the rest */
	list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
		list_del_init(&buffer->list);
		ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
	}
}
408 
409 void ordered_events__reinit(struct ordered_events *oe)
410 {
411 	ordered_events__deliver_t old_deliver = oe->deliver;
412 
413 	ordered_events__free(oe);
414 	memset(oe, '\0', sizeof(*oe));
415 	ordered_events__init(oe, old_deliver, oe->data);
416 }
417