--- ring_buffer.c (a524446fe82f7f38738403a5a080c4910af86a61)
+++ ring_buffer.c (2d622719f1572ef31e0616444a515eba3094d050)
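
Taken as a whole, this change teaches the ring buffer to discard events in place. A new ring_buffer_event_discard() turns an already-written event into padding, and padding is now split into two kinds: "null" padding with time_delta == 0 (the pre-existing end-of-page filler, whose length is undefined) and "discarded" events with time_delta != 0, which keep a readable length so the read side can step over them. The read paths are reworked accordingly, and the CPU-hotplug notifier picks up a __cpuinit annotation.
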
 /*
  * Generic ring buffer
  *
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
 #include <linux/ring_buffer.h>
 #include <linux/trace_clock.h>
 #include <linux/ftrace_irq.h>

--- 175 unchanged lines hidden ---

 #define RB_ALIGNMENT		4U
 #define RB_MAX_SMALL_DATA	28

 enum {
 	RB_LEN_TIME_EXTEND = 8,
 	RB_LEN_TIME_STAMP = 16,
 };

-/* inline for ring buffer fast paths */
+static inline int rb_null_event(struct ring_buffer_event *event)
+{
+	return event->type == RINGBUF_TYPE_PADDING && event->time_delta == 0;
+}
+
+static inline int rb_discarded_event(struct ring_buffer_event *event)
+{
+	return event->type == RINGBUF_TYPE_PADDING && event->time_delta;
+}
+
+static void rb_event_set_padding(struct ring_buffer_event *event)
+{
+	event->type = RINGBUF_TYPE_PADDING;
+	event->time_delta = 0;
+}
+
+/**
+ * ring_buffer_event_discard - discard an event in the ring buffer
+ * @buffer: the ring buffer
+ * @event: the event to discard
+ *
+ * Sometimes an event that is in the ring buffer needs to be ignored.
+ * This function lets the user discard an event in the ring buffer
+ * and then that event will not be read later.
+ *
+ * Note, it is up to the user to be careful with this, and protect
+ * against races. If the user discards an event that has been consumed
+ * it is possible that it could corrupt the ring buffer.
+ */
+void ring_buffer_event_discard(struct ring_buffer_event *event)
+{
+	event->type = RINGBUF_TYPE_PADDING;
+	/* time delta must be non zero */
+	if (!event->time_delta)
+		event->time_delta = 1;
+}
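
How a writer would use the new discard call is not shown in this diff; the sketch below is an illustrative guess, assuming the reserve/commit API of this kernel (ring_buffer_lock_reserve()/ring_buffer_unlock_commit()) and hypothetical compute_value()/keep_event() helpers. The event is still committed after being discarded; the discard merely rewrites it as non-null padding so readers will skip it.

	static void trace_value(struct ring_buffer *buf)
	{
		struct ring_buffer_event *event;
		int *payload;

		event = ring_buffer_lock_reserve(buf, sizeof(*payload));
		if (!event)
			return;

		payload = ring_buffer_event_data(event);
		*payload = compute_value();	/* hypothetical */
		if (!keep_event(payload))	/* hypothetical filter */
			ring_buffer_event_discard(event);

		/* commit either way; a discarded event reads as skippable padding */
		ring_buffer_unlock_commit(buf, event);
	}
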
 static unsigned
-rb_event_length(struct ring_buffer_event *event)
+rb_event_data_length(struct ring_buffer_event *event)
 {
 	unsigned length;
 
+	if (event->len)
+		length = event->len * RB_ALIGNMENT;
+	else
+		length = event->array[0];
+	return length + RB_EVNT_HDR_SIZE;
+}
+
+/* inline for ring buffer fast paths */
+static unsigned
+rb_event_length(struct ring_buffer_event *event)
+{
 	switch (event->type) {
 	case RINGBUF_TYPE_PADDING:
-		/* undefined */
-		return -1;
+		if (rb_null_event(event))
+			/* undefined */
+			return -1;
+		return rb_event_data_length(event);
 
 	case RINGBUF_TYPE_TIME_EXTEND:
 		return RB_LEN_TIME_EXTEND;
 
 	case RINGBUF_TYPE_TIME_STAMP:
 		return RB_LEN_TIME_STAMP;
 
 	case RINGBUF_TYPE_DATA:
-		if (event->len)
-			length = event->len * RB_ALIGNMENT;
-		else
-			length = event->array[0];
-		return length + RB_EVNT_HDR_SIZE;
+		return rb_event_data_length(event);
 	default:
 		BUG();
 	}
 	/* not hit */
 	return 0;
 }
 
 /**
--- 307 unchanged lines hidden ---
 
 /*
  * Causes compile errors if the struct buffer_page gets bigger
  * than the struct page.
  */
 extern int ring_buffer_page_too_big(void);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int rb_cpu_notify(struct notifier_block *self,
-			 unsigned long action, void *hcpu);
+static int __cpuinit rb_cpu_notify(struct notifier_block *self,
+				   unsigned long action, void *hcpu);
 #endif
 
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
  * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
--- 292 unchanged lines hidden ---
 	 */
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
-static inline int rb_null_event(struct ring_buffer_event *event)
-{
-	return event->type == RINGBUF_TYPE_PADDING;
-}
-
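
Note that rb_null_event() is not deleted outright here: it moved to the top of the file (first hunk) and was tightened so that only padding with time_delta == 0 counts as "null". Padding with a nonzero time_delta now denotes a discarded event, and because a discarded event keeps its len/array[0] encoding, rb_event_length() can still size it and readers can step over it. Reading the encoding off rb_event_data_length() above: a 12-byte payload stores event->len = 12 / RB_ALIGNMENT = 3 and yields 12 + RB_EVNT_HDR_SIZE, while anything over RB_MAX_SMALL_DATA (28 bytes) presumably leaves len at 0 and carries the byte count in event->array[0].
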
 static inline void *
 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
 {
 	return bpage->data + index;
 }
 
 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
 {
--- 353 unchanged lines hidden ---
 	}
 
 	/*
 	 * The actual tail page has moved forward.
 	 */
 	if (tail < BUF_PAGE_SIZE) {
 		/* Mark the rest of the page with padding */
 		event = __rb_page_index(tail_page, tail);
-		event->type = RINGBUF_TYPE_PADDING;
+		rb_event_set_padding(event);
 	}
 
 	if (tail <= BUF_PAGE_SIZE)
 		/* Set the write back to the previous setting */
 		local_set(&tail_page->write, tail);
 
 	/*
 	 * If this was a commit entry that failed,
--- 733 unchanged lines hidden ---
 	reader = rb_get_reader_page(cpu_buffer);
 
 	/* This function should not be called when buffer is empty */
 	if (RB_WARN_ON(cpu_buffer, !reader))
 		return;
 
 	event = rb_reader_event(cpu_buffer);
 
-	if (event->type == RINGBUF_TYPE_DATA)
+	if (event->type == RINGBUF_TYPE_DATA || rb_discarded_event(event))
 		cpu_buffer->entries--;
 
 	rb_update_read_stamp(cpu_buffer, event);
 
 	length = rb_event_length(event);
 	cpu_buffer->reader_page->read += length;
 }
 
--- 66 unchanged lines hidden ---
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
 		return NULL;
 
 	event = rb_reader_event(cpu_buffer);
 
 	switch (event->type) {
 	case RINGBUF_TYPE_PADDING:
-		RB_WARN_ON(cpu_buffer, 1);
+		if (rb_null_event(event))
+			RB_WARN_ON(cpu_buffer, 1);
+		/*
+		 * Because the writer could be discarding every
+		 * event it creates (which would probably be bad)
+		 * if we were to go back to "again" then we may never
+		 * catch up, and will trigger the warn on, or lock
+		 * the box. Return the padding, and we will release
+		 * the current locks, and try again.
+		 */
 		rb_advance_reader(cpu_buffer);
-		return NULL;
+		return event;
 
 	case RINGBUF_TYPE_TIME_EXTEND:
 		/* Internal data, OK to advance */
 		rb_advance_reader(cpu_buffer);
 		goto again;
 
 	case RINGBUF_TYPE_TIME_STAMP:
 		/* FIXME: not implemented */
--- 44 unchanged lines hidden ---
 
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
 	event = rb_iter_head_event(iter);
 
 	switch (event->type) {
 	case RINGBUF_TYPE_PADDING:
-		rb_inc_iter(iter);
-		goto again;
+		if (rb_null_event(event)) {
+			rb_inc_iter(iter);
+			goto again;
+		}
+		rb_advance_iter(iter);
+		return event;
 
 	case RINGBUF_TYPE_TIME_EXTEND:
 		/* Internal data, OK to advance */
 		rb_advance_iter(iter);
 		goto again;
 
 	case RINGBUF_TYPE_TIME_STAMP:
 		/* FIXME: not implemented */
--- 30 unchanged lines hidden ---
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
+ again:
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_buffer_peek(buffer, cpu, ts);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+	if (event && event->type == RINGBUF_TYPE_PADDING) {
+		cpu_relax();
+		goto again;
+	}
+
 	return event;
 }
 
 /**
  * ring_buffer_iter_peek - peek at the next event to be read
  * @iter: The ring buffer iterator
  * @ts: The timestamp counter of this event.
  *
  * This will return the event that will be read next, but does
  * not increment the iterator.
  */
 struct ring_buffer_event *
 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
+ again:
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+	if (event && event->type == RINGBUF_TYPE_PADDING) {
+		cpu_relax();
+		goto again;
+	}
+
 	return event;
 }
 
 /**
  * ring_buffer_consume - return an event and consume it
  * @buffer: The ring buffer to get the next event from
  *
  * Returns the next event in the ring buffer, and that event is consumed.
  * Meaning, that sequential reads will keep returning a different event,
  * and eventually empty the ring buffer if the producer is slower.
  */
 struct ring_buffer_event *
 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
 
+ again:
 	/* might be called in atomic */
 	preempt_disable();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
--- 5 unchanged lines hidden ---
 	rb_advance_reader(cpu_buffer);
 
  out_unlock:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
  out:
 	preempt_enable();
 
+	if (event && event->type == RINGBUF_TYPE_PADDING) {
+		cpu_relax();
+		goto again;
+	}
+
 	return event;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
  * @buffer: The ring buffer to read from
  * @cpu: The cpu buffer to iterate over
--- 62 unchanged lines hidden ---
  */
 struct ring_buffer_event *
 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 {
 	struct ring_buffer_event *event;
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;
 
+ again:
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
 	if (!event)
 		goto out;
 
 	rb_advance_iter(iter);
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+	if (event && event->type == RINGBUF_TYPE_PADDING) {
+		cpu_relax();
+		goto again;
+	}
+
 	return event;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
  * @buffer: The ring buffer.
  */
--- 453 unchanged lines hidden ---
 		pr_warning("Could not create debugfs 'tracing_on' entry\n");
 
 	return 0;
 }
 
 fs_initcall(rb_init_debugfs);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int rb_cpu_notify(struct notifier_block *self,
-			 unsigned long action, void *hcpu)
+static int __cpuinit rb_cpu_notify(struct notifier_block *self,
+				   unsigned long action, void *hcpu)
 {
 	struct ring_buffer *buffer =
 		container_of(self, struct ring_buffer, cpu_notify);
 	long cpu = (long)hcpu;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
--- 27 unchanged lines hidden ---
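
A closing note on the repeated "again:" blocks: rb_buffer_peek() and rb_iter_peek() now hand a discarded-padding event back to their callers rather than looping under the reader lock (see the comment in the PADDING case above), so each exported read path drops the lock, calls cpu_relax(), and retries. Callers of the public API therefore never see the padding. A minimal consumer loop under that assumption, where process_event() is a hypothetical callback not part of this diff:

	struct ring_buffer_event *event;
	u64 ts;

	/* drain one CPU's buffer; discarded events are skipped internally */
	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
		process_event(ring_buffer_event_data(event), ts);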