1 // SPDX-License-Identifier: GPL-2.0
78 * Here's some silly ASCII art.
 *
 * [four-panel diagram, damaged in this listing: (1) the reader's private
 *  page sits outside the ring of buffer pages; (2) the reader points its
 *  page into the ring, just after the head page; (3) the ring is linked
 *  back to the reader's page; (4) the swap is complete: the reader's old
 *  page is now an ordinary buffer page in the ring, and the page it
 *  displaced has become the new reader page.]
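/*
 * Illustrative sketch (not from ring_buffer.c): a minimal user-space model
 * of the reader-page swap described above. All names here are made up for
 * the example; the real swap additionally encodes flag bits in the list
 * pointers and uses cmpxchg so it can race safely with the writer.
 */
#include <stdio.h>

struct demo_page {
        struct demo_page *next;
        struct demo_page *prev;
        int id;
};

/* Unlink the current head from the ring and splice the reader page in. */
static struct demo_page *demo_swap_reader(struct demo_page **head,
                                          struct demo_page *reader)
{
        struct demo_page *old = *head;

        reader->next = old->next;
        reader->prev = old->prev;
        old->prev->next = reader;
        old->next->prev = reader;

        *head = reader;                 /* reader's old page joins the ring  */
        old->next = old->prev = old;    /* old head is now private to reader */
        return old;
}

int main(void)
{
        struct demo_page ring[3] = {
                { &ring[1], &ring[2], 0 },
                { &ring[2], &ring[0], 1 },
                { &ring[0], &ring[1], 2 },
        };
        struct demo_page reader = { &reader, &reader, 100 };
        struct demo_page *head = &ring[0];
        struct demo_page *mine = demo_swap_reader(&head, &reader);

        printf("reader pulled page %d out of the ring; page %d took its place\n",
               mine->id, head->id);
        return 0;
}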
164 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
168 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; in rb_null_event()
174 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_set_padding()
175 event->time_delta = 0; in rb_event_set_padding()
183 if (event->type_len) in rb_event_data_length()
184 length = event->type_len * RB_ALIGNMENT; in rb_event_data_length()
186 length = event->array[0]; in rb_event_data_length()
198 switch (event->type_len) { in rb_event_length()
202 return -1; in rb_event_length()
203 return event->array[0] + RB_EVNT_HDR_SIZE; in rb_event_length()
238 * ring_buffer_event_length - return the length of the event
255 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in ring_buffer_event_length()
257 length -= RB_EVNT_HDR_SIZE; in ring_buffer_event_length()
258 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) in ring_buffer_event_length()
259 length -= sizeof(event->array[0]); in ring_buffer_event_length()
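/*
 * Illustrative sketch (not from ring_buffer.c): how a data event's payload
 * size is recovered from its header, mirroring the type_len encoding read by
 * rb_event_data_length() above (the real helper also adds the header size).
 * struct demo_event and the DEMO_* names are simplified stand-ins.
 */
#include <stdio.h>

#define DEMO_ALIGNMENT  4               /* RB_ALIGNMENT */

struct demo_event {
        unsigned type_len:5;            /* 0 = length lives in array[0] */
        unsigned time_delta:27;
        unsigned array[1];
};

static unsigned demo_data_length(const struct demo_event *e)
{
        if (e->type_len)
                return e->type_len * DEMO_ALIGNMENT;    /* small event */
        return e->array[0];                             /* large event */
}

int main(void)
{
        struct demo_event small = { .type_len = 3, .time_delta = 0, .array = { 0 } };
        struct demo_event large = { .type_len = 0, .time_delta = 0, .array = { 200 } };

        printf("small event payload: %u bytes\n", demo_data_length(&small)); /* 12  */
        printf("large event payload: %u bytes\n", demo_data_length(&large)); /* 200 */
        return 0;
}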
270 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); in rb_event_data()
272 if (event->type_len) in rb_event_data()
273 return (void *)&event->array[0]; in rb_event_data()
275 return (void *)&event->array[1]; in rb_event_data()
279 * ring_buffer_event_data - return the data of the event
289 for_each_cpu(cpu, buffer->cpumask)
292 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
295 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
302 ts = event->array[0]; in rb_event_time_stamp()
304 ts += event->time_delta; in rb_event_time_stamp()
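/*
 * Illustrative sketch (not from ring_buffer.c): how a delta too large for the
 * 27-bit time_delta field is carried by a TIME_EXTEND event, matching the
 * TS_SHIFT/TS_MASK split read back by rb_event_time_stamp() above. The DEMO_*
 * names are local to this example.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_TS_SHIFT   27
#define DEMO_TS_MASK    ((1ULL << DEMO_TS_SHIFT) - 1)

int main(void)
{
        uint64_t delta = 123456789012ULL;               /* too big for 27 bits */
        uint32_t time_delta = delta & DEMO_TS_MASK;     /* low 27 bits         */
        uint32_t array0 = delta >> DEMO_TS_SHIFT;       /* high bits           */

        /* Reader side, as in rb_event_time_stamp(): */
        uint64_t ts = ((uint64_t)array0 << DEMO_TS_SHIFT) + time_delta;

        printf("reassembled delta: %llu (match: %d)\n",
               (unsigned long long)ts, ts == delta);
        return 0;
}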
344 * the update partition of the counter is incremented. This will
354 local_set(&bpage->commit, 0); in rb_init_page()
359 return local_read(&bpage->page->commit); in rb_page_commit()
364 free_page((unsigned long)bpage->page); in free_buffer_page()
376 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
378 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
379 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
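/*
 * Illustrative sketch (not from ring_buffer.c): the usable space on one
 * buffer page, assuming a 4 KiB page and a 16-byte sub-buffer header
 * (u64 time_stamp plus an 8-byte commit counter on 64-bit). The final
 * "- 8" mirrors BUF_MAX_DATA_SIZE: the largest payload still needs its
 * own event header.
 */
#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;                 /* PAGE_SIZE (assumed)  */
        unsigned long hdr = 8 + 8;                      /* BUF_PAGE_HDR_SIZE    */
        unsigned long buf_page_size = page_size - hdr;  /* BUF_PAGE_SIZE        */
        unsigned long max_data = buf_page_size - 2 * sizeof(unsigned int);

        printf("BUF_PAGE_SIZE     = %lu\n", buf_page_size);     /* 4080 */
        printf("BUF_MAX_DATA_SIZE = %lu\n", max_data);          /* 4072 */
        return 0;
}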
436 * EXTEND - wants a time extend
437 * ABSOLUTE - the buffer requests all events to have absolute time stamps
438 * FORCE - force a full time stamp.
585 * - Reads may fail if it interrupted a modification of the time stamp.
590 * - Writes always succeed and will overwrite other writes and writes
593 * - A write followed by a read of the same time stamp will always succeed,
596 * - A cmpxchg will fail if it interrupted another write or cmpxchg.
602 * The two most significant bits of each half holds a 2 bit counter (0-3).
608 #define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
638 c = local_read(&t->cnt); in __rb_time_read()
639 top = local_read(&t->top); in __rb_time_read()
640 bottom = local_read(&t->bottom); in __rb_time_read()
641 msb = local_read(&t->msb); in __rb_time_read()
642 } while (c != local_read(&t->cnt)); in __rb_time_read()
689 cnt = local_inc_return(&t->cnt); in rb_time_set()
690 rb_time_val_set(&t->top, top, cnt); in rb_time_set()
691 rb_time_val_set(&t->bottom, bottom, cnt); in rb_time_set()
692 rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt); in rb_time_set()
693 } while (cnt != local_read(&t->cnt)); in rb_time_set()
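/*
 * Illustrative sketch (not from ring_buffer.c): the idea behind the split
 * rb_time representation described above. A value is stored as two 30-bit
 * halves whose top two bits carry the same update counter, so a reader can
 * tell whether the halves it read belong to the same write and retry if not.
 * All demo_* names and DEMO_* constants are local to this example.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DEMO_SHIFT      30
#define DEMO_VAL_MASK   ((1U << DEMO_SHIFT) - 1)

static uint32_t demo_tag(uint32_t val, uint32_t cnt)
{
        return (val & DEMO_VAL_MASK) | ((cnt & 3) << DEMO_SHIFT);
}

static void demo_write(uint32_t half[2], uint32_t *cnt, uint64_t val)
{
        uint32_t c = ++(*cnt);

        half[0] = demo_tag(val & DEMO_VAL_MASK, c);                  /* bottom */
        half[1] = demo_tag((val >> DEMO_SHIFT) & DEMO_VAL_MASK, c);  /* top    */
}

static bool demo_read(const uint32_t half[2], uint64_t *val)
{
        uint32_t c0 = half[0] >> DEMO_SHIFT;
        uint32_t c1 = half[1] >> DEMO_SHIFT;

        if (c0 != c1)           /* torn read: caller should retry */
                return false;
        *val = ((uint64_t)(half[1] & DEMO_VAL_MASK) << DEMO_SHIFT) |
               (half[0] & DEMO_VAL_MASK);
        return true;
}

int main(void)
{
        uint32_t half[2], cnt = 0;
        uint64_t out;

        demo_write(half, &cnt, 123456789012ULL);
        if (demo_read(half, &out))
                printf("read back %llu\n", (unsigned long long)out);
        return 0;
}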
708 *ret = local64_read(&t->time); in rb_time_read()
713 local64_set(&t->time, val); in rb_time_set()
728 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
729 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
740 commit = local_read(&page->page->commit); in verify_event()
741 write = local_read(&page->write); in verify_event()
742 if (addr >= (unsigned long)&page->page->data[commit] && in verify_event()
743 addr < (unsigned long)&page->page->data[write]) in verify_event()
746 next = rb_list_head(page->list.next); in verify_event()
780 * ring_buffer_event_time_stamp - return the event's current time stamp
799 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
804 if (event->type_len == RINGBUF_TYPE_TIME_STAMP) { in ring_buffer_event_time_stamp()
806 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
809 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
815 if (likely(--nest < MAX_NEST)) in ring_buffer_event_time_stamp()
816 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
823 if (!rb_time_read(&cpu_buffer->write_stamp, &ts)) in ring_buffer_event_time_stamp()
825 ts = rb_time_stamp(cpu_buffer->buffer); in ring_buffer_event_time_stamp()
831 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
839 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
843 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
855 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
856 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
857 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
862 cnt -= lost; in ring_buffer_nr_dirty_pages()
870 return cnt - read; in ring_buffer_nr_dirty_pages()
875 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
879 nr_pages = cpu_buffer->nr_pages; in full_hit()
884 * Add one as dirty will never equal nr_pages, as the sub-buffer the writer is on is not counted as dirty. in full_hit()
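/*
 * Illustrative sketch (not from ring_buffer.c): the "buffer percent"
 * watermark check sketched by full_hit() above. A waiter asking for
 * full = 50 is only woken once at least half of the sub-buffers carry
 * unread data. demo_full_hit() is a stand-in name for this example.
 */
#include <stdio.h>
#include <stdbool.h>

static bool demo_full_hit(unsigned long nr_pages, unsigned long dirty, int full)
{
        if (!nr_pages || !full)
                return true;
        /* +1 because the page the writer is on never counts as dirty */
        return (dirty + 1) * 100 >= (unsigned long)full * nr_pages;
}

int main(void)
{
        printf("%d\n", demo_full_hit(16, 3, 50));       /* 0: only about 25% full */
        printf("%d\n", demo_full_hit(16, 8, 50));       /* 1: watermark reached   */
        return 0;
}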
894 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
903 wake_up_all(&rbwork->waiters); in rb_wake_up_waiters()
904 if (rbwork->full_waiters_pending || rbwork->wakeup_full) { in rb_wake_up_waiters()
910 raw_spin_lock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
911 rbwork->wakeup_full = false; in rb_wake_up_waiters()
912 rbwork->full_waiters_pending = false; in rb_wake_up_waiters()
915 cpu_buffer->shortest_full = 0; in rb_wake_up_waiters()
916 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
918 wake_up_all(&rbwork->full_waiters); in rb_wake_up_waiters()
923 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
944 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
946 if (WARN_ON_ONCE(!buffer->buffers)) in ring_buffer_wake_waiters()
951 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
955 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
959 irq_work_queue(&rbwork->work); in ring_buffer_wake_waiters()
971 cpu_buffer = buffer->buffers[cpu]; in rb_watermark_hit()
980 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
981 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in rb_watermark_hit()
984 if (!ret && (!cpu_buffer->shortest_full || in rb_watermark_hit()
985 cpu_buffer->shortest_full > full)) { in rb_watermark_hit()
986 cpu_buffer->shortest_full = full; in rb_watermark_hit()
988 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
1024 rbwork->full_waiters_pending = true; in rb_wait_cond()
1026 rbwork->waiters_pending = true; in rb_wait_cond()
1048 * ring_buffer_wait - wait for input to the ring buffer
1076 rbwork = &buffer->irq_work; in ring_buffer_wait()
1080 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
1081 return -ENODEV; in ring_buffer_wait()
1082 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
1083 rbwork = &cpu_buffer->irq_work; in ring_buffer_wait()
1087 waitq = &rbwork->full_waiters; in ring_buffer_wait()
1089 waitq = &rbwork->waiters; in ring_buffer_wait()
1098 * ring_buffer_poll_wait - poll on buffer input
1119 rbwork = &buffer->irq_work; in ring_buffer_poll_wait()
1122 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1125 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1126 rbwork = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1132 poll_wait(filp, &rbwork->full_waiters, poll_table); in ring_buffer_poll_wait()
1134 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_poll_wait()
1135 if (!cpu_buffer->shortest_full || in ring_buffer_poll_wait()
1136 cpu_buffer->shortest_full > full) in ring_buffer_poll_wait()
1137 cpu_buffer->shortest_full = full; in ring_buffer_poll_wait()
1138 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_poll_wait()
1154 rbwork->full_waiters_pending = true; in ring_buffer_poll_wait()
1158 poll_wait(filp, &rbwork->waiters, poll_table); in ring_buffer_poll_wait()
1159 rbwork->waiters_pending = true; in ring_buffer_poll_wait()
1190 atomic_inc(&__b->buffer->record_disabled); \
1192 atomic_inc(&b->record_disabled); \
1205 /* Skip retpolines :-( */ in rb_time_stamp()
1206 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1209 ts = buffer->clock(); in rb_time_stamp()
1251 * ASCII art, the reader sets its old page to point to the next
 * head->list->prev->next		bit 1		bit 0
 * 					-------		-------
 * Normal page				 0		 0
 * Points to head page			 0		 1
 * New head page			 1		 0
 *
 * [pointer diagram, damaged in this listing: the tail page T points to the
 *  next page N with the HEAD flag set in the pointer, while the reader page
 *  R has just been swapped in, which is why the prev pointer of the head
 *  page cannot be trusted.]
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
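/*
 * Illustrative sketch (not from ring_buffer.c): keeping flags in the low
 * bits of an aligned list pointer, which is what rb_list_head() and
 * rb_is_head_page() below mask off and test. The DEMO_* values simply
 * mirror the 2-bit table above; demo_list and demo_list_head() are made up
 * for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_HEAD          1UL     /* bit 0: pointer goes to the head page */
#define DEMO_PAGE_UPDATE        2UL     /* bit 1: head page is being moved      */
#define DEMO_FLAG_MASK          3UL

struct demo_list { struct demo_list *next; };

static struct demo_list *demo_list_head(struct demo_list *ptr)
{
        return (struct demo_list *)((uintptr_t)ptr & ~DEMO_FLAG_MASK);
}

int main(void)
{
        struct demo_list head_page = { NULL };
        /* tag the pointer: "the page after me is the head page" */
        struct demo_list *tagged =
                (struct demo_list *)((uintptr_t)&head_page | DEMO_PAGE_HEAD);

        printf("flags: %lu, real pointer recovered: %d\n",
               (unsigned long)((uintptr_t)tagged & DEMO_FLAG_MASK),
               demo_list_head(tagged) == &head_page);
        return 0;
}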
1315 * rb_list_head - remove any bit
1325 * rb_is_head_page - test if the given page is the head page
1337 val = (unsigned long)list->next; in rb_is_head_page()
1339 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) in rb_is_head_page()
1354 struct list_head *list = page->list.prev; in rb_is_reader_page()
1356 return rb_list_head(list->next) != &page->list; in rb_is_reader_page()
1360 * rb_set_list_to_head - set a list_head to be pointing to head.
1366 ptr = (unsigned long *)&list->next; in rb_set_list_to_head()
1372 * rb_head_page_activate - sets up head page
1378 head = cpu_buffer->head_page; in rb_head_page_activate()
1385 rb_set_list_to_head(head->list.prev); in rb_head_page_activate()
1390 unsigned long *ptr = (unsigned long *)&list->next; in rb_list_head_clear()
1396 * rb_head_page_deactivate - clears head page ptr (for free list)
1404 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1406 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1416 unsigned long val = (unsigned long)&head->list; in rb_head_page_set()
1419 list = &prev->list; in rb_head_page_set()
1423 ret = cmpxchg((unsigned long *)&list->next, in rb_head_page_set()
1462 struct list_head *p = rb_list_head((*bpage)->list.next); in rb_inc_page()
1475 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1479 list = cpu_buffer->pages; in rb_set_head_page()
1480 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1483 page = head = cpu_buffer->head_page; in rb_set_head_page()
1492 if (rb_is_head_page(page, page->list.prev)) { in rb_set_head_page()
1493 cpu_buffer->head_page = page; in rb_set_head_page()
1508 unsigned long *ptr = (unsigned long *)&old->list.prev->next; in rb_head_page_replace()
1514 return try_cmpxchg(ptr, &val, (unsigned long)&new->list); in rb_head_page_replace()
1518 * rb_tail_page_update - move the tail page forward
1536 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); in rb_tail_page_update()
1537 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); in rb_tail_page_update()
1550 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1565 (void)local_cmpxchg(&next_page->write, old_write, val); in rb_tail_page_update()
1566 (void)local_cmpxchg(&next_page->entries, old_entries, eval); in rb_tail_page_update()
1573 local_set(&next_page->page->commit, 0); in rb_tail_page_update()
1576 if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page)) in rb_tail_page_update()
1577 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1590 * rb_check_pages - integrity check of buffer pages
1599 * the caller should take cpu_buffer->reader_lock.
1603 struct list_head *head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1607 rb_list_head(rb_list_head(head->next)->prev) != head)) in rb_check_pages()
1611 rb_list_head(rb_list_head(head->prev)->next) != head)) in rb_check_pages()
1614 for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) { in rb_check_pages()
1616 rb_list_head(rb_list_head(tmp->next)->prev) != tmp)) in rb_check_pages()
1620 rb_list_head(rb_list_head(tmp->prev)->next) != tmp)) in rb_check_pages()
1629 bool user_thread = current->mm != NULL; in __rb_allocate_pages()
1642 return -ENOMEM; in __rb_allocate_pages()
1646 * gracefully without invoking oom-killer and the system is not in __rb_allocate_pages()
1666 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
1672 list_add(&bpage->list, pages); in __rb_allocate_pages()
1674 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); in __rb_allocate_pages()
1677 bpage->page = page_address(page); in __rb_allocate_pages()
1678 rb_init_page(bpage->page); in __rb_allocate_pages()
1690 list_del_init(&bpage->list); in __rb_allocate_pages()
1696 return -ENOMEM; in __rb_allocate_pages()
1707 return -ENOMEM; in rb_allocate_pages()
1714 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1717 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1737 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1738 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1739 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1740 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1741 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1742 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1743 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1744 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1745 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1746 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1755 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1759 bpage->page = page_address(page); in rb_allocate_cpu_buffer()
1760 rb_init_page(bpage->page); in rb_allocate_cpu_buffer()
1762 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1763 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1769 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1770 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1771 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1778 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1787 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1790 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
1792 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1798 list_del_init(&bpage->list); in rb_free_cpu_buffer()
1805 free_page((unsigned long)cpu_buffer->free_page); in rb_free_cpu_buffer()
1811 * __ring_buffer_alloc - allocate a new ring_buffer
1836 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1840 buffer->flags = flags; in __ring_buffer_alloc()
1841 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1842 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1844 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1845 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1851 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1854 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1856 if (!buffer->buffers) in __ring_buffer_alloc()
1860 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1861 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1862 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1865 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in __ring_buffer_alloc()
1869 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1875 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1876 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1878 kfree(buffer->buffers); in __ring_buffer_alloc()
1881 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1890 * ring_buffer_free - free a ring buffer.
1898 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
1900 irq_work_sync(&buffer->irq_work.work); in ring_buffer_free()
1903 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1905 kfree(buffer->buffers); in ring_buffer_free()
1906 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1915 buffer->clock = clock; in ring_buffer_set_clock()
1920 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
1925 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
1932 return local_read(&bpage->entries) & RB_WRITE_MASK; in rb_page_entries()
1937 return local_read(&bpage->write) & RB_WRITE_MASK; in rb_page_write()
1952 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1953 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1963 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1969 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1970 tail_page = rb_list_head(tail_page->next); in rb_remove_pages()
1974 first_page = list_entry(rb_list_head(to_remove->next), in rb_remove_pages()
1978 to_remove = rb_list_head(to_remove)->next; in rb_remove_pages()
1982 cpu_buffer->pages_removed += nr_removed; in rb_remove_pages()
1984 next_page = rb_list_head(to_remove)->next; in rb_remove_pages()
1991 tail_page->next = (struct list_head *)((unsigned long)next_page | in rb_remove_pages()
1994 next_page->prev = tail_page; in rb_remove_pages()
1997 cpu_buffer->pages = next_page; in rb_remove_pages()
2001 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
2005 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
2006 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2008 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
2030 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
2031 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
2032 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
2040 nr_removed--; in rb_remove_pages()
2052 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
2058 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2066 * 2. We cmpxchg the prev_page->next to point from head page to the in rb_insert_pages()
2068 * 3. Finally, we update the head->prev to the end of new list. in rb_insert_pages()
2075 while (retries--) { in rb_insert_pages()
2083 head_page = &hpage->list; in rb_insert_pages()
2084 prev_page = head_page->prev; in rb_insert_pages()
2086 first_page = pages->next; in rb_insert_pages()
2087 last_page = pages->prev; in rb_insert_pages()
2092 last_page->next = head_page_with_bit; in rb_insert_pages()
2093 first_page->prev = prev_page; in rb_insert_pages()
2095 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); in rb_insert_pages()
2103 head_page->prev = last_page; in rb_insert_pages()
2116 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2121 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2123 list_del_init(&bpage->list); in rb_insert_pages()
2134 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2138 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2141 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2149 complete(&cpu_buffer->update_done); in update_pages_handler()
2153 * ring_buffer_resize - resize the ring buffer
2170 * Always succeed at resizing a non-existent buffer: in ring_buffer_resize()
2177 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2187 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2188 atomic_inc(&buffer->resizing); in ring_buffer_resize()
2197 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2198 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2199 err = -EBUSY; in ring_buffer_resize()
2206 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2208 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2209 cpu_buffer->nr_pages; in ring_buffer_resize()
2213 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2219 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2220 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2221 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2223 err = -ENOMEM; in ring_buffer_resize()
2237 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2238 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2244 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2251 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2253 update_pages_handler(&cpu_buffer->update_pages_work); in ring_buffer_resize()
2261 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2262 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2266 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2267 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2272 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2274 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2282 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2283 err = -EBUSY; in ring_buffer_resize()
2287 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2288 cpu_buffer->nr_pages; in ring_buffer_resize()
2290 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2291 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2292 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2293 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2294 err = -ENOMEM; in ring_buffer_resize()
2312 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2313 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2317 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2329 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
2330 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
2341 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2342 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_resize()
2344 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_resize()
2346 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
2349 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2350 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2357 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2358 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2360 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2363 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
2365 list_del_init(&bpage->list); in ring_buffer_resize()
2370 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2371 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2378 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
2380 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2382 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2383 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
2389 return bpage->page->data + index; in __rb_page_index()
2395 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
2396 cpu_buffer->reader_page->read); in rb_reader_event()
2403 struct buffer_page *iter_head_page = iter->head_page; in rb_iter_head_event()
2407 if (iter->head != iter->next_event) in rb_iter_head_event()
2408 return iter->event; in rb_iter_head_event()
2419 if (iter->head > commit - 8) in rb_iter_head_event()
2422 event = __rb_page_index(iter_head_page, iter->head); in rb_iter_head_event()
2431 if ((iter->head + length) > commit || length > BUF_PAGE_SIZE) in rb_iter_head_event()
2435 memcpy(iter->event, event, length); in rb_iter_head_event()
2443 if (iter->page_stamp != iter_head_page->page->time_stamp || in rb_iter_head_event()
2447 iter->next_event = iter->head + length; in rb_iter_head_event()
2448 return iter->event; in rb_iter_head_event()
2451 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_head_event()
2452 iter->head = 0; in rb_iter_head_event()
2453 iter->next_event = 0; in rb_iter_head_event()
2454 iter->missed_events = 1; in rb_iter_head_event()
2467 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
2475 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; in rb_event_index()
2480 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
2488 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
2489 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
2491 rb_inc_page(&iter->head_page); in rb_inc_iter()
2493 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_inc_iter()
2494 iter->head = 0; in rb_inc_iter()
2495 iter->next_event = 0; in rb_inc_iter()
2499 * rb_handle_head_page - writer hit the head page
2503 * -1 on error
2527 * NORMAL - an interrupt already moved it for us in rb_handle_head_page()
2528 * HEAD - we are the first to get here. in rb_handle_head_page()
2529 * UPDATE - we are the interrupt interrupting in rb_handle_head_page()
2531 * MOVED - a reader on another CPU moved the next in rb_handle_head_page()
2543 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2544 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
2545 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
2577 return -1; in rb_handle_head_page()
2602 * HEAD - an interrupt came in and already set it. in rb_handle_head_page()
2603 * NORMAL - One of two things: in rb_handle_head_page()
2615 return -1; in rb_handle_head_page()
2631 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2654 return -1; in rb_handle_head_page()
2664 struct buffer_page *tail_page = info->tail_page; in rb_reset_tail()
2666 unsigned long length = info->length; in rb_reset_tail()
2679 tail_page->real_end = 0; in rb_reset_tail()
2681 local_sub(length, &tail_page->write); in rb_reset_tail()
2692 tail_page->real_end = tail; in rb_reset_tail()
2706 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { in rb_reset_tail()
2716 local_sub(length, &tail_page->write); in rb_reset_tail()
2721 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; in rb_reset_tail()
2722 event->type_len = RINGBUF_TYPE_PADDING; in rb_reset_tail()
2724 event->time_delta = 1; in rb_reset_tail()
2727 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2729 /* Make sure the padding is visible before the tail_page->write update */ in rb_reset_tail()
2733 length = (tail + length) - BUF_PAGE_SIZE; in rb_reset_tail()
2734 local_sub(length, &tail_page->write); in rb_reset_tail()
2746 struct buffer_page *tail_page = info->tail_page; in rb_move_tail()
2747 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2748 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2762 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2780 if (rb_is_head_page(next_page, &tail_page->list)) { in rb_move_tail()
2786 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2791 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
2792 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2814 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2815 cpu_buffer->tail_page) && in rb_move_tail()
2816 (cpu_buffer->commit_page == in rb_move_tail()
2817 cpu_buffer->reader_page))) { in rb_move_tail()
2818 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2833 local_inc(&cpu_buffer->committing); in rb_move_tail()
2836 return ERR_PTR(-EAGAIN); in rb_move_tail()
2850 event->type_len = RINGBUF_TYPE_TIME_STAMP; in rb_add_time_stamp()
2852 event->type_len = RINGBUF_TYPE_TIME_EXTEND; in rb_add_time_stamp()
2856 event->time_delta = delta & TS_MASK; in rb_add_time_stamp()
2857 event->array[0] = delta >> TS_SHIFT; in rb_add_time_stamp()
2860 event->time_delta = 0; in rb_add_time_stamp()
2861 event->array[0] = 0; in rb_add_time_stamp()
2881 (unsigned long long)info->delta, in rb_check_timestamp()
2882 (unsigned long long)info->ts, in rb_check_timestamp()
2883 (unsigned long long)info->before, in rb_check_timestamp()
2884 (unsigned long long)info->after, in rb_check_timestamp()
2885 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), in rb_check_timestamp()
2899 bool abs = info->add_timestamp & in rb_add_timestamp()
2902 if (unlikely(info->delta > (1ULL << 59))) { in rb_add_timestamp()
2907 if (abs && (info->ts & TS_MSB)) { in rb_add_timestamp()
2908 info->delta &= ABS_TS_MASK; in rb_add_timestamp()
2911 } else if (info->before == info->after && info->before > info->ts) { in rb_add_timestamp()
2921 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", in rb_add_timestamp()
2922 info->before, info->ts); in rb_add_timestamp()
2927 info->delta = 0; in rb_add_timestamp()
2929 *event = rb_add_time_stamp(*event, info->delta, abs); in rb_add_timestamp()
2930 *length -= RB_LEN_TIME_EXTEND; in rb_add_timestamp()
2935 * rb_update_event - update event type and data
2950 unsigned length = info->length; in rb_update_event()
2951 u64 delta = info->delta; in rb_update_event()
2952 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
2955 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
2961 if (unlikely(info->add_timestamp)) in rb_update_event()
2964 event->time_delta = delta; in rb_update_event()
2965 length -= RB_EVNT_HDR_SIZE; in rb_update_event()
2967 event->type_len = 0; in rb_update_event()
2968 event->array[0] = length; in rb_update_event()
2970 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); in rb_update_event()
3018 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
3024 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { in rb_try_to_discard()
3026 local_read(&bpage->write) & ~RB_WRITE_MASK; in rb_try_to_discard()
3040 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
3060 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) { in rb_try_to_discard()
3062 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
3073 local_inc(&cpu_buffer->committing); in rb_start_commit()
3074 local_inc(&cpu_buffer->commits); in rb_start_commit()
3091 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
3093 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
3094 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
3097 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3103 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3104 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3105 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
3110 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3114 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3115 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3117 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3130 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3139 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3143 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3146 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3149 local_dec(&cpu_buffer->committing); in rb_end_commit()
3159 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3160 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3161 local_inc(&cpu_buffer->committing); in rb_end_commit()
3172 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; in rb_event_discard()
3173 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_discard()
3175 if (!event->time_delta) in rb_event_discard()
3176 event->time_delta = 1; in rb_event_discard()
3181 local_inc(&cpu_buffer->entries); in rb_commit()
3188 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
3189 buffer->irq_work.waiters_pending = false; in rb_wakeups()
3191 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
3194 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3195 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3197 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3200 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3203 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3206 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3209 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3211 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3214 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3215 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3217 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3254 * 101 - 1 = 100, and 101 & 100 = 100 (clears bit 0)
3257 * 1010 - 1 = 1001, and 1010 & 1001 = 1000 (clears bit 1)
3292 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3295 bit = RB_CTX_NORMAL - bit; in trace_recursive_lock()
3297 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3304 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
3310 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3311 cpu_buffer->current_context = val; in trace_recursive_lock()
3319 cpu_buffer->current_context &= in trace_recursive_unlock()
3320 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
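/*
 * Illustrative sketch (not from ring_buffer.c): the bit trick used by
 * trace_recursive_unlock() above (with the default nest of 0). Each nesting
 * context sets one bit, with deeper contexts using lower bits, so
 * "val & (val - 1)" always clears exactly the innermost context's bit on
 * unlock. demo_unlock() is a stand-in name for this example.
 */
#include <stdio.h>

static unsigned int demo_unlock(unsigned int val)
{
        return val & (val - 1);         /* clear the least significant set bit */
}

int main(void)
{
        unsigned int val = 0;

        val |= 1 << 4;                  /* normal context starts writing    */
        val |= 1 << 2;                  /* ... interrupted by an IRQ writer */
        printf("locked:   %#x\n", val);                 /* 0x14 */
        printf("irq done: %#x\n", demo_unlock(val));    /* 0x10 */
        return 0;
}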
3327 * ring_buffer_nest_start - Allow to trace while nested
3347 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3349 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
3353 * ring_buffer_nest_end - Allow to trace while nested
3366 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3368 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
3373 * ring_buffer_unlock_commit - commit a reserved event
3385 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3411 ts = bpage->time_stamp; in dump_buffer_page()
3416 event = (struct ring_buffer_event *)(bpage->data + e); in dump_buffer_page()
3418 switch (event->type_len) { in dump_buffer_page()
3433 ts += event->time_delta; in dump_buffer_page()
3434 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); in dump_buffer_page()
3438 ts += event->time_delta; in dump_buffer_page()
3439 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); in dump_buffer_page()
3465 bpage = info->tail_page->page; in check_buffer()
3469 tail = local_read(&bpage->commit); in check_buffer()
3470 } else if (info->add_timestamp & in check_buffer()
3480 if (tail <= 8 || tail > local_read(&bpage->commit)) in check_buffer()
3489 ts = bpage->time_stamp; in check_buffer()
3493 event = (struct ring_buffer_event *)(bpage->data + e); in check_buffer()
3495 switch (event->type_len) { in check_buffer()
3508 if (event->time_delta == 1) in check_buffer()
3512 ts += event->time_delta; in check_buffer()
3519 if ((full && ts > info->ts) || in check_buffer()
3520 (!full && ts + info->delta != info->ts)) { in check_buffer()
3526 atomic_inc(&cpu_buffer->record_disabled); in check_buffer()
3530 cpu_buffer->cpu, in check_buffer()
3531 ts + info->delta, info->ts, info->delta, in check_buffer()
3532 info->before, info->after, in check_buffer()
3536 /* Do not re-enable checking */ in check_buffer()
3560 /* Don't let the compiler play games with cpu_buffer->tail_page */ in __rb_reserve_next()
3561 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
3563 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; in __rb_reserve_next()
3565 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3566 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3568 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3570 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { in __rb_reserve_next()
3571 info->delta = info->ts; in __rb_reserve_next()
3579 /* Use the sub-buffer timestamp */ in __rb_reserve_next()
3580 info->delta = 0; in __rb_reserve_next()
3581 } else if (unlikely(!a_ok || !b_ok || info->before != info->after)) { in __rb_reserve_next()
3582 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
3583 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
3585 info->delta = info->ts - info->after; in __rb_reserve_next()
3586 if (unlikely(test_time_stamp(info->delta))) { in __rb_reserve_next()
3587 info->add_timestamp |= RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
3588 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
3593 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
3595 /*C*/ write = local_add_return(info->length, &tail_page->write); in __rb_reserve_next()
3600 tail = write - info->length; in __rb_reserve_next()
3610 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
3617 if (likely(!(info->add_timestamp & in __rb_reserve_next()
3620 info->delta = info->ts - info->after; in __rb_reserve_next()
3623 info->delta = info->ts; in __rb_reserve_next()
3627 /* SLOW PATH - Interrupted between A and C */ in __rb_reserve_next()
3630 a_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3639 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3640 rb_time_set(&cpu_buffer->before_stamp, ts); in __rb_reserve_next()
3643 /*E*/ a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3647 /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && in __rb_reserve_next()
3648 info->after == info->before && info->after < ts) { in __rb_reserve_next()
3651 * safe to use info->after for the delta as it in __rb_reserve_next()
3652 * matched info->before and is still valid. in __rb_reserve_next()
3654 info->delta = ts - info->after; in __rb_reserve_next()
3664 info->delta = 0; in __rb_reserve_next()
3666 info->ts = ts; in __rb_reserve_next()
3667 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; in __rb_reserve_next()
3674 if (unlikely(!tail && !(info->add_timestamp & in __rb_reserve_next()
3676 info->delta = 0; in __rb_reserve_next()
3683 local_inc(&tail_page->entries); in __rb_reserve_next()
3690 tail_page->page->time_stamp = info->ts; in __rb_reserve_next()
3693 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
3725 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3726 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
3727 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
3734 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3761 if (unlikely(PTR_ERR(event) == -EAGAIN)) { in rb_reserve_next_event()
3763 info.length -= RB_LEN_TIME_EXTEND; in rb_reserve_next_event()
3775 * ring_buffer_lock_reserve - reserve a part of the buffer
3799 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
3804 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3807 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3809 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
3843 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
3849 if (likely(bpage->page == (void *)addr)) { in rb_decrement_entry()
3850 local_dec(&bpage->entries); in rb_decrement_entry()
3861 if (bpage->page == (void *)addr) { in rb_decrement_entry()
3862 local_dec(&bpage->entries); in rb_decrement_entry()
3873 * ring_buffer_discard_commit - discard an event that has not been committed
3901 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3908 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3925 * ring_buffer_write - write data to the buffer without reserving
3944 int ret = -EBUSY; in ring_buffer_write()
3949 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3954 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3957 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3959 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
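/*
 * Illustrative sketch (not part of ring_buffer.c): how a producer in kernel
 * code might use the reserve/commit pair documented above. This is a
 * hand-written example, not taken from the tree; error handling is minimal,
 * and the exact ring_buffer_unlock_commit() signature has varied across
 * kernel versions (older kernels also pass the event), so check the headers
 * of the kernel you build against. ring_buffer_write(buffer, len, data) is
 * the one-shot alternative when the payload is already assembled.
 */
#include <linux/ring_buffer.h>
#include <linux/string.h>
#include <linux/errno.h>

static int demo_produce(struct trace_buffer *buffer, const char *msg, int len)
{
        struct ring_buffer_event *event;
        void *data;

        /* Step 1: reserve space; the pair is expected to run quickly. */
        event = ring_buffer_lock_reserve(buffer, len);
        if (!event)
                return -EBUSY;          /* buffer full or recording disabled */

        /* Step 2: copy the payload straight into the reserved slot. */
        data = ring_buffer_event_data(event);
        memcpy(data, msg, len);

        /* Step 3: make the event visible to readers. */
        ring_buffer_unlock_commit(buffer);

        return 0;
}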
3994 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3996 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
4003 if (reader->read != rb_page_commit(reader)) in rb_per_cpu_empty()
4029 * ring_buffer_record_disable - stop all writes into the buffer
4039 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
4044 * ring_buffer_record_enable - enable writes to the buffer
4052 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
4057 * ring_buffer_record_off - stop all writes into the buffer
4072 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
4075 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_off()
4080 * ring_buffer_record_on - restart writes into the buffer
4095 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4098 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_on()
4103 * ring_buffer_record_is_on - return true if the ring buffer can write
4110 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
4114 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4126 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
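/*
 * Illustrative sketch (not from ring_buffer.c): why record_disabled can be
 * both a nesting counter and an on/off switch. Temporary disables increment
 * and decrement the low bits, while record_off()/record_on() toggle a
 * dedicated high bit (RB_BUFFER_OFF in the real code; the value used here is
 * an assumption for the demo), which is what record_is_set_on() tests.
 */
#include <stdio.h>

#define DEMO_BUFFER_OFF (1U << 20)      /* assumed flag bit for the demo */

int main(void)
{
        unsigned int record_disabled = 0;

        record_disabled |= DEMO_BUFFER_OFF;     /* ring_buffer_record_off()     */
        printf("is_on:     %d\n", record_disabled == 0);                   /* 0 */

        record_disabled++;                      /* ring_buffer_record_disable() */
        record_disabled &= ~DEMO_BUFFER_OFF;    /* ring_buffer_record_on()      */
        printf("is_set_on: %d\n", !(record_disabled & DEMO_BUFFER_OFF));   /* 1 */
        printf("is_on:     %d\n", record_disabled == 0);   /* 0: still nested-disabled */
        return 0;
}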
4130 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4143 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4146 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4147 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4152 * ring_buffer_record_enable_cpu - enable writes to the buffer
4163 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4166 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4167 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
4180 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4181 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
4185 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4196 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4199 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4200 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4205 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4206 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4210 ret = bpage->page->time_stamp; in ring_buffer_oldest_event_ts()
4211 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4218 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4227 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4230 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4231 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4238 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4246 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4249 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4256 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4266 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4269 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4270 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4277 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4289 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4292 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4293 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4300 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4311 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4314 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4315 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
4322 * ring_buffer_read_events_cpu - get the number of events successfully read
4331 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4334 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4335 return cpu_buffer->read; in ring_buffer_read_events_cpu()
4340 * ring_buffer_entries - get the number of entries in a buffer
4354 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4363 * ring_buffer_overruns - get the number of overruns in buffer
4377 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4378 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
4387 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
4390 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
4391 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
4392 iter->next_event = iter->head; in rb_iter_reset()
4394 iter->cache_reader_page = iter->head_page; in rb_iter_reset()
4395 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
4396 iter->cache_pages_removed = cpu_buffer->pages_removed; in rb_iter_reset()
4398 if (iter->head) { in rb_iter_reset()
4399 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
4400 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
4402 iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_reset()
4403 iter->page_stamp = iter->read_stamp; in rb_iter_reset()
4408 * ring_buffer_iter_reset - reset an iterator
4422 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
4424 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4426 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4431 * ring_buffer_iter_empty - check if an iterator has no more to read
4445 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
4446 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
4447 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
4448 commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
4449 commit_ts = commit_page->page->time_stamp; in ring_buffer_iter_empty()
4462 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
4463 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); in ring_buffer_iter_empty()
4471 return ((iter->head_page == commit_page && iter->head >= commit) || in ring_buffer_iter_empty()
4472 (iter->head_page == reader && commit_page == head_page && in ring_buffer_iter_empty()
4473 head_page->read == commit && in ring_buffer_iter_empty()
4474 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
4484 switch (event->type_len) { in rb_update_read_stamp()
4490 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
4495 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
4496 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
4500 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
4514 switch (event->type_len) { in rb_update_iter_read_stamp()
4520 iter->read_stamp += delta; in rb_update_iter_read_stamp()
4525 delta = rb_fix_abs_ts(delta, iter->read_stamp); in rb_update_iter_read_stamp()
4526 iter->read_stamp = delta; in rb_update_iter_read_stamp()
4530 iter->read_stamp += event->time_delta; in rb_update_iter_read_stamp()
4534 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
4548 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
4562 reader = cpu_buffer->reader_page; in rb_get_reader_page()
4565 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
4570 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
4575 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
4585 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
4586 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
4587 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
4588 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
4597 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
4598 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
4601 * cpu_buffer->pages just needs to point to the buffer, it in rb_get_reader_page()
4605 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
4608 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
4620 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
4633 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
4646 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
4647 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
4649 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
4652 cpu_buffer->reader_page = reader; in rb_get_reader_page()
4653 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
4655 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
4656 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
4657 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
4664 if (reader && reader->read == 0) in rb_get_reader_page()
4665 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
4667 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
4719 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in rb_advance_reader()
4720 cpu_buffer->read++; in rb_advance_reader()
4725 cpu_buffer->reader_page->read += length; in rb_advance_reader()
4726 cpu_buffer->read_bytes += length; in rb_advance_reader()
4733 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
4736 if (iter->head == iter->next_event) { in rb_advance_iter()
4742 iter->head = iter->next_event; in rb_advance_iter()
4747 if (iter->next_event >= rb_page_size(iter->head_page)) { in rb_advance_iter()
4749 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
4755 rb_update_iter_read_stamp(iter, iter->event); in rb_advance_iter()
4760 return cpu_buffer->lost_events; in rb_lost_events()
4789 switch (event->type_len) { in rb_buffer_peek()
4811 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); in rb_buffer_peek()
4812 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4813 cpu_buffer->cpu, ts); in rb_buffer_peek()
4821 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
4822 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4823 cpu_buffer->cpu, ts); in rb_buffer_peek()
4848 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
4849 buffer = cpu_buffer->buffer; in rb_iter_peek()
4856 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
4857 iter->cache_reader_page != cpu_buffer->reader_page || in rb_iter_peek()
4858 iter->cache_pages_removed != cpu_buffer->pages_removed)) in rb_iter_peek()
4878 if (iter->head >= rb_page_size(iter->head_page)) { in rb_iter_peek()
4887 switch (event->type_len) { in rb_iter_peek()
4904 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp); in rb_iter_peek()
4905 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4906 cpu_buffer->cpu, ts); in rb_iter_peek()
4914 *ts = iter->read_stamp + event->time_delta; in rb_iter_peek()
4916 cpu_buffer->cpu, ts); in rb_iter_peek()
4931 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
4944 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
4948 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
4956 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
4960 * ring_buffer_peek - peek at the next event to be read
4973 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
4978 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
4985 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
4990 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
4996 /** ring_buffer_iter_dropped - report if there are dropped events
5003 bool ret = iter->missed_events != 0; in ring_buffer_iter_dropped()
5005 iter->missed_events = 0; in ring_buffer_iter_dropped()
5011 * ring_buffer_iter_peek - peek at the next event to be read
5021 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
5026 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5028 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5030 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_iter_peek()
5037 * ring_buffer_consume - return an event and consume it
5060 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
5063 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5069 cpu_buffer->lost_events = 0; in ring_buffer_consume()
5079 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_consume()
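/*
 * Illustrative sketch (not part of ring_buffer.c): a consuming reader built
 * on ring_buffer_consume() as documented above. Hand-written example for a
 * single CPU; "handle_record" is a made-up callback, and the returned event
 * must not be used after the next consume or a reset of that CPU buffer.
 */
#include <linux/ring_buffer.h>
#include <linux/printk.h>
#include <linux/types.h>

static void demo_drain_cpu(struct trace_buffer *buffer, int cpu,
                           void (*handle_record)(void *data, size_t len, u64 ts))
{
        struct ring_buffer_event *event;
        unsigned long lost = 0;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
                if (lost)
                        pr_info("lost %lu events on cpu %d\n", lost, cpu);
                handle_record(ring_buffer_event_data(event),
                              ring_buffer_event_length(event), ts);
        }
}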
5087 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5113 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
5121 iter->event = kmalloc(BUF_PAGE_SIZE, flags); in ring_buffer_read_prepare()
5122 if (!iter->event) { in ring_buffer_read_prepare()
5127 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5129 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
5131 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
5138 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5152 * ring_buffer_read_start - start a non consuming read of the buffer
5171 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5173 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5174 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5176 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5177 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5182 * ring_buffer_read_finish - finish reading the iterator of the buffer
5185 * This re-enables the recording to the buffer, and frees the
5191 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
5200 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5202 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5204 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5205 kfree(iter->event); in ring_buffer_read_finish()
5211 * ring_buffer_iter_advance - advance the iterator to the next location
5219 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance()
5222 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5226 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
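The prepare/sync/start/finish calls above make up the non-consuming read path; a minimal sketch of how they are meant to be sequenced for one CPU follows. The helper name, the GFP_KERNEL flag, and walking until ring_buffer_iter_peek() returns NULL are assumptions about a typical caller.

	#include <linux/printk.h>
	#include <linux/ring_buffer.h>

	/* Hypothetical helper: walk all events on @cpu without consuming them. */
	static int example_iterate_cpu(struct trace_buffer *buffer, int cpu)
	{
		struct ring_buffer_iter *iter;
		struct ring_buffer_event *event;
		u64 ts;

		/* Disables resizing of the cpu buffer and allocates the iterator. */
		iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		/* Waits for in-flight writers before the iterator is started. */
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter);

		while ((event = ring_buffer_iter_peek(iter, &ts))) {
			/* The event is still owned by the buffer; only look at it. */
			pr_info("cpu %d: ts=%llu len=%u\n",
				cpu, (unsigned long long)ts,
				ring_buffer_event_length(event));
			ring_buffer_iter_advance(iter);
		}

		if (ring_buffer_iter_dropped(iter))
			pr_info("cpu %d: writer overtook the iterator, events dropped\n", cpu);

		/* Re-enables resizing and frees the iterator. */
		ring_buffer_read_finish(iter);
		return 0;
	}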
5231 * ring_buffer_size - return the size of the ring buffer (in bytes)
5239 * BUF_PAGE_SIZE * buffer->nr_pages in ring_buffer_size()
5243 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5246 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
5252 local_set(&page->write, 0); in rb_clear_buffer_page()
5253 local_set(&page->entries, 0); in rb_clear_buffer_page()
5254 rb_init_page(page->page); in rb_clear_buffer_page()
5255 page->read = 0; in rb_clear_buffer_page()
5265 cpu_buffer->head_page in rb_reset_cpu()
5266 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5267 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
5268 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
5272 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5273 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5275 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5276 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5277 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
5279 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5280 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5281 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5282 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5283 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5284 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5285 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5286 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5287 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
5288 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
5289 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
5290 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
5291 cpu_buffer->read = 0; in rb_reset_cpu()
5292 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
5294 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
5295 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
5297 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
5299 cpu_buffer->lost_events = 0; in rb_reset_cpu()
5300 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
5303 cpu_buffer->pages_removed = 0; in rb_reset_cpu()
5311 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5313 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
5316 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5320 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5323 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5327 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5333 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5335 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5339 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
5341 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5342 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5349 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5350 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5352 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
5360 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
5369 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5372 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5374 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5375 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5382 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5388 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
5393 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5394 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5397 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5401 * ring_buffer_reset - reset a ring buffer
5410 mutex_lock(&buffer->mutex); in ring_buffer_reset()
5413 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5415 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5416 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
5423 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5427 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
5428 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5431 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
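To make the difference in scope between the three reset entry points concrete, here is a small sketch. Pausing recording around the reset is a common-sense assumption about usage, not something the API demands, and example_reset() is a hypothetical name.

	#include <linux/ring_buffer.h>

	/* Hypothetical helper: clear a buffer while keeping writers paused. */
	static void example_reset(struct trace_buffer *buffer, int cpu)
	{
		/* Optional: stop new writes while the buffer is being cleared. */
		ring_buffer_record_disable(buffer);

		if (cpu >= 0)
			ring_buffer_reset_cpu(buffer, cpu);	/* one per-cpu buffer */
		else
			ring_buffer_reset(buffer);		/* every per-cpu buffer */

		/* ring_buffer_reset_online_cpus(buffer) would clear only the CPUs
		 * that are currently online. */

		ring_buffer_record_enable(buffer);
	}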
5436 * ring_buffer_empty - is the ring buffer empty?
5449 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5465 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5476 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5479 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5492 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5507 int ret = -EINVAL; in ring_buffer_swap_cpu()
5509 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || in ring_buffer_swap_cpu()
5510 !cpumask_test_cpu(cpu, buffer_b->cpumask)) in ring_buffer_swap_cpu()
5513 cpu_buffer_a = buffer_a->buffers[cpu]; in ring_buffer_swap_cpu()
5514 cpu_buffer_b = buffer_b->buffers[cpu]; in ring_buffer_swap_cpu()
5517 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) in ring_buffer_swap_cpu()
5520 ret = -EAGAIN; in ring_buffer_swap_cpu()
5522 if (atomic_read(&buffer_a->record_disabled)) in ring_buffer_swap_cpu()
5525 if (atomic_read(&buffer_b->record_disabled)) in ring_buffer_swap_cpu()
5528 if (atomic_read(&cpu_buffer_a->record_disabled)) in ring_buffer_swap_cpu()
5531 if (atomic_read(&cpu_buffer_b->record_disabled)) in ring_buffer_swap_cpu()
5540 atomic_inc(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
5541 atomic_inc(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
5543 ret = -EBUSY; in ring_buffer_swap_cpu()
5544 if (local_read(&cpu_buffer_a->committing)) in ring_buffer_swap_cpu()
5546 if (local_read(&cpu_buffer_b->committing)) in ring_buffer_swap_cpu()
5553 if (atomic_read(&buffer_a->resizing)) in ring_buffer_swap_cpu()
5555 if (atomic_read(&buffer_b->resizing)) in ring_buffer_swap_cpu()
5558 buffer_a->buffers[cpu] = cpu_buffer_b; in ring_buffer_swap_cpu()
5559 buffer_b->buffers[cpu] = cpu_buffer_a; in ring_buffer_swap_cpu()
5561 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
5562 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
5567 atomic_dec(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
5568 atomic_dec(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
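A hedged sketch of the snapshot-style swap this enables (the function is only built when CONFIG_RING_BUFFER_ALLOW_SWAP is set): the per-CPU buffer of a live trace buffer is exchanged with the same CPU's buffer in a spare one so it can be read at leisure. The helper name and the decision to merely warn on failure are assumptions.

	#include <linux/printk.h>
	#include <linux/ring_buffer.h>

	/* Hypothetical helper: swap @cpu's buffer from @live into @spare for offline reading. */
	static int example_snapshot_cpu(struct trace_buffer *live,
					struct trace_buffer *spare, int cpu)
	{
		int ret;

		/*
		 * -EINVAL: cpu not present or page counts differ; -EAGAIN: one
		 * side has recording disabled or is being resized; -EBUSY: a
		 * commit is in flight on that cpu. 0 means the two per-cpu
		 * buffers were exchanged.
		 */
		ret = ring_buffer_swap_cpu(live, spare, cpu);
		if (ret)
			pr_warn("cpu %d: swap failed: %d\n", cpu, ret);
		return ret;
	}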
5576 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5598 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5599 return ERR_PTR(-ENODEV); in ring_buffer_alloc_read_page()
5601 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5603 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5605 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
5606 bpage = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
5607 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
5610 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5619 return ERR_PTR(-ENOMEM); in ring_buffer_alloc_read_page()
5631 * ring_buffer_free_read_page - free an allocated read page
5645 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
5648 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5655 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5657 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
5658 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
5662 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5671 * ring_buffer_read_page - extract a page from the ring buffer
5706 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5715 int ret = -1; in ring_buffer_read_page()
5717 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
5727 len -= BUF_PAGE_HDR_SIZE; in ring_buffer_read_page()
5736 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5744 read = reader->read; in ring_buffer_read_page()
5748 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
5757 if (read || (len < (commit - read)) || in ring_buffer_read_page()
5758 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
5759 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
5771 (!read || (len < (commit - read)) || in ring_buffer_read_page()
5772 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
5775 if (len > (commit - read)) in ring_buffer_read_page()
5776 len = (commit - read); in ring_buffer_read_page()
5785 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
5796 memcpy(bpage->data + pos, rpage->data + rpos, size); in ring_buffer_read_page()
5798 len -= size; in ring_buffer_read_page()
5801 rpos = reader->read; in ring_buffer_read_page()
5813 local_set(&bpage->commit, pos); in ring_buffer_read_page()
5814 bpage->time_stamp = save_timestamp; in ring_buffer_read_page()
5820 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
5821 cpu_buffer->read_bytes += rb_page_commit(reader); in ring_buffer_read_page()
5825 bpage = reader->page; in ring_buffer_read_page()
5826 reader->page = *data_page; in ring_buffer_read_page()
5827 local_set(&reader->write, 0); in ring_buffer_read_page()
5828 local_set(&reader->entries, 0); in ring_buffer_read_page()
5829 reader->read = 0; in ring_buffer_read_page()
5837 if (reader->real_end) in ring_buffer_read_page()
5838 local_set(&bpage->commit, reader->real_end); in ring_buffer_read_page()
5842 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
5844 commit = local_read(&bpage->commit); in ring_buffer_read_page()
5852 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { in ring_buffer_read_page()
5853 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_read_page()
5855 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_read_page()
5858 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_read_page()
5865 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); in ring_buffer_read_page()
5868 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
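The three page-level calls (ring_buffer_alloc_read_page(), ring_buffer_read_page(), ring_buffer_free_read_page()) are meant to be used together; below is a minimal sketch of that flow for one CPU. Passing PAGE_SIZE for the length, asking for full pages only, and the helper name are assumptions based on how the in-tree tracer typically drives this interface.

	#include <linux/err.h>
	#include <linux/printk.h>
	#include <linux/ring_buffer.h>

	/* Hypothetical helper: pull one page worth of events off @cpu in one shot. */
	static int example_read_one_page(struct trace_buffer *buffer, int cpu)
	{
		void *page;
		int ret;

		/* May hand back a recycled page kept in cpu_buffer->free_page. */
		page = ring_buffer_alloc_read_page(buffer, cpu);
		if (IS_ERR(page))
			return PTR_ERR(page);

		/*
		 * full == 1 asks to extract only when a whole page can be
		 * handed over; on success the return value is the offset of
		 * the first unread event in the page, negative if nothing
		 * could be read.
		 */
		ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 1);
		if (ret >= 0)
			pr_info("cpu %d: page filled, data starts at offset %d\n", cpu, ret);

		ring_buffer_free_read_page(buffer, cpu, page);
		return ret < 0 ? ret : 0;
	}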
5888 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
5897 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
5898 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
5906 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
5908 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
5911 return -ENOMEM; in trace_rb_cpu_prepare()
5914 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
5982 cnt = data->cnt + (nested ? 27 : 0); in rb_write_something()
5985 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); in rb_write_something()
5993 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
5998 data->bytes_dropped += len; in rb_write_something()
6000 data->bytes_dropped_nested += len; in rb_write_something()
6007 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
6011 item->size = size; in rb_write_something()
6012 memcpy(item->str, rb_string, size); in rb_write_something()
6015 data->bytes_alloc_nested += event_len; in rb_write_something()
6016 data->bytes_written_nested += len; in rb_write_something()
6017 data->events_nested++; in rb_write_something()
6018 if (!data->min_size_nested || len < data->min_size_nested) in rb_write_something()
6019 data->min_size_nested = len; in rb_write_something()
6020 if (len > data->max_size_nested) in rb_write_something()
6021 data->max_size_nested = len; in rb_write_something()
6023 data->bytes_alloc += event_len; in rb_write_something()
6024 data->bytes_written += len; in rb_write_something()
6025 data->events++; in rb_write_something()
6026 if (!data->min_size || len < data->min_size) in rb_write_something()
6027 data->min_size = len; in rb_write_something()
6028 if (len > data->max_size) in rb_write_something()
6029 data->max_size = len; in rb_write_something()
6033 ring_buffer_unlock_commit(data->buffer); in rb_write_something()
6044 data->cnt++; in rb_test()
6047 /* Now sleep between a min of 100-300us and a max of 1ms */ in rb_test()
6048 usleep_range(((data->cnt % 3) + 1) * 100, 1000); in rb_test()
6166 ret = -1; in test_ringbuffer()
6168 total_events = data->events + data->events_nested; in test_ringbuffer()
6169 total_written = data->bytes_written + data->bytes_written_nested; in test_ringbuffer()
6170 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; in test_ringbuffer()
6171 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; in test_ringbuffer()
6173 big_event_size = data->max_size + data->max_size_nested; in test_ringbuffer()
6174 small_event_size = data->min_size + data->min_size_nested; in test_ringbuffer()
6193 total_size += item->size + sizeof(struct rb_item); in test_ringbuffer()
6194 if (memcmp(&item->str[0], rb_string, item->size) != 0) { in test_ringbuffer()
6196 pr_info("buffer had: %.*s\n", item->size, item->str); in test_ringbuffer()
6197 pr_info("expected: %.*s\n", item->size, rb_string); in test_ringbuffer()
6199 ret = -1; in test_ringbuffer()
6207 ret = -1; in test_ringbuffer()