@@ -206 +208 @@
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT	0
+# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT	1
+# define RB_ARCH_ALIGNMENT		8U
+#endif
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
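
The new knob exists because an unaligned 64-bit load is slow or faulting on some architectures: on a 64-bit build without efficient unaligned access, every event gets padded out to 8 bytes so that u64 payloads stay naturally aligned. A minimal user-space sketch of the effect on reserved sizes (the macros are re-declared locally for illustration, they are not the kernel headers):

	/* Sketch only: model of the alignment rounding, not kernel code. */
	#include <stdio.h>

	#define RB_ARCH_ALIGNMENT	8U	/* pretend 8-byte alignment is forced */
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned len;

		for (len = 1; len <= 16; len++)
			printf("payload %2u -> reserved %2u\n", len,
			       ALIGN(len, RB_ARCH_ALIGNMENT));
		return 0;
	}

The trade-off is purely space for correctness/speed: 4-byte alignment packs tighter, but only where the hardware tolerates unaligned 64-bit reads.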
@@ -238 +251 @@
 	return length + RB_EVNT_HDR_SIZE;
 }
 
-/* inline for ring buffer fast paths */
-static unsigned
+/*
+ * Return the length of the given event. Will return
+ * the length of the time extend if the event is a
+ * time extend.
+ */
+static inline unsigned
 rb_event_length(struct ring_buffer_event *event)
 {
 	switch (event->type_len) {
@@ +284 @@
+/*
+ * Return total length of time extend and data,
+ *   or just the event length for all other events.
+ */
+static inline unsigned
+rb_event_ts_length(struct ring_buffer_event *event)
+{
+	unsigned len = 0;
+
+	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
+		/* time extends include the data event after it */
+		len = RB_LEN_TIME_EXTEND;
+		event = skip_time_extend(event);
+	}
+	return len + rb_event_length(event);
+}
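
The type_len field does double duty: values 1..28 encode the payload length in 4-byte units right in the header, while 0 means the 32-bit length spilled into array[0]. A user-space sketch of that decode (a simplified stand-in struct, not the kernel's exact layout):

	/* Sketch only: the length encoding, with local stand-in constants. */
	#include <stdio.h>
	#include <stdint.h>

	#define RB_ALIGNMENT	4U

	struct rb_event {
		uint32_t type_len:5, time_delta:27;
		uint32_t array[1];
	};

	static unsigned data_length(const struct rb_event *e)
	{
		if (e->type_len)		/* small event: length in header */
			return e->type_len * RB_ALIGNMENT;
		return e->array[0];		/* big event: length in array[0] */
	}

	int main(void)
	{
		struct rb_event small = { .type_len = 3 };	/* 12-byte payload */
		struct rb_event big = { .type_len = 0, .array = { 200 } };

		printf("%u %u\n", data_length(&small), data_length(&big));
		return 0;
	}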
@@ -268 +302 @@
 /**
  * ring_buffer_event_length - return the length of the event
  * @event: the event to get the length of
+ *
+ * Returns the size of the data load of a data event.
+ * If the event is something other than a data event, it
+ * returns the size of the event itself. With the exception
+ * of a TIME EXTEND, where it still returns the size of the
+ * data load of the data event after it.
  */
 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
-	unsigned length = rb_event_length(event);
+	unsigned length;
+
+	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+		event = skip_time_extend(event);
+
+	length = rb_event_length(event);
 	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 		return length;
 	length -= RB_EVNT_HDR_SIZE;
@@ -284 +329 @@
 static void *
 rb_event_data(struct ring_buffer_event *event)
 {
+	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+		event = skip_time_extend(event);
 	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 	/* If length is in len field, then array[0] has the data */
 	if (event->type_len)
@@ -309 +356 @@
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
 #define TS_DELTA_TEST	(~TS_MASK)
 
+/* Flag when events were overwritten */
+#define RB_MISSED_EVENTS	(1 << 31)
+/* Missed count stored at end */
+#define RB_MISSED_STORED	(1 << 30)
+
 struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
 	local_t		 commit;	/* write committed index */
@@ -328 +380 @@
 	local_t		 write;		/* index for next write */
 	unsigned	 read;		/* index for next read */
 	local_t		 entries;	/* entries on this page */
+	unsigned long	 real_end;	/* real end of data */
 	struct buffer_data_page *page;	/* Actual data page */
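
Both new flags ride in the top bits of the reader page's commit word, so its low 30 bits still hold the byte count. Sketch of the decode a reader performs (constants copied locally, and written as 1U shifts to keep the user-space C well-defined):

	/* Sketch only: splitting the commit word into size and flags. */
	#include <stdio.h>
	#include <stdint.h>

	#define RB_MISSED_EVENTS	(1U << 31)
	#define RB_MISSED_STORED	(1U << 30)

	int main(void)
	{
		uint32_t commit = 4000 | RB_MISSED_EVENTS | RB_MISSED_STORED;
		uint32_t size = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);

		printf("data bytes: %u\n", size);
		if (commit & RB_MISSED_EVENTS)
			printf("events were lost%s\n",
			       (commit & RB_MISSED_STORED) ?
			       ", count stored after the data" : "");
		return 0;
	}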
@@ -388 +441 @@
 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 
-/* Max number of timestamps that can fit on a page */
-#define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
@@ -394 +444 @@
 int ring_buffer_print_page_header(struct trace_seq *s)
 {
 	struct buffer_data_page field;
 	int ret;
 
 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-			       "offset:0;\tsize:%u;\n",
-			       (unsigned int)sizeof(field.time_stamp));
+			       "offset:0;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)sizeof(field.time_stamp),
+			       (unsigned int)is_signed_type(u64));
 
 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-			       "offset:%u;\tsize:%u;\n",
-			       (unsigned int)offsetof(typeof(field), commit),
-			       (unsigned int)sizeof(field.commit));
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)offsetof(typeof(field), commit),
+			       (unsigned int)sizeof(field.commit),
+			       (unsigned int)is_signed_type(long));
+
+	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)offsetof(typeof(field), commit),
+			       1,
+			       (unsigned int)is_signed_type(long));
 
 	ret = trace_seq_printf(s, "\tfield: char data;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
-			       (unsigned int)BUF_PAGE_SIZE);
+			       (unsigned int)BUF_PAGE_SIZE,
+			       (unsigned int)is_signed_type(char));
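
The new signed:%u attribute tells a userspace parser how to interpret each field. The kernel derives it with the classic cast-minus-one trick; a user-space restatement (the kernel's macro may differ in detail, and compilers may warn on the always-false unsigned comparison):

	/* Sketch only: detect signedness of a type at compile time. */
	#include <stdio.h>

	#define is_signed_type(type)	(((type)(-1)) < (type)0)

	int main(void)
	{
		printf("char: %u\n", (unsigned int)is_signed_type(char));
		printf("long: %u\n", (unsigned int)is_signed_type(long));
		printf("unsigned int: %u\n",
		       (unsigned int)is_signed_type(unsigned int));
		return 0;
	}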
@@ -419 +478 @@
 struct ring_buffer_per_cpu {
 	int				cpu;
+	atomic_t			record_disabled;
 	struct ring_buffer		*buffer;
 	spinlock_t			reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
 	struct buffer_page		*tail_page;	/* write to tail */
 	struct buffer_page		*commit_page;	/* committed pages */
 	struct buffer_page		*reader_page;
+	unsigned long			lost_events;
+	unsigned long			last_overrun;
 	local_t				commit_overrun;
@@ -995 +1058 @@
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -1190 +1253 @@
 	struct buffer_page *bpage;
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
 
 	rb_reset_cpu(cpu_buffer);
+	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
-
-	rb_check_pages(cpu_buffer);
-
-	atomic_dec(&cpu_buffer->record_disabled);
 }
@@ -1224 +1282 @@
 	struct buffer_page *bpage;
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		list_add_tail(&bpage->list, cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
+	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
-
-	rb_check_pages(cpu_buffer);
-
-	atomic_dec(&cpu_buffer->record_disabled);
 }
@@ -1251 +1305 @@
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- *  RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
 *
  * Returns -1 on failure.
@@ -1522 +1580 @@
 	iter->head = 0;
 }
 
+/* Slow path, do not inline */
+static noinline struct ring_buffer_event *
+rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
+{
+	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
+
+	/* Not the first event on the page? */
+	if (rb_event_index(event)) {
+		event->time_delta = delta & TS_MASK;
+		event->array[0] = delta >> TS_SHIFT;
+	} else {
+		/* nope, just zero it */
+		event->time_delta = 0;
+		event->array[0] = 0;
+	}
+
+	return skip_time_extend(event);
+}
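
A time extend carries a delta too large for the 27-bit time_delta field: the low TS_SHIFT bits stay in time_delta and the remainder goes to array[0]; the reader reassembles it with a shift-and-add (see rb_update_write_stamp further down). User-space sketch of the round trip (TS_SHIFT/TS_MASK re-declared for illustration):

	/* Sketch only: split and rejoin a 64-bit delta. */
	#include <stdio.h>
	#include <stdint.h>

	#define TS_SHIFT	27
	#define TS_MASK		((1ULL << TS_SHIFT) - 1)

	int main(void)
	{
		uint64_t delta = (123ULL << TS_SHIFT) | 456;	/* > 27 bits */
		uint32_t low = delta & TS_MASK;
		uint32_t high = delta >> TS_SHIFT;
		uint64_t back = ((uint64_t)high << TS_SHIFT) + low;

		printf("low=%u high=%u rejoined=%llu (%s)\n", low, high,
		       (unsigned long long)back, back == delta ? "ok" : "bad");
		return 0;
	}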
@@ -1526 +1603 @@
  * ring_buffer_update_event - update event type and data
  * @event: the event to update
@@ -1536 +1613 @@
 static void
-rb_update_event(struct ring_buffer_event *event,
-		unsigned type, unsigned length)
+rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
+		struct ring_buffer_event *event, unsigned length,
+		int add_timestamp, u64 delta)
 {
-	event->type_len = type;
-
-	switch (type) {
-
-	case RINGBUF_TYPE_PADDING:
-	case RINGBUF_TYPE_TIME_EXTEND:
-	case RINGBUF_TYPE_TIME_STAMP:
-		break;
-
-	case 0:
-		length -= RB_EVNT_HDR_SIZE;
-		if (length > RB_MAX_SMALL_DATA)
-			event->array[0] = length;
-		else
-			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
-		break;
-	default:
-		BUG();
-	}
-}
+	/* Only a commit updates the timestamp */
+	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
+		delta = 0;
+
+	/*
+	 * If we need to add a timestamp, then we
+	 * add it to the start of the reserved space.
+	 */
+	if (unlikely(add_timestamp)) {
+		event = rb_add_time_stamp(event, delta);
+		length -= RB_LEN_TIME_EXTEND;
+		delta = 0;
+	}
+
+	event->time_delta = delta;
+	length -= RB_EVNT_HDR_SIZE;
+	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
+		event->type_len = 0;
+		event->array[0] = length;
+	} else
+		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
+}
@@ -1726 +1806 @@
-	if (length > RB_MAX_SMALL_DATA)
+	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 		length += sizeof(event.array[0]);
 
 	length += RB_EVNT_HDR_SIZE;
-	length = ALIGN(length, RB_ALIGNMENT);
+	length = ALIGN(length, RB_ARCH_ALIGNMENT);
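
Worked example of what the two policies cost per event. Forcing 8-byte alignment also forces the explicit 32-bit length word (array[0]), since type_len can no longer describe the padded size exactly. A user-space sketch of the arithmetic (constants are local copies for illustration):

	/* Sketch only: model of rb_calculate_event_length() under both policies. */
	#include <stdio.h>

	#define RB_EVNT_HDR_SIZE	4U
	#define RB_MAX_SMALL_DATA	(4U * 28)
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

	static unsigned event_length(unsigned length, unsigned align, int force)
	{
		if (!length)
			length = 1;			/* zero length can confuse */
		if (length > RB_MAX_SMALL_DATA || force)
			length += sizeof(unsigned);	/* room for array[0] */
		length += RB_EVNT_HDR_SIZE;
		return ALIGN(length, align);
	}

	int main(void)
	{
		unsigned len;

		for (len = 1; len <= 120; len += 40)
			printf("payload %3u: 4-byte %3u, forced 8-byte %3u\n",
			       len, event_length(len, 4U, 0),
			       event_length(len, 8U, 1));
		return 0;
	}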
@@ -1744 +1824 @@
 	 * must fill the old tail_page with padding.
 	 */
 	if (tail >= BUF_PAGE_SIZE) {
+		/*
+		 * If the page was filled, then we still need
+		 * to update the real_end. Reset it to zero
+		 * and the reader will ignore it.
+		 */
+		if (tail == BUF_PAGE_SIZE)
+			tail_page->real_end = 0;
+
 		local_sub(length, &tail_page->write);
 		return;
 	}
 
 	event = __rb_page_index(tail_page, tail);
 	kmemcheck_annotate_bitfield(event, bitfield);
 
+	/*
+	 * Save the original length to the meta data.
+	 * This will be used by the reader to add lost event
+	 * counts.
+	 */
+	tail_page->real_end = tail;
+
 	/*
 	 * If this event is bigger than the minimum size, then
 	 * we need to be careful that we don't subtract the
 	 * write counter enough to allow another writer to slip
@@ -1784 +1879 @@
 	local_sub(length, &tail_page->write);
@@ -1787 +1882 @@
-static struct ring_buffer_event *
+/*
+ * This is the slow path, force gcc not to inline it.
+ */
+static noinline struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
-	     struct buffer_page *tail_page, u64 *ts)
+	     struct buffer_page *tail_page, u64 ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
@@ -1870 +1968 @@
 		 * Nested commits always have zero deltas, so
 		 * just reread the time stamp
 		 */
-		*ts = rb_time_stamp(buffer);
-		next_page->page->time_stamp = *ts;
+		ts = rb_time_stamp(buffer);
+		next_page->page->time_stamp = ts;
@@ -1891 +1989 @@
 static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
-		  unsigned type, unsigned long length, u64 *ts)
+		  unsigned long length, u64 ts,
+		  u64 delta, int add_timestamp)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;
 
-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
+	/*
+	 * If the time delta since the last event is too big to
+	 * hold in the time field of the event, then we append a
+	 * TIME EXTEND event ahead of the data event.
+	 */
+	if (unlikely(add_timestamp))
+		length += RB_LEN_TIME_EXTEND;
+
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 
 	/* set write to only the index of the write */
 	write &= RB_WRITE_MASK;
 	tail = write - length;
 
 	/* See if we shot past the end of this buffer page */
-	if (write > BUF_PAGE_SIZE)
+	if (unlikely(write > BUF_PAGE_SIZE))
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);
 
 	/* We reserved something on the buffer */
 
 	event = __rb_page_index(tail_page, tail);
 	kmemcheck_annotate_bitfield(event, bitfield);
-	rb_update_event(event, type, length);
+	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
 
-	/* The passed in type is zero for DATA */
-	if (likely(!type))
-		local_inc(&tail_page->entries);
+	local_inc(&tail_page->entries);
 
 	/*
 	 * If this is the first commit on the page, then update
 	 * its timestamp.
 	 */
 	if (!tail)
-		tail_page->page->time_stamp = *ts;
+		tail_page->page->time_stamp = ts;
@@ -1970 @@
-static int
-rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
-		  u64 *ts, u64 *delta)
-{
-	struct ring_buffer_event *event;
-	static int once;
-	int ret;
-
-	if (unlikely(*delta > (1ULL << 59) && !once++)) {
-		printk(KERN_WARNING "Delta way too big! %llu"
-		       " ts=%llu write stamp = %llu\n",
-		       (unsigned long long)*delta,
-		       (unsigned long long)*ts,
-		       (unsigned long long)cpu_buffer->write_stamp);
-		WARN_ON(1);
-	}
-
-	/*
-	 * The delta is too big, we to add a
-	 * timestamp [...]
-	 */
-	event = __rb_reserve_next(cpu_buffer,
-				  RINGBUF_TYPE_TIME_EXTEND,
-				  RB_LEN_TIME_EXTEND, ts);
-	if (!event)
-		return -EBUSY;
-
-	if (PTR_ERR(event) == -EAGAIN)
-		return -EAGAIN;
-
-	/* Only a commited time event can update the write stamp */
-	if (rb_event_is_commit(cpu_buffer, event)) {
-		/*
-		 * If this is the first on the page, then it was
-		 * updated with the page itself. Try to discard it
-		 * and if we can't just make it zero.
-		 */
-		if (rb_event_index(event)) {
-			event->time_delta = *delta & TS_MASK;
-			event->array[0] = *delta >> TS_SHIFT;
-		} else {
-			/* try to discard, since we do not need this */
-			if (!rb_try_to_discard(cpu_buffer, event)) {
-				/* nope, just zero it */
-				event->time_delta = 0;
-				event->array[0] = 0;
-			}
-		}
-		cpu_buffer->write_stamp = *ts;
-		/* let the caller know this was the commit */
-		ret = 1;
-	} else {
-		/* Try to discard the event */
-		if (!rb_try_to_discard(cpu_buffer, event)) {
-			/* Darn, this is just wasted space */
-			event->time_delta = 0;
-			event->array[0] = 0;
-		}
-		ret = 0;
-	}
-
-	*delta = 0;
-
-	return ret;
-}
@@ -2037 +2072 @@
 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	local_inc(&cpu_buffer->committing);
 	local_inc(&cpu_buffer->commits);
 }
 
-static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned long commits;
@@ -2116 +2155 @@
 	ts = rb_time_stamp(cpu_buffer->buffer);
-
-	/*
-	 * Only the first commit can update the timestamp.
-	 * Yes there is a race here. If an interrupt comes in
-	 * just after the conditional and it traces too, then it
-	 * will also check the deltas. More than one timestamp may
-	 * also be made. But only the entry that did the actual
-	 * commit will be something other than zero.
-	 */
-	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
-		   rb_page_write(cpu_buffer->tail_page) ==
-		   rb_commit_index(cpu_buffer))) {
-		u64 diff;
-
-		diff = ts - cpu_buffer->write_stamp;
-
-		/* make sure this diff is calculated here */
-		barrier();
-
-		/* Did the write stamp get updated already? */
-		if (unlikely(ts < cpu_buffer->write_stamp))
-			goto get_event;
-
-		delta = diff;
-		if (unlikely(test_time_stamp(delta))) {
-
-			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
-			if (commit == -EBUSY)
-				return NULL;
-
-			if (commit == -EAGAIN)
-				goto again;
-
-			RB_WARN_ON(cpu_buffer, commit < 0);
-		}
-	}
-
- get_event:
-	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
+	diff = ts - cpu_buffer->write_stamp;
+
+	/* make sure this diff is calculated here */
+	barrier();
+
+	/* Did the write stamp get updated already? */
+	if (likely(ts >= cpu_buffer->write_stamp)) {
+		delta = diff;
+		if (unlikely(test_time_stamp(delta))) {
+			WARN_ONCE(delta > (1ULL << 59),
+				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
+				  (unsigned long long)delta,
+				  (unsigned long long)ts,
+				  (unsigned long long)cpu_buffer->write_stamp);
+			add_timestamp = 1;
+		}
+	}
+
+	event = __rb_reserve_next(cpu_buffer, length, ts,
+				  delta, add_timestamp);
 	if (unlikely(PTR_ERR(event) == -EAGAIN))
 		goto again;
 
 	if (!event)
 		goto out_fail;
 
-	if (!rb_event_is_commit(cpu_buffer, event))
-		delta = 0;
-
-	event->time_delta = delta;
-
 	return event;
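
The whole retry protocol (-EBUSY/-EAGAIN around a separately reserved time-extend event) collapses into one add_timestamp flag: if the delta needs more than 27 bits, the reservation itself prepends the extend. The overflow test that drives the flag, modeled in user space (macros re-declared for illustration):

	/* Sketch only: does this delta fit in the 27-bit time_delta field? */
	#include <stdio.h>
	#include <stdint.h>

	#define TS_SHIFT	27
	#define TS_MASK		((1ULL << TS_SHIFT) - 1)
	#define TS_DELTA_TEST	(~TS_MASK)

	static int test_time_stamp(uint64_t delta)
	{
		return (delta & TS_DELTA_TEST) != 0;	/* high bits -> extend */
	}

	int main(void)
	{
		printf("%d %d\n", test_time_stamp(100),		/* fits: 0 */
		       test_time_stamp(1ULL << 30));		/* extend: 1 */
		return 0;
	}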
@@ -2176 +2191 @@
 #define TRACE_RECURSIVE_DEPTH 16
 
-static int trace_recursive_lock(void)
+/* Keep this code out of the fast path cache */
+static noinline void trace_recursive_fail(void)
 {
-	current->trace_recursion++;
-
-	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
-		return 0;
-
 	/* Disable all tracing before we do anything else */
 	tracing_off_permanent();
 
 	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
 		    current->trace_recursion,
 		    hardirq_count() >> HARDIRQ_SHIFT,
 		    softirq_count() >> SOFTIRQ_SHIFT,
 		    in_nmi());
 
 	WARN_ON_ONCE(1);
-	return -1;
 }
 
-static void trace_recursive_unlock(void)
+static inline int trace_recursive_lock(void)
+{
+	current->trace_recursion++;
+
+	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+		return 0;
+
+	trace_recursive_fail();
+
+	return -1;
+}
+
+static inline void trace_recursive_unlock(void)
 {
 	WARN_ON_ONCE(!current->trace_recursion);
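
The recursion guard is just a per-task depth counter; hoisting the cold failure path into a noinline helper keeps the hot path tiny and inlinable. A user-space model using a thread-local in place of current->trace_recursion (unlike this sketch, the kernel shuts tracing off permanently on failure rather than undoing the increment):

	/* Sketch only: bounded-depth recursion guard. */
	#include <stdio.h>

	#define TRACE_RECURSIVE_DEPTH 16

	static __thread long trace_recursion;

	static int recursive_lock(void)
	{
		if (++trace_recursion < TRACE_RECURSIVE_DEPTH)
			return 0;
		trace_recursion--;		/* sketch: undo on refusal */
		return -1;
	}

	static void recursive_unlock(void)
	{
		trace_recursion--;
	}

	int main(void)
	{
		int i, depth = 0;

		for (i = 0; i < 20; i++) {
			if (recursive_lock())
				break;
			depth++;
		}
		printf("allowed nesting: %d\n", depth);
		while (depth--)
			recursive_unlock();
		return 0;
	}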
@@ -2233 +2253 @@
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
-	if (atomic_read(&buffer->record_disabled))
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
+
+	if (atomic_read(&buffer->record_disabled))
+		goto out_nocheck;
 
 	if (trace_recursive_lock())
 		goto out_nocheck;
@@ -2266 +2286 @@
-	/*
-	 * Need to store resched state on this cpu.
-	 * Only the first needs to.
-	 */
-
-	if (preempt_count() == 1)
-		per_cpu(rb_need_resched, cpu) = resched;
-
 	return event;
 
  out:
 	trace_recursive_unlock();
 
  out_nocheck:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
@@ -2285 +2297 @@
 static void
 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
 {
+	u64 delta;
+
 	/*
 	 * The event first in the commit queue updates the
 	 * time stamp.
 	 */
-	if (rb_event_is_commit(cpu_buffer, event))
-		cpu_buffer->write_stamp += event->time_delta;
+	if (rb_event_is_commit(cpu_buffer, event)) {
+		/*
+		 * A commit event that is first on a page
+		 * updates the write timestamp with the page stamp
+		 */
+		if (!rb_event_index(event))
+			cpu_buffer->write_stamp =
+				cpu_buffer->commit_page->page->time_stamp;
+		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
+			delta = event->array[0];
+			delta <<= TS_SHIFT;
+			delta += event->time_delta;
+			cpu_buffer->write_stamp += delta;
+		} else
+			cpu_buffer->write_stamp += event->time_delta;
+	}
 }
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
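
With time extends now glued to their data event, the commit-side stamp update has three cases: first event on a page (the page stamp is absolute), a time extend (widen and add), and everything else (add the 27-bit delta). A compact user-space walk-through (simplified stand-in types; in the kernel's enum RINGBUF_TYPE_TIME_EXTEND happens to be 30):

	/* Sketch only: the three write_stamp cases, not the kernel layout. */
	#include <stdio.h>
	#include <stdint.h>

	#define TS_SHIFT		27
	#define TYPE_TIME_EXTEND	30

	struct ev {
		unsigned type_len;
		uint32_t time_delta;
		uint32_t array0;
		int first_on_page;
	};

	static void update_write_stamp(uint64_t *ws, uint64_t page_stamp,
				       const struct ev *e)
	{
		if (e->first_on_page)		/* page stamp is absolute */
			*ws = page_stamp;
		else if (e->type_len == TYPE_TIME_EXTEND)
			*ws += ((uint64_t)e->array0 << TS_SHIFT) + e->time_delta;
		else
			*ws += e->time_delta;
	}

	int main(void)
	{
		uint64_t ws = 0;
		struct ev first = { .first_on_page = 1 };
		struct ev plain = { .time_delta = 120 };
		struct ev extend = { .type_len = TYPE_TIME_EXTEND,
				     .time_delta = 7, .array0 = 3 };

		update_write_stamp(&ws, 1000000, &first);
		update_write_stamp(&ws, 1000000, &plain);
		update_write_stamp(&ws, 1000000, &extend);
		printf("write_stamp = %llu\n", (unsigned long long)ws);
		return 0;
	}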
@@ -2324 +2352 @@
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
@@ -2338 +2360 @@
 static inline void rb_event_discard(struct ring_buffer_event *event)
 {
+	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+		event = skip_time_extend(event);
+
 	/* array[0] holds the actual length for the discarded event */
 	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
 	event->type_len = RINGBUF_TYPE_PADDING;
@@ -2438 +2463 @@
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 }
 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
@@ -2469 +2488 @@
 	struct ring_buffer_event *event;
 	void *body;
 	int ret = -EBUSY;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
+	preempt_disable_notrace();
+
 	if (atomic_read(&buffer->record_disabled))
-		return -EBUSY;
-
-	resched = ftrace_preempt_disable();
+		goto out;
 
 	cpu = raw_smp_processor_id();
@@ -2546 +2565 @@
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
@@ -2582 +2601 @@
  * @cpu: The CPU to enable.
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
@@ -2597 +2616 @@
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
* The total entries in the ring buffer is the running counter
2620
* of entries entered into the ring buffer, minus the sum of
2621
* the entries read from the ring buffer and the number of
2622
* entries that were overwritten.
2624
static inline unsigned long
2625
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2627
return local_read(&cpu_buffer->entries) -
2628
(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2600
2632
* ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2601
2633
* @buffer: The ring buffer
2604
2636
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2606
2638
struct ring_buffer_per_cpu *cpu_buffer;
2609
2640
if (!cpumask_test_cpu(cpu, buffer->cpumask))
2612
2643
cpu_buffer = buffer->buffers[cpu];
2613
ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
2645
return rb_num_of_entries(cpu_buffer);
2618
2647
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
2674
2703
/* if you care about this being correct, lock the buffer */
2675
2704
for_each_buffer_cpu(buffer, cpu) {
2676
2705
cpu_buffer = buffer->buffers[cpu];
2677
entries += (local_read(&cpu_buffer->entries) -
2678
local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
2706
entries += rb_num_of_entries(cpu_buffer);
2681
2709
return entries;
2870
2901
local_set(&cpu_buffer->reader_page->write, 0);
2871
2902
local_set(&cpu_buffer->reader_page->entries, 0);
2872
2903
local_set(&cpu_buffer->reader_page->page->commit, 0);
2904
cpu_buffer->reader_page->real_end = 0;
2876
2908
* Splice the empty reader page into the list around the head.
2878
2910
reader = rb_set_head_page(cpu_buffer);
2879
cpu_buffer->reader_page->list.next = reader->list.next;
2911
cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
2880
2912
cpu_buffer->reader_page->list.prev = reader->list.prev;
2890
2922
rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
2925
* We want to make sure we read the overruns after we set up our
2926
* pointers to the next object. The writer side does a
2927
* cmpxchg to cross pages which acts as the mb on the writer
2928
* side. Note, the reader will constantly fail the swap
2929
* while the writer is updating the pointers, so this
2930
* guarantees that the overwrite recorded here is the one we
2931
* want to compare with the last_overrun.
2934
overwrite = local_read(&(cpu_buffer->overrun));
2893
2937
* Here's the tricky part.
2895
2939
* We need to move the pointer past the header page.
2914
2958
* Now make the new head point back to the reader page.
2916
reader->list.next->prev = &cpu_buffer->reader_page->list;
2960
rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
2917
2961
rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2919
2963
/* Finally update the reader page to the new head */
2920
2964
cpu_buffer->reader_page = reader;
2921
2965
rb_reset_reader_page(cpu_buffer);
2967
if (overwrite != cpu_buffer->last_overrun) {
2968
cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
2969
cpu_buffer->last_overrun = overwrite;
2926
__raw_spin_unlock(&cpu_buffer->lock);
2975
arch_spin_unlock(&cpu_buffer->lock);
2927
2976
local_irq_restore(flags);
2955
3004
static void rb_advance_iter(struct ring_buffer_iter *iter)
2957
struct ring_buffer *buffer;
2958
3006
struct ring_buffer_per_cpu *cpu_buffer;
2959
3007
struct ring_buffer_event *event;
2960
3008
unsigned length;
2962
3010
cpu_buffer = iter->cpu_buffer;
2963
buffer = cpu_buffer->buffer;
2966
3013
* Check if we are at the end of the buffer.
2996
3043
rb_advance_iter(iter);
3046
static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3048
return cpu_buffer->lost_events;
2999
3051
static struct ring_buffer_event *
3000
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
3052
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3053
unsigned long *lost_events)
3002
3055
struct ring_buffer_event *event;
3003
3056
struct buffer_page *reader;
3008
* We repeat when a timestamp is encountered. It is possible
3009
* to get multiple timestamps from an interrupt entering just
3010
* as one timestamp is about to be written, or from discarded
3011
* commits. The most that we can have is the number on a single page.
3061
* We repeat when a time extend is encountered.
3062
* Since the time extend is always attached to a data event,
3063
* we should never loop more than once.
3064
* (We never hit the following condition more than twice).
3013
if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
3066
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3016
3069
reader = rb_get_reader_page(cpu_buffer);
3067
3122
struct ring_buffer_event *event;
3068
3123
int nr_loops = 0;
3125
cpu_buffer = iter->cpu_buffer;
3126
buffer = cpu_buffer->buffer;
3129
* Check if someone performed a consuming read to
3130
* the buffer. A consuming read invalidates the iterator
3131
* and we need to reset the iterator in this case.
3133
if (unlikely(iter->cache_read != cpu_buffer->read ||
3134
iter->cache_reader_page != cpu_buffer->reader_page))
3135
rb_iter_reset(iter);
3070
3138
if (ring_buffer_iter_empty(iter))
3073
cpu_buffer = iter->cpu_buffer;
3074
buffer = cpu_buffer->buffer;
3078
* We repeat when a timestamp is encountered.
3079
* We can get multiple timestamps by nested interrupts or also
3080
* if filtering is on (discarding commits). Since discarding
3081
* commits can be frequent we can get a lot of timestamps.
3082
* But we limit them by not adding timestamps if they begin
3083
* at the start of a page.
3142
* We repeat when a time extend is encountered.
3143
* Since the time extend is always attached to a data event,
3144
* we should never loop more than once.
3145
* (We never hit the following condition more than twice).
3085
if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
3147
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3088
3150
if (rb_per_cpu_empty(cpu_buffer))
3153
if (iter->head >= local_read(&iter->head_page->page->commit)) {
3091
3158
event = rb_iter_head_event(iter);
3093
3160
switch (event->type_len) {
3145
3212
* @buffer: The ring buffer to read
3146
3213
* @cpu: The cpu to peak at
3147
3214
* @ts: The timestamp counter of this event.
3215
* @lost_events: a variable to store if events were lost (may be NULL)
3149
3217
* This will return the event that will be read next, but does
3150
3218
* not consume the data.
3152
3220
struct ring_buffer_event *
3153
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
3221
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3222
unsigned long *lost_events)
3155
3224
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3156
3225
struct ring_buffer_event *event;
3165
3234
local_irq_save(flags);
3167
3236
spin_lock(&cpu_buffer->reader_lock);
3168
event = rb_buffer_peek(cpu_buffer, ts);
3237
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3169
3238
if (event && event->type_len == RINGBUF_TYPE_PADDING)
3170
3239
rb_advance_reader(cpu_buffer);
3208
3277
* ring_buffer_consume - return an event and consume it
3209
3278
* @buffer: The ring buffer to get the next event from
3279
* @cpu: the cpu to read the buffer from
3280
* @ts: a variable to store the timestamp (may be NULL)
3281
* @lost_events: a variable to store if events were lost (may be NULL)
3211
3283
* Returns the next event in the ring buffer, and that event is consumed.
3212
3284
* Meaning, that sequential reads will keep returning a different event,
3213
3285
* and eventually empty the ring buffer if the producer is slower.
3215
3287
struct ring_buffer_event *
3216
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
3288
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3289
unsigned long *lost_events)
3218
3291
struct ring_buffer_per_cpu *cpu_buffer;
3219
3292
struct ring_buffer_event *event = NULL;
3235
3308
spin_lock(&cpu_buffer->reader_lock);
3237
event = rb_buffer_peek(cpu_buffer, ts);
3310
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3312
cpu_buffer->lost_events = 0;
3239
3313
rb_advance_reader(cpu_buffer);
3242
3317
spin_unlock(&cpu_buffer->reader_lock);
3253
3328
EXPORT_SYMBOL_GPL(ring_buffer_consume);
3256
* ring_buffer_read_start - start a non consuming read of the buffer
3331
* ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3257
3332
* @buffer: The ring buffer to read from
3258
3333
* @cpu: The cpu buffer to iterate over
3260
* This starts up an iteration through the buffer. It also disables
3261
* the recording to the buffer until the reading is finished.
3262
* This prevents the reading from being corrupted. This is not
3263
* a consuming read, so a producer is not expected.
3265
* Must be paired with ring_buffer_finish.
3335
* This performs the initial preparations necessary to iterate
3336
* through the buffer. Memory is allocated, buffer recording
3337
* is disabled, and the iterator pointer is returned to the caller.
3339
* Disabling buffer recordng prevents the reading from being
3340
* corrupted. This is not a consuming read, so a producer is not
3343
* After a sequence of ring_buffer_read_prepare calls, the user is
3344
* expected to make at least one call to ring_buffer_prepare_sync.
3345
* Afterwards, ring_buffer_read_start is invoked to get things going
3348
* This overall must be paired with ring_buffer_finish.
3267
3350
struct ring_buffer_iter *
3268
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
3351
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3270
3353
struct ring_buffer_per_cpu *cpu_buffer;
3271
3354
struct ring_buffer_iter *iter;
3272
unsigned long flags;
3274
3356
if (!cpumask_test_cpu(cpu, buffer->cpumask))
3283
3365
iter->cpu_buffer = cpu_buffer;
3285
3367
atomic_inc(&cpu_buffer->record_disabled);
3286
synchronize_sched();
3288
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3289
__raw_spin_lock(&cpu_buffer->lock);
3290
rb_iter_reset(iter);
3291
__raw_spin_unlock(&cpu_buffer->lock);
3292
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3371
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3374
* ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3376
* All previously invoked ring_buffer_read_prepare calls to prepare
3377
* iterators will be synchronized. Afterwards, read_buffer_read_start
3378
* calls on those iterators are allowed.
3381
ring_buffer_read_prepare_sync(void)
3383
synchronize_sched();
3385
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3388
* ring_buffer_read_start - start a non consuming read of the buffer
3389
* @iter: The iterator returned by ring_buffer_read_prepare
3391
* This finalizes the startup of an iteration through the buffer.
3392
* The iterator comes from a call to ring_buffer_read_prepare and
3393
* an intervening ring_buffer_read_prepare_sync must have been
3396
* Must be paired with ring_buffer_finish.
3399
ring_buffer_read_start(struct ring_buffer_iter *iter)
3401
struct ring_buffer_per_cpu *cpu_buffer;
3402
unsigned long flags;
3407
cpu_buffer = iter->cpu_buffer;
3409
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3410
arch_spin_lock(&cpu_buffer->lock);
3411
rb_iter_reset(iter);
3412
arch_spin_unlock(&cpu_buffer->lock);
3413
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3296
3415
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3385
3504
cpu_buffer->write_stamp = 0;
3386
3505
cpu_buffer->read_stamp = 0;
3507
cpu_buffer->lost_events = 0;
3508
cpu_buffer->last_overrun = 0;
3388
3510
rb_head_page_activate(cpu_buffer);
3408
3530
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3411
__raw_spin_lock(&cpu_buffer->lock);
3533
arch_spin_lock(&cpu_buffer->lock);
3413
3535
rb_reset_cpu(cpu_buffer);
3415
__raw_spin_unlock(&cpu_buffer->lock);
3537
arch_spin_unlock(&cpu_buffer->lock);
3418
3540
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3696
3819
read = reader->read;
3697
3820
commit = rb_page_commit(reader);
3822
/* Check if any events were dropped */
3823
missed_events = cpu_buffer->lost_events;
3700
3826
* If this page has been partially read or
3701
3827
* if len is not big enough to read the rest of the page or
3727
3854
/* Need to copy one event at a time */
3856
/* We need the size of one event, because
3857
* rb_advance_reader only advances by one event,
3858
* whereas rb_event_ts_length may include the size of
3859
* one or two events.
3860
* We have already ensured there's enough space if this
3861
* is a time extend. */
3862
size = rb_event_length(event);
3729
3863
memcpy(bpage->data + pos, rpage->data + rpos, size);
3734
3868
rpos = reader->read;
3737
3874
event = rb_reader_event(cpu_buffer);
3738
size = rb_event_length(event);
3739
} while (len > size);
3875
/* Always keep the time extend and data together */
3876
size = rb_event_ts_length(event);
3877
} while (len >= size);
3741
3879
/* update bpage */
3742
3880
local_set(&bpage->commit, pos);
3756
3894
local_set(&reader->entries, 0);
3757
3895
reader->read = 0;
3758
3896
*data_page = bpage;
3899
* Use the real_end for the data size,
3900
* This gives us a chance to store the lost events
3903
if (reader->real_end)
3904
local_set(&bpage->commit, reader->real_end);
3908
cpu_buffer->lost_events = 0;
3910
commit = local_read(&bpage->commit);
3912
* Set a flag in the commit field if we lost events
3914
if (missed_events) {
3915
/* If there is room at the end of the page to save the
3916
* missed events, then record it there.
3918
if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
3919
memcpy(&bpage->data[commit], &missed_events,
3920
sizeof(missed_events));
3921
local_add(RB_MISSED_STORED, &bpage->commit);
3922
commit += sizeof(missed_events);
3924
local_add(RB_MISSED_EVENTS, &bpage->commit);
3928
* This page may be off to user land. Zero it out here.
3930
if (commit < BUF_PAGE_SIZE)
3931
memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
3763
3934
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
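
Putting it together for a consumer of ring_buffer_read_page(): mask the two flag bits off the commit word to get the data size, and if RB_MISSED_STORED is set the missed count sits right after the data. User-space sketch of both sides (layout constants are local stand-ins, not the kernel headers):

	/* Sketch only: writer stores the count, reader decodes it. */
	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define BUF_PAGE_SIZE		4080u	/* illustrative */
	#define RB_MISSED_EVENTS	(1U << 31)
	#define RB_MISSED_STORED	(1U << 30)

	int main(void)
	{
		unsigned char page[BUF_PAGE_SIZE] = { 0 };
		unsigned long missed = 42;
		uint32_t commit = 100;		/* pretend 100 bytes of events */

		/* writer side, as in the hunk above */
		memcpy(&page[commit], &missed, sizeof(missed));
		commit |= RB_MISSED_EVENTS | RB_MISSED_STORED;

		/* reader side: strip flags before using the size */
		uint32_t size = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
		if (commit & RB_MISSED_STORED) {
			unsigned long stored;

			memcpy(&stored, &page[size], sizeof(stored));
			printf("%u data bytes, %lu events missed\n", size, stored);
		}
		return 0;
	}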