@@ free_event() @@
 	if (has_branch_stack(event)) {
 		static_key_slow_dec_deferred(&perf_sched_events);
 		/* is system-wide event */
-		if (!(event->attach_state & PERF_ATTACH_TASK))
+		if (!(event->attach_state & PERF_ATTACH_TASK)) {
 			atomic_dec(&per_cpu(perf_branch_stack_events,
 					    event->cpu));
+		}
 	}

 	if (event->rb) {
-		ring_buffer_put(event->rb);
+		struct ring_buffer *rb;
+
+		/*
+		 * Can happen when we close an event with re-directed output.
+		 *
+		 * Since we have a 0 refcount, perf_mmap_close() will skip
+		 * over us; possibly making our ring_buffer_put() the last.
+		 */
+		mutex_lock(&event->mmap_mutex);
+		rb = event->rb;
+		if (rb) {
+			rcu_assign_pointer(event->rb, NULL);
+			ring_buffer_detach(event, rb);
+			ring_buffer_put(rb); /* could be last */
+		}
+		mutex_unlock(&event->mmap_mutex);
 	}

 	if (is_cgroup_event(event))
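The reworked free_event() above drops its buffer reference under event->mmap_mutex: perf_mmap_close() deliberately skips events whose refcount already reached zero, so this put may be the last one and must be serialized against the detach. Below is a minimal userspace sketch of that "detach and put under a mutex" teardown, with hypothetical names (struct evt and struct buf stand in for perf_event and ring_buffer); it illustrates the locking shape only, it is not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	atomic_int refcount;
};

struct evt {
	pthread_mutex_t mmap_mutex;
	struct buf *rb;		/* cleared under mmap_mutex on teardown */
};

/* Free on the last reference, as ring_buffer_put() does. */
static void buf_put(struct buf *rb)
{
	if (atomic_fetch_sub(&rb->refcount, 1) == 1)
		free(rb);
}

static void evt_free(struct evt *e)
{
	pthread_mutex_lock(&e->mmap_mutex);
	struct buf *rb = e->rb;
	if (rb) {
		e->rb = NULL;	/* detach: buffer unreachable via the event */
		buf_put(rb);	/* could be last */
	}
	pthread_mutex_unlock(&e->mmap_mutex);
}

int main(void)
{
	struct evt e = { .mmap_mutex = PTHREAD_MUTEX_INITIALIZER };

	e.rb = malloc(sizeof(*e.rb));
	atomic_init(&e.rb->refcount, 1);
	evt_free(&e);
	puts("event torn down");
	return 0;
}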
@@ perf_poll() @@
 	unsigned int events = POLL_HUP;

 	/*
-	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
-	 * grabs the rb reference but perf_event_set_output() overrides it.
-	 * Here is the timeline for two threads T1, T2:
-	 *
-	 * t0: T1, rb = rcu_dereference(event->rb)
-	 * t1: T2, old_rb = event->rb
-	 * t2: T2, event->rb = new rb
-	 * t3: T2, ring_buffer_detach(old_rb)
-	 * t4: T1, ring_buffer_attach(rb1)
-	 * t5: T1, poll_wait(event->waitq)
-	 *
-	 * To avoid this problem, we grab mmap_mutex in perf_poll()
-	 * thereby ensuring that the assignment of the new ring buffer
-	 * and the detachment of the old buffer appear atomic to perf_poll().
+	 * Pin the event->rb by taking event->mmap_mutex; otherwise
+	 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
 	 */
 	mutex_lock(&event->mmap_mutex);
-	rb = rcu_dereference(event->rb);
-	if (rb) {
-		ring_buffer_attach(event, rb);
+	rb = event->rb;
+	if (rb)
 		events = atomic_xchg(&rb->poll, 0);
-	}
 	mutex_unlock(&event->mmap_mutex);

 	poll_wait(file, &event->waitq, wait);
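The replacement comment states the fix: instead of the RCU dereference plus lazy attach, perf_poll() now pins event->rb with the same mutex that perf_event_set_output() holds while swizzling it. The atomic_xchg() on rb->poll is what makes consuming readiness bits race-free; here is a standalone sketch of that consume-and-clear idiom, with illustrative names rather than the kernel API.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint pending_events;

/* Producer side: the wakeup path ORs in new readiness bits. */
static void post_events(unsigned int bits)
{
	atomic_fetch_or(&pending_events, bits);
}

/* Consumer side: fetch and clear in one atomic step, so no bit can be
 * observed twice or lost between a separate read and clear. */
static unsigned int consume_events(void)
{
	return atomic_exchange(&pending_events, 0);
}

int main(void)
{
	post_events(0x1);	/* POLLIN-like bit */
	post_events(0x4);	/* POLLOUT-like bit */
	printf("events = %#x\n", consume_events());
	printf("events = %#x\n", consume_events());	/* now 0 */
	return 0;
}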
@@ ring_buffer_put() @@
 static void ring_buffer_put(struct ring_buffer *rb)
 {
-	struct perf_event *event, *n;
-	unsigned long flags;
-
 	if (!atomic_dec_and_test(&rb->refcount))
 		return;

-	spin_lock_irqsave(&rb->event_lock, flags);
-	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
-		list_del_init(&event->rb_entry);
-		wake_up_all(&event->waitq);
-	}
-	spin_unlock_irqrestore(&rb->event_lock, flags);
+	WARN_ON_ONCE(!list_empty(&rb->event_list));

 	call_rcu(&rb->rcu_head, rb_free_rcu);
 }
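ring_buffer_put() now only asserts that rb->event_list is empty: with perf_mmap_close() and free_event() doing the detaching, the thread dropping the last reference no longer walks the list itself. A userspace sketch of that dec-and-test release pattern follows, with the assert mirroring the WARN_ON_ONCE(); names are hypothetical, and the kernel defers the actual free through call_rcu() so lockless readers can drain first.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct rbuf {
	atomic_int refcount;
	int events_attached;	/* stand-in for rb->event_list */
};

static void rbuf_put(struct rbuf *rb)
{
	/* Only the dropper of the final reference may free. */
	if (atomic_fetch_sub_explicit(&rb->refcount, 1,
				      memory_order_acq_rel) != 1)
		return;

	/* By the time the last reference drops, every event must already
	 * have detached (the WARN_ON_ONCE() in the hunk above). */
	assert(rb->events_attached == 0);
	free(rb);	/* kernel: call_rcu(&rb->rcu_head, rb_free_rcu) */
}

int main(void)
{
	struct rbuf *rb = calloc(1, sizeof(*rb));

	atomic_init(&rb->refcount, 2);
	rbuf_put(rb);		/* not last */
	rbuf_put(rb);		/* last: frees */
	puts("done");
	return 0;
}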
@@ perf_mmap_open() @@
 	struct perf_event *event = vma->vm_file->private_data;

 	atomic_inc(&event->mmap_count);
+	atomic_inc(&event->rb->mmap_count);
 }
@@ perf_mmap_close() @@
+/*
+ * A buffer can be mmap()ed multiple times; either directly through the same
+ * event, or through other events by use of perf_event_set_output().
+ *
+ * In order to undo the VM accounting done by perf_mmap() we need to destroy
+ * the buffer here, where we still have a VM context. This means we need
+ * to detach all events redirecting to us.
+ */
 static void perf_mmap_close(struct vm_area_struct *vma)
 {
 	struct perf_event *event = vma->vm_file->private_data;

-	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-		unsigned long size = perf_data_size(event->rb);
-		struct user_struct *user = event->mmap_user;
-		struct ring_buffer *rb = event->rb;
-
-		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-		vma->vm_mm->pinned_vm -= event->mmap_locked;
-		rcu_assign_pointer(event->rb, NULL);
-		ring_buffer_detach(event, rb);
-		mutex_unlock(&event->mmap_mutex);
-
-		ring_buffer_put(rb);
-		free_uid(user);
-	}
+	struct ring_buffer *rb = event->rb;
+	struct user_struct *mmap_user = rb->mmap_user;
+	int mmap_locked = rb->mmap_locked;
+	unsigned long size = perf_data_size(rb);
+
+	atomic_dec(&rb->mmap_count);
+
+	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+		return;
+
+	/* Detach current event from the buffer. */
+	rcu_assign_pointer(event->rb, NULL);
+	ring_buffer_detach(event, rb);
+	mutex_unlock(&event->mmap_mutex);
+
+	/* If there's still other mmap()s of this buffer, we're done. */
+	if (atomic_read(&rb->mmap_count)) {
+		ring_buffer_put(rb); /* can't be last */
+		return;
+	}
+
+	/*
+	 * No other mmap()s, detach from all other events that might redirect
+	 * into the now unreachable buffer. Somewhat complicated by the
+	 * fact that rb::event_lock otherwise nests inside mmap_mutex.
+	 */
+again:
+	rcu_read_lock();
+	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+		if (!atomic_long_inc_not_zero(&event->refcount)) {
+			/*
+			 * This event is en-route to free_event() which will
+			 * detach it and remove it from the list.
+			 */
+			continue;
+		}
+		rcu_read_unlock();
+
+		mutex_lock(&event->mmap_mutex);
+		/*
+		 * Check we didn't race with perf_event_set_output() which can
+		 * swizzle the rb from under us while we were waiting to
+		 * acquire mmap_mutex.
+		 *
+		 * If we find a different rb; ignore this event, a next
+		 * iteration will no longer find it on the list. We have to
+		 * still restart the iteration to make sure we're not now
+		 * iterating the wrong list.
+		 */
+		if (event->rb == rb) {
+			rcu_assign_pointer(event->rb, NULL);
+			ring_buffer_detach(event, rb);
+			ring_buffer_put(rb); /* can't be last, we still have one */
+		}
+		mutex_unlock(&event->mmap_mutex);
+		put_event(event);
+
+		/*
+		 * Restart the iteration; either we're on the wrong list or
+		 * destroyed its integrity by doing a deletion.
+		 */
+		goto again;
+	}
+	rcu_read_unlock();
+
+	/*
+	 * It could be there's still a few 0-ref events on the list; they'll
+	 * get cleaned up by free_event() -- they'll also still have their
+	 * ref on the rb and will free it whenever they are done with it.
+	 *
+	 * Aside from that, this buffer is 'fully' detached and unmapped,
+	 * undo the VM accounting.
+	 */
+
+	atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+	vma->vm_mm->pinned_vm -= mmap_locked;
+	free_uid(mmap_user);
+
+	ring_buffer_put(rb); /* could be last */
 }

 static const struct vm_operations_struct perf_mmap_vmops = {
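The detach-all loop in perf_mmap_close() above works around a lock-ordering problem: rb::event_lock normally nests inside mmap_mutex, so the loop cannot take each event's mmap_mutex while still holding the list protection. The pattern is to pin the current element, drop the list protection, take the per-object lock, re-check the state, and then restart the walk from scratch. Below is a compact userspace sketch of that restartable iteration, using a plain mutex-protected singly linked list instead of RCU and entirely hypothetical names.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	atomic_int refcount;
	pthread_mutex_t lock;	/* normally nests *inside* list_lock */
	int attached;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void detach_all(void)
{
again:
	pthread_mutex_lock(&list_lock);
	for (struct node *n = head; n; n = n->next) {
		if (!n->attached)
			continue;

		/* Pin the node, then drop the list lock so we can take
		 * n->lock without inverting the usual lock order. */
		atomic_fetch_add(&n->refcount, 1);
		pthread_mutex_unlock(&list_lock);

		pthread_mutex_lock(&n->lock);
		if (n->attached)	/* re-check: we raced unlocked */
			n->attached = 0;
		pthread_mutex_unlock(&n->lock);

		atomic_fetch_sub(&n->refcount, 1);

		/* Our iteration position is stale; restart from the head,
		 * like the goto again in the hunk above. */
		goto again;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = calloc(1, sizeof(*n));

		pthread_mutex_init(&n->lock, NULL);
		atomic_init(&n->refcount, 1);
		n->attached = 1;
		n->next = head;
		head = n;
	}
	detach_all();
	for (struct node *n = head; n; n = n->next)
		printf("attached=%d\n", n->attached);
	return 0;
}

Each successful detach invalidates the iteration position, so the walk restarts until a full pass finds nothing left to do; termination follows because every restart strictly reduces the number of attached nodes.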
@@ perf_mmap() @@
-	rcu_assign_pointer(event->rb, rb);
+	atomic_set(&rb->mmap_count, 1);
+	rb->mmap_locked = extra;
+	rb->mmap_user = get_current_user();

 	atomic_long_add(user_extra, &user->locked_vm);
-	event->mmap_locked = extra;
-	event->mmap_user = get_current_user();
-	vma->vm_mm->pinned_vm += event->mmap_locked;
+	vma->vm_mm->pinned_vm += extra;
+
+	ring_buffer_attach(event, rb);
+	rcu_assign_pointer(event->rb, rb);

 	perf_event_update_userpage(event);
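perf_mmap() now charges the locked-memory accounting to the buffer (rb->mmap_count, rb->mmap_locked, rb->mmap_user) rather than to the event, so perf_mmap_close() can undo it even when the last unmap happens through a different event that redirected into the buffer. The charge itself is the data pages plus the one control page, which the close path undoes with the same "(size >> PAGE_SHIFT) + 1" expression; a worked example of the arithmetic, with the PAGE_SHIFT value assumed for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long nr_data_pages = 8;	/* caller mapped 8 data pages */
	unsigned long size = nr_data_pages * PAGE_SIZE;	/* perf_data_size() */
	unsigned long charged = (size >> PAGE_SHIFT) + 1; /* + control page */

	printf("data size: %lu bytes, pages charged: %lu\n", size, charged);
	return 0;
}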
@@ perf_event_set_output() @@
-	rcu_assign_pointer(event->rb, rb);
 	if (old_rb)
 		ring_buffer_detach(event, old_rb);
+
+	if (rb)
+		ring_buffer_attach(event, rb);
+
+	rcu_assign_pointer(event->rb, rb);
+
+	if (old_rb) {
+		ring_buffer_put(old_rb);
+		/*
+		 * Since we detached before setting the new rb, so that we
+		 * could attach the new rb, we could have missed a wakeup.
+		 * Provide it now.
+		 */
+		wake_up_all(&event->waitq);
+	}

 	mutex_unlock(&event->mmap_mutex);
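The reordering in perf_event_set_output() opens a short window in which the event is hooked to neither buffer, so a wakeup posted during that window would be lost; the wake_up_all() afterwards compensates by forcing every waiter to re-check its condition. A userspace sketch of that compensating-broadcast idea, with all names illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct waitq {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool ready;		/* waiters re-check this after waking */
};

/* Move a consumer from an old source to a new one. Between the detach and
 * the attach no notification reaches us, so broadcast once afterwards,
 * like the wake_up_all() in the hunk above. */
static void redirect(struct waitq *q, bool *old_hooked, bool *new_hooked)
{
	*old_hooked = false;	/* detach */
	*new_hooked = true;	/* attach */

	pthread_mutex_lock(&q->lock);
	q->ready = true;	/* anything missed: force a re-check */
	pthread_cond_broadcast(&q->cond);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct waitq q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	bool old_hooked = true, new_hooked = false;

	redirect(&q, &old_hooked, &new_hooked);
	printf("old=%d new=%d ready=%d\n", old_hooked, new_hooked, q.ready);
	return 0;
}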