~ubuntu-branches/ubuntu/wily/linux-ti-omap4/wily


Viewing changes to kernel/events/core.c

  • Committer: Package Import Robot
  • Author(s): Paolo Pisati
  • Date: 2013-07-11 18:35:20 UTC
  • Revision ID: package-import@ubuntu.com-20130711183520-htnf1x4y5r11hndr
Tags: 3.5.0-229.42
* Release Tracking Bug
  - LP: #1199276

[ Paolo Pisati ]

* [Config] CONFIG_ATH9K_LEGACY_RATE_CONTROL is not set

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -193,9 +193,6 @@
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
-static void ring_buffer_attach(struct perf_event *event,
-                               struct ring_buffer *rb);
-
 void __weak perf_event_print_debug(void)        { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2853,6 +2850,7 @@
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
 static void free_event(struct perf_event *event)
 {
@@ -2877,15 +2875,30 @@
                 if (has_branch_stack(event)) {
                         static_key_slow_dec_deferred(&perf_sched_events);
                         /* is system-wide event */
-                        if (!(event->attach_state & PERF_ATTACH_TASK))
+                        if (!(event->attach_state & PERF_ATTACH_TASK)) {
                                 atomic_dec(&per_cpu(perf_branch_stack_events,
                                                     event->cpu));
+                        }
                 }
         }
 
         if (event->rb) {
-                ring_buffer_put(event->rb);
-                event->rb = NULL;
+                struct ring_buffer *rb;
+
+                /*
+                 * Can happen when we close an event with re-directed output.
+                 *
+                 * Since we have a 0 refcount, perf_mmap_close() will skip
+                 * over us; possibly making our ring_buffer_put() the last.
+                 */
+                mutex_lock(&event->mmap_mutex);
+                rb = event->rb;
+                if (rb) {
+                        rcu_assign_pointer(event->rb, NULL);
+                        ring_buffer_detach(event, rb);
+                        ring_buffer_put(rb); /* could be last */
+                }
+                mutex_unlock(&event->mmap_mutex);
         }
 
         if (is_cgroup_event(event))
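
The reworked free_event() path above tears the buffer down in a fixed order under mmap_mutex: unpublish event->rb, detach from the buffer's waiter list, then drop the reference, which may be the last one. The following is a minimal user-space sketch of that ordering, assuming made-up buffer/holder types and helpers (buffer_put(), holder_teardown()) rather than the kernel APIs:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer {
        atomic_int refcount;            /* analogous to rb->refcount        */
};

struct holder {
        struct buffer *buf;             /* analogous to event->rb           */
        pthread_mutex_t lock;           /* analogous to event->mmap_mutex   */
};

static void buffer_put(struct buffer *b)
{
        /* Free only on the last reference, like ring_buffer_put(). */
        if (atomic_fetch_sub(&b->refcount, 1) == 1)
                free(b);
}

static void holder_teardown(struct holder *h)
{
        struct buffer *b;

        pthread_mutex_lock(&h->lock);
        b = h->buf;
        if (b) {
                h->buf = NULL;          /* 1: unpublish the pointer          */
                                        /* 2: detaching from a waiter list   */
                                        /*    would happen here              */
                buffer_put(b);          /* 3: drop the ref; could be last    */
        }
        pthread_mutex_unlock(&h->lock);
}

int main(void)
{
        struct holder h = { .lock = PTHREAD_MUTEX_INITIALIZER };

        h.buf = calloc(1, sizeof(*h.buf));
        atomic_init(&h.buf->refcount, 1);

        holder_teardown(&h);
        printf("buffer released exactly once\n");
        return 0;
}

Clearing the published pointer before the final put ensures no other path can pick up a new reference through the holder once teardown has begun.
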
@@ -3123,30 +3136,13 @@
         unsigned int events = POLL_HUP;
 
         /*
-         * Race between perf_event_set_output() and perf_poll(): perf_poll()
-         * grabs the rb reference but perf_event_set_output() overrides it.
-         * Here is the timeline for two threads T1, T2:
-         * t0: T1, rb = rcu_dereference(event->rb)
-         * t1: T2, old_rb = event->rb
-         * t2: T2, event->rb = new rb
-         * t3: T2, ring_buffer_detach(old_rb)
-         * t4: T1, ring_buffer_attach(rb1)
-         * t5: T1, poll_wait(event->waitq)
-         *
-         * To avoid this problem, we grab mmap_mutex in perf_poll()
-         * thereby ensuring that the assignment of the new ring buffer
-         * and the detachment of the old buffer appear atomic to perf_poll()
+         * Pin the event->rb by taking event->mmap_mutex; otherwise
+         * perf_event_set_output() can swizzle our rb and make us miss wakeups.
          */
         mutex_lock(&event->mmap_mutex);
-
-        rcu_read_lock();
-        rb = rcu_dereference(event->rb);
-        if (rb) {
-                ring_buffer_attach(event, rb);
+        rb = event->rb;
+        if (rb)
                 events = atomic_xchg(&rb->poll, 0);
-        }
-        rcu_read_unlock();
-
         mutex_unlock(&event->mmap_mutex);
 
         poll_wait(file, &event->waitq, wait);
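
With the RCU dance gone, perf_poll() just reads event->rb under mmap_mutex and consumes the pending bits with atomic_xchg(&rb->poll, 0), so each posted bit is reported once and then cleared. A small sketch of that consume-and-clear step in plain C11, with invented bit names and an atomic_uint standing in for rb->poll:

#include <stdatomic.h>
#include <stdio.h>

#define IN_BIT  0x1u            /* invented stand-ins for poll flags */
#define HUP_BIT 0x10u

static atomic_uint pending;     /* analogous to rb->poll */

static void post_events(unsigned int bits)
{
        atomic_fetch_or(&pending, bits);        /* producer side: set bits */
}

static unsigned int consume_events(void)
{
        /* Take every pending bit and clear them in one atomic step,
         * which is what atomic_xchg(&rb->poll, 0) does above. */
        return atomic_exchange(&pending, 0);
}

int main(void)
{
        post_events(IN_BIT);
        post_events(HUP_BIT);
        printf("first poll:  %#x\n", consume_events());  /* 0x11 */
        printf("second poll: %#x\n", consume_events());  /* 0    */
        return 0;
}
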
@@ -3462,16 +3458,12 @@
                 return;
 
         spin_lock_irqsave(&rb->event_lock, flags);
-        if (!list_empty(&event->rb_entry))
-                goto unlock;
-
-        list_add(&event->rb_entry, &rb->event_list);
-unlock:
+        if (list_empty(&event->rb_entry))
+                list_add(&event->rb_entry, &rb->event_list);
         spin_unlock_irqrestore(&rb->event_lock, flags);
 }
 
-static void ring_buffer_detach(struct perf_event *event,
-                               struct ring_buffer *rb)
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
 {
         unsigned long flags;
 
@@ -3490,13 +3482,10 @@
 
         rcu_read_lock();
         rb = rcu_dereference(event->rb);
-        if (!rb)
-                goto unlock;
-
-        list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
-                wake_up_all(&event->waitq);
-
-unlock:
+        if (rb) {
+                list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+                        wake_up_all(&event->waitq);
+        }
         rcu_read_unlock();
 }
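
ring_buffer_attach() is now idempotent: the event is linked only if list_empty(&event->rb_entry) reports it is not already on the list, so a second attach under rb->event_lock is a no-op. A rough user-space analog of that guarded insert, using a hand-rolled circular list and invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
        struct node *next, *prev;
};

static struct node event_list = { &event_list, &event_list }; /* empty ring */
static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;

static bool node_unlinked(struct node *n)
{
        /* mirrors list_empty(&event->rb_entry) on a self-pointing node */
        return n->next == n;
}

static void attach(struct node *n)
{
        pthread_mutex_lock(&event_lock);
        if (node_unlinked(n)) {                 /* second attach is a no-op */
                n->next = event_list.next;
                n->prev = &event_list;
                event_list.next->prev = n;
                event_list.next = n;
        }
        pthread_mutex_unlock(&event_lock);
}

int main(void)
{
        struct node ev = { &ev, &ev };          /* starts unlinked */

        attach(&ev);
        attach(&ev);                            /* idempotent */

        int count = 0;
        for (struct node *p = event_list.next; p != &event_list; p = p->next)
                count++;
        printf("entries on list: %d\n", count); /* prints 1 */
        return 0;
}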
 
@@ -3525,18 +3514,10 @@
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
-        struct perf_event *event, *n;
-        unsigned long flags;
-
         if (!atomic_dec_and_test(&rb->refcount))
                 return;
 
-        spin_lock_irqsave(&rb->event_lock, flags);
-        list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
-                list_del_init(&event->rb_entry);
-                wake_up_all(&event->waitq);
-        }
-        spin_unlock_irqrestore(&rb->event_lock, flags);
+        WARN_ON_ONCE(!list_empty(&rb->event_list));
 
         call_rcu(&rb->rcu_head, rb_free_rcu);
 }
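
After the rework, ring_buffer_put() no longer walks or wakes waiters; by the time the last reference drops, every event has already been detached, and the WARN_ON_ONCE() asserts exactly that. A toy illustration of checking such an invariant on the final put (the names are invented and assert() stands in for WARN_ON_ONCE()):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins: 'waiters' models rb->event_list. */
static atomic_int refcount;
static int        waiters;      /* how many events are still attached */

static bool dec_and_test(atomic_int *v)
{
        /* True only for the caller that drops the final reference,
         * mirroring atomic_dec_and_test(&rb->refcount). */
        return atomic_fetch_sub(v, 1) == 1;
}

static void buffer_put(void)
{
        if (!dec_and_test(&refcount))
                return;

        /* All detaching happened on the free_event()/perf_mmap_close()
         * paths, so nobody may still be parked on this buffer. */
        assert(waiters == 0);
        printf("last put: safe to free\n");
}

int main(void)
{
        atomic_init(&refcount, 2);
        buffer_put();           /* not last: returns early    */
        buffer_put();           /* last: invariant is checked */
        return 0;
}
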
@@ -3546,26 +3527,100 @@
         struct perf_event *event = vma->vm_file->private_data;
 
         atomic_inc(&event->mmap_count);
+        atomic_inc(&event->rb->mmap_count);
 }
 
+/*
+ * A buffer can be mmap()ed multiple times; either directly through the same
+ * event, or through other events by use of perf_event_set_output().
+ *
+ * In order to undo the VM accounting done by perf_mmap() we need to destroy
+ * the buffer here, where we still have a VM context. This means we need
+ * to detach all events redirecting to us.
+ */
 static void perf_mmap_close(struct vm_area_struct *vma)
 {
         struct perf_event *event = vma->vm_file->private_data;
 
-        if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-                unsigned long size = perf_data_size(event->rb);
-                struct user_struct *user = event->mmap_user;
-                struct ring_buffer *rb = event->rb;
-
-                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-                vma->vm_mm->pinned_vm -= event->mmap_locked;
-                rcu_assign_pointer(event->rb, NULL);
-                ring_buffer_detach(event, rb);
+        struct ring_buffer *rb = event->rb;
+        struct user_struct *mmap_user = rb->mmap_user;
+        int mmap_locked = rb->mmap_locked;
+        unsigned long size = perf_data_size(rb);
+
+        atomic_dec(&rb->mmap_count);
+
+        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+                return;
+
+        /* Detach current event from the buffer. */
+        rcu_assign_pointer(event->rb, NULL);
+        ring_buffer_detach(event, rb);
+        mutex_unlock(&event->mmap_mutex);
+
+        /* If there's still other mmap()s of this buffer, we're done. */
+        if (atomic_read(&rb->mmap_count)) {
+                ring_buffer_put(rb); /* can't be last */
+                return;
+        }
+
+        /*
+         * No other mmap()s, detach from all other events that might redirect
+         * into the now unreachable buffer. Somewhat complicated by the
+         * fact that rb::event_lock otherwise nests inside mmap_mutex.
+         */
+again:
+        rcu_read_lock();
+        list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+                if (!atomic_long_inc_not_zero(&event->refcount)) {
+                        /*
+                         * This event is en-route to free_event() which will
+                         * detach it and remove it from the list.
+                         */
+                        continue;
+                }
+                rcu_read_unlock();
+
+                mutex_lock(&event->mmap_mutex);
+                /*
+                 * Check we didn't race with perf_event_set_output() which can
+                 * swizzle the rb from under us while we were waiting to
+                 * acquire mmap_mutex.
+                 *
+                 * If we find a different rb; ignore this event, a next
+                 * iteration will no longer find it on the list. We have to
+                 * still restart the iteration to make sure we're not now
+                 * iterating the wrong list.
+                 */
+                if (event->rb == rb) {
+                        rcu_assign_pointer(event->rb, NULL);
+                        ring_buffer_detach(event, rb);
+                        ring_buffer_put(rb); /* can't be last, we still have one */
+                }
                 mutex_unlock(&event->mmap_mutex);
+                put_event(event);
 
-                ring_buffer_put(rb);
-                free_uid(user);
+                /*
+                 * Restart the iteration; either we're on the wrong list or
+                 * destroyed its integrity by doing a deletion.
+                 */
+                goto again;
         }
+        rcu_read_unlock();
+
+        /*
+         * It could be there's still a few 0-ref events on the list; they'll
+         * get cleaned up by free_event() -- they'll also still have their
+         * ref on the rb and will free it whenever they are done with it.
+         *
+         * Aside from that, this buffer is 'fully' detached and unmapped,
+         * undo the VM accounting.
+         */
+
+        atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+        vma->vm_mm->pinned_vm -= mmap_locked;
+        free_uid(mmap_user);
+
+        ring_buffer_put(rb); /* could be last */
 }
 
 static const struct vm_operations_struct perf_mmap_vmops = {
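
The detach loop above grabs a temporary reference on each listed event with atomic_long_inc_not_zero(&event->refcount); the increment succeeds only while the count has not reached zero, so events already on their way into free_event() are skipped instead of being resurrected. Below is a sketch of that "get unless zero" primitive as a portable C11 CAS loop (get_unless_zero() is an invented name, not the kernel helper):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool get_unless_zero(atomic_long *refcount)
{
        long old = atomic_load(refcount);

        while (old != 0) {
                /* On failure 'old' is reloaded with the current value,
                 * so the loop re-checks the zero case before retrying. */
                if (atomic_compare_exchange_weak(refcount, &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;                   /* object is already being freed */
}

int main(void)
{
        atomic_long live, dying;

        atomic_init(&live, 1);
        atomic_init(&dying, 0);

        printf("live:  %s\n", get_unless_zero(&live)  ? "got ref" : "skipped");
        printf("dying: %s\n", get_unless_zero(&dying) ? "got ref" : "skipped");
        return 0;
}

Note how the loop in perf_mmap_close() also restarts from the again: label after each successful detach, because dropping rcu_read_lock() to sleep on mmap_mutex invalidates its position in the RCU list.
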
@@ -3615,12 +3670,24 @@
                 return -EINVAL;
 
         WARN_ON_ONCE(event->ctx->parent_ctx);
+again:
         mutex_lock(&event->mmap_mutex);
         if (event->rb) {
-                if (event->rb->nr_pages == nr_pages)
-                        atomic_inc(&event->rb->refcount);
-                else
+                if (event->rb->nr_pages != nr_pages) {
                         ret = -EINVAL;
+                        goto unlock;
+                }
+
+                if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
+                        /*
+                         * Raced against perf_mmap_close() through
+                         * perf_event_set_output(). Try again, hope for better
+                         * luck.
+                         */
+                        mutex_unlock(&event->mmap_mutex);
+                        goto again;
+                }
+
                 goto unlock;
         }
 
@@ -3661,12 +3728,16 @@
                 ret = -ENOMEM;
                 goto unlock;
         }
-        rcu_assign_pointer(event->rb, rb);
+
+        atomic_set(&rb->mmap_count, 1);
+        rb->mmap_locked = extra;
+        rb->mmap_user = get_current_user();
 
         atomic_long_add(user_extra, &user->locked_vm);
-        event->mmap_locked = extra;
-        event->mmap_user = get_current_user();
-        vma->vm_mm->pinned_vm += event->mmap_locked;
+        vma->vm_mm->pinned_vm += extra;
+
+        ring_buffer_attach(event, rb);
+        rcu_assign_pointer(event->rb, rb);
 
         perf_event_update_userpage(event);
 
@@ -3675,7 +3746,11 @@
                 atomic_inc(&event->mmap_count);
         mutex_unlock(&event->mmap_mutex);
 
-        vma->vm_flags |= VM_RESERVED;
+        /*
+         * Since pinned accounting is per vm we cannot allow fork() to copy our
+         * vma.
+         */
+        vma->vm_flags |= VM_DONTCOPY | VM_RESERVED;
         vma->vm_ops = &perf_mmap_vmops;
 
         return ret;
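
One ordering detail in the new perf_mmap(): the buffer's ownership fields (mmap_count, mmap_locked, mmap_user) are filled in and the event is attached before rcu_assign_pointer() publishes event->rb, so any reader that sees the pointer also sees a fully initialized buffer. A minimal sketch of that initialize-then-publish pattern with C11 release/acquire atomics (types and names are illustrative; rcu_assign_pointer() provides the release ordering in the kernel):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer {
        atomic_int mmap_count;
        long       mmap_locked;
};

static _Atomic(struct buffer *) published;      /* analogous to event->rb */

static void install_buffer(long extra)
{
        struct buffer *rb = calloc(1, sizeof(*rb));

        /* Initialize everything before anyone can see the pointer. */
        atomic_init(&rb->mmap_count, 1);
        rb->mmap_locked = extra;

        /* Release store: a reader that observes 'published' also
         * observes the initialized fields above. */
        atomic_store_explicit(&published, rb, memory_order_release);
}

int main(void)
{
        install_buffer(4);

        struct buffer *rb = atomic_load_explicit(&published, memory_order_acquire);
        if (rb)
                printf("mmap_count=%d mmap_locked=%ld\n",
                       atomic_load(&rb->mmap_count), rb->mmap_locked);
        free(rb);
        return 0;
}
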
@@ -6164,6 +6239,8 @@
         if (atomic_read(&event->mmap_count))
                 goto unlock;
 
+        old_rb = event->rb;
+
         if (output_event) {
                 /* get the rb we want to redirect to */
                 rb = ring_buffer_get(output_event);
@@ -6171,16 +6248,28 @@
                         goto unlock;
         }
 
-        old_rb = event->rb;
-        rcu_assign_pointer(event->rb, rb);
         if (old_rb)
                 ring_buffer_detach(event, old_rb);
-        ret = 0;
-unlock:
-        mutex_unlock(&event->mmap_mutex);
-
-        if (old_rb)
+
+        if (rb)
+                ring_buffer_attach(event, rb);
+
+        rcu_assign_pointer(event->rb, rb);
+
+        if (old_rb) {
                 ring_buffer_put(old_rb);
+                /*
+                 * Since we detached before setting the new rb, so that we
+                 * could attach the new rb, we could have missed a wakeup.
+                 * Provide it now.
+                 */
+                wake_up_all(&event->waitq);
+        }
+
+        ret = 0;
+unlock:
+        mutex_unlock(&event->mmap_mutex);
+
 out:
         return ret;
 }
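
Finally, perf_event_set_output() now detaches from the old buffer before attaching and publishing the new one, and because a wakeup aimed at the old buffer could land in that window, it issues an unconditional wake_up_all() whenever an old buffer existed. A rough pthread analog of that switch-then-wake sequence (struct sink, redirect() and the condition variable are stand-ins for the perf event, not kernel interfaces):

#include <pthread.h>
#include <stdio.h>

struct buffer { int id; };

struct sink {
        pthread_mutex_t lock;           /* analogous to event->mmap_mutex */
        pthread_cond_t  wait;           /* analogous to event->waitq      */
        struct buffer  *rb;             /* current output buffer          */
};

static void redirect(struct sink *s, struct buffer *new_rb)
{
        struct buffer *old_rb;

        pthread_mutex_lock(&s->lock);
        old_rb = s->rb;
        /* detaching from old_rb's waiter list would happen here */
        s->rb = new_rb;                 /* attach/publish the new buffer */
        if (old_rb) {
                /* A wakeup posted to old_rb during the switch may have
                 * been missed; provide one now, like wake_up_all(). */
                pthread_cond_broadcast(&s->wait);
        }
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct buffer a = { .id = 1 }, b = { .id = 2 };
        struct sink s = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .wait = PTHREAD_COND_INITIALIZER,
                .rb   = &a,
        };

        redirect(&s, &b);
        printf("now writing to buffer %d\n", s.rb->id);
        return 0;
}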