~ubuntu-branches/ubuntu/raring/linux-ti-omap4/raring-proposed

Viewing changes to drivers/gpu/drm/i915/intel_display.c

  • Committer: Package Import Robot
  • Author(s): Paolo Pisati
  • Date: 2013-02-20 22:03:31 UTC
  • mfrom: (74.1.1 quantal-proposed)
  • Revision ID: package-import@ubuntu.com-20130220220331-0ea4l33x3cr61nch
Tags: 3.5.0-220.28
* Release Tracking Bug
  - LP: #1130311

[ Paolo Pisati ]

* rebased on Ubuntu-3.5.0-25.38

[ Ubuntu: 3.5.0-25.38 ]

* Release Tracking Bug
  - LP: #1129472
* ptrace: introduce signal_wake_up_state() and ptrace_signal_wake_up()
  - LP: #1119885, #1129192
  - CVE-2013-0871
* ptrace: ensure arch_ptrace/ptrace_request can never race with SIGKILL
  - LP: #1119885, #1129192
  - CVE-2013-0871
* wake_up_process() should be never used to wakeup a TASK_STOPPED/TRACED
  task
  - LP: #1119885, #1129192
  - CVE-2013-0871

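All three CVE-2013-0871 entries target the same race: a bare wake_up_process() can kick a tracee out of __TASK_TRACED while arch_ptrace()/ptrace_request() is still running against it. The first entry adds helpers so every signal-side wakeup states which sleep it is allowed to interrupt. A sketch of their shape, reconstructed from the upstream fix rather than quoted from this tree (the function names are real, the bodies are approximate):

    /* kernel/signal.c (sketch) */
    void signal_wake_up_state(struct task_struct *t, unsigned int state)
    {
            set_tsk_thread_flag(t, TIF_SIGPENDING);
            /*
             * TASK_WAKEKILL also means wake it up in the stopped/traced/
             * killable case. t->state is not checked here because of a race
             * with the task executing on another CPU, just now entering the
             * stopped state; wake_up_state() covers both sides of it.
             */
            if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
                    kick_process(t);
    }

    /* include/linux/sched.h (sketch) */
    static inline void signal_wake_up(struct task_struct *t, bool resume)
    {
            signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
    }

    static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
    {
            signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
    }

With these in place only ptrace_signal_wake_up() may pass __TASK_TRACED, so ordinary signal delivery can no longer yank a tracee out of the traced state mid-request, which is what the second and third entries rely on.
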
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2165,18 +2165,6 @@
                            FDI_FE_ERRC_ENABLE);
 }
 
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
-{
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        u32 flags = I915_READ(SOUTH_CHICKEN1);
-
-        flags |= FDI_PHASE_SYNC_OVR(pipe);
-        I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
-        flags |= FDI_PHASE_SYNC_EN(pipe);
-        I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
-        POSTING_READ(SOUTH_CHICKEN1);
-}
-
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -2327,9 +2315,6 @@
         POSTING_READ(reg);
         udelay(150);
 
-        if (HAS_PCH_CPT(dev))
-                cpt_phase_pointer_enable(dev, pipe);
-
         for (i = 0; i < 4; i++) {
                 reg = FDI_TX_CTL(pipe);
                 temp = I915_READ(reg);
@@ -2456,9 +2441,6 @@
         POSTING_READ(reg);
         udelay(150);
 
-        if (HAS_PCH_CPT(dev))
-                cpt_phase_pointer_enable(dev, pipe);
-
         for (i = 0; i < 4; i++) {
                 reg = FDI_TX_CTL(pipe);
                 temp = I915_READ(reg);
@@ -2572,17 +2554,6 @@
         }
 }
 
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        u32 flags = I915_READ(SOUTH_CHICKEN1);
-
-        flags &= ~(FDI_PHASE_SYNC_EN(pipe));
-        I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
-        flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
-        I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
-        POSTING_READ(SOUTH_CHICKEN1);
-}
 static void ironlake_fdi_disable(struct drm_crtc *crtc)
 {
         struct drm_device *dev = crtc->dev;
@@ -2609,11 +2580,6 @@
         /* Ironlake workaround, disable clock pointer after downing FDI */
         if (HAS_PCH_IBX(dev)) {
                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-                I915_WRITE(FDI_RX_CHICKEN(pipe),
-                           I915_READ(FDI_RX_CHICKEN(pipe) &
-                                     ~FDI_RX_PHASE_SYNC_POINTER_EN));
-        } else if (HAS_PCH_CPT(dev)) {
-                cpt_phase_pointer_disable(dev, pipe);
         }
 
         /* still set train pattern 1 */
@@ -3054,7 +3020,11 @@
                  * as some pre-programmed values are broken,
                  * e.g. x201.
                  */
-                I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+                if (IS_IVYBRIDGE(dev))
+                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+                                                 PF_PIPE_SEL_IVB(pipe));
+                else
+                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
                 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
         }
@@ -5872,14 +5842,19 @@
 {
         struct intel_unpin_work *work =
                 container_of(__work, struct intel_unpin_work, work);
+        struct drm_device *dev = work->crtc->dev;
 
-        mutex_lock(&work->dev->struct_mutex);
+        mutex_lock(&dev->struct_mutex);
         intel_unpin_fb_obj(work->old_fb_obj);
         drm_gem_object_unreference(&work->pending_flip_obj->base);
         drm_gem_object_unreference(&work->old_fb_obj->base);
 
-        intel_update_fbc(work->dev);
-        mutex_unlock(&work->dev->struct_mutex);
+        intel_update_fbc(dev);
+        mutex_unlock(&dev->struct_mutex);
+
+        BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+        atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
         kfree(work);
 }
 
@@ -5902,11 +5877,18 @@
 
         spin_lock_irqsave(&dev->event_lock, flags);
         work = intel_crtc->unpin_work;
-        if (work == NULL || !work->pending) {
+
+        /* Ensure we don't miss a work->pending update ... */
+        smp_rmb();
+
+        if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                 spin_unlock_irqrestore(&dev->event_lock, flags);
                 return;
         }
 
+        /* and that the unpin work is consistent wrt ->pending. */
+        smp_rmb();
+
         intel_crtc->unpin_work = NULL;
 
         if (work->event) {
@@ -5948,9 +5930,9 @@
 
         atomic_clear_mask(1 << intel_crtc->plane,
                           &obj->pending_flip.counter);
-
         wake_up(&dev_priv->pending_flip_queue);
-        schedule_work(&work->work);
+
+        queue_work(dev_priv->wq, &work->work);
 
         trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 }
@@ -5978,16 +5960,25 @@
                 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
         unsigned long flags;
 
+        /* NB: An MMIO update of the plane base pointer will also
+         * generate a page-flip completion irq, i.e. every modeset
+         * is also accompanied by a spurious intel_prepare_page_flip().
+         */
         spin_lock_irqsave(&dev->event_lock, flags);
-        if (intel_crtc->unpin_work) {
-                if ((++intel_crtc->unpin_work->pending) > 1)
-                        DRM_ERROR("Prepared flip multiple times\n");
-        } else {
-                DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
-        }
+        if (intel_crtc->unpin_work)
+                atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
         spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+        /* Ensure that the work item is consistent when activating it ... */
+        smp_wmb();
+        atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+        /* and that it is marked active as soon as the irq could fire. */
+        smp_wmb();
+}
+
 static int intel_gen2_queue_flip(struct drm_device *dev,
                                  struct drm_crtc *crtc,
                                  struct drm_framebuffer *fb,
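
The intel_mark_page_flip_active() helper added above pairs with the two smp_rmb() calls added to do_intel_finish_page_flip() in an earlier hunk: the producer fully writes the work item, issues a write barrier, then sets ->pending; the completion path reads ->pending first, issues a read barrier, then trusts the work item. A stand-alone C11 sketch of that publish/promote/consume discipline (the enum values and the payload field are assumptions for illustration, not taken from this tree):

    #include <stdatomic.h>

    enum { INTEL_FLIP_INACTIVE, INTEL_FLIP_PENDING, INTEL_FLIP_COMPLETE };

    struct unpin_work {
            int payload;              /* stands in for the flip bookkeeping */
            atomic_int pending;
    };

    /* Producer (queue_flip): fill the item, then publish it. The release
     * store plays the role of the smp_wmb()/atomic_set() pair above. */
    static void mark_active(struct unpin_work *w, int payload)
    {
            w->payload = payload;
            atomic_store_explicit(&w->pending, INTEL_FLIP_PENDING,
                                  memory_order_release);
    }

    /* Flip irq (prepare): promote PENDING to COMPLETE, but never revive an
     * inactive item -- loosely the atomic_inc_not_zero() in the hunk above. */
    static void prepare(struct unpin_work *w)
    {
            int expected = INTEL_FLIP_PENDING;
            atomic_compare_exchange_strong(&w->pending, &expected,
                                           INTEL_FLIP_COMPLETE);
    }

    /* Completion (finish): check the state with acquire ordering before
     * touching the fields -- the role of the smp_rmb() pair. */
    static int finish(struct unpin_work *w)
    {
            if (atomic_load_explicit(&w->pending, memory_order_acquire)
                < INTEL_FLIP_COMPLETE)
                    return -1;        /* flip not ready: bail as the driver does */
            return w->payload;
    }

The atomic_inc_not_zero() in intel_prepare_page_flip() matters because, as the new NB comment notes, a modeset can fire a spurious completion irq: refusing to bump a zeroed ->pending keeps such an irq from reviving a work item that was never armed.
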
@@ -6025,6 +6016,8 @@
         intel_ring_emit(ring, fb->pitches[0]);
         intel_ring_emit(ring, obj->gtt_offset + offset);
         intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+        intel_mark_page_flip_active(intel_crtc);
         intel_ring_advance(ring);
         return 0;
 
@@ -6069,6 +6062,7 @@
         intel_ring_emit(ring, obj->gtt_offset + offset);
         intel_ring_emit(ring, MI_NOOP);
 
+        intel_mark_page_flip_active(intel_crtc);
         intel_ring_advance(ring);
         return 0;
 
@@ -6113,6 +6107,8 @@
         pf = 0;
         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
         intel_ring_emit(ring, pf | pipesrc);
+
+        intel_mark_page_flip_active(intel_crtc);
         intel_ring_advance(ring);
         return 0;
 
@@ -6155,6 +6151,8 @@
         pf = 0;
         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
         intel_ring_emit(ring, pf | pipesrc);
+
+        intel_mark_page_flip_active(intel_crtc);
         intel_ring_advance(ring);
         return 0;
 
@@ -6209,6 +6207,8 @@
         intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
         intel_ring_emit(ring, (obj->gtt_offset));
         intel_ring_emit(ring, (MI_NOOP));
+
+        intel_mark_page_flip_active(intel_crtc);
         intel_ring_advance(ring);
         return 0;
 
@@ -6244,7 +6244,7 @@
                 return -ENOMEM;
 
         work->event = event;
-        work->dev = crtc->dev;
+        work->crtc = crtc;
         intel_fb = to_intel_framebuffer(crtc->fb);
         work->old_fb_obj = intel_fb->obj;
         INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -6269,6 +6269,9 @@
         intel_fb = to_intel_framebuffer(fb);
         obj = intel_fb->obj;
 
+        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+                flush_workqueue(dev_priv->wq);
+
         mutex_lock(&dev->struct_mutex);
 
         /* Reference the objects for the scheduled work. */
@@ -6285,6 +6288,7 @@
          * the flip occurs and the object is no longer visible.
          */
         atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+        atomic_inc(&intel_crtc->unpin_work_count);
 
         ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
         if (ret)
@@ -6299,6 +6303,7 @@
         return 0;
 
 cleanup_pending:
+        atomic_dec(&intel_crtc->unpin_work_count);
         atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
         drm_gem_object_unreference(&work->old_fb_obj->base);
         drm_gem_object_unreference(&obj->base);
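
Taken together, the unpin_work_count changes throttle page flips: the ioctl path increments the counter per queued flip, intel_unpin_work_fn() decrements it once the old framebuffer is unpinned, and a new flip waits via flush_workqueue() whenever two unpins are still outstanding. Switching the completion from schedule_work() to queue_work(dev_priv->wq, ...) in an earlier hunk is what gives that flush a driver-private queue to drain. A hypothetical stand-alone sketch of the same throttle pattern (all names here are illustrative, not from the driver):

    #include <linux/workqueue.h>
    #include <linux/atomic.h>

    static struct workqueue_struct *my_wq;  /* role of dev_priv->wq */
    static atomic_t inflight;               /* role of unpin_work_count */

    static void cleanup_fn(struct work_struct *work)
    {
            /* ... release buffers for the completed flip ... */
            atomic_dec(&inflight);
    }

    static bool queue_throttled(struct work_struct *work)
    {
            /* Cap in-flight cleanups: drain the queue past two pending. */
            if (atomic_read(&inflight) >= 2)
                    flush_workqueue(my_wq);

            atomic_inc(&inflight);
            return queue_work(my_wq, work);
    }
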
@@ -6974,6 +6979,23 @@
         }
 }
 
+void i915_redisable_vga(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 vga_reg;
+
+        if (HAS_PCH_SPLIT(dev))
+                vga_reg = CPU_VGACNTRL;
+        else
+                vga_reg = VGACNTRL;
+
+        if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+                DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+                I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+                POSTING_READ(vga_reg);
+        }
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;