1
/*****************************************************************************
2
* VIA Unichrome XvMC extension client lib.
4
* Copyright (c) 2004 Thomas Hellström. All rights reserved.
5
* Copyright (c) 2003 Andreas Robinson. All rights reserved.
7
* Permission is hereby granted, free of charge, to any person obtaining a
8
* copy of this software and associated documentation files (the "Software"),
9
* to deal in the Software without restriction, including without limitation
10
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
11
* and/or sell copies of the Software, and to permit persons to whom the
12
* Software is furnished to do so, subject to the following conditions:
14
* The above copyright notice and this permission notice shall be included in
15
* all copies or substantial portions of the Software.
17
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20
* AUTHOR(S) OR COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23
* DEALINGS IN THE SOFTWARE.
27
* Low-level functions that deal directly with the hardware. In the future,
28
* these functions might be implemented in a kernel module. Also, some of them
29
* would benefit from DMA.
31
* Authors: Andreas Robinson 2003. Thomas Hellström 2004.
34
#include "viaXvMCPriv.h"
35
#include "viaLowLevel.h"
42
CARD32 agp_buffer[LL_AGP_CMDBUF_SIZE];
43
CARD32 pci_buffer[LL_PCI_CMDBUF_SIZE];
52
drm_context_t *drmcontext;
54
drmAddress mmioAddress;
60
unsigned curWaitFlags;
67
CARD32 lastReadTimeStamp;
69
CARD32 agpSyncTimeStamp;
74
* For Other architectures than i386 these might have to be modified for
78
#define MPEGIN(xl,reg) \
79
*((volatile CARD32 *)(((CARD8 *)(xl)->mmioAddress) + 0xc00 + (reg)))
81
#define VIDIN(ctx,reg) \
82
*((volatile CARD32 *)(((CARD8 *)(ctx)->mmioAddress) + (reg)))
84
#define REGIN(ctx,reg) \
85
*((volatile CARD32 *)(((CARD8 *)(ctx)->mmioAddress) + 0x0000 + (reg)))
87
#define HQV_CONTROL 0x3D0
88
#define HQV_SRC_STARTADDR_Y 0x3D4
89
#define HQV_SRC_STARTADDR_U 0x3D8
90
#define HQV_SRC_STARTADDR_V 0x3DC
91
#define HQV_MINIFY_DEBLOCK 0x3E8
93
#define HQV_SW_FLIP 0x00000010
94
#define HQV_FLIP_STATUS 0x00000001
95
#define HQV_SUBPIC_FLIP 0x00008000
96
#define HQV_FLIP_ODD 0x00000020
97
#define HQV_DEINTERLACE 0x00010000
98
#define HQV_FIELD_2_FRAME 0x00020000
99
#define HQV_FRAME_2_FIELD 0x00040000
100
#define HQV_FIELD_UV 0x00100000
101
#define HQV_DEBLOCK_HOR 0x00008000
102
#define HQV_DEBLOCK_VER 0x80000000
104
#define V_COMPOSE_MODE 0x298
105
#define V1_COMMAND_FIRE 0x80000000
106
#define V3_COMMAND_FIRE 0x40000000
108
/* SUBPICTURE Registers */
109
#define SUBP_CONTROL_STRIDE 0x3C0
110
#define SUBP_STARTADDR 0x3C4
111
#define RAM_TABLE_CONTROL 0x3C8
112
#define RAM_TABLE_READ 0x3CC
114
/* SUBP_CONTROL_STRIDE 0x3c0 */
115
#define SUBP_HQV_ENABLE 0x00010000
116
#define SUBP_IA44 0x00020000
117
#define SUBP_AI44 0x00000000
118
#define SUBP_STRIDE_MASK 0x00001fff
119
#define SUBP_CONTROL_MASK 0x00070000
121
/* RAM_TABLE_CONTROL 0x3c8 */
122
#define RAM_TABLE_RGB_ENABLE 0x00000007
124
#define VIA_REG_STATUS 0x400
125
#define VIA_REG_GEMODE 0x004
126
#define VIA_REG_SRCBASE 0x030
127
#define VIA_REG_DSTBASE 0x034
128
#define VIA_REG_PITCH 0x038
129
#define VIA_REG_SRCCOLORKEY 0x01C
130
#define VIA_REG_KEYCONTROL 0x02C
131
#define VIA_REG_SRCPOS 0x008
132
#define VIA_REG_DSTPOS 0x00C
133
#define VIA_REG_GECMD 0x000
134
#define VIA_REG_DIMENSION 0x010 /* width and height */
135
#define VIA_REG_FGCOLOR 0x018
137
#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
138
#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
139
#define VIA_2D_ENG_BUSY 0x00000002 /* 2D Engine is busy */
140
#define VIA_3D_ENG_BUSY 0x00000001 /* 3D Engine is busy */
141
#define VIA_GEM_8bpp 0x00000000
142
#define VIA_GEM_16bpp 0x00000100
143
#define VIA_GEM_32bpp 0x00000300
144
#define VIA_GEC_BLT 0x00000001
145
#define VIA_PITCH_ENABLE 0x80000000
146
#define VIA_GEC_INCX 0x00000000
147
#define VIA_GEC_DECY 0x00004000
148
#define VIA_GEC_INCY 0x00000000
149
#define VIA_GEC_DECX 0x00008000
150
#define VIA_GEC_FIXCOLOR_PAT 0x00002000
152
#define VIA_BLIT_CLEAR 0x00
153
#define VIA_BLIT_COPY 0xCC
154
#define VIA_BLIT_FILL 0xF0
155
#define VIA_BLIT_SET 0xFF
157
#define VIA_SYNCWAITTIMEOUT 50000 /* Might be a bit conservative */
158
#define VIA_DMAWAITTIMEOUT 150000
159
#define VIA_VIDWAITTIMEOUT 50000
160
#define VIA_XVMC_DECODERTIMEOUT 50000 /*(microseconds) */
162
#define H1_ADDR(val) (((val) >> 2) | 0xF0000000)
163
#define WAITFLAGS(xl, flags) \
164
(xl)->curWaitFlags |= (flags)
165
#define BEGIN_RING_AGP(xl,size) \
167
if ((xl)->agp_pos > (LL_AGP_CMDBUF_SIZE-(size))) { \
171
#define OUT_RING_AGP(xl, val) \
172
(xl)->agp_buffer[(xl)->agp_pos++] = (val)
173
#define OUT_RING_QW_AGP(xl, val1, val2) \
175
(xl)->agp_buffer[(xl)->agp_pos++] = (val1); \
176
(xl)->agp_buffer[(xl)->agp_pos++] = (val2); \
179
#define LL_HW_LOCK(xl) \
181
DRM_LOCK((xl)->fd,(xl)->hwLock,*(xl)->drmcontext,0); \
183
#define LL_HW_UNLOCK(xl) \
185
DRM_UNLOCK((xl)->fd,(xl)->hwLock,*(xl)->drmcontext); \
189
* We want to have two concurrent types of thread taking the hardware
190
* lock simulataneously. One is the video out thread that needs immediate
191
* access to flip an image. The other is everything else which may have
192
* the lock for quite some time. This is only so the video out thread can
193
* sneak in and display an image while other resources are busy.
197
hwlLock(void *xlp, int videoLock)
199
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
205
hwlUnlock(void *xlp, int videoLock)
207
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
213
timeDiff(struct timeval *now, struct timeval *then)
215
return (now->tv_usec >= then->tv_usec) ?
216
now->tv_usec - then->tv_usec :
217
1000000 - (then->tv_usec - now->tv_usec);
221
setAGPSyncLowLevel(void *xlp, int val, CARD32 timeStamp)
223
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
226
xl->agpSyncTimeStamp = timeStamp;
230
viaDMATimeStampLowLevel(void *xlp)
232
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
235
viaBlit(xl, 32, xl->tsOffset, 1, xl->tsOffset, 1, 1, 1, 0, 0,
236
VIABLIT_FILL, xl->curTimeStamp);
237
return xl->curTimeStamp++;
243
viaDMAWaitTimeStamp(XvMCLowLevel * xl, CARD32 timeStamp, int doSleep)
245
struct timeval now, then;
246
struct timezone here;
247
struct timespec sleep, rem;
249
if (xl->use_agp && (timeStamp > xl->lastReadTimeStamp)) {
252
here.tz_minuteswest = 0;
254
gettimeofday(&then, &here);
256
while (timeStamp > (xl->lastReadTimeStamp = *xl->tsP)) {
257
gettimeofday(&now, &here);
258
if (timeDiff(&now, &then) > VIA_DMAWAITTIMEOUT) {
259
if ((timeStamp > (xl->lastReadTimeStamp = *xl->tsP))) {
260
xl->errors |= LL_DMA_TIMEDOUT;
265
nanosleep(&sleep, &rem);
271
viaDMAInitTimeStamp(XvMCLowLevel * xl)
276
xl->tsMem.context = *(xl->drmcontext);
278
xl->tsMem.type = VIA_MEM_VIDEO;
279
if ((ret = drmCommandWriteRead(xl->fd, DRM_VIA_ALLOCMEM, &xl->tsMem,
280
sizeof(xl->tsMem))) < 0)
282
if (xl->tsMem.size != 64)
284
xl->tsOffset = (xl->tsMem.offset + 31) & ~31;
285
xl->tsP = (CARD32 *) xl->fbAddress + (xl->tsOffset >> 2);
286
xl->curTimeStamp = 1;
293
viaDMACleanupTimeStamp(XvMCLowLevel * xl)
296
if (!(xl->tsMem.size) || !xl->use_agp)
298
return drmCommandWrite(xl->fd, DRM_VIA_FREEMEM, &xl->tsMem,
303
viaMpegGetStatus(XvMCLowLevel * xl)
305
return MPEGIN(xl, 0x54);
309
viaMpegIsBusy(XvMCLowLevel * xl, CARD32 mask, CARD32 idle)
311
CARD32 tmp = viaMpegGetStatus(xl);
315
* FIXME: Are errors really shown when error concealment is on?
321
return (tmp & mask) != idle;
325
syncDMA(XvMCLowLevel * xl, unsigned int doSleep)
329
* Ideally, we'd like to have an interrupt wait here, but, according to second hand
330
* information, the hardware does not support this, although earlier S3 chips do that.
331
* It is therefore not implemented into the DRM, and we'll do a user space wait here.
334
struct timeval now, then;
335
struct timezone here;
336
struct timespec sleep, rem;
340
here.tz_minuteswest = 0;
342
gettimeofday(&then, &here);
343
while (!(REGIN(xl, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY)) {
344
gettimeofday(&now, &here);
345
if (timeDiff(&now, &then) > VIA_DMAWAITTIMEOUT) {
346
if (!(REGIN(xl, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY)) {
347
xl->errors |= LL_DMA_TIMEDOUT;
352
nanosleep(&sleep, &rem);
354
while (REGIN(xl, VIA_REG_STATUS) & VIA_CMD_RGTR_BUSY) {
355
gettimeofday(&now, &here);
356
if (timeDiff(&now, &then) > VIA_DMAWAITTIMEOUT) {
357
if (REGIN(xl, VIA_REG_STATUS) & VIA_CMD_RGTR_BUSY) {
358
xl->errors |= LL_DMA_TIMEDOUT;
363
nanosleep(&sleep, &rem);
368
syncVideo(XvMCLowLevel * xl, unsigned int doSleep)
371
* Wait for HQV completion. Nothing strange here. We assume that the HQV
372
* Handles syncing to the V1 / V3 engines by itself. It should be safe to
373
* always wait for SUBPIC_FLIP completion although subpictures are not
377
struct timeval now, then;
378
struct timezone here;
379
struct timespec sleep, rem;
383
here.tz_minuteswest = 0;
385
gettimeofday(&then, &here);
386
while (VIDIN(xl, HQV_CONTROL) & (HQV_SW_FLIP | HQV_SUBPIC_FLIP)) {
387
gettimeofday(&now, &here);
388
if (timeDiff(&now, &then) > VIA_SYNCWAITTIMEOUT) {
389
if (VIDIN(xl, HQV_CONTROL) & (HQV_SW_FLIP | HQV_SUBPIC_FLIP)) {
390
xl->errors |= LL_VIDEO_TIMEDOUT;
395
nanosleep(&sleep, &rem);
400
syncAccel(XvMCLowLevel * xl, unsigned int mode, unsigned int doSleep)
402
struct timeval now, then;
403
struct timezone here;
404
struct timespec sleep, rem;
405
CARD32 mask = ((mode & LL_MODE_2D) ? VIA_2D_ENG_BUSY : 0) |
406
((mode & LL_MODE_3D) ? VIA_3D_ENG_BUSY : 0);
410
here.tz_minuteswest = 0;
412
gettimeofday(&then, &here);
413
while (REGIN(xl, VIA_REG_STATUS) & mask) {
414
gettimeofday(&now, &here);
415
if (timeDiff(&now, &then) > VIA_SYNCWAITTIMEOUT) {
416
if (REGIN(xl, VIA_REG_STATUS) & mask) {
417
xl->errors |= LL_ACCEL_TIMEDOUT;
422
nanosleep(&sleep, &rem);
427
syncMpeg(XvMCLowLevel * xl, unsigned int mode, unsigned int doSleep)
430
* Ideally, we'd like to have an interrupt wait here, but from information from VIA
431
* at least the MPEG completion interrupt is broken on the CLE266, which was
432
* discovered during validation of the chip.
435
struct timeval now, then;
436
struct timezone here;
437
struct timespec sleep, rem;
444
here.tz_minuteswest = 0;
446
gettimeofday(&then, &here);
447
if (mode & LL_MODE_DECODER_SLICE) {
448
busyMask = VIA_SLICEBUSYMASK;
449
idleVal = VIA_SLICEIDLEVAL;
451
if (mode & LL_MODE_DECODER_IDLE) {
452
busyMask |= VIA_BUSYMASK;
453
idleVal = VIA_IDLEVAL;
455
while (viaMpegIsBusy(xl, busyMask, idleVal)) {
456
gettimeofday(&now, &here);
457
if (timeDiff(&now, &then) > VIA_XVMC_DECODERTIMEOUT) {
458
if (viaMpegIsBusy(xl, busyMask, idleVal)) {
459
xl->errors |= LL_DECODER_TIMEDOUT;
464
nanosleep(&sleep, &rem);
467
ret = viaMpegGetStatus(xl);
469
xl->errors |= ((ret & 0x70) >> 3);
475
pciFlush(XvMCLowLevel * xl)
478
drm_via_cmdbuffer_t b;
479
unsigned mode = xl->curWaitFlags;
481
b.buf = (char *)xl->pci_buffer;
482
b.size = xl->pci_pos * sizeof(CARD32);
483
if (xl->performLocking)
485
if ((mode != LL_MODE_VIDEO) && (mode != 0))
487
if ((mode & LL_MODE_2D) || (mode & LL_MODE_3D))
488
syncAccel(xl, mode, 0);
489
if (mode & LL_MODE_VIDEO)
491
if (mode & (LL_MODE_DECODER_SLICE | LL_MODE_DECODER_IDLE))
492
syncMpeg(xl, mode, 0);
493
ret = drmCommandWrite(xl->fd, DRM_VIA_PCICMD, &b, sizeof(b));
494
if (xl->performLocking)
497
xl->errors |= LL_PCI_COMMAND_ERR;
500
xl->curWaitFlags = 0;
504
agpFlush(XvMCLowLevel * xl)
506
drm_via_cmdbuffer_t b;
510
b.buf = (char *)xl->agp_buffer;
511
b.size = xl->agp_pos * sizeof(CARD32);
513
syncXvMCLowLevel(xl, LL_MODE_DECODER_IDLE, 1,
514
xl->agpSyncTimeStamp);
517
if (xl->performLocking)
520
ret = drmCommandWrite(xl->fd, DRM_VIA_CMDBUFFER, &b, sizeof(b));
521
} while (-EAGAIN == ret);
522
if (xl->performLocking)
526
xl->errors |= LL_AGP_COMMAND_ERR;
530
xl->curWaitFlags &= LL_MODE_VIDEO;
532
unsigned mode = xl->curWaitFlags;
534
b.buf = (char *)xl->agp_buffer;
535
b.size = xl->agp_pos * sizeof(CARD32);
536
if (xl->performLocking)
538
if ((mode != LL_MODE_VIDEO) && (mode != 0))
540
if ((mode & LL_MODE_2D) || (mode & LL_MODE_3D))
541
syncAccel(xl, mode, 0);
542
if (mode & LL_MODE_VIDEO)
544
if (mode & (LL_MODE_DECODER_SLICE | LL_MODE_DECODER_IDLE))
545
syncMpeg(xl, mode, 0);
546
ret = drmCommandWrite(xl->fd, DRM_VIA_PCICMD, &b, sizeof(b));
547
if (xl->performLocking)
550
xl->errors |= LL_PCI_COMMAND_ERR;
553
xl->curWaitFlags = 0;
558
flushXvMCLowLevel(void *xlp)
561
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
573
flushPCIXvMCLowLevel(void *xlp)
575
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
579
if (!xl->use_agp && xl->agp_pos)
584
pciCommand(XvMCLowLevel * xl, unsigned offset, unsigned value, unsigned flags)
586
if (xl->pci_pos > (LL_PCI_CMDBUF_SIZE - 2))
589
xl->curWaitFlags |= flags;
590
xl->pci_buffer[xl->pci_pos++] = (offset >> 2) | 0xF0000000;
591
xl->pci_buffer[xl->pci_pos++] = value;
595
viaMpegSetSurfaceStride(void *xlp, ViaXvMCContext * ctx)
597
CARD32 y_stride = ctx->yStride;
598
CARD32 uv_stride = y_stride >> 1;
599
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
601
BEGIN_RING_AGP(xl, 2);
602
OUT_RING_QW_AGP(xl, H1_ADDR(0xc50),
603
(y_stride >> 3) | ((uv_stride >> 3) << 16));
604
WAITFLAGS(xl, LL_MODE_DECODER_IDLE);
608
viaVideoSetSWFLipLocked(void *xlp, unsigned yOffs, unsigned uOffs,
609
unsigned vOffs, unsigned yStride, unsigned uvStride)
611
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
613
pciCommand(xl, HQV_SRC_STARTADDR_Y, yOffs, LL_MODE_VIDEO);
614
pciCommand(xl, HQV_SRC_STARTADDR_U, uOffs, 0);
615
pciCommand(xl, HQV_SRC_STARTADDR_V, vOffs, 0);
619
viaVideoSWFlipLocked(void *xlp, unsigned flags, int progressiveSequence)
622
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
627
if ((flags & XVMC_FRAME_PICTURE) == XVMC_BOTTOM_FIELD) {
629
orWd = HQV_FIELD_UV |
633
HQV_SW_FLIP | HQV_FLIP_ODD | HQV_FLIP_STATUS | HQV_SUBPIC_FLIP;
634
} else if ((flags & XVMC_FRAME_PICTURE) == XVMC_TOP_FIELD) {
635
andWd = ~HQV_FLIP_ODD;
636
orWd = HQV_FIELD_UV |
640
HQV_SW_FLIP | HQV_FLIP_STATUS | HQV_SUBPIC_FLIP;
641
} else if ((flags & XVMC_FRAME_PICTURE) == XVMC_FRAME_PICTURE) {
642
andWd = ~(HQV_DEINTERLACE |
643
HQV_FRAME_2_FIELD | HQV_FIELD_2_FRAME | HQV_FIELD_UV);
644
orWd = HQV_SW_FLIP | HQV_FLIP_STATUS | HQV_SUBPIC_FLIP;
646
if (progressiveSequence) {
647
andWd &= ~HQV_FIELD_UV;
648
orWd &= ~HQV_FIELD_UV;
651
pciCommand(xl, HQV_CONTROL, (VIDIN(xl,
652
HQV_CONTROL) & andWd) | orWd, 0);
656
viaMpegSetFB(void *xlp, unsigned i,
657
unsigned yOffs, unsigned uOffs, unsigned vOffs)
659
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
662
BEGIN_RING_AGP(xl, 6);
663
OUT_RING_QW_AGP(xl, H1_ADDR(0xc20 + i), yOffs >> 3);
664
OUT_RING_QW_AGP(xl, H1_ADDR(0xc24 + i), uOffs >> 3);
665
OUT_RING_QW_AGP(xl, H1_ADDR(0xc28 + i), vOffs >> 3);
666
WAITFLAGS(xl, LL_MODE_DECODER_IDLE);
670
viaMpegBeginPicture(void *xlp, ViaXvMCContext * ctx,
671
unsigned width, unsigned height, const XvMCMpegControl * control)
674
unsigned j, mb_width, mb_height;
675
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
677
mb_width = (width + 15) >> 4;
680
((control->mpeg_coding == XVMC_MPEG_2) &&
681
(control->flags & XVMC_PROGRESSIVE_SEQUENCE)) ?
682
2 * ((height + 31) >> 5) : (((height + 15) >> 4));
684
BEGIN_RING_AGP(xl, 144);
685
WAITFLAGS(xl, LL_MODE_DECODER_IDLE);
687
OUT_RING_QW_AGP(xl, H1_ADDR(0xc00),
688
((control->picture_structure & XVMC_FRAME_PICTURE) << 2) |
689
((control->picture_coding_type & 3) << 4) |
690
((control->flags & XVMC_ALTERNATE_SCAN) ? (1 << 6) : 0));
692
if (!(ctx->intraLoaded)) {
693
OUT_RING_QW_AGP(xl, H1_ADDR(0xc5c), 0);
694
for (j = 0; j < 64; j += 4) {
695
OUT_RING_QW_AGP(xl, H1_ADDR(0xc60),
696
ctx->intra_quantiser_matrix[j] |
697
(ctx->intra_quantiser_matrix[j + 1] << 8) |
698
(ctx->intra_quantiser_matrix[j + 2] << 16) |
699
(ctx->intra_quantiser_matrix[j + 3] << 24));
701
ctx->intraLoaded = 1;
704
if (!(ctx->nonIntraLoaded)) {
705
OUT_RING_QW_AGP(xl, H1_ADDR(0xc5c), 1);
706
for (j = 0; j < 64; j += 4) {
707
OUT_RING_QW_AGP(xl, H1_ADDR(0xc60),
708
ctx->non_intra_quantiser_matrix[j] |
709
(ctx->non_intra_quantiser_matrix[j + 1] << 8) |
710
(ctx->non_intra_quantiser_matrix[j + 2] << 16) |
711
(ctx->non_intra_quantiser_matrix[j + 3] << 24));
713
ctx->nonIntraLoaded = 1;
716
if (!(ctx->chromaIntraLoaded)) {
717
OUT_RING_QW_AGP(xl, H1_ADDR(0xc5c), 2);
718
for (j = 0; j < 64; j += 4) {
719
OUT_RING_QW_AGP(xl, H1_ADDR(0xc60),
720
ctx->chroma_intra_quantiser_matrix[j] |
721
(ctx->chroma_intra_quantiser_matrix[j + 1] << 8) |
722
(ctx->chroma_intra_quantiser_matrix[j + 2] << 16) |
723
(ctx->chroma_intra_quantiser_matrix[j + 3] << 24));
725
ctx->chromaIntraLoaded = 1;
728
if (!(ctx->chromaNonIntraLoaded)) {
729
OUT_RING_QW_AGP(xl, H1_ADDR(0xc5c), 3);
730
for (j = 0; j < 64; j += 4) {
731
OUT_RING_QW_AGP(xl, H1_ADDR(0xc60),
732
ctx->chroma_non_intra_quantiser_matrix[j] |
733
(ctx->chroma_non_intra_quantiser_matrix[j + 1] << 8) |
734
(ctx->chroma_non_intra_quantiser_matrix[j + 2] << 16) |
735
(ctx->chroma_non_intra_quantiser_matrix[j + 3] << 24));
737
ctx->chromaNonIntraLoaded = 1;
740
OUT_RING_QW_AGP(xl, H1_ADDR(0xc90),
741
((mb_width * mb_height) & 0x3fff) |
742
((control->flags & XVMC_PRED_DCT_FRAME) ? (1 << 14) : 0) |
743
((control->flags & XVMC_TOP_FIELD_FIRST) ? (1 << 15) : 0) |
744
((control->mpeg_coding == XVMC_MPEG_2) ? (1 << 16) : 0) |
745
((mb_width & 0xff) << 18));
747
OUT_RING_QW_AGP(xl, H1_ADDR(0xc94),
748
((control->flags & XVMC_CONCEALMENT_MOTION_VECTORS) ? 1 : 0) |
749
((control->flags & XVMC_Q_SCALE_TYPE) ? 2 : 0) |
750
((control->intra_dc_precision & 3) << 2) |
751
(((1 + 0x100000 / mb_width) & 0xfffff) << 4) |
752
((control->flags & XVMC_INTRA_VLC_FORMAT) ? (1 << 24) : 0));
754
OUT_RING_QW_AGP(xl, H1_ADDR(0xc98),
755
(((control->FHMV_range) & 0xf) << 0) |
756
(((control->FVMV_range) & 0xf) << 4) |
757
(((control->BHMV_range) & 0xf) << 8) |
758
(((control->BVMV_range) & 0xf) << 12) |
759
((control->flags & XVMC_SECOND_FIELD) ? (1 << 20) : 0) |
765
viaMpegReset(void *xlp)
768
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
770
BEGIN_RING_AGP(xl, 100);
771
WAITFLAGS(xl, LL_MODE_DECODER_IDLE);
773
for (i = 0; i < 14; i++)
774
OUT_RING_QW_AGP(xl, H1_ADDR(0xc08), 0);
776
OUT_RING_QW_AGP(xl, H1_ADDR(0xc98), 0x400000);
778
for (i = 0; i < 6; i++) {
779
OUT_RING_QW_AGP(xl, H1_ADDR(0xc0c), 0x43 | 0x20);
780
for (j = 0xc10; j < 0xc20; j += 4)
781
OUT_RING_QW_AGP(xl, H1_ADDR(j), 0);
784
OUT_RING_QW_AGP(xl, H1_ADDR(0xc0c), 0xc3 | 0x20);
785
for (j = 0xc10; j < 0xc20; j += 4)
786
OUT_RING_QW_AGP(xl, H1_ADDR(j), 0);
791
viaMpegWriteSlice(void *xlp, CARD8 * slice, int nBytes, CARD32 sCode)
796
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
798
if (xl->errors & (LL_DECODER_TIMEDOUT |
799
LL_IDCT_FIFO_ERROR | LL_SLICE_FIFO_ERROR | LL_SLICE_FAULT))
806
buf = (CARD32 *) slice;
813
BEGIN_RING_AGP(xl, 4);
814
WAITFLAGS(xl, LL_MODE_DECODER_IDLE);
816
OUT_RING_QW_AGP(xl, H1_ADDR(0xc9c), nBytes);
819
OUT_RING_QW_AGP(xl, H1_ADDR(0xca0), sCode);
825
count += (LL_AGP_CMDBUF_SIZE - 20) >> 1;
826
count = (count > n) ? n : count;
827
BEGIN_RING_AGP(xl, (count - i) << 1);
829
for (; i < count; i++) {
830
OUT_RING_QW_AGP(xl, H1_ADDR(0xca0), *buf++);
834
BEGIN_RING_AGP(xl, 6);
837
OUT_RING_QW_AGP(xl, H1_ADDR(0xca0), *buf & ((1 << (r << 3)) - 1));
839
OUT_RING_QW_AGP(xl, H1_ADDR(0xca0), 0);
840
OUT_RING_QW_AGP(xl, H1_ADDR(0xca0), 0);
845
viaVideoSubPictureOffLocked(void *xlp)
849
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
851
stride = VIDIN(xl, SUBP_CONTROL_STRIDE);
853
pciCommand(xl, SUBP_CONTROL_STRIDE, stride & ~SUBP_HQV_ENABLE,
858
viaVideoSubPictureLocked(void *xlp, ViaXvMCSubPicture * pViaSubPic)
863
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
865
for (i = 0; i < VIA_SUBPIC_PALETTE_SIZE; ++i) {
866
pciCommand(xl, RAM_TABLE_CONTROL, pViaSubPic->palette[i],
870
pciCommand(xl, SUBP_STARTADDR, pViaSubPic->offset, 0);
871
cWord = (pViaSubPic->stride & SUBP_STRIDE_MASK) | SUBP_HQV_ENABLE;
872
cWord |= (pViaSubPic->ia44) ? SUBP_IA44 : SUBP_AI44;
873
pciCommand(xl, SUBP_CONTROL_STRIDE, cWord, 0);
877
viaBlit(void *xlp, unsigned bpp, unsigned srcBase,
878
unsigned srcPitch, unsigned dstBase, unsigned dstPitch,
879
unsigned w, unsigned h, int xdir, int ydir, unsigned blitMode,
883
CARD32 dwGEMode = 0, srcY = 0, srcX, dstY = 0, dstX;
885
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
892
dwGEMode |= VIA_GEM_16bpp;
895
dwGEMode |= VIA_GEM_32bpp;
898
dwGEMode |= VIA_GEM_8bpp;
906
dwGEMode |= VIA_GEM_16bpp;
911
dwGEMode |= VIA_GEM_32bpp;
916
dwGEMode |= VIA_GEM_8bpp;
920
BEGIN_RING_AGP(xl, 20);
921
WAITFLAGS(xl, LL_MODE_2D);
923
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_GEMODE), dwGEMode);
938
case VIABLIT_TRANSCOPY:
939
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_SRCCOLORKEY), color);
940
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_KEYCONTROL), 0x4000);
941
cmd |= VIA_GEC_BLT | (VIA_BLIT_COPY << 24);
944
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_FGCOLOR), color);
945
cmd |= VIA_GEC_BLT | VIA_GEC_FIXCOLOR_PAT | (VIA_BLIT_FILL << 24);
948
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_KEYCONTROL), 0x0);
949
cmd |= VIA_GEC_BLT | (VIA_BLIT_COPY << 24);
952
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_SRCBASE), (srcBase & ~31) >> 3);
953
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_DSTBASE), (dstBase & ~31) >> 3);
954
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_PITCH), VIA_PITCH_ENABLE |
955
(srcPitch >> 3) | (((dstPitch) >> 3) << 16));
956
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_SRCPOS), ((srcY << 16) | srcX));
957
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_DSTPOS), ((dstY << 16) | dstX));
958
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_DIMENSION),
959
(((h - 1) << 16) | (w - 1)));
960
OUT_RING_QW_AGP(xl, H1_ADDR(VIA_REG_GECMD), cmd);
964
syncXvMCLowLevel(void *xlp, unsigned int mode, unsigned int doSleep,
968
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
976
if ((mode & (LL_MODE_VIDEO | LL_MODE_3D)) || !xl->use_agp) {
977
if (xl->performLocking)
979
if ((mode != LL_MODE_VIDEO))
980
syncDMA(xl, doSleep);
981
if (mode & LL_MODE_3D)
982
syncAccel(xl, mode, doSleep);
983
if (mode & LL_MODE_VIDEO)
984
syncVideo(xl, doSleep);
985
if (xl->performLocking)
988
viaDMAWaitTimeStamp(xl, timeStamp, doSleep);
991
if (mode & (LL_MODE_DECODER_SLICE | LL_MODE_DECODER_IDLE))
992
syncMpeg(xl, mode, doSleep);
1001
initXvMCLowLevel(int fd, drm_context_t * ctx,
1002
drmLockPtr hwLock, drmAddress mmioAddress,
1003
drmAddress fbAddress, unsigned fbStride, unsigned fbDepth,
1004
unsigned width, unsigned height, int useAgp, unsigned chipId)
1009
if (chipId == PCI_CHIP_VT3259 || chipId == PCI_CHIP_VT3364) {
1010
fprintf(stderr, "You are using an XvMC driver for the wrong chip.\n");
1011
fprintf(stderr, "Chipid is 0x%04x.\n", chipId);
1015
xl = (XvMCLowLevel *) malloc(sizeof(XvMCLowLevel));
1022
xl->use_agp = useAgp;
1024
xl->drmcontext = ctx;
1025
xl->hwLock = hwLock;
1026
xl->mmioAddress = mmioAddress;
1027
xl->fbAddress = fbAddress;
1028
xl->curWaitFlags = 0;
1029
xl->performLocking = 1;
1032
ret = viaDMAInitTimeStamp(xl);
1041
setLowLevelLocking(void *xlp, int performLocking)
1043
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
1045
xl->performLocking = performLocking;
1049
closeXvMCLowLevel(void *xlp)
1051
XvMCLowLevel *xl = (XvMCLowLevel *) xlp;
1053
viaDMACleanupTimeStamp(xl);