~ubuntu-branches/ubuntu/intrepid/gstreamer0.10-ffmpeg/intrepid

Viewing changes to gst-libs/ext/ffmpeg/libavcodec/sh4/dsputil_align.c

  • Committer: Bazaar Package Importer
  • Author(s): Sebastien Bacher
  • Date: 2005-12-17 23:59:34 UTC
  • Revision ID: james.westby@ubuntu.com-20051217235934-qu7f84arvb9r3id3
Tags: upstream-0.10.0
Import upstream version 0.10.0

 
/*
 * aligned/packed access motion compensation
 *
 * Copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */


#include "avcodec.h"
#include "dsputil.h"
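
/*
 * The SH-4 cannot load a 32-bit word from an unaligned address, so the
 * kernels below only ever dereference 4-byte-aligned pointers through
 * LP() and rebuild misaligned source data by merging two neighbouring
 * aligned words (MERGE1/MERGE2).
 */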
 
#define LP(p)   *(uint32_t*)(p)
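
/*
 * UNPACK splits the per-byte sum of two source words into ph and pl so
 * that sum = 4*ph + pl, with no carry between bytes; rnd_PACK and
 * no_rnd_PACK recombine two such pairs so that each result byte becomes
 * (p0+p1+p2+p3+2)>>2 resp. (p0+p1+p2+p3+1)>>2, the four-pixel average
 * used by the xy half-pel kernels.
 */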
 
#define UNPACK(ph,pl,tt0,tt1) do { \
        uint32_t t0,t1; t0=tt0;t1=tt1; \
        ph = ( (t0 & ~BYTE_VEC32(0x03))>>2) + ( (t1 & ~BYTE_VEC32(0x03))>>2); \
        pl = (t0 & BYTE_VEC32(0x03)) + (t1 & BYTE_VEC32(0x03)); } while(0)

#define rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x02))>>2) & BYTE_VEC32(0x03))
#define no_rnd_PACK(ph,pl,nph,npl)      ph + nph + (((pl + npl + BYTE_VEC32(0x01))>>2) & BYTE_VEC32(0x03))
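
/*
 * MERGE1(a,b,ofs) reconstructs the unaligned 32-bit word that starts ofs
 * bytes into the aligned word a, using a and the following aligned word
 * b; MERGE2 fetches the word one byte further on.  Example (little
 * endian, ofs == 1): with a = 0x44332211 and b = 0x88776655,
 * MERGE1(a,b,1) = (a>>8)|(b<<24) = 0x55443322.  The definitions below
 * are for little-endian mode; big-endian variants are kept in the
 * comment that follows them.
 */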
 
/* little endian */
#define MERGE1(a,b,ofs) (ofs==0)?a:( ((a)>>(8*ofs))|((b)<<(32-8*ofs)) )
#define MERGE2(a,b,ofs) (ofs==3)?b:( ((a)>>(8*(ofs+1)))|((b)<<(32-8*(ofs+1))) )
/* big
#define MERGE1(a,b,ofs) (ofs==0)?a:( ((a)<<(8*ofs))|((b)>>(32-8*ofs)) )
#define MERGE2(a,b,ofs) (ofs==3)?b:( ((a)<<(8+8*ofs))|((b)>>(32-8-8*ofs)) )
*/


#define put(d,s)        d = s
#define avg(d,s)        d = rnd_avg32(s,d)
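
/*
 * OP_C4 copies or averages a 4-pixel-wide column whose source is
 * misaligned by ofs bytes; OP_C40 is the fast path for an aligned
 * source.  OP is redefined to put or avg before each expansion so the
 * same body yields both the put_ and the avg_ variant.
 */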
 
#define OP_C4(ofs) \
        ref-=ofs; \
        do { \
                OP(LP(dest),MERGE1(LP(ref),LP(ref+4),ofs)); \
                ref+=stride; \
                dest+=stride; \
        } while(--height)

#define OP_C40() \
        do { \
                OP(LP(dest),LP(ref)); \
                ref+=stride; \
                dest+=stride; \
        } while(--height)


#define OP      put

static void put_pixels4_c(uint8_t *dest,const uint8_t *ref, const int stride,int height)
{
        switch((int)ref&3){
        case 0: OP_C40(); return;
        case 1: OP_C4(1); return;
        case 2: OP_C4(2); return;
        case 3: OP_C4(3); return;
        }
}

#undef  OP
#define OP      avg

static void avg_pixels4_c(uint8_t *dest,const uint8_t *ref, const int stride,int height)
{
        switch((int)ref&3){
        case 0: OP_C40(); return;
        case 1: OP_C4(1); return;
        case 2: OP_C4(2); return;
        case 3: OP_C4(3); return;
        }
}

#undef  OP
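
/*
 * Wide (8/16-pixel) kernels: _C is a plain copy, _X and _Y average each
 * pixel with its right resp. lower neighbour (half-pel in one
 * direction), and _XY averages the 2x2 neighbourhood via UNPACK/PACK.
 * The *0 variants handle a 4-byte-aligned source; the constant "sz"
 * test is expected to be folded away by the compiler.
 */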
 
#define OP_C(ofs,sz,avg2) \
{ \
        ref-=ofs; \
        do { \
                uint32_t        t0,t1; \
                t0 = LP(ref+0); \
                t1 = LP(ref+4); \
                OP(LP(dest+0), MERGE1(t0,t1,ofs)); \
                t0 = LP(ref+8); \
                OP(LP(dest+4), MERGE1(t1,t0,ofs)); \
if (sz==16) { \
                t1 = LP(ref+12); \
                OP(LP(dest+8), MERGE1(t0,t1,ofs)); \
                t0 = LP(ref+16); \
                OP(LP(dest+12), MERGE1(t1,t0,ofs)); \
} \
                ref+=stride; \
                dest+= stride; \
        } while(--height); \
}

/* aligned */
#define OP_C0(sz,avg2) \
{ \
        do { \
                OP(LP(dest+0), LP(ref+0)); \
                OP(LP(dest+4), LP(ref+4)); \
if (sz==16) { \
                OP(LP(dest+8), LP(ref+8)); \
                OP(LP(dest+12), LP(ref+12)); \
} \
                ref+=stride; \
                dest+= stride; \
        } while(--height); \
}

#define OP_X(ofs,sz,avg2) \
{ \
        ref-=ofs; \
        do { \
                uint32_t        t0,t1; \
                t0 = LP(ref+0); \
                t1 = LP(ref+4); \
                OP(LP(dest+0), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
                t0 = LP(ref+8); \
                OP(LP(dest+4), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
if (sz==16) { \
                t1 = LP(ref+12); \
                OP(LP(dest+8), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
                t0 = LP(ref+16); \
                OP(LP(dest+12), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
} \
                ref+=stride; \
                dest+= stride; \
        } while(--height); \
}

/* aligned */
#define OP_Y0(sz,avg2) \
{ \
        uint32_t t0,t1,t2,t3,t; \
\
        t0 = LP(ref+0); \
        t1 = LP(ref+4); \
if (sz==16) { \
        t2 = LP(ref+8); \
        t3 = LP(ref+12); \
} \
        do { \
                ref += stride; \
\
                t = LP(ref+0); \
                OP(LP(dest+0), avg2(t0,t)); t0 = t; \
                t = LP(ref+4); \
                OP(LP(dest+4), avg2(t1,t)); t1 = t; \
if (sz==16) { \
                t = LP(ref+8); \
                OP(LP(dest+8), avg2(t2,t)); t2 = t; \
                t = LP(ref+12); \
                OP(LP(dest+12), avg2(t3,t)); t3 = t; \
} \
                dest+= stride; \
        } while(--height); \
}

#define OP_Y(ofs,sz,avg2) \
{ \
        uint32_t t0,t1,t2,t3,t,w0,w1; \
\
        ref-=ofs; \
        w0 = LP(ref+0); \
        w1 = LP(ref+4); \
        t0 = MERGE1(w0,w1,ofs); \
        w0 = LP(ref+8); \
        t1 = MERGE1(w1,w0,ofs); \
if (sz==16) { \
        w1 = LP(ref+12); \
        t2 = MERGE1(w0,w1,ofs); \
        w0 = LP(ref+16); \
        t3 = MERGE1(w1,w0,ofs); \
} \
        do { \
                ref += stride; \
\
                w0 = LP(ref+0); \
                w1 = LP(ref+4); \
                t = MERGE1(w0,w1,ofs); \
                OP(LP(dest+0), avg2(t0,t)); t0 = t; \
                w0 = LP(ref+8); \
                t = MERGE1(w1,w0,ofs); \
                OP(LP(dest+4), avg2(t1,t)); t1 = t; \
if (sz==16) { \
                w1 = LP(ref+12); \
                t = MERGE1(w0,w1,ofs); \
                OP(LP(dest+8), avg2(t2,t)); t2 = t; \
                w0 = LP(ref+16); \
                t = MERGE1(w1,w0,ofs); \
                OP(LP(dest+12), avg2(t3,t)); t3 = t; \
} \
                dest+=stride; \
        } while(--height); \
}

#define OP_X0(sz,avg2) OP_X(0,sz,avg2)
#define OP_XY0(sz,PACK) OP_XY(0,sz,PACK)
#define OP_XY(ofs,sz,PACK) \
{ \
        uint32_t        t2,t3,w0,w1; \
        uint32_t        a0,a1,a2,a3,a4,a5,a6,a7; \
\
        ref -= ofs; \
        w0 = LP(ref+0); \
        w1 = LP(ref+4); \
        UNPACK(a0,a1,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
        w0 = LP(ref+8); \
        UNPACK(a2,a3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
if (sz==16) { \
        w1 = LP(ref+12); \
        UNPACK(a4,a5,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
        w0 = LP(ref+16); \
        UNPACK(a6,a7,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
} \
        do { \
                ref+=stride; \
                w0 = LP(ref+0); \
                w1 = LP(ref+4); \
                UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
                OP(LP(dest+0),PACK(a0,a1,t2,t3)); \
                a0 = t2; a1 = t3; \
                w0 = LP(ref+8); \
                UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
                OP(LP(dest+4),PACK(a2,a3,t2,t3)); \
                a2 = t2; a3 = t3; \
if (sz==16) { \
                w1 = LP(ref+12); \
                UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
                OP(LP(dest+8),PACK(a4,a5,t2,t3)); \
                a4 = t2; a5 = t3; \
                w0 = LP(ref+16); \
                UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
                OP(LP(dest+12),PACK(a6,a7,t2,t3)); \
                a6 = t2; a7 = t3; \
} \
                dest+=stride; \
        } while(--height); \
}
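
/*
 * DEFFUNC stamps out one put/avg function per rounding mode, block
 * width and interpolation direction (o/x/y/xy), dispatching at run time
 * on the low two bits of the source address.
 */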
 
#define DEFFUNC(op,rnd,xy,sz,OP_N,avgfunc) \
static void op##_##rnd##_pixels##sz##_##xy (uint8_t * dest, const uint8_t * ref,        \
                                   const int stride, int height)        \
{ \
        switch((int)ref&3) { \
        case 0:OP_N##0(sz,rnd##_##avgfunc); return; \
        case 1:OP_N(1,sz,rnd##_##avgfunc); return; \
        case 2:OP_N(2,sz,rnd##_##avgfunc); return; \
        case 3:OP_N(3,sz,rnd##_##avgfunc); return; \
        } \
}

#define OP put

DEFFUNC(put,   rnd,o,8,OP_C,avg2)
DEFFUNC(put,   rnd,x,8,OP_X,avg2)
DEFFUNC(put,no_rnd,x,8,OP_X,avg2)
DEFFUNC(put,   rnd,y,8,OP_Y,avg2)
DEFFUNC(put,no_rnd,y,8,OP_Y,avg2)
DEFFUNC(put,   rnd,xy,8,OP_XY,PACK)
DEFFUNC(put,no_rnd,xy,8,OP_XY,PACK)
DEFFUNC(put,   rnd,o,16,OP_C,avg2)
DEFFUNC(put,   rnd,x,16,OP_X,avg2)
DEFFUNC(put,no_rnd,x,16,OP_X,avg2)
DEFFUNC(put,   rnd,y,16,OP_Y,avg2)
DEFFUNC(put,no_rnd,y,16,OP_Y,avg2)
DEFFUNC(put,   rnd,xy,16,OP_XY,PACK)
DEFFUNC(put,no_rnd,xy,16,OP_XY,PACK)

#undef OP
#define OP avg

DEFFUNC(avg,   rnd,o,8,OP_C,avg2)
DEFFUNC(avg,   rnd,x,8,OP_X,avg2)
DEFFUNC(avg,no_rnd,x,8,OP_X,avg2)
DEFFUNC(avg,   rnd,y,8,OP_Y,avg2)
DEFFUNC(avg,no_rnd,y,8,OP_Y,avg2)
DEFFUNC(avg,   rnd,xy,8,OP_XY,PACK)
DEFFUNC(avg,no_rnd,xy,8,OP_XY,PACK)
DEFFUNC(avg,   rnd,o,16,OP_C,avg2)
DEFFUNC(avg,   rnd,x,16,OP_X,avg2)
DEFFUNC(avg,no_rnd,x,16,OP_X,avg2)
DEFFUNC(avg,   rnd,y,16,OP_Y,avg2)
DEFFUNC(avg,no_rnd,y,16,OP_Y,avg2)
DEFFUNC(avg,   rnd,xy,16,OP_XY,PACK)
DEFFUNC(avg,no_rnd,xy,16,OP_XY,PACK)

#undef OP
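
/*
 * For the plain copy ("o") case rounding cannot change the result, so
 * the no_rnd entry points below simply reuse the rounded functions; the
 * *_c aliases give them the generic names that the included qpel.c
 * builds on.
 */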
 
#define put_no_rnd_pixels8_o    put_rnd_pixels8_o
#define put_no_rnd_pixels16_o   put_rnd_pixels16_o
#define avg_no_rnd_pixels8_o    avg_rnd_pixels8_o
#define avg_no_rnd_pixels16_o   avg_rnd_pixels16_o

#define put_pixels8_c   put_rnd_pixels8_o
#define put_pixels16_c  put_rnd_pixels16_o
#define avg_pixels8_c   avg_rnd_pixels8_o
#define avg_pixels16_c  avg_rnd_pixels16_o
#define put_no_rnd_pixels8_c    put_rnd_pixels8_o
#define put_no_rnd_pixels16_c   put_rnd_pixels16_o
#define avg_no_rnd_pixels8_c    avg_rnd_pixels8_o
#define avg_no_rnd_pixels16_c   avg_rnd_pixels16_o

#define QPEL

#ifdef QPEL

#include "qpel.c"

#endif
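
/* Plug the SH-4 optimised routines into the generic DSPContext tables. */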
 
void dsputil_init_align(DSPContext* c, AVCodecContext *avctx)
{
        c->put_pixels_tab[0][0] = put_rnd_pixels16_o;
        c->put_pixels_tab[0][1] = put_rnd_pixels16_x;
        c->put_pixels_tab[0][2] = put_rnd_pixels16_y;
        c->put_pixels_tab[0][3] = put_rnd_pixels16_xy;
        c->put_pixels_tab[1][0] = put_rnd_pixels8_o;
        c->put_pixels_tab[1][1] = put_rnd_pixels8_x;
        c->put_pixels_tab[1][2] = put_rnd_pixels8_y;
        c->put_pixels_tab[1][3] = put_rnd_pixels8_xy;

        c->put_no_rnd_pixels_tab[0][0] = put_no_rnd_pixels16_o;
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy;
        c->put_no_rnd_pixels_tab[1][0] = put_no_rnd_pixels8_o;
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy;

        c->avg_pixels_tab[0][0] = avg_rnd_pixels16_o;
        c->avg_pixels_tab[0][1] = avg_rnd_pixels16_x;
        c->avg_pixels_tab[0][2] = avg_rnd_pixels16_y;
        c->avg_pixels_tab[0][3] = avg_rnd_pixels16_xy;
        c->avg_pixels_tab[1][0] = avg_rnd_pixels8_o;
        c->avg_pixels_tab[1][1] = avg_rnd_pixels8_x;
        c->avg_pixels_tab[1][2] = avg_rnd_pixels8_y;
        c->avg_pixels_tab[1][3] = avg_rnd_pixels8_xy;

        c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_o;
        c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x;
        c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y;
        c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy;
        c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_o;
        c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x;
        c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y;
        c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy;

#ifdef QPEL

#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c

    dspfunc(put_qpel, 0, 16);
    dspfunc(put_no_rnd_qpel, 0, 16);

    dspfunc(avg_qpel, 0, 16);
    /* dspfunc(avg_no_rnd_qpel, 0, 16); */

    dspfunc(put_qpel, 1, 8);
    dspfunc(put_no_rnd_qpel, 1, 8);

    dspfunc(avg_qpel, 1, 8);
    /* dspfunc(avg_no_rnd_qpel, 1, 8); */

    dspfunc(put_h264_qpel, 0, 16);
    dspfunc(put_h264_qpel, 1, 8);
    dspfunc(put_h264_qpel, 2, 4);
    dspfunc(avg_h264_qpel, 0, 16);
    dspfunc(avg_h264_qpel, 1, 8);
    dspfunc(avg_h264_qpel, 2, 4);

#undef dspfunc
    c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_c;
    c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_c;
    c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_c;
    c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_c;
    c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_c;
    c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_c;

    c->put_mspel_pixels_tab[0]= put_mspel8_mc00_c;
    c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
    c->put_mspel_pixels_tab[2]= put_mspel8_mc20_c;
    c->put_mspel_pixels_tab[3]= put_mspel8_mc30_c;
    c->put_mspel_pixels_tab[4]= put_mspel8_mc02_c;
    c->put_mspel_pixels_tab[5]= put_mspel8_mc12_c;
    c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c;
    c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c;

    c->gmc1 = gmc1_c;
    c->gmc = gmc_c;

#endif
}