/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version
 */
#include "mixer.h"
#include "regs-mixer.h"
#include "regs-vp.h"

#include <linux/delay.h>
/* Register access subroutines */
22
/* Read a 32-bit Video Processor register. */
static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.vp_regs + reg_id);
}
/* Write a 32-bit value to a Video Processor register. */
static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.vp_regs + reg_id);
}
/* Read-modify-write: update only the bits selected by @mask in a VP register. */
static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 old = vp_read(mdev, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, mdev->res.vp_regs + reg_id);
}
/* Read a 32-bit mixer register. */
static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.mxr_regs + reg_id);
}
/* Write a 32-bit value to a mixer register. */
static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.mxr_regs + reg_id);
}
/* Read-modify-write: update only the bits selected by @mask in a mixer register. */
static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 old = mxr_read(mdev, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, mdev->res.mxr_regs + reg_id);
}
void mxr_vsync_set_update(struct mxr_device *mdev, int en)
62
/* block update on vsync */
63
mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
64
MXR_STATUS_SYNC_ENABLE);
65
vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
68
static void __mxr_reg_vp_reset(struct mxr_device *mdev)
72
vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
73
for (tries = 100; tries; --tries) {
74
/* waiting until VP_SRESET_PROCESSING is 0 */
75
if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
79
WARN(tries == 0, "failed to reset Video Processor\n");
82
static void mxr_reg_vp_default_filter(struct mxr_device *mdev);
84
void mxr_reg_reset(struct mxr_device *mdev)
87
u32 val; /* value stored to register */
89
spin_lock_irqsave(&mdev->reg_slock, flags);
90
mxr_vsync_set_update(mdev, MXR_DISABLE);
92
/* set output in RGB888 mode */
93
mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);
95
/* 16 beat burst in DMA */
96
mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
97
MXR_STATUS_BURST_MASK);
99
/* setting default layer priority: layer1 > video > layer0
100
* because typical usage scenario would be
101
* layer0 - framebuffer
102
* video - video overlay
105
val = MXR_LAYER_CFG_GRP0_VAL(1);
106
val |= MXR_LAYER_CFG_VP_VAL(2);
107
val |= MXR_LAYER_CFG_GRP1_VAL(3);
108
mxr_write(mdev, MXR_LAYER_CFG, val);
110
/* use dark gray background color */
111
mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
112
mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
113
mxr_write(mdev, MXR_BG_COLOR2, 0x808080);
115
/* setting graphical layers */
117
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
118
val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
119
val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
121
/* the same configuration for both layers */
122
mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
123
mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);
125
/* configuration of Video Processor Registers */
126
__mxr_reg_vp_reset(mdev);
127
mxr_reg_vp_default_filter(mdev);
129
/* enable all interrupts */
130
mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);
132
mxr_vsync_set_update(mdev, MXR_ENABLE);
133
spin_unlock_irqrestore(&mdev->reg_slock, flags);
136
void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
137
const struct mxr_format *fmt, const struct mxr_geometry *geo)
142
spin_lock_irqsave(&mdev->reg_slock, flags);
143
mxr_vsync_set_update(mdev, MXR_DISABLE);
146
mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
147
MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);
150
mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
151
val = MXR_GRP_WH_WIDTH(geo->src.width);
152
val |= MXR_GRP_WH_HEIGHT(geo->src.height);
153
val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
154
val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
155
mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);
157
/* setup offsets in source image */
158
val = MXR_GRP_SXY_SX(geo->src.x_offset);
159
val |= MXR_GRP_SXY_SY(geo->src.y_offset);
160
mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);
162
/* setup offsets in display image */
163
val = MXR_GRP_DXY_DX(geo->dst.x_offset);
164
val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
165
mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);
167
mxr_vsync_set_update(mdev, MXR_ENABLE);
168
spin_unlock_irqrestore(&mdev->reg_slock, flags);
171
void mxr_reg_vp_format(struct mxr_device *mdev,
172
const struct mxr_format *fmt, const struct mxr_geometry *geo)
176
spin_lock_irqsave(&mdev->reg_slock, flags);
177
mxr_vsync_set_update(mdev, MXR_DISABLE);
179
vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);
181
/* setting size of input image */
182
vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
183
VP_IMG_VSIZE(geo->src.full_height));
184
/* chroma height has to reduced by 2 to avoid chroma distorions */
185
vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
186
VP_IMG_VSIZE(geo->src.full_height / 2));
188
vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
189
vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
190
vp_write(mdev, VP_SRC_H_POSITION,
191
VP_SRC_H_POSITION_VAL(geo->src.x_offset));
192
vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);
194
vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
195
vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
196
if (geo->dst.field == V4L2_FIELD_INTERLACED) {
197
vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
198
vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
200
vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
201
vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
204
vp_write(mdev, VP_H_RATIO, geo->x_ratio);
205
vp_write(mdev, VP_V_RATIO, geo->y_ratio);
207
vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
209
mxr_vsync_set_update(mdev, MXR_ENABLE);
210
spin_unlock_irqrestore(&mdev->reg_slock, flags);
214
/*
 * Set the DMA base address of graphic layer @idx.
 * A zero @addr disables the layer; a non-zero one enables it.
 */
void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
{
	u32 val = addr ? ~0 : 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	if (idx == 0)
		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
	else
		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
	mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/*
 * Set luma/chroma DMA addresses for both fields of the video layer.
 * A zero luma_addr[0] disables the layer; a non-zero one enables it.
 */
void mxr_reg_vp_buffer(struct mxr_device *mdev,
	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
{
	u32 val = luma_addr[0] ? ~0 : 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
	vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
	/* TODO: fix tiled mode */
	vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
	vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
	vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
	vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
static void mxr_irq_layer_handle(struct mxr_layer *layer)
255
struct list_head *head = &layer->enq_list;
256
struct mxr_buffer *done;
258
/* skip non-existing layer */
262
spin_lock(&layer->enq_slock);
263
if (layer->state == MXR_LAYER_IDLE)
266
done = layer->shadow_buf;
267
layer->shadow_buf = layer->update_buf;
269
if (list_empty(head)) {
270
if (layer->state != MXR_LAYER_STREAMING)
271
layer->update_buf = NULL;
273
struct mxr_buffer *next;
274
next = list_first_entry(head, struct mxr_buffer, list);
275
list_del(&next->list);
276
layer->update_buf = next;
279
layer->ops.buffer_set(layer, layer->update_buf);
281
if (done && done != layer->shadow_buf)
282
vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
285
spin_unlock(&layer->enq_slock);
288
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
290
struct mxr_device *mdev = dev_data;
293
spin_lock(&mdev->reg_slock);
294
val = mxr_read(mdev, MXR_INT_STATUS);
296
/* wake up process waiting for VSYNC */
297
if (val & MXR_INT_STATUS_VSYNC) {
298
set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
299
wake_up(&mdev->event_queue);
302
/* clear interrupts */
303
if (~val & MXR_INT_EN_VSYNC) {
304
/* vsync interrupt use different bit for read and clear */
305
val &= ~MXR_INT_EN_VSYNC;
306
val |= MXR_INT_CLEAR_VSYNC;
308
mxr_write(mdev, MXR_INT_STATUS, val);
310
spin_unlock(&mdev->reg_slock);
311
/* leave on non-vsync event */
312
if (~val & MXR_INT_CLEAR_VSYNC)
314
for (i = 0; i < MXR_MAX_LAYERS; ++i)
315
mxr_irq_layer_handle(mdev->layer[i]);
319
void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
323
val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
324
mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
327
void mxr_reg_streamon(struct mxr_device *mdev)
331
spin_lock_irqsave(&mdev->reg_slock, flags);
332
/* single write -> no need to block vsync update */
335
mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
337
spin_unlock_irqrestore(&mdev->reg_slock, flags);
340
void mxr_reg_streamoff(struct mxr_device *mdev)
344
spin_lock_irqsave(&mdev->reg_slock, flags);
345
/* single write -> no need to block vsync update */
348
mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
350
spin_unlock_irqrestore(&mdev->reg_slock, flags);
353
int mxr_reg_wait4vsync(struct mxr_device *mdev)
357
clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
358
/* TODO: consider adding interruptible */
359
ret = wait_event_timeout(mdev->event_queue,
360
test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
361
msecs_to_jiffies(1000));
366
mxr_warn(mdev, "no vsync detected - timeout\n");
370
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
371
struct v4l2_mbus_framefmt *fmt)
376
spin_lock_irqsave(&mdev->reg_slock, flags);
377
mxr_vsync_set_update(mdev, MXR_DISABLE);
379
/* selecting colorspace accepted by output */
380
if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
381
val |= MXR_CFG_OUT_YUV444;
383
val |= MXR_CFG_OUT_RGB888;
385
/* choosing between interlace and progressive mode */
386
if (fmt->field == V4L2_FIELD_INTERLACED)
387
val |= MXR_CFG_SCAN_INTERLACE;
389
val |= MXR_CFG_SCAN_PROGRASSIVE;
391
/* choosing between porper HD and SD mode */
392
if (fmt->height == 480)
393
val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
394
else if (fmt->height == 576)
395
val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
396
else if (fmt->height == 720)
397
val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
398
else if (fmt->height == 1080)
399
val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
401
WARN(1, "unrecognized mbus height %u!\n", fmt->height);
403
mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK |
406
val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
407
vp_write_mask(mdev, VP_MODE, val,
408
VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);
410
mxr_vsync_set_update(mdev, MXR_ENABLE);
411
spin_unlock_irqrestore(&mdev->reg_slock, flags);
414
/* Per-layer stream on/off hook for graphic layers; intentionally a no-op. */
void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
{
	/* no extra actions need to be done */
}
/* Per-layer stream on/off hook for the video layer; intentionally a no-op. */
void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
{
	/* no extra actions need to be done */
}
/* 8-tap polyphase filter coefficients, luma horizontal scaling */
static const u8 filter_y_horiz_tap8[] = {
	0, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, 0, 0, 0,
	0, 2, 4, 5, 6, 6, 6, 6,
	6, 5, 5, 4, 3, 2, 1, 1,
	0, -6, -12, -16, -18, -20, -21, -20,
	-20, -18, -16, -13, -10, -8, -5, -2,
	127, 126, 125, 121, 114, 107, 99, 89,
	79, 68, 57, 46, 35, 25, 16, 8,
};
/* 4-tap polyphase filter coefficients, luma vertical scaling */
static const u8 filter_y_vert_tap4[] = {
	0, -3, -6, -8, -8, -8, -8, -7,
	-6, -5, -4, -3, -2, -1, -1, 0,
	127, 126, 124, 118, 111, 102, 92, 81,
	70, 59, 48, 37, 27, 19, 11, 5,
	0, 5, 11, 19, 27, 37, 48, 59,
	70, 81, 92, 102, 111, 118, 124, 126,
	0, 0, -1, -1, -2, -3, -4, -5,
	-6, -7, -8, -8, -8, -8, -6, -3,
};
/* 4-tap polyphase filter coefficients, chroma horizontal scaling */
static const u8 filter_cr_horiz_tap4[] = {
	0, -3, -6, -8, -8, -8, -8, -7,
	-6, -5, -4, -3, -2, -1, -1, 0,
	127, 126, 124, 118, 111, 102, 92, 81,
	70, 59, 48, 37, 27, 19, 11, 5,
};
static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
454
int reg_id, const u8 *data, unsigned int size)
456
/* assure 4-byte align */
458
for (; size; size -= 4, reg_id += 4, data += 4) {
459
u32 val = (data[0] << 24) | (data[1] << 16) |
460
(data[2] << 8) | data[3];
461
vp_write(mdev, reg_id, val);
465
static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
467
mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
468
filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
469
mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
470
filter_y_vert_tap4, sizeof filter_y_vert_tap4);
471
mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
472
filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
475
static void mxr_reg_mxr_dump(struct mxr_device *mdev)
477
#define DUMPREG(reg_id) \
479
mxr_dbg(mdev, #reg_id " = %08x\n", \
480
(u32)readl(mdev->res.mxr_regs + reg_id)); \
486
DUMPREG(MXR_INT_STATUS);
488
DUMPREG(MXR_LAYER_CFG);
489
DUMPREG(MXR_VIDEO_CFG);
491
DUMPREG(MXR_GRAPHIC0_CFG);
492
DUMPREG(MXR_GRAPHIC0_BASE);
493
DUMPREG(MXR_GRAPHIC0_SPAN);
494
DUMPREG(MXR_GRAPHIC0_WH);
495
DUMPREG(MXR_GRAPHIC0_SXY);
496
DUMPREG(MXR_GRAPHIC0_DXY);
498
DUMPREG(MXR_GRAPHIC1_CFG);
499
DUMPREG(MXR_GRAPHIC1_BASE);
500
DUMPREG(MXR_GRAPHIC1_SPAN);
501
DUMPREG(MXR_GRAPHIC1_WH);
502
DUMPREG(MXR_GRAPHIC1_SXY);
503
DUMPREG(MXR_GRAPHIC1_DXY);
507
static void mxr_reg_vp_dump(struct mxr_device *mdev)
509
#define DUMPREG(reg_id) \
511
mxr_dbg(mdev, #reg_id " = %08x\n", \
512
(u32) readl(mdev->res.vp_regs + reg_id)); \
518
DUMPREG(VP_SHADOW_UPDATE);
519
DUMPREG(VP_FIELD_ID);
521
DUMPREG(VP_IMG_SIZE_Y);
522
DUMPREG(VP_IMG_SIZE_C);
523
DUMPREG(VP_PER_RATE_CTRL);
524
DUMPREG(VP_TOP_Y_PTR);
525
DUMPREG(VP_BOT_Y_PTR);
526
DUMPREG(VP_TOP_C_PTR);
527
DUMPREG(VP_BOT_C_PTR);
528
DUMPREG(VP_ENDIAN_MODE);
529
DUMPREG(VP_SRC_H_POSITION);
530
DUMPREG(VP_SRC_V_POSITION);
531
DUMPREG(VP_SRC_WIDTH);
532
DUMPREG(VP_SRC_HEIGHT);
533
DUMPREG(VP_DST_H_POSITION);
534
DUMPREG(VP_DST_V_POSITION);
535
DUMPREG(VP_DST_WIDTH);
536
DUMPREG(VP_DST_HEIGHT);
543
/* Dump all mixer and Video Processor registers to the debug log. */
void mxr_reg_dump(struct mxr_device *mdev)
{
	mxr_reg_mxr_dump(mdev);
	mxr_reg_vp_dump(mdev);
}