/* Copyright (C) 2001-2006 Artifex Software, Inc.
   All Rights Reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied, modified
   or distributed except as expressly authorized under the terms of that
   license. Refer to licensing information at http://www.artifex.com/
   or contact Artifex Software, Inc., 7 Mt. Lassen Drive - Suite A-134,
   San Rafael, CA 94903, U.S.A., +1(415)492-9861, for further information.
*/

/* $Id: gscie.c 8764 2008-05-21 18:27:42Z mvrhel $ */

/* CIE color rendering cache management */
21
#include "gsmatrix.h" /* for gscolor2.h */
22
#include "gxcspace.h" /* for gxcie.c */
23
#include "gscolor2.h" /* for gs_set/currentcolorrendering */
26
#include "gxdevice.h" /* for gxcmap.h */
/*
 * Define whether to optimize the CIE mapping process by combining steps.
 * This should only be disabled (commented out) for debugging.
 */
#define OPTIMIZE_CIE_MAPPING
37
/* Forward references */
38
static int cie_joint_caches_init(gx_cie_joint_caches *,
39
const gs_cie_common *,
41
static void cie_joint_caches_complete(gx_cie_joint_caches *,
42
const gs_cie_common *,
44
const gs_cie_render *);
45
static void cie_cache_restrict(cie_cache_floats *, const gs_range *);
46
static void cie_mult3(const gs_vector3 *, const gs_matrix3 *,
48
static void cie_matrix_mult3(const gs_matrix3 *, const gs_matrix3 *,
50
static void cie_invert3(const gs_matrix3 *, gs_matrix3 *);
51
static void cie_matrix_init(gs_matrix3 *);
53
/* Allocator structure types */
54
private_st_joint_caches();
55
extern_st(st_imager_state);
57
#define RESTRICTED_INDEX(v, n, itemp)\
58
((uint)(itemp = (int)(v)) >= (n) ?\
59
(itemp < 0 ? 0 : (n) - 1) : itemp)
61
/* Define cache interpolation threshold values. */
62
#ifdef CIE_CACHE_INTERPOLATE
63
# ifdef CIE_INTERPOLATE_THRESHOLD
64
# define CACHE_THRESHOLD CIE_INTERPOLATE_THRESHOLD
66
# define CACHE_THRESHOLD 0 /* always interpolate */
69
# define CACHE_THRESHOLD 1.0e6 /* never interpolate */
71
#ifdef CIE_RENDER_TABLE_INTERPOLATE
72
# define RENDER_TABLE_THRESHOLD 0
74
# define RENDER_TABLE_THRESHOLD 1.0e6
/*
 * Determine whether a function is a linear transformation of the form
 * f(x) = scale * x + origin.
 */
82
cache_is_linear(cie_linear_params_t *params, const cie_cache_floats *pcf)
84
double origin = pcf->values[0];
85
double diff = pcf->values[countof(pcf->values) - 1] - origin;
86
double scale = diff / (countof(pcf->values) - 1);
88
double test = origin + scale;
90
for (i = 1; i < countof(pcf->values) - 1; ++i, test += scale)
91
if (fabs(pcf->values[i] - test) >= 0.5 / countof(pcf->values))
92
return (params->is_linear = false);
93
params->origin = origin - pcf->params.base;
95
diff * pcf->params.factor / (countof(pcf->values) - 1);
96
return (params->is_linear = true);
100
cache_set_linear(cie_cache_floats *pcf)
102
if (pcf->params.is_identity) {
103
if_debug1('c', "[c]is_linear(0x%lx) = true (is_identity)\n",
105
pcf->params.linear.is_linear = true;
106
pcf->params.linear.origin = 0;
107
pcf->params.linear.scale = 1;
108
} else if (cache_is_linear(&pcf->params.linear, pcf)) {
109
if (pcf->params.linear.origin == 0 &&
110
fabs(pcf->params.linear.scale - 1) < 0.00001)
111
pcf->params.is_identity = true;
113
"[c]is_linear(0x%lx) = true, origin = %g, scale = %g%s\n",
114
(ulong)pcf, pcf->params.linear.origin,
115
pcf->params.linear.scale,
116
(pcf->params.is_identity ? " (=> is_identity)" : ""));
120
if_debug1('c', "[c]linear(0x%lx) = false\n", (ulong)pcf);
124
cache3_set_linear(gx_cie_vector_cache3_t *pvc)
126
cache_set_linear(&pvc->caches[0].floats);
127
cache_set_linear(&pvc->caches[1].floats);
128
cache_set_linear(&pvc->caches[2].floats);
133
if_debug_vector3(const char *str, const gs_vector3 *vec)
135
if_debug4('c', "%s[%g %g %g]\n", str, vec->u, vec->v, vec->w);
138
if_debug_matrix3(const char *str, const gs_matrix3 *mat)
140
if_debug10('c', "%s [%g %g %g] [%g %g %g] [%g %g %g]\n", str,
141
mat->cu.u, mat->cu.v, mat->cu.w,
142
mat->cv.u, mat->cv.v, mat->cv.w,
143
mat->cw.u, mat->cw.v, mat->cw.w);
146
# define if_debug_vector3(str, vec) DO_NOTHING
147
# define if_debug_matrix3(str, mat) DO_NOTHING
150
/* ------ Default values for CIE dictionary elements ------ */
152
/* Default transformation procedures. */
155
a_identity(floatp in, const gs_cie_a * pcie)
160
a_from_cache(floatp in, const gs_cie_a * pcie)
162
return gs_cie_cached_value(in, &pcie->caches.DecodeA.floats);
166
abc_identity(floatp in, const gs_cie_abc * pcie)
171
abc_from_cache_0(floatp in, const gs_cie_abc * pcie)
173
return gs_cie_cached_value(in, &pcie->caches.DecodeABC.caches[0].floats);
176
abc_from_cache_1(floatp in, const gs_cie_abc * pcie)
178
return gs_cie_cached_value(in, &pcie->caches.DecodeABC.caches[1].floats);
181
abc_from_cache_2(floatp in, const gs_cie_abc * pcie)
183
return gs_cie_cached_value(in, &pcie->caches.DecodeABC.caches[2].floats);
187
def_identity(floatp in, const gs_cie_def * pcie)
192
def_from_cache_0(floatp in, const gs_cie_def * pcie)
194
return gs_cie_cached_value(in, &pcie->caches_def.DecodeDEF[0].floats);
197
def_from_cache_1(floatp in, const gs_cie_def * pcie)
199
return gs_cie_cached_value(in, &pcie->caches_def.DecodeDEF[1].floats);
202
def_from_cache_2(floatp in, const gs_cie_def * pcie)
204
return gs_cie_cached_value(in, &pcie->caches_def.DecodeDEF[2].floats);
208
defg_identity(floatp in, const gs_cie_defg * pcie)
213
defg_from_cache_0(floatp in, const gs_cie_defg * pcie)
215
return gs_cie_cached_value(in, &pcie->caches_defg.DecodeDEFG[0].floats);
218
defg_from_cache_1(floatp in, const gs_cie_defg * pcie)
220
return gs_cie_cached_value(in, &pcie->caches_defg.DecodeDEFG[1].floats);
223
defg_from_cache_2(floatp in, const gs_cie_defg * pcie)
225
return gs_cie_cached_value(in, &pcie->caches_defg.DecodeDEFG[2].floats);
228
defg_from_cache_3(floatp in, const gs_cie_defg * pcie)
230
return gs_cie_cached_value(in, &pcie->caches_defg.DecodeDEFG[3].floats);
234
common_identity(floatp in, const gs_cie_common * pcie)
239
lmn_from_cache_0(floatp in, const gs_cie_common * pcie)
241
return gs_cie_cached_value(in, &pcie->caches.DecodeLMN[0].floats);
244
lmn_from_cache_1(floatp in, const gs_cie_common * pcie)
246
return gs_cie_cached_value(in, &pcie->caches.DecodeLMN[1].floats);
249
lmn_from_cache_2(floatp in, const gs_cie_common * pcie)
251
return gs_cie_cached_value(in, &pcie->caches.DecodeLMN[2].floats);
254
/* Transformation procedures for accessing an already-loaded cache. */
257
gs_cie_cached_value(floatp in, const cie_cache_floats *pcache)
260
* We need to get the same results when we sample an already-loaded
261
* cache, so we need to round the index just a tiny bit.
264
(int)((in - pcache->params.base) * pcache->params.factor + 0.0001);
266
CIE_CLAMP_INDEX(index);
267
return pcache->values[index];
270
/* Default vectors and matrices. */
272
const gs_range3 Range3_default = {
273
{ {0, 1}, {0, 1}, {0, 1} }
275
const gs_range4 Range4_default = {
276
{ {0, 1}, {0, 1}, {0, 1}, {0, 1} }
278
const gs_cie_defg_proc4 DecodeDEFG_default = {
279
{defg_identity, defg_identity, defg_identity, defg_identity}
281
const gs_cie_defg_proc4 DecodeDEFG_from_cache = {
282
{defg_from_cache_0, defg_from_cache_1, defg_from_cache_2, defg_from_cache_3}
284
const gs_cie_def_proc3 DecodeDEF_default = {
285
{def_identity, def_identity, def_identity}
287
const gs_cie_def_proc3 DecodeDEF_from_cache = {
288
{def_from_cache_0, def_from_cache_1, def_from_cache_2}
290
const gs_cie_abc_proc3 DecodeABC_default = {
291
{abc_identity, abc_identity, abc_identity}
293
const gs_cie_abc_proc3 DecodeABC_from_cache = {
294
{abc_from_cache_0, abc_from_cache_1, abc_from_cache_2}
296
const gs_cie_common_proc3 DecodeLMN_default = {
297
{common_identity, common_identity, common_identity}
299
const gs_cie_common_proc3 DecodeLMN_from_cache = {
300
{lmn_from_cache_0, lmn_from_cache_1, lmn_from_cache_2}
302
const gs_matrix3 Matrix3_default = {
308
const gs_range RangeA_default = {0, 1};
309
const gs_cie_a_proc DecodeA_default = a_identity;
310
const gs_cie_a_proc DecodeA_from_cache = a_from_cache;
311
const gs_vector3 MatrixA_default = {1, 1, 1};
312
const gs_vector3 BlackPoint_default = {0, 0, 0};
314
/* Initialize a CIE color. */
315
/* This only happens on setcolorspace. */
317
gx_init_CIE(gs_client_color * pcc, const gs_color_space * pcs)
319
gx_init_paint_4(pcc, pcs);
320
/* (0...) may not be within the range of allowable values. */
321
(*pcs->type->restrict_color)(pcc, pcs);
324
/* Restrict CIE colors. */
327
cie_restrict(float *pv, const gs_range *range)
329
if (*pv <= range->rmin)
331
else if (*pv >= range->rmax)
336
gx_restrict_CIEDEFG(gs_client_color * pcc, const gs_color_space * pcs)
338
const gs_cie_defg *pcie = pcs->params.defg;
340
cie_restrict(&pcc->paint.values[0], &pcie->RangeDEFG.ranges[0]);
341
cie_restrict(&pcc->paint.values[1], &pcie->RangeDEFG.ranges[1]);
342
cie_restrict(&pcc->paint.values[2], &pcie->RangeDEFG.ranges[2]);
343
cie_restrict(&pcc->paint.values[3], &pcie->RangeDEFG.ranges[3]);
346
gx_restrict_CIEDEF(gs_client_color * pcc, const gs_color_space * pcs)
348
const gs_cie_def *pcie = pcs->params.def;
350
cie_restrict(&pcc->paint.values[0], &pcie->RangeDEF.ranges[0]);
351
cie_restrict(&pcc->paint.values[1], &pcie->RangeDEF.ranges[1]);
352
cie_restrict(&pcc->paint.values[2], &pcie->RangeDEF.ranges[2]);
355
gx_restrict_CIEABC(gs_client_color * pcc, const gs_color_space * pcs)
357
const gs_cie_abc *pcie = pcs->params.abc;
359
cie_restrict(&pcc->paint.values[0], &pcie->RangeABC.ranges[0]);
360
cie_restrict(&pcc->paint.values[1], &pcie->RangeABC.ranges[1]);
361
cie_restrict(&pcc->paint.values[2], &pcie->RangeABC.ranges[2]);
364
gx_restrict_CIEA(gs_client_color * pcc, const gs_color_space * pcs)
366
const gs_cie_a *pcie = pcs->params.a;
368
cie_restrict(&pcc->paint.values[0], &pcie->RangeA);
371
/* ================ Table setup ================ */
373
/* ------ Install a CIE color space ------ */
375
static void cie_cache_mult(gx_cie_vector_cache *, const gs_vector3 *,
376
const cie_cache_floats *, floatp);
377
static bool cie_cache_mult3(gx_cie_vector_cache3_t *,
378
const gs_matrix3 *, floatp);
381
gx_install_cie_abc(gs_cie_abc *pcie, gs_state * pgs)
383
if_debug_matrix3("[c]CIE MatrixABC =", &pcie->MatrixABC);
384
cie_matrix_init(&pcie->MatrixABC);
385
CIE_LOAD_CACHE_BODY(pcie->caches.DecodeABC.caches, pcie->RangeABC.ranges,
386
&pcie->DecodeABC, DecodeABC_default, pcie,
388
gx_cie_load_common_cache(&pcie->common, pgs);
389
gs_cie_abc_complete(pcie);
390
return gs_cie_cs_complete(pgs, true);
394
gx_install_CIEDEFG(gs_color_space * pcs, gs_state * pgs)
396
gs_cie_defg *pcie = pcs->params.defg;
398
#if ENABLE_CUSTOM_COLOR_CALLBACK
401
* Check if we want to use the callback color processing for this
404
client_custom_color_params_t * pcb =
405
(client_custom_color_params_t *) pgs->memory->gs_lib_ctx->custom_color_callback;
408
if (pcb->client_procs->install_CIEBasedDEFG(pcb, pcs, pgs))
409
/* Exit if the client will handle the colorspace completely */
414
CIE_LOAD_CACHE_BODY(pcie->caches_defg.DecodeDEFG, pcie->RangeDEFG.ranges,
415
&pcie->DecodeDEFG, DecodeDEFG_default, pcie,
417
return gx_install_cie_abc((gs_cie_abc *)pcie, pgs);
421
gx_install_CIEDEF(gs_color_space * pcs, gs_state * pgs)
423
gs_cie_def *pcie = pcs->params.def;
425
#if ENABLE_CUSTOM_COLOR_CALLBACK
428
* Check if we want to use the callback color processing for this
431
client_custom_color_params_t * pcb =
432
(client_custom_color_params_t *) pgs->memory->gs_lib_ctx->custom_color_callback;
435
if (pcb->client_procs->install_CIEBasedDEF(pcb, pcs, pgs))
436
/* Exit if the client will handle the colorspace completely */
441
CIE_LOAD_CACHE_BODY(pcie->caches_def.DecodeDEF, pcie->RangeDEF.ranges,
442
&pcie->DecodeDEF, DecodeDEF_default, pcie,
444
return gx_install_cie_abc((gs_cie_abc *)pcie, pgs);
448
gx_install_CIEABC(gs_color_space * pcs, gs_state * pgs)
450
#if ENABLE_CUSTOM_COLOR_CALLBACK
453
* Check if we want to use the callback color processing for this
456
client_custom_color_params_t * pcb =
457
(client_custom_color_params_t *) pgs->memory->gs_lib_ctx->custom_color_callback;
460
if (pcb->client_procs->install_CIEBasedABC(pcb, pcs, pgs))
461
/* Exit if the client will handle the colorspace completely */
466
return gx_install_cie_abc(pcs->params.abc, pgs);
470
gx_install_CIEA(gs_color_space * pcs, gs_state * pgs)
472
gs_cie_a *pcie = pcs->params.a;
473
gs_sample_loop_params_t lp;
476
#if ENABLE_CUSTOM_COLOR_CALLBACK
479
* Check if we want to use the callback color processing for this
482
client_custom_color_params_t * pcb =
483
(client_custom_color_params_t *) pgs->memory->gs_lib_ctx->custom_color_callback;
486
if (pcb->client_procs->install_CIEBasedA(pcb, pcs, pgs))
487
/* Exit if the client will handle the colorspace completely */
492
gs_cie_cache_init(&pcie->caches.DecodeA.floats.params, &lp,
493
&pcie->RangeA, "DecodeA");
494
for (i = 0; i <= lp.N; ++i) {
495
float in = SAMPLE_LOOP_VALUE(i, lp);
497
pcie->caches.DecodeA.floats.values[i] = (*pcie->DecodeA)(in, pcie);
498
if_debug3('C', "[C]DecodeA[%d] = %g => %g\n",
499
i, in, pcie->caches.DecodeA.floats.values[i]);
501
gx_cie_load_common_cache(&pcie->common, pgs);
502
gs_cie_a_complete(pcie);
503
return gs_cie_cs_complete(pgs, true);
506
/* Load the common caches when installing the color space. */
507
/* This routine is exported for the benefit of gsicc.c */
509
gx_cie_load_common_cache(gs_cie_common * pcie, gs_state * pgs)
511
if_debug_matrix3("[c]CIE MatrixLMN =", &pcie->MatrixLMN);
512
cie_matrix_init(&pcie->MatrixLMN);
513
CIE_LOAD_CACHE_BODY(pcie->caches.DecodeLMN, pcie->RangeLMN.ranges,
514
&pcie->DecodeLMN, DecodeLMN_default, pcie,
518
/* Complete loading the common caches. */
519
/* This routine is exported for the benefit of gsicc.c */
521
gx_cie_common_complete(gs_cie_common *pcie)
525
for (i = 0; i < 3; ++i)
526
cache_set_linear(&pcie->caches.DecodeLMN[i].floats);
/*
 * Restrict the DecodeDEF[G] cache according to RangeHIJ[K], and scale to
 * the dimensions of Table.
 */
534
gs_cie_defx_scale(float *values, const gs_range *range, int dim)
536
double scale = (dim - 1.0) / (range->rmax - range->rmin);
539
for (i = 0; i < gx_cie_cache_size; ++i) {
540
float value = values[i];
543
(value <= range->rmin ? 0 :
544
value >= range->rmax ? dim - 1 :
545
(value - range->rmin) * scale);
549
/* Complete loading a CIEBasedDEFG color space. */
550
/* This routine is NOT idempotent. */
552
gs_cie_defg_complete(gs_cie_defg * pcie)
556
for (j = 0; j < 4; ++j)
557
gs_cie_defx_scale(pcie->caches_defg.DecodeDEFG[j].floats.values,
558
&pcie->RangeHIJK.ranges[j], pcie->Table.dims[j]);
559
gs_cie_abc_complete((gs_cie_abc *)pcie);
562
/* Complete loading a CIEBasedDEF color space. */
563
/* This routine is NOT idempotent. */
565
gs_cie_def_complete(gs_cie_def * pcie)
569
for (j = 0; j < 3; ++j)
570
gs_cie_defx_scale(pcie->caches_def.DecodeDEF[j].floats.values,
571
&pcie->RangeHIJ.ranges[j], pcie->Table.dims[j]);
572
gs_cie_abc_complete((gs_cie_abc *)pcie);
575
/* Complete loading a CIEBasedABC color space. */
576
/* This routine is idempotent. */
578
gs_cie_abc_complete(gs_cie_abc * pcie)
580
cache3_set_linear(&pcie->caches.DecodeABC);
581
pcie->caches.skipABC =
582
cie_cache_mult3(&pcie->caches.DecodeABC, &pcie->MatrixABC,
584
gx_cie_common_complete((gs_cie_common *)pcie);
587
/* Complete loading a CIEBasedA color space. */
588
/* This routine is idempotent. */
590
gs_cie_a_complete(gs_cie_a * pcie)
592
cie_cache_mult(&pcie->caches.DecodeA, &pcie->MatrixA,
593
&pcie->caches.DecodeA.floats,
595
cache_set_linear(&pcie->caches.DecodeA.floats);
596
gx_cie_common_complete((gs_cie_common *)pcie);
600
* Set the ranges where interpolation is required in a vector cache.
601
* This procedure is idempotent.
603
typedef struct cie_cache_range_temp_s {
604
cie_cached_value prev;
606
} cie_cache_range_temp_t;
608
check_interpolation_required(cie_cache_range_temp_t *pccr,
609
cie_cached_value cur, int i, floatp threshold)
611
cie_cached_value prev = pccr->prev;
613
if (cie_cached_abs(cur - prev) > threshold * min(cie_cached_abs(prev), cie_cached_abs(cur))) {
614
if (i - 1 < pccr->imin)
622
cie_cache_set_interpolation(gx_cie_vector_cache *pcache, floatp threshold)
624
cie_cached_value base = pcache->vecs.params.base;
625
cie_cached_value factor = pcache->vecs.params.factor;
626
cie_cache_range_temp_t temp[3];
629
for (j = 0; j < 3; ++j)
630
temp[j].imin = gx_cie_cache_size, temp[j].imax = -1;
631
temp[0].prev = pcache->vecs.values[0].u;
632
temp[1].prev = pcache->vecs.values[0].v;
633
temp[2].prev = pcache->vecs.values[0].w;
635
for (i = 0; i < gx_cie_cache_size; ++i) {
636
check_interpolation_required(&temp[0], pcache->vecs.values[i].u, i,
638
check_interpolation_required(&temp[1], pcache->vecs.values[i].v, i,
640
check_interpolation_required(&temp[2], pcache->vecs.values[i].w, i,
644
for (j = 0; j < 3; ++j) {
645
pcache->vecs.params.interpolation_ranges[j].rmin =
646
base + (cie_cached_value)((double)temp[j].imin / factor);
647
pcache->vecs.params.interpolation_ranges[j].rmax =
648
base + (cie_cached_value)((double)temp[j].imax / factor);
649
if_debug3('c', "[c]interpolation_ranges[%d] = %g, %g\n", j,
650
cie_cached2float(pcache->vecs.params.interpolation_ranges[j].rmin),
651
cie_cached2float(pcache->vecs.params.interpolation_ranges[j].rmax));
657
* Convert a scalar cache to a vector cache by multiplying the scalar
658
* values by a vector. Also set the range where interpolation is needed.
659
* This procedure is idempotent.
662
cie_cache_mult(gx_cie_vector_cache * pcache, const gs_vector3 * pvec,
663
const cie_cache_floats * pcf, floatp threshold)
665
float u = pvec->u, v = pvec->v, w = pvec->w;
668
pcache->vecs.params.base = float2cie_cached(pcf->params.base);
669
pcache->vecs.params.factor = float2cie_cached(pcf->params.factor);
670
pcache->vecs.params.limit =
671
float2cie_cached((gx_cie_cache_size - 1) / pcf->params.factor +
673
for (i = 0; i < gx_cie_cache_size; ++i) {
674
float f = pcf->values[i];
676
pcache->vecs.values[i].u = float2cie_cached(f * u);
677
pcache->vecs.values[i].v = float2cie_cached(f * v);
678
pcache->vecs.values[i].w = float2cie_cached(f * w);
680
cie_cache_set_interpolation(pcache, threshold);
684
* Set the interpolation ranges in a 3-vector cache, based on the ranges in
685
* the individual vector caches. This procedure is idempotent.
688
cie_cache3_set_interpolation(gx_cie_vector_cache3_t * pvc)
692
/* Iterate over output components. */
693
for (j = 0; j < 3; ++j) {
694
/* Iterate over sub-caches. */
695
cie_interpolation_range_t *p =
696
&pvc->caches[0].vecs.params.interpolation_ranges[j];
697
cie_cached_value rmin = p->rmin, rmax = p->rmax;
699
for (k = 1; k < 3; ++k) {
700
p = &pvc->caches[k].vecs.params.interpolation_ranges[j];
701
rmin = min(rmin, p->rmin), rmax = max(rmax, p->rmax);
703
pvc->interpolation_ranges[j].rmin = rmin;
704
pvc->interpolation_ranges[j].rmax = rmax;
705
if_debug3('c', "[c]Merged interpolation_ranges[%d] = %g, %g\n",
711
* Convert 3 scalar caches to vector caches by multiplying by a matrix.
712
* Return true iff the resulting cache is an identity transformation.
713
* This procedure is idempotent.
716
cie_cache_mult3(gx_cie_vector_cache3_t * pvc, const gs_matrix3 * pmat,
719
cie_cache_mult(&pvc->caches[0], &pmat->cu, &pvc->caches[0].floats, threshold);
720
cie_cache_mult(&pvc->caches[1], &pmat->cv, &pvc->caches[1].floats, threshold);
721
cie_cache_mult(&pvc->caches[2], &pmat->cw, &pvc->caches[2].floats, threshold);
722
cie_cache3_set_interpolation(pvc);
723
return pmat->is_identity & pvc->caches[0].floats.params.is_identity &
724
pvc->caches[1].floats.params.is_identity &
725
pvc->caches[2].floats.params.is_identity;
728
/* ------ Install a rendering dictionary ------ */
730
/* setcolorrendering */
732
gs_setcolorrendering(gs_state * pgs, gs_cie_render * pcrd)
734
int code = gs_cie_render_complete(pcrd);
735
const gs_cie_render *pcrd_old = pgs->cie_render;
740
if (pcrd_old != 0 && pcrd->id == pcrd_old->id)
741
return 0; /* detect needless reselecting */
744
#define CRD_SAME(elt) !memcmp(&pcrd->elt, &pcrd_old->elt, sizeof(pcrd->elt))
745
CRD_SAME(points.WhitePoint) && CRD_SAME(points.BlackPoint) &&
746
CRD_SAME(MatrixPQR) && CRD_SAME(RangePQR) &&
747
CRD_SAME(TransformPQR);
749
rc_assign(pgs->cie_render, pcrd, "gs_setcolorrendering");
750
/* Initialize the joint caches if needed. */
752
code = gs_cie_cs_complete(pgs, true);
753
gx_unset_dev_color(pgs);
757
/* currentcolorrendering */
758
const gs_cie_render *
759
gs_currentcolorrendering(const gs_state * pgs)
761
return pgs->cie_render;
764
/* Unshare (allocating if necessary) the joint caches. */
765
gx_cie_joint_caches *
766
gx_unshare_cie_caches(gs_state * pgs)
768
gx_cie_joint_caches *pjc = pgs->cie_joint_caches;
770
rc_unshare_struct(pgs->cie_joint_caches, gx_cie_joint_caches,
771
&st_joint_caches, pgs->memory,
772
return 0, "gx_unshare_cie_caches");
773
if (pgs->cie_joint_caches != pjc) {
774
pjc = pgs->cie_joint_caches;
775
pjc->cspace_id = pjc->render_id = gs_no_id;
776
pjc->id_status = pjc->status = CIE_JC_STATUS_BUILT;
781
gx_cie_joint_caches *
782
gx_currentciecaches(gs_state * pgs)
784
return pgs->cie_joint_caches;
787
/* Compute the parameters for loading a cache, setting base and factor. */
788
/* This procedure is idempotent. */
790
gs_cie_cache_init(cie_cache_params * pcache, gs_sample_loop_params_t * pslp,
791
const gs_range * domain, client_name_t cname)
794
We need to map the values in the range [domain->rmin..domain->rmax].
795
However, if rmin < 0 < rmax and the function is non-linear, this can
796
lead to anomalies at zero, which is the default value for CIE colors.
797
The "correct" way to approach this is to run the mapping functions on
798
demand, but we don't want to deal with the complexities of the
799
callbacks this would involve (especially in the middle of rendering
800
images); instead, we adjust the range so that zero maps precisely to a
805
N = gx_cie_cache_size - 1;
808
h(v) = N * (v - A) / R; // the index of v in the cache
811
If X is not an integer, we can decrease A and/increase B to make it
812
one. Let A' and B' be the adjusted values of A and B respectively,
813
and let K be the integer derived from X (either floor(X) or ceil(X)).
816
f(K) = (K * B' + (N - K) * A') / N).
818
We want f(K) = 0. This occurs precisely when, for any real number
824
In order to ensure A' <= A and B' >= B, we require
829
Since A' and B' must be exactly representable as floats, we round C
830
upward to ensure that it has no more than M mantissa bits, where
832
M = ARCH_FLOAT_MANTISSA_BITS - ceil(log2(N)).
834
float A = domain->rmin, B = domain->rmax;
835
double R = B - A, delta;
836
#define NN (gx_cie_cache_size - 1) /* 'N' is a member name, see end of proc */
838
#define CEIL_LOG2_N CIE_LOG2_CACHE_SIZE
840
/* Adjust the range if necessary. */
841
if (A < 0 && B >= 0) {
842
const double X = -N * A / R; /* know X > 0 */
843
/* Choose K to minimize range expansion. */
844
const int K = (int)(A + B < 0 ? floor(X) : ceil(X)); /* know 0 < K < N */
845
const double Ca = -A / K, Cb = B / (N - K); /* know Ca, Cb > 0 */
846
double C = max(Ca, Cb); /* know C > 0 */
847
const int M = ARCH_FLOAT_MANTISSA_BITS - CEIL_LOG2_N;
849
const double cfrac = frexp(C, &cexp);
851
if_debug4('c', "[c]adjusting cache_init(%8g, %8g), X = %8g, K = %d:\n",
853
/* Round C to no more than M significant bits. See above. */
854
C = ldexp(ceil(ldexp(cfrac, M)), cexp - M);
855
/* Finally, compute A' and B'. */
858
if_debug2('c', "[c] => %8g, %8g\n", A, B);
862
#ifdef CIE_CACHE_INTERPOLATE
863
pcache->base = A; /* no rounding */
865
pcache->base = A - delta / 2; /* so lookup will round */
868
* If size of the domain is zero, then use 1.0 as the scaling
869
* factor. This prevents divide by zero errors in later calculations.
870
* This should only occurs with zero matrices. It does occur with
871
* Genoa test file 050-01.ps.
873
pcache->factor = (any_abs(delta) < 1e-30 ? 1.0 : N / R);
874
if_debug4('c', "[c]cache %s 0x%lx base=%g, factor=%g\n",
875
(const char *)cname, (ulong) pcache,
876
pcache->base, pcache->factor);
884
/* ------ Complete a rendering structure ------ */
887
* Compute the derived values in a CRD that don't involve the cached
888
* procedure values. This procedure is idempotent.
890
static void cie_transform_range3(const gs_range3 *, const gs_matrix3 *,
893
gs_cie_render_init(gs_cie_render * pcrd)
895
gs_matrix3 PQR_inverse;
897
if (pcrd->status >= CIE_RENDER_STATUS_INITED)
898
return 0; /* init already done */
899
if_debug_matrix3("[c]CRD MatrixLMN =", &pcrd->MatrixLMN);
900
cie_matrix_init(&pcrd->MatrixLMN);
901
if_debug_matrix3("[c]CRD MatrixABC =", &pcrd->MatrixABC);
902
cie_matrix_init(&pcrd->MatrixABC);
903
if_debug_matrix3("[c]CRD MatrixPQR =", &pcrd->MatrixPQR);
904
cie_matrix_init(&pcrd->MatrixPQR);
905
cie_invert3(&pcrd->MatrixPQR, &PQR_inverse);
906
cie_matrix_mult3(&pcrd->MatrixLMN, &PQR_inverse,
907
&pcrd->MatrixPQR_inverse_LMN);
908
cie_transform_range3(&pcrd->RangePQR, &pcrd->MatrixPQR_inverse_LMN,
910
cie_transform_range3(&pcrd->RangeLMN, &pcrd->MatrixABC,
912
cie_mult3(&pcrd->points.WhitePoint, &pcrd->MatrixPQR, &pcrd->wdpqr);
913
cie_mult3(&pcrd->points.BlackPoint, &pcrd->MatrixPQR, &pcrd->bdpqr);
914
pcrd->status = CIE_RENDER_STATUS_INITED;
919
* Sample the EncodeLMN, EncodeABC, and RenderTableT CRD procedures, and
920
* load the caches. This procedure is idempotent.
923
gs_cie_render_sample(gs_cie_render * pcrd)
927
if (pcrd->status >= CIE_RENDER_STATUS_SAMPLED)
928
return 0; /* sampling already done */
929
code = gs_cie_render_init(pcrd);
932
CIE_LOAD_CACHE_BODY(pcrd->caches.EncodeLMN.caches, pcrd->DomainLMN.ranges,
933
&pcrd->EncodeLMN, Encode_default, pcrd, "EncodeLMN");
934
cache3_set_linear(&pcrd->caches.EncodeLMN);
935
CIE_LOAD_CACHE_BODY(pcrd->caches.EncodeABC, pcrd->DomainABC.ranges,
936
&pcrd->EncodeABC, Encode_default, pcrd, "EncodeABC");
937
if (pcrd->RenderTable.lookup.table != 0) {
938
int i, j, m = pcrd->RenderTable.lookup.m;
939
gs_sample_loop_params_t lp;
940
bool is_identity = true;
942
for (j = 0; j < m; j++) {
943
gs_cie_cache_init(&pcrd->caches.RenderTableT[j].fracs.params,
944
&lp, &Range3_default.ranges[0],
946
is_identity &= pcrd->RenderTable.T.procs[j] ==
947
RenderTableT_default.procs[j];
949
pcrd->caches.RenderTableT_is_identity = is_identity;
951
* Unfortunately, we defined the first argument of the RenderTable
952
* T procedures as being a byte, limiting the number of distinct
953
* cache entries to 256 rather than gx_cie_cache_size.
954
* We confine this decision to this loop, rather than propagating
955
* it to the procedures that use the cached data, so that we can
956
* change it more easily at some future time.
958
for (i = 0; i < gx_cie_cache_size; i++) {
959
#if gx_cie_log2_cache_size >= 8
960
byte value = i >> (gx_cie_log2_cache_size - 8);
962
byte value = (i << (8 - gx_cie_log2_cache_size)) +
963
(i >> (gx_cie_log2_cache_size * 2 - 8));
965
for (j = 0; j < m; j++) {
966
pcrd->caches.RenderTableT[j].fracs.values[i] =
967
(*pcrd->RenderTable.T.procs[j])(value, pcrd);
968
if_debug3('C', "[C]RenderTableT[%d,%d] = %g\n",
970
frac2float(pcrd->caches.RenderTableT[j].fracs.values[i]));
974
pcrd->status = CIE_RENDER_STATUS_SAMPLED;
978
/* Transform a set of ranges. */
980
cie_transform_range(const gs_range3 * in, floatp mu, floatp mv, floatp mw,
983
float umin = mu * in->ranges[0].rmin, umax = mu * in->ranges[0].rmax;
984
float vmin = mv * in->ranges[1].rmin, vmax = mv * in->ranges[1].rmax;
985
float wmin = mw * in->ranges[2].rmin, wmax = mw * in->ranges[2].rmax;
989
temp = umin, umin = umax, umax = temp;
991
temp = vmin, vmin = vmax, vmax = temp;
993
temp = wmin, wmin = wmax, wmax = temp;
994
out->rmin = umin + vmin + wmin;
995
out->rmax = umax + vmax + wmax;
998
cie_transform_range3(const gs_range3 * in, const gs_matrix3 * mat,
1001
cie_transform_range(in, mat->cu.u, mat->cv.u, mat->cw.u,
1003
cie_transform_range(in, mat->cu.v, mat->cv.v, mat->cw.v,
1005
cie_transform_range(in, mat->cu.w, mat->cv.w, mat->cw.w,
1010
* Finish preparing a CRD for installation, by restricting and/or
1011
* transforming the cached procedure values.
1012
* This procedure is idempotent.
1015
gs_cie_render_complete(gs_cie_render * pcrd)
1019
if (pcrd->status >= CIE_RENDER_STATUS_COMPLETED)
1020
return 0; /* completion already done */
1021
code = gs_cie_render_sample(pcrd);
1025
* Since range restriction happens immediately after
1026
* the cache lookup, we can save a step by restricting
1027
* the values in the cache entries.
1029
* If there is no lookup table, we want the final ABC values
1030
* to be fracs; if there is a table, we want them to be
1031
* appropriately scaled ints.
1033
pcrd->MatrixABCEncode = pcrd->MatrixABC;
1038
for (c = 0; c < 3; c++) {
1039
gx_cie_float_fixed_cache *pcache = &pcrd->caches.EncodeABC[c];
1041
cie_cache_restrict(&pcrd->caches.EncodeLMN.caches[c].floats,
1042
&pcrd->RangeLMN.ranges[c]);
1043
cie_cache_restrict(&pcrd->caches.EncodeABC[c].floats,
1044
&pcrd->RangeABC.ranges[c]);
1045
if (pcrd->RenderTable.lookup.table == 0) {
1046
cie_cache_restrict(&pcache->floats,
1047
&Range3_default.ranges[0]);
1048
gs_cie_cache_to_fracs(&pcache->floats, &pcache->fixeds.fracs);
1049
pcache->fixeds.fracs.params.is_identity = false;
1052
int n = pcrd->RenderTable.lookup.dims[c];
1054
#ifdef CIE_RENDER_TABLE_INTERPOLATE
1055
# define SCALED_INDEX(f, n, itemp)\
1056
RESTRICTED_INDEX(f * (1 << _cie_interpolate_bits),\
1057
(n) << _cie_interpolate_bits, itemp)
1059
int m = pcrd->RenderTable.lookup.m;
1061
(c == 0 ? 1 : c == 1 ?
1062
m * pcrd->RenderTable.lookup.dims[2] : m);
1063
# define SCALED_INDEX(f, n, itemp)\
1064
(RESTRICTED_INDEX(f, n, itemp) * k)
1066
const gs_range *prange = pcrd->RangeABC.ranges + c;
1067
double scale = (n - 1) / (prange->rmax - prange->rmin);
1069
for (i = 0; i < gx_cie_cache_size; ++i) {
1071
(pcache->floats.values[i] - prange->rmin) * scale
1072
#ifndef CIE_RENDER_TABLE_INTERPOLATE
1079
"[c]cache[%d][%d] = %g => %g => %d\n",
1080
c, i, pcache->floats.values[i], v,
1081
SCALED_INDEX(v, n, itemp));
1082
pcache->fixeds.ints.values[i] =
1083
SCALED_INDEX(v, n, itemp);
1085
pcache->fixeds.ints.params = pcache->floats.params;
1086
pcache->fixeds.ints.params.is_identity = false;
1090
/* Fold the scaling of the EncodeABC cache index */
1091
/* into MatrixABC. */
1093
f = pcrd->caches.EncodeABC[i].floats.params.factor;\
1094
pcrd->MatrixABCEncode.cu.t *= f;\
1095
pcrd->MatrixABCEncode.cv.t *= f;\
1096
pcrd->MatrixABCEncode.cw.t *= f;\
1097
pcrd->EncodeABC_base[i] =\
1098
float2cie_cached(pcrd->caches.EncodeABC[i].floats.params.base * f)
1103
pcrd->MatrixABCEncode.is_identity = 0;
1105
cie_cache_mult3(&pcrd->caches.EncodeLMN, &pcrd->MatrixABCEncode,
1107
pcrd->status = CIE_RENDER_STATUS_COMPLETED;
1111
/* Apply a range restriction to a cache. */
1113
cie_cache_restrict(cie_cache_floats * pcache, const gs_range * prange)
1117
for (i = 0; i < gx_cie_cache_size; i++) {
1118
float v = pcache->values[i];
1120
if (v < prange->rmin)
1121
pcache->values[i] = prange->rmin;
1122
else if (v > prange->rmax)
1123
pcache->values[i] = prange->rmax;
1127
/* Convert a cache from floats to fracs. */
1128
/* Note that the two may be aliased. */
1130
gs_cie_cache_to_fracs(const cie_cache_floats *pfloats, cie_cache_fracs *pfracs)
1134
/* Loop from bottom to top so that we don't */
1135
/* overwrite elements before they're used. */
1136
for (i = 0; i < gx_cie_cache_size; ++i)
1137
pfracs->values[i] = float2frac(pfloats->values[i]);
1138
pfracs->params = pfloats->params;
1141
/* ------ Fill in the joint cache ------ */
1143
/* If the current color space is a CIE space, or has a CIE base space, */
1144
/* return a pointer to the common part of the space; otherwise return 0. */
1145
static const gs_cie_common *
1146
cie_cs_common_abc(const gs_color_space *pcs_orig, const gs_cie_abc **ppabc)
1148
const gs_color_space *pcs = pcs_orig;
1152
switch (pcs->type->index) {
1153
case gs_color_space_index_CIEDEF:
1154
*ppabc = (const gs_cie_abc *)pcs->params.def;
1155
return &pcs->params.def->common;
1156
case gs_color_space_index_CIEDEFG:
1157
*ppabc = (const gs_cie_abc *)pcs->params.defg;
1158
return &pcs->params.defg->common;
1159
case gs_color_space_index_CIEABC:
1160
*ppabc = pcs->params.abc;
1161
return &pcs->params.abc->common;
1162
case gs_color_space_index_CIEA:
1163
return &pcs->params.a->common;
1164
case gs_color_space_index_CIEICC:
1165
return &pcs->params.icc.picc_info->common;
1167
pcs = gs_cspace_base_space(pcs);
1174
const gs_cie_common *
1175
gs_cie_cs_common(const gs_state * pgs)
1177
const gs_cie_abc *ignore_pabc;
1179
return cie_cs_common_abc(pgs->color_space, &ignore_pabc);
1183
* Mark the joint caches as needing completion. This is done lazily,
1184
* when a color is being mapped. However, make sure the joint caches
1188
gs_cie_cs_complete(gs_state * pgs, bool init)
1190
gx_cie_joint_caches *pjc = gx_unshare_cie_caches(pgs);
1193
return_error(gs_error_VMerror);
1194
pjc->status = (init ? CIE_JC_STATUS_BUILT : CIE_JC_STATUS_INITED);
1197
/* Actually complete the joint caches. */
1199
gs_cie_jc_complete(const gs_imager_state *pis, const gs_color_space *pcs)
1201
const gs_cie_abc *pabc;
1202
const gs_cie_common *common = cie_cs_common_abc(pcs, &pabc);
1203
gs_cie_render *pcrd = pis->cie_render;
1204
gx_cie_joint_caches *pjc = pis->cie_joint_caches;
1206
if (pjc->cspace_id == pcs->id &&
1207
pjc->render_id == pcrd->id)
1208
pjc->status = pjc->id_status;
1209
switch (pjc->status) {
1210
case CIE_JC_STATUS_BUILT: {
1211
int code = cie_joint_caches_init(pjc, common, pcrd);
1217
case CIE_JC_STATUS_INITED:
1218
cie_joint_caches_complete(pjc, common, pabc, pcrd);
1219
pjc->cspace_id = pcs->id;
1220
pjc->render_id = pcrd->id;
1221
pjc->id_status = pjc->status = CIE_JC_STATUS_COMPLETED;
1223
case CIE_JC_STATUS_COMPLETED:
1230
* Compute the source and destination WhitePoint and BlackPoint for
1231
* the TransformPQR procedure.
1234
gs_cie_compute_points_sd(gx_cie_joint_caches *pjc,
1235
const gs_cie_common * pcie,
1236
const gs_cie_render * pcrd)
1238
gs_cie_wbsd *pwbsd = &pjc->points_sd;
1240
pwbsd->ws.xyz = pcie->points.WhitePoint;
1241
cie_mult3(&pwbsd->ws.xyz, &pcrd->MatrixPQR, &pwbsd->ws.pqr);
1242
pwbsd->bs.xyz = pcie->points.BlackPoint;
1243
cie_mult3(&pwbsd->bs.xyz, &pcrd->MatrixPQR, &pwbsd->bs.pqr);
1244
pwbsd->wd.xyz = pcrd->points.WhitePoint;
1245
pwbsd->wd.pqr = pcrd->wdpqr;
1246
pwbsd->bd.xyz = pcrd->points.BlackPoint;
1247
pwbsd->bd.pqr = pcrd->bdpqr;
1252
* Sample the TransformPQR procedure for the joint caches.
1253
* This routine is idempotent.
1256
cie_joint_caches_init(gx_cie_joint_caches * pjc,
1257
const gs_cie_common * pcie,
1258
gs_cie_render * pcrd)
1263
gs_cie_compute_points_sd(pjc, pcie, pcrd);
1265
* If a client pre-loaded the cache, we can't adjust the range.
1266
* ****** WRONG ******
1268
if (pcrd->TransformPQR.proc == TransformPQR_from_cache.proc)
1270
is_identity = pcrd->TransformPQR.proc == TransformPQR_default.proc;
1271
for (j = 0; j < 3; j++) {
1273
gs_sample_loop_params_t lp;
1275
gs_cie_cache_init(&pjc->TransformPQR.caches[j].floats.params, &lp,
1276
&pcrd->RangePQR.ranges[j], "TransformPQR");
1277
for (i = 0; i <= lp.N; ++i) {
1278
float in = SAMPLE_LOOP_VALUE(i, lp);
1280
int code = (*pcrd->TransformPQR.proc)(j, in, &pjc->points_sd,
1285
pjc->TransformPQR.caches[j].floats.values[i] = out;
1286
if_debug4('C', "[C]TransformPQR[%d,%d] = %g => %g\n",
1289
pjc->TransformPQR.caches[j].floats.params.is_identity = is_identity;
1295
* Complete the loading of the joint caches.
1296
* This routine is idempotent.
1299
cie_joint_caches_complete(gx_cie_joint_caches * pjc,
1300
const gs_cie_common * pcie,
1301
const gs_cie_abc * pabc /* NULL if CIEA */,
1302
const gs_cie_render * pcrd)
1304
gs_matrix3 mat3, mat2;
1305
gs_matrix3 MatrixLMN_PQR;
1308
pjc->remap_finish = gx_cie_real_remap_finish;
1311
* We number the pipeline steps as follows:
1312
* 1 - DecodeABC/MatrixABC
1313
* 2 - DecodeLMN/MatrixLMN/MatrixPQR
1314
* 3 - TransformPQR/MatrixPQR'/MatrixLMN
1315
* 4 - EncodeLMN/MatrixABC
1316
* 5 - EncodeABC, RenderTable (we don't do anything with this here)
1317
* We work from back to front, combining steps where possible.
1318
* Currently we only combine steps if a procedure is the identity
1319
* transform, but we could do it whenever the procedure is linear.
1320
* A project for another day....
1325
#ifdef OPTIMIZE_CIE_MAPPING
1326
if (pcrd->caches.EncodeLMN.caches[0].floats.params.is_identity &&
1327
pcrd->caches.EncodeLMN.caches[1].floats.params.is_identity &&
1328
pcrd->caches.EncodeLMN.caches[2].floats.params.is_identity
1330
/* Fold step 4 into step 3. */
1331
if_debug0('c', "[c]EncodeLMN is identity, folding MatrixABC(Encode) into MatrixPQR'+LMN.\n");
1332
cie_matrix_mult3(&pcrd->MatrixABCEncode, &pcrd->MatrixPQR_inverse_LMN,
1334
pjc->skipEncodeLMN = true;
1336
#endif /* OPTIMIZE_CIE_MAPPING */
1338
if_debug0('c', "[c]EncodeLMN is not identity.\n");
1339
mat3 = pcrd->MatrixPQR_inverse_LMN;
1340
pjc->skipEncodeLMN = false;
1345
cache3_set_linear(&pjc->TransformPQR);
1346
cie_matrix_mult3(&pcrd->MatrixPQR, &pcie->MatrixLMN,
1349
#ifdef OPTIMIZE_CIE_MAPPING
1350
if (pjc->TransformPQR.caches[0].floats.params.is_identity &
1351
pjc->TransformPQR.caches[1].floats.params.is_identity &
1352
pjc->TransformPQR.caches[2].floats.params.is_identity
1354
/* Fold step 3 into step 2. */
1355
if_debug0('c', "[c]TransformPQR is identity, folding MatrixPQR'+LMN into MatrixLMN+PQR.\n");
1356
cie_matrix_mult3(&mat3, &MatrixLMN_PQR, &mat2);
1357
pjc->skipPQR = true;
1359
#endif /* OPTIMIZE_CIE_MAPPING */
1361
if_debug0('c', "[c]TransformPQR is not identity.\n");
1362
mat2 = MatrixLMN_PQR;
1363
for (j = 0; j < 3; j++) {
1364
cie_cache_restrict(&pjc->TransformPQR.caches[j].floats,
1365
&pcrd->RangePQR.ranges[j]);
1367
cie_cache_mult3(&pjc->TransformPQR, &mat3, CACHE_THRESHOLD);
1368
pjc->skipPQR = false;
1373
#ifdef OPTIMIZE_CIE_MAPPING
1374
if (pcie->caches.DecodeLMN[0].floats.params.is_identity &
1375
pcie->caches.DecodeLMN[1].floats.params.is_identity &
1376
pcie->caches.DecodeLMN[2].floats.params.is_identity
1378
if_debug0('c', "[c]DecodeLMN is identity, folding MatrixLMN+PQR into MatrixABC.\n");
1380
pjc->skipDecodeLMN = mat2.is_identity;
1381
pjc->skipDecodeABC = false;
1382
if (!pjc->skipDecodeLMN) {
1383
for (j = 0; j < 3; j++) {
1384
cie_cache_mult(&pjc->DecodeLMN.caches[j], &mat2.cu + j,
1385
&pcie->caches.DecodeLMN[j].floats,
1388
cie_cache3_set_interpolation(&pjc->DecodeLMN);
1392
* Fold step 2 into step 1. This is a little different because
1393
* the data for step 1 are in the color space structure.
1397
cie_matrix_mult3(&mat2, &pabc->MatrixABC, &mat1);
1398
for (j = 0; j < 3; j++) {
1399
cie_cache_mult(&pjc->DecodeLMN.caches[j], &mat1.cu + j,
1400
&pabc->caches.DecodeABC.caches[j].floats,
1403
cie_cache3_set_interpolation(&pjc->DecodeLMN);
1404
pjc->skipDecodeLMN = false;
1405
pjc->skipDecodeABC = true;
1408
#endif /* OPTIMIZE_CIE_MAPPING */
1410
if_debug0('c', "[c]DecodeLMN is not identity.\n");
1411
for (j = 0; j < 3; j++) {
1412
cie_cache_mult(&pjc->DecodeLMN.caches[j], &mat2.cu + j,
1413
&pcie->caches.DecodeLMN[j].floats,
1416
cie_cache3_set_interpolation(&pjc->DecodeLMN);
1417
pjc->skipDecodeLMN = false;
1418
pjc->skipDecodeABC = pabc != 0 && pabc->caches.skipABC;
1424
* Initialize (just enough of) an imager state so that "concretizing" colors
1425
* using this imager state will do only the CIE->XYZ mapping. This is a
1426
* semi-hack for the PDF writer.
1429
gx_cie_to_xyz_alloc(gs_imager_state **ppis, const gs_color_space *pcs,
1433
* In addition to the imager state itself, we need the joint caches.
1435
gs_imager_state *pis =
1436
gs_alloc_struct(mem, gs_imager_state, &st_imager_state,
1437
"gx_cie_to_xyz_alloc(imager state)");
1438
gx_cie_joint_caches *pjc;
1439
const gs_cie_abc *pabc;
1440
const gs_cie_common *pcie = cie_cs_common_abc(pcs, &pabc);
1444
return_error(gs_error_VMerror);
1445
memset(pis, 0, sizeof(*pis)); /* mostly paranoia */
1447
gs_imager_state_initialize(pis, mem);
1449
pjc = gs_alloc_struct(mem, gx_cie_joint_caches, &st_joint_caches,
1450
"gx_cie_to_xyz_free(joint caches)");
1452
gs_free_object(mem, pis, "gx_cie_to_xyz_alloc(imager state)");
1453
return_error(gs_error_VMerror);
1457
* Perform an abbreviated version of cie_joint_caches_complete.
1458
* Don't bother with any optimizations.
1460
for (j = 0; j < 3; j++) {
1461
cie_cache_mult(&pjc->DecodeLMN.caches[j], &pcie->MatrixLMN.cu + j,
1462
&pcie->caches.DecodeLMN[j].floats,
1465
cie_cache3_set_interpolation(&pjc->DecodeLMN);
1466
pjc->skipDecodeLMN = false;
1467
pjc->skipDecodeABC = pabc != 0 && pabc->caches.skipABC;
1468
/* Mark the joint caches as completed. */
1469
pjc->remap_finish = gx_cie_xyz_remap_finish;
1470
pjc->cspace_id = pcs->id;
1471
pjc->status = CIE_JC_STATUS_COMPLETED;
1472
pis->cie_joint_caches = pjc;
1473
pis->cie_to_xyz = true;
1478
/* Free an imager state allocated by gx_cie_to_xyz_alloc, */
/* including its joint caches. */
void
gx_cie_to_xyz_free(gs_imager_state *pis)
{
    gs_memory_t *mem = pis->memory;

    gs_free_object(mem, pis->cie_joint_caches,
                   "gx_cie_to_xyz_free(joint caches)");
    gs_free_object(mem, pis, "gx_cie_to_xyz_free(imager state)");
}
1487
/* ================ Utilities ================ */
1489
/* Multiply a vector by a matrix. */
1490
/* Note that we are computing M * V where v is a column vector. */
1492
cie_mult3(const gs_vector3 * in, register const gs_matrix3 * mat,
1495
if_debug_vector3("[c]mult", in);
1496
if_debug_matrix3(" *", mat);
1498
float u = in->u, v = in->v, w = in->w;
1500
out->u = (u * mat->cu.u) + (v * mat->cv.u) + (w * mat->cw.u);
1501
out->v = (u * mat->cu.v) + (v * mat->cv.v) + (w * mat->cw.v);
1502
out->w = (u * mat->cu.w) + (v * mat->cv.w) + (w * mat->cw.w);
1504
if_debug_vector3(" =", out);
1508
* Multiply two matrices. Note that the composition of the transformations
1509
* M1 followed by M2 is M2 * M1, not M1 * M2. (See gscie.h for details.)
1512
cie_matrix_mult3(const gs_matrix3 *ma, const gs_matrix3 *mb, gs_matrix3 *mc)
1515
gs_matrix3 *mp = (mc == ma || mc == mb ? &mprod : mc);
1517
if_debug_matrix3("[c]matrix_mult", ma);
1518
if_debug_matrix3(" *", mb);
1519
cie_mult3(&mb->cu, ma, &mp->cu);
1520
cie_mult3(&mb->cv, ma, &mp->cv);
1521
cie_mult3(&mb->cw, ma, &mp->cw);
1522
cie_matrix_init(mp);
1523
if_debug_matrix3(" =", mp);
1528
/* Invert a matrix. */
1529
/* The output must not be an alias for the input. */
1531
cie_invert3(const gs_matrix3 *in, gs_matrix3 *out)
1532
{ /* This is a brute force algorithm; maybe there are better. */
1533
/* We label the array elements */
1546
double coA = in->E * in->I - in->F * in->H;
1547
double coB = in->F * in->G - in->D * in->I;
1548
double coC = in->D * in->H - in->E * in->G;
1549
double det = in->A * coA + in->B * coB + in->C * coC;
1551
if_debug_matrix3("[c]invert", in);
1555
out->B = (in->C * in->H - in->B * in->I) / det;
1556
out->E = (in->A * in->I - in->C * in->G) / det;
1557
out->H = (in->B * in->G - in->A * in->H) / det;
1558
out->C = (in->B * in->F - in->C * in->E) / det;
1559
out->F = (in->C * in->D - in->A * in->F) / det;
1560
out->I = (in->A * in->E - in->B * in->D) / det;
1561
if_debug_matrix3(" =", out);
1571
out->is_identity = in->is_identity;
1574
/* Set the is_identity flag that accelerates multiplication. */
1576
cie_matrix_init(register gs_matrix3 * mat)
1579
mat->cu.u == 1.0 && is_fzero2(mat->cu.v, mat->cu.w) &&
1580
mat->cv.v == 1.0 && is_fzero2(mat->cv.u, mat->cv.w) &&
1581
mat->cw.w == 1.0 && is_fzero2(mat->cw.u, mat->cw.v);
1585
gx_color_space_needs_cie_caches(const gs_color_space * pcs)
1587
switch (pcs->type->index) {
1588
case gs_color_space_index_CIEDEFG:
1589
case gs_color_space_index_CIEDEF:
1590
case gs_color_space_index_CIEABC:
1591
case gs_color_space_index_CIEA:
1592
case gs_color_space_index_CIEICC:
1594
case gs_color_space_index_DevicePixel:
1595
case gs_color_space_index_DeviceN:
1596
case gs_color_space_index_Separation:
1597
case gs_color_space_index_Indexed:
1598
case gs_color_space_index_Pattern:
1599
return gx_color_space_needs_cie_caches(pcs->base_space);