~vcs-imports/mammoth-replicator/trunk

Viewing changes to src/backend/utils/cache/catcache.c

  • Committer: alvherre
  • Date: 2005-12-16 21:24:52 UTC
  • Revision ID: svn-v4:db760fc0-0f08-0410-9d63-cc6633f64896:trunk:1
Initial import of the REL8_0_3 sources from the Pgsql CVS repository.

/*-------------------------------------------------------------------------
 *
 * catcache.c
 *    System catalog cache for tuples matching a key.
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.118 2004-12-31 22:01:25 pgsql Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/hash.h"
#include "access/heapam.h"
#include "access/valid.h"
#include "catalog/pg_opclass.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_type.h"
#include "catalog/catname.h"
#include "catalog/indexing.h"
#include "miscadmin.h"
#ifdef CATCACHE_STATS
#include "storage/ipc.h"        /* for on_proc_exit */
#endif
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/catcache.h"
#include "utils/relcache.h"
#include "utils/resowner.h"
#include "utils/syscache.h"


 /* #define CACHEDEBUG */   /* turns DEBUG elogs on */

/*
 * Constants related to size of the catcache.
 *
 * NCCBUCKETS must be a power of two and must be less than 64K (because
 * SharedInvalCatcacheMsg crams hash indexes into a uint16 field).  In
 * practice it should be a lot less, anyway, to avoid chewing up too much
 * space on hash bucket headers.
 *
 * MAXCCTUPLES could be as small as a few hundred, if per-backend memory
 * consumption is at a premium.
 */
#define NCCBUCKETS 256          /* Hash buckets per CatCache */
#define MAXCCTUPLES 5000        /* Maximum # of tuples in all caches */

/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs. Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
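
/*
 * Worked example of HASH_INDEX (illustrative numbers only): with the
 * default NCCBUCKETS of 256 the mask is 0xFF, so a hash value of
 * 0x2A37F1C4 lands in bucket 0x2A37F1C4 & 0xFF = 0xC4 = 196.  This is
 * why the bucket count must be a power of two: (sz - 1) is then a
 * contiguous run of one bits and the modulo reduces to a single AND.
 */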
 
/*
 *      variables, macros and other stuff
 */

#ifdef CACHEDEBUG
#define CACHE1_elog(a,b)                elog(a,b)
#define CACHE2_elog(a,b,c)              elog(a,b,c)
#define CACHE3_elog(a,b,c,d)            elog(a,b,c,d)
#define CACHE4_elog(a,b,c,d,e)          elog(a,b,c,d,e)
#define CACHE5_elog(a,b,c,d,e,f)        elog(a,b,c,d,e,f)
#define CACHE6_elog(a,b,c,d,e,f,g)      elog(a,b,c,d,e,f,g)
#else
#define CACHE1_elog(a,b)
#define CACHE2_elog(a,b,c)
#define CACHE3_elog(a,b,c,d)
#define CACHE4_elog(a,b,c,d,e)
#define CACHE5_elog(a,b,c,d,e,f)
#define CACHE6_elog(a,b,c,d,e,f,g)
#endif

/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;


static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
                             ScanKey cur_skey);
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
                                  HeapTuple tuple);

#ifdef CATCACHE_STATS
static void CatCachePrintStats(void);
#endif
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static void CatalogCacheInitializeCache(CatCache *cache);
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
                        uint32 hashValue, Index hashIndex,
                        bool negative);
static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);


/*
 *                  internal support functions
 */

/*
 * Look up the hash and equality functions for system types that are used
 * as cache key fields.
 *
 * XXX this should be replaced by catalog lookups,
 * but that seems to pose considerable risk of circularity...
 */
static void
GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
{
    switch (keytype)
    {
        case BOOLOID:
            *hashfunc = hashchar;
            *eqfunc = F_BOOLEQ;
            break;
        case CHAROID:
            *hashfunc = hashchar;
            *eqfunc = F_CHAREQ;
            break;
        case NAMEOID:
            *hashfunc = hashname;
            *eqfunc = F_NAMEEQ;
            break;
        case INT2OID:
            *hashfunc = hashint2;
            *eqfunc = F_INT2EQ;
            break;
        case INT2VECTOROID:
            *hashfunc = hashint2vector;
            *eqfunc = F_INT2VECTOREQ;
            break;
        case INT4OID:
            *hashfunc = hashint4;
            *eqfunc = F_INT4EQ;
            break;
        case TEXTOID:
            *hashfunc = hashtext;
            *eqfunc = F_TEXTEQ;
            break;
        case OIDOID:
        case REGPROCOID:
        case REGPROCEDUREOID:
        case REGOPEROID:
        case REGOPERATOROID:
        case REGCLASSOID:
        case REGTYPEOID:
            *hashfunc = hashoid;
            *eqfunc = F_OIDEQ;
            break;
        case OIDVECTOROID:
            *hashfunc = hashoidvector;
            *eqfunc = F_OIDVECTOREQ;
            break;
        default:
            elog(FATAL, "type %u not supported as catcache key", keytype);
            break;
    }
}

/*
 *      CatalogCacheComputeHashValue
 *
 * Compute the hash value associated with a given set of lookup keys
 */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
{
    uint32      hashValue = 0;

    CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
                cache->cc_relname,
                nkeys,
                cache);

    switch (nkeys)
    {
        case 4:
            hashValue ^=
                DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
                                      cur_skey[3].sk_argument)) << 9;
            /* FALLTHROUGH */
        case 3:
            hashValue ^=
                DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
                                      cur_skey[2].sk_argument)) << 6;
            /* FALLTHROUGH */
        case 2:
            hashValue ^=
                DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
                                      cur_skey[1].sk_argument)) << 3;
            /* FALLTHROUGH */
        case 1:
            hashValue ^=
                DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
                                           cur_skey[0].sk_argument));
            break;
        default:
            elog(FATAL, "wrong number of hash keys: %d", nkeys);
            break;
    }

    return hashValue;
}
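
/*
 * Sketch of the combination rule above for a hypothetical two-key lookup,
 * writing h0 and h1 for the two DatumGetUInt32(DirectFunctionCall1(...))
 * results:
 *
 *      hashValue = (h1 << 3) ^ h0;
 *
 * Each additional key is shifted three (then six, then nine) bits before
 * the XOR, so lookups that differ in only one key column still tend to
 * fall into different buckets once HASH_INDEX masks the value down.
 */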
 
/*
 *      CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value associated with a given tuple to be cached
 */
static uint32
CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
{
    ScanKeyData cur_skey[4];
    bool        isNull = false;

    /* Copy pre-initialized overhead data for scankey */
    memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));

    /* Now extract key fields from tuple, insert into scankey */
    switch (cache->cc_nkeys)
    {
        case 4:
            cur_skey[3].sk_argument =
                (cache->cc_key[3] == ObjectIdAttributeNumber)
                ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
                : fastgetattr(tuple,
                              cache->cc_key[3],
                              cache->cc_tupdesc,
                              &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 3:
            cur_skey[2].sk_argument =
                (cache->cc_key[2] == ObjectIdAttributeNumber)
                ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
                : fastgetattr(tuple,
                              cache->cc_key[2],
                              cache->cc_tupdesc,
                              &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 2:
            cur_skey[1].sk_argument =
                (cache->cc_key[1] == ObjectIdAttributeNumber)
                ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
                : fastgetattr(tuple,
                              cache->cc_key[1],
                              cache->cc_tupdesc,
                              &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 1:
            cur_skey[0].sk_argument =
                (cache->cc_key[0] == ObjectIdAttributeNumber)
                ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
                : fastgetattr(tuple,
                              cache->cc_key[0],
                              cache->cc_tupdesc,
                              &isNull);
            Assert(!isNull);
            break;
        default:
            elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
            break;
    }

    return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
}


#ifdef CATCACHE_STATS

static void
CatCachePrintStats(void)
{
    CatCache   *cache;
    long        cc_searches = 0;
    long        cc_hits = 0;
    long        cc_neg_hits = 0;
    long        cc_newloads = 0;
    long        cc_invals = 0;
    long        cc_discards = 0;
    long        cc_lsearches = 0;
    long        cc_lhits = 0;

    elog(DEBUG2, "catcache stats dump: %d/%d tuples in catcaches",
         CacheHdr->ch_ntup, CacheHdr->ch_maxtup);

    for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
    {
        if (cache->cc_ntup == 0 && cache->cc_searches == 0)
            continue;           /* don't print unused caches */
        elog(DEBUG2, "catcache %s/%s: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
             cache->cc_relname,
             cache->cc_indname,
             cache->cc_ntup,
             cache->cc_searches,
             cache->cc_hits,
             cache->cc_neg_hits,
             cache->cc_hits + cache->cc_neg_hits,
             cache->cc_newloads,
             cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
             cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
             cache->cc_invals,
             cache->cc_discards,
             cache->cc_lsearches,
             cache->cc_lhits);
        cc_searches += cache->cc_searches;
        cc_hits += cache->cc_hits;
        cc_neg_hits += cache->cc_neg_hits;
        cc_newloads += cache->cc_newloads;
        cc_invals += cache->cc_invals;
        cc_discards += cache->cc_discards;
        cc_lsearches += cache->cc_lsearches;
        cc_lhits += cache->cc_lhits;
    }
    elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
         CacheHdr->ch_ntup,
         cc_searches,
         cc_hits,
         cc_neg_hits,
         cc_hits + cc_neg_hits,
         cc_newloads,
         cc_searches - cc_hits - cc_neg_hits - cc_newloads,
         cc_searches - cc_hits - cc_neg_hits,
         cc_invals,
         cc_discards,
         cc_lsearches,
         cc_lhits);
}
#endif   /* CATCACHE_STATS */


/*
 *      CatCacheRemoveCTup
 *
 * Unlink and delete the given cache entry
 *
 * NB: if it is a member of a CatCList, the CatCList is deleted too.
 */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
    Assert(ct->refcount == 0);
    Assert(ct->my_cache == cache);

    if (ct->c_list)
        CatCacheRemoveCList(cache, ct->c_list);

    /* delink from linked lists */
    DLRemove(&ct->lrulist_elem);
    DLRemove(&ct->cache_elem);

    /* free associated tuple data */
    if (ct->tuple.t_data != NULL)
        pfree(ct->tuple.t_data);
    pfree(ct);

    --cache->cc_ntup;
    --CacheHdr->ch_ntup;
}

/*
 *      CatCacheRemoveCList
 *
 * Unlink and delete the given cache list entry
 */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
    int         i;

    Assert(cl->refcount == 0);
    Assert(cl->my_cache == cache);

    /* delink from member tuples */
    for (i = cl->n_members; --i >= 0;)
    {
        CatCTup    *ct = cl->members[i];

        Assert(ct->c_list == cl);
        ct->c_list = NULL;
    }

    /* delink from linked list */
    DLRemove(&cl->cache_elem);

    /* free associated tuple data */
    if (cl->tuple.t_data != NULL)
        pfree(cl->tuple.t_data);
    pfree(cl);
}
 
/*
 *  CatalogCacheIdInvalidate
 *
 *  Invalidate entries in the specified cache, given a hash value and
 *  item pointer.  Positive entries are deleted if they match the item
 *  pointer.  Negative entries must be deleted if they match the hash
 *  value (since we do not have the exact key of the tuple that's being
 *  inserted).  But this should only rarely result in loss of a cache
 *  entry that could have been kept.
 *
 *  Note that it's not very relevant whether the tuple identified by
 *  the item pointer is being inserted or deleted.  We don't expect to
 *  find matching positive entries in the one case, and we don't expect
 *  to find matching negative entries in the other; but we will do the
 *  right things in any case.
 *
 *  This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatalogCacheIdInvalidate(int cacheId,
                         uint32 hashValue,
                         ItemPointer pointer)
{
    CatCache   *ccp;

    /*
     * sanity checks
     */
    Assert(ItemPointerIsValid(pointer));
    CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");

    /*
     * inspect caches to find the proper cache
     */
    for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
    {
        Index       hashIndex;
        Dlelem     *elt,
                   *nextelt;

        if (cacheId != ccp->id)
            continue;

        /*
         * We don't bother to check whether the cache has finished
         * initialization yet; if not, there will be no entries in it so
         * no problem.
         */

        /*
         * Invalidate *all* CatCLists in this cache; it's too hard to tell
         * which searches might still be correct, so just zap 'em all.
         */
        for (elt = DLGetHead(&ccp->cc_lists); elt; elt = nextelt)
        {
            CatCList   *cl = (CatCList *) DLE_VAL(elt);

            nextelt = DLGetSucc(elt);

            if (cl->refcount > 0)
                cl->dead = true;
            else
                CatCacheRemoveCList(ccp, cl);
        }

        /*
         * inspect the proper hash bucket for tuple matches
         */
        hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);

        for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
        {
            CatCTup    *ct = (CatCTup *) DLE_VAL(elt);

            nextelt = DLGetSucc(elt);

            if (hashValue != ct->hash_value)
                continue;       /* ignore non-matching hash values */

            if (ct->negative ||
                ItemPointerEquals(pointer, &ct->tuple.t_self))
            {
                if (ct->refcount > 0)
                    ct->dead = true;
                else
                    CatCacheRemoveCTup(ccp, ct);
                CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
#ifdef CATCACHE_STATS
                ccp->cc_invals++;
#endif
                /* could be multiple matches, so keep looking! */
            }
        }
        break;                  /* need only search this one cache */
    }
}

/* ----------------------------------------------------------------
 *                     public functions
 * ----------------------------------------------------------------
 */


/*
 * Standard routine for creating cache context if it doesn't exist yet
 *
 * There are a lot of places (probably far more than necessary) that check
 * whether CacheMemoryContext exists yet and want to create it if not.
 * We centralize knowledge of exactly how to create it here.
 */
void
CreateCacheMemoryContext(void)
{
    /*
     * Purely for paranoia, check that context doesn't exist; caller
     * probably did so already.
     */
    if (!CacheMemoryContext)
        CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                   "CacheMemoryContext",
                                                   ALLOCSET_DEFAULT_MINSIZE,
                                                   ALLOCSET_DEFAULT_INITSIZE,
                                                   ALLOCSET_DEFAULT_MAXSIZE);
}
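
/*
 * The call pattern at those places is just (sketch; see e.g. relcache.c
 * and InitCatCache below for real occurrences):
 *
 *      if (!CacheMemoryContext)
 *          CreateCacheMemoryContext();
 *      oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 *      ... allocate long-lived cache data ...
 *      MemoryContextSwitchTo(oldcxt);
 */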
 
/*
 *      AtEOXact_CatCache
 *
 * Clean up catcaches at end of main transaction (either commit or abort)
 *
 * We scan the caches to reset refcounts to zero.  This is of course
 * necessary in the abort case, since elog() may have interrupted routines.
 * In the commit case, any nonzero counts indicate failure to call
 * ReleaseSysCache, so we put out a notice for debugging purposes.
 */
void
AtEOXact_CatCache(bool isCommit)
{
    CatCache   *ccp;
    Dlelem     *elt,
               *nextelt;

    /*
     * First clean up CatCLists
     */
    for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
    {
        for (elt = DLGetHead(&ccp->cc_lists); elt; elt = nextelt)
        {
            CatCList   *cl = (CatCList *) DLE_VAL(elt);

            nextelt = DLGetSucc(elt);

            if (cl->refcount != 0)
            {
                if (isCommit)
                    elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
                         ccp->cc_relname, ccp->id, cl, cl->refcount);
                cl->refcount = 0;
            }

            /* Clean up any now-deletable dead entries */
            if (cl->dead)
                CatCacheRemoveCList(ccp, cl);
        }
    }

    /*
     * Now clean up tuples; we can scan them all using the global LRU list
     */
    for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = nextelt)
    {
        CatCTup    *ct = (CatCTup *) DLE_VAL(elt);

        nextelt = DLGetSucc(elt);

        if (ct->refcount != 0)
        {
            if (isCommit)
                elog(WARNING, "cache reference leak: cache %s (%d), tuple %u has count %d",
                     ct->my_cache->cc_relname, ct->my_cache->id,
                     HeapTupleGetOid(&ct->tuple),
                     ct->refcount);
            ct->refcount = 0;
        }

        /* Clean up any now-deletable dead entries */
        if (ct->dead)
            CatCacheRemoveCTup(ct->my_cache, ct);
    }
}

/*
 *      ResetCatalogCache
 *
 * Reset one catalog cache to empty.
 *
 * This is not very efficient if the target cache is nearly empty.
 * However, it shouldn't need to be efficient; we don't invoke it often.
 */
static void
ResetCatalogCache(CatCache *cache)
{
    Dlelem     *elt,
               *nextelt;
    int         i;

    /* Remove each list in this cache, or at least mark it dead */
    for (elt = DLGetHead(&cache->cc_lists); elt; elt = nextelt)
    {
        CatCList   *cl = (CatCList *) DLE_VAL(elt);

        nextelt = DLGetSucc(elt);

        if (cl->refcount > 0)
            cl->dead = true;
        else
            CatCacheRemoveCList(cache, cl);
    }

    /* Remove each tuple in this cache, or at least mark it dead */
    for (i = 0; i < cache->cc_nbuckets; i++)
    {
        for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
        {
            CatCTup    *ct = (CatCTup *) DLE_VAL(elt);

            nextelt = DLGetSucc(elt);

            if (ct->refcount > 0)
                ct->dead = true;
            else
                CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
            cache->cc_invals++;
#endif
        }
    }
}

/*
 *      ResetCatalogCaches
 *
 * Reset all caches when a shared cache inval event forces it
 */
void
ResetCatalogCaches(void)
{
    CatCache   *cache;

    CACHE1_elog(DEBUG2, "ResetCatalogCaches called");

    for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
        ResetCatalogCache(cache);

    CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
}

/*
 *      CatalogCacheFlushRelation
 *
 *  This is called by RelationFlushRelation() to clear out cached information
 *  about a relation being dropped.  (This could be a DROP TABLE command,
 *  or a temp table being dropped at end of transaction, or a table created
 *  during the current transaction that is being dropped because of abort.)
 *  Remove all cache entries relevant to the specified relation OID.
 *
 *  A special case occurs when relId is itself one of the cacheable system
 *  tables --- although those'll never be dropped, they can get flushed from
 *  the relcache (VACUUM causes this, for example).  In that case we need
 *  to flush all cache entries that came from that table.  (At one point we
 *  also tried to force re-execution of CatalogCacheInitializeCache for
 *  the cache(s) on that table.  This is a bad idea since it leads to all
 *  kinds of trouble if a cache flush occurs while loading cache entries.
 *  We now avoid the need to do it by copying cc_tupdesc out of the relcache,
 *  rather than relying on the relcache to keep a tupdesc for us.  Of course
 *  this assumes the tupdesc of a cachable system table will not change...)
 */
void
CatalogCacheFlushRelation(Oid relId)
{
    CatCache   *cache;

    CACHE2_elog(DEBUG2, "CatalogCacheFlushRelation called for %u", relId);

    for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
    {
        int         i;

        /* We can ignore uninitialized caches, since they must be empty */
        if (cache->cc_tupdesc == NULL)
            continue;

        /* Does this cache store tuples of the target relation itself? */
        if (cache->cc_tupdesc->attrs[0]->attrelid == relId)
        {
            /* Yes, so flush all its contents */
            ResetCatalogCache(cache);
            continue;
        }

        /* Does this cache store tuples associated with relations at all? */
        if (cache->cc_reloidattr == 0)
            continue;           /* nope, leave it alone */

        /* Yes, scan the tuples and remove those related to relId */
        for (i = 0; i < cache->cc_nbuckets; i++)
        {
            Dlelem     *elt,
                       *nextelt;

            for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
            {
                CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
                Oid         tupRelid;

                nextelt = DLGetSucc(elt);

                /*
                 * Negative entries are never considered related to a rel,
                 * even if the rel is part of their lookup key.
                 */
                if (ct->negative)
                    continue;

                if (cache->cc_reloidattr == ObjectIdAttributeNumber)
                    tupRelid = HeapTupleGetOid(&ct->tuple);
                else
                {
                    bool        isNull;

                    tupRelid =
                        DatumGetObjectId(fastgetattr(&ct->tuple,
                                                     cache->cc_reloidattr,
                                                     cache->cc_tupdesc,
                                                     &isNull));
                    Assert(!isNull);
                }

                if (tupRelid == relId)
                {
                    if (ct->refcount > 0)
                        ct->dead = true;
                    else
                        CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
                    cache->cc_invals++;
#endif
                }
            }
        }
    }

    CACHE1_elog(DEBUG2, "end of CatalogCacheFlushRelation call");
}
 
/*
 *      InitCatCache
 *
 *  This allocates and initializes a cache for a system catalog relation.
 *  Actually, the cache is only partially initialized to avoid opening the
 *  relation.  The relation will be opened and the rest of the cache
 *  structure initialized on the first access.
 */
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG2 \
do { \
    elog(DEBUG2, "InitCatCache: rel=%s id=%d nkeys=%d size=%d", \
        cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)

#else
#define InitCatCache_DEBUG2
#endif

CatCache *
InitCatCache(int id,
             const char *relname,
             const char *indname,
             int reloidattr,
             int nkeys,
             const int *key)
{
    CatCache   *cp;
    MemoryContext oldcxt;
    int         i;

    /*
     * first switch to the cache context so our allocations do not vanish
     * at the end of a transaction
     */
    if (!CacheMemoryContext)
        CreateCacheMemoryContext();

    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /*
     * if first time through, initialize the cache group header, including
     * global LRU list header
     */
    if (CacheHdr == NULL)
    {
        CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
        CacheHdr->ch_caches = NULL;
        CacheHdr->ch_ntup = 0;
        CacheHdr->ch_maxtup = MAXCCTUPLES;
        DLInitList(&CacheHdr->ch_lrulist);
#ifdef CATCACHE_STATS
        on_proc_exit(CatCachePrintStats, 0);
#endif
    }

    /*
     * allocate a new cache structure
     *
     * Note: we assume zeroing initializes the Dllist headers correctly
     */
    cp = (CatCache *) palloc0(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));

    /*
     * initialize the cache's relation information for the relation
     * corresponding to this cache, and initialize some of the new cache's
     * other internal fields.  But don't open the relation yet.
     */
    cp->id = id;
    cp->cc_relname = relname;
    cp->cc_indname = indname;
    cp->cc_reloid = InvalidOid; /* temporary */
    cp->cc_relisshared = false; /* temporary */
    cp->cc_tupdesc = (TupleDesc) NULL;
    cp->cc_reloidattr = reloidattr;
    cp->cc_ntup = 0;
    cp->cc_nbuckets = NCCBUCKETS;
    cp->cc_nkeys = nkeys;
    for (i = 0; i < nkeys; ++i)
        cp->cc_key[i] = key[i];

    /*
     * new cache is initialized as far as we can go for now. print some
     * debugging information, if appropriate.
     */
    InitCatCache_DEBUG2;

    /*
     * add completed cache to top of group header's list
     */
    cp->cc_next = CacheHdr->ch_caches;
    CacheHdr->ch_caches = cp;

    /*
     * back to the old context before we return...
     */
    MemoryContextSwitchTo(oldcxt);

    return cp;
}
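
/*
 * Illustrative call, roughly as the syscache machinery in syscache.c sets
 * up a single-key cache keyed on OID (the parameter values here are an
 * example, not the authoritative cache definition):
 *
 *      static const int oper_key[4] = {ObjectIdAttributeNumber, 0, 0, 0};
 *
 *      cache = InitCatCache(OPEROID,                   /* syscache id */
 *                           "pg_operator",             /* relname */
 *                           "pg_operator_oid_index",   /* indname */
 *                           ObjectIdAttributeNumber,   /* reloidattr */
 *                           1,                         /* nkeys */
 *                           oper_key);
 *
 * Only names and key numbers are stored at this point; the catalog itself
 * is not opened until CatalogCacheInitializeCache runs on first use.
 */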
 
/*
 *      CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.  We assume
 * that the relcache entry can be opened at this point!
 */
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
    elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p %s", cache, \
         cache->cc_relname)

#define CatalogCacheInitializeCache_DEBUG2 \
do { \
        if (cache->cc_key[i] > 0) { \
            elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
                i+1, cache->cc_nkeys, cache->cc_key[i], \
                 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
        } else { \
            elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
                i+1, cache->cc_nkeys, cache->cc_key[i]); \
        } \
} while(0)

#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif

static void
CatalogCacheInitializeCache(CatCache *cache)
{
    Relation    relation;
    MemoryContext oldcxt;
    TupleDesc   tupdesc;
    int         i;

    CatalogCacheInitializeCache_DEBUG1;

    /*
     * Open the relation without locking --- we only need the tupdesc,
     * which we assume will never change ...
     */
    relation = heap_openr(cache->cc_relname, NoLock);
    Assert(RelationIsValid(relation));

    /*
     * switch to the cache context so our allocations do not vanish at the
     * end of a transaction
     */
    Assert(CacheMemoryContext != NULL);

    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /*
     * copy the relcache's tuple descriptor to permanent cache storage
     */
    tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

    /*
     * get the relation's OID and relisshared flag, too
     */
    cache->cc_reloid = RelationGetRelid(relation);
    cache->cc_relisshared = RelationGetForm(relation)->relisshared;

    /*
     * return to the caller's memory context and close the rel
     */
    MemoryContextSwitchTo(oldcxt);

    heap_close(relation, NoLock);

    CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
                cache->cc_relname, cache->cc_nkeys);

    /*
     * initialize cache's key information
     */
    for (i = 0; i < cache->cc_nkeys; ++i)
    {
        Oid         keytype;
        RegProcedure eqfunc;

        CatalogCacheInitializeCache_DEBUG2;

        if (cache->cc_key[i] > 0)
            keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
        else
        {
            if (cache->cc_key[i] != ObjectIdAttributeNumber)
                elog(FATAL, "only sys attr supported in caches is OID");
            keytype = OIDOID;
        }

        GetCCHashEqFuncs(keytype,
                         &cache->cc_hashfunc[i],
                         &eqfunc);

        cache->cc_isname[i] = (keytype == NAMEOID);

        /*
         * Do equality-function lookup (we assume this won't need a
         * catalog lookup for any supported type)
         */
        fmgr_info_cxt(eqfunc,
                      &cache->cc_skey[i].sk_func,
                      CacheMemoryContext);

        /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
        cache->cc_skey[i].sk_attno = cache->cc_key[i];

        /* Fill in sk_strategy as well --- always standard equality */
        cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
        cache->cc_skey[i].sk_subtype = InvalidOid;

        CACHE4_elog(DEBUG2, "CatalogCacheInit %s %d %p",
                    cache->cc_relname,
                    i,
                    cache);
    }

    /*
     * mark this cache fully initialized
     */
    cache->cc_tupdesc = tupdesc;
}
 
/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * The only reason to call this routine is to ensure that the relcache
 * has created entries for all the catalogs and indexes referenced by
 * catcaches.  Therefore, open the index too.  An exception is the indexes
 * on pg_am, which we don't use (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache)
{
    if (cache->cc_tupdesc == NULL)
        CatalogCacheInitializeCache(cache);

    if (cache->id != AMOID &&
        cache->id != AMNAME)
    {
        Relation    idesc;

        idesc = index_openr(cache->cc_indname);
        index_close(idesc);
    }
}


/*
 *      IndexScanOK
 *
 *      This function checks for tuples that will be fetched by
 *      IndexSupportInitialize() during relcache initialization for
 *      certain system indexes that support critical syscaches.
 *      We can't use an indexscan to fetch these, else we'll get into
 *      infinite recursion.  A plain heap scan will work, however.
 *
 *      Once we have completed relcache initialization (signaled by
 *      criticalRelcachesBuilt), we don't have to worry anymore.
 */
static bool
IndexScanOK(CatCache *cache, ScanKey cur_skey)
{
    if (cache->id == INDEXRELID)
    {
        /*
         * Since the OIDs of indexes aren't hardwired, it's painful to
         * figure out which is which.  Just force all pg_index searches to
         * be heap scans while building the relcaches.
         */
        if (!criticalRelcachesBuilt)
            return false;
    }
    else if (cache->id == AMOID ||
             cache->id == AMNAME)
    {
        /*
         * Always do heap scans in pg_am, because it's so small there's
         * not much point in an indexscan anyway.  We *must* do this when
         * initially building critical relcache entries, but we might as
         * well just always do it.
         */
        return false;
    }
    else if (cache->id == OPEROID)
    {
        if (!criticalRelcachesBuilt)
        {
            /* Looking for an OID comparison function? */
            Oid         lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);

            if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
                return false;
        }
    }

    /* Normal case, allow index scan */
    return true;
}
 
/*
 *  SearchCatCache
 *
 *      This call searches a system cache for a tuple, opening the relation
 *      if necessary (on the first access to a particular cache).
 *
 *      The result is NULL if not found, or a pointer to a HeapTuple in
 *      the cache.  The caller must not modify the tuple, and must call
 *      ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
               Datum v1,
               Datum v2,
               Datum v3,
               Datum v4)
{
    ScanKeyData cur_skey[4];
    uint32      hashValue;
    Index       hashIndex;
    Dlelem     *elt;
    CatCTup    *ct;
    Relation    relation;
    SysScanDesc scandesc;
    HeapTuple   ntp;

    /*
     * one-time startup overhead for each cache
     */
    if (cache->cc_tupdesc == NULL)
        CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
    cache->cc_searches++;
#endif

    /*
     * initialize the search key information
     */
    memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
    cur_skey[0].sk_argument = v1;
    cur_skey[1].sk_argument = v2;
    cur_skey[2].sk_argument = v3;
    cur_skey[3].sk_argument = v4;

    /*
     * find the hash bucket in which to look for the tuple
     */
    hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
    hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

    /*
     * scan the hash bucket until we find a match or exhaust our tuples
     */
    for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
         elt;
         elt = DLGetSucc(elt))
    {
        bool        res;

        ct = (CatCTup *) DLE_VAL(elt);

        if (ct->dead)
            continue;           /* ignore dead entries */

        if (ct->hash_value != hashValue)
            continue;           /* quickly skip entry if wrong hash val */

        /*
         * see if the cached tuple matches our key.
         */
        HeapKeyTest(&ct->tuple,
                    cache->cc_tupdesc,
                    cache->cc_nkeys,
                    cur_skey,
                    res);
        if (!res)
            continue;

        /*
         * we found a match in the cache: move it to the front of the
         * global LRU list.  We also move it to the front of the list for
         * its hashbucket, in order to speed subsequent searches.  (The
         * most frequently accessed elements in any hashbucket will tend
         * to be near the front of the hashbucket's list.)
         */
        DLMoveToFront(&ct->lrulist_elem);
        DLMoveToFront(&ct->cache_elem);

        /*
         * If it's a positive entry, bump its refcount and return it. If
         * it's negative, we can report failure to the caller.
         */
        if (!ct->negative)
        {
            ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
            ct->refcount++;
            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

            CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
                        cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
            cache->cc_hits++;
#endif

            return &ct->tuple;
        }
        else
        {
            CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
                        cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
            cache->cc_neg_hits++;
#endif

            return NULL;
        }
    }

    /*
     * Tuple was not found in cache, so we have to try to retrieve it
     * directly from the relation.  If found, we will add it to the cache;
     * if not found, we will add a negative cache entry instead.
     *
     * NOTE: it is possible for recursive cache lookups to occur while
     * reading the relation --- for example, due to shared-cache-inval
     * messages being processed during heap_open().  This is OK.  It's
     * even possible for one of those lookups to find and enter the very
     * same tuple we are trying to fetch here.  If that happens, we will
     * enter a second copy of the tuple into the cache.  The first copy
     * will never be referenced again, and will eventually age out of the
     * cache, so there's no functional problem.  This case is rare enough
     * that it's not worth expending extra cycles to detect.
     */
    relation = heap_open(cache->cc_reloid, AccessShareLock);

    scandesc = systable_beginscan(relation,
                                  cache->cc_indname,
                                  IndexScanOK(cache, cur_skey),
                                  SnapshotNow,
                                  cache->cc_nkeys,
                                  cur_skey);

    ct = NULL;

    while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
    {
        ct = CatalogCacheCreateEntry(cache, ntp,
                                     hashValue, hashIndex,
                                     false);
        /* immediately set the refcount to 1 */
        ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
        ct->refcount++;
        ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
        break;                  /* assume only one match */
    }

    systable_endscan(scandesc);

    heap_close(relation, AccessShareLock);

    /*
     * If tuple was not found, we need to build a negative cache entry
     * containing a fake tuple.  The fake tuple has the correct key
     * columns, but nulls everywhere else.
     */
    if (ct == NULL)
    {
        ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
        ct = CatalogCacheCreateEntry(cache, ntp,
                                     hashValue, hashIndex,
                                     true);
        heap_freetuple(ntp);

        CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
                    cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
        CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
                    cache->cc_relname, hashIndex);

        /*
         * We are not returning the negative entry to the caller, so leave
         * its refcount zero.
         */

        return NULL;
    }

    CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
                cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
    CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
                cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
    cache->cc_newloads++;
#endif

    return &ct->tuple;
}
 
/*
 *  ReleaseCatCache
 *
 *  Decrement the reference count of a catcache entry (releasing the
 *  hold grabbed by a successful SearchCatCache).
 *
 *  NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 *  will be freed as soon as their refcount goes to zero.  In combination
 *  with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 *  to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
    CatCTup    *ct = (CatCTup *) (((char *) tuple) -
                                  offsetof(CatCTup, tuple));

    /* Safety checks to ensure we were handed a cache entry */
    Assert(ct->ct_magic == CT_MAGIC);
    Assert(ct->refcount > 0);

    ct->refcount--;
    ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);

    if (ct->refcount == 0
#ifndef CATCACHE_FORCE_RELEASE
        && ct->dead
#endif
        )
        CatCacheRemoveCTup(ct->my_cache, ct);
}
 
1302
 
 
1303
 
 
1304
/*
 
1305
 *      SearchCatCacheList
 
1306
 *
 
1307
 *              Generate a list of all tuples matching a partial key (that is,
 
1308
 *              a key specifying just the first K of the cache's N key columns).
 
1309
 *
 
1310
 *              The caller must not modify the list object or the pointed-to tuples,
 
1311
 *              and must call ReleaseCatCacheList() when done with the list.
 
1312
 */
 
1313
CatCList *
 
1314
SearchCatCacheList(CatCache *cache,
 
1315
                                   int nkeys,
 
1316
                                   Datum v1,
 
1317
                                   Datum v2,
 
1318
                                   Datum v3,
 
1319
                                   Datum v4)
 
1320
{
 
1321
        ScanKeyData cur_skey[4];
 
1322
        uint32          lHashValue;
 
1323
        Dlelem     *elt;
 
1324
        CatCList   *cl;
 
1325
        CatCTup    *ct;
 
1326
        List       *ctlist;
 
1327
        ListCell   *ctlist_item;
 
1328
        int                     nmembers;
 
1329
        Relation        relation;
 
1330
        SysScanDesc scandesc;
 
1331
        bool            ordered;
 
1332
        HeapTuple       ntp;
 
1333
        MemoryContext oldcxt;
 
1334
        int                     i;
 
1335
 
 
1336
        /*
 
1337
         * one-time startup overhead for each cache
 
1338
         */
 
1339
        if (cache->cc_tupdesc == NULL)
 
1340
                CatalogCacheInitializeCache(cache);
 
1341
 
 
1342
        Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
 
1343
 
 
1344
#ifdef CATCACHE_STATS
 
1345
        cache->cc_lsearches++;
 
1346
#endif
 
1347
 
 
1348
        /*
 
1349
         * initialize the search key information
 
1350
         */
 
1351
        memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
 
1352
        cur_skey[0].sk_argument = v1;
 
1353
        cur_skey[1].sk_argument = v2;
 
1354
        cur_skey[2].sk_argument = v3;
 
1355
        cur_skey[3].sk_argument = v4;
 
1356
 
 
1357
        /*
 
1358
         * compute a hash value of the given keys for faster search.  We don't
 
1359
         * presently divide the CatCList items into buckets, but this still
 
1360
         * lets us skip non-matching items quickly most of the time.
 
1361
         */
 
1362
        lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
 
1363
 
 
1364
        /*
 
1365
         * scan the items until we find a match or exhaust our list
 
1366
         */
 
1367
        for (elt = DLGetHead(&cache->cc_lists);
 
1368
                 elt;
 
1369
                 elt = DLGetSucc(elt))
 
1370
        {
 
1371
                bool            res;
 
1372
 
 
1373
                cl = (CatCList *) DLE_VAL(elt);
 
1374
 
 
1375
                if (cl->dead)
 
1376
                        continue;                       /* ignore dead entries */
 
1377
 
 
1378
                if (cl->hash_value != lHashValue)
 
1379
                        continue;                       /* quickly skip entry if wrong hash val */
 
1380
 
 
1381
                /*
 
1382
                 * see if the cached list matches our key.
 
1383
                 */
 
1384
                if (cl->nkeys != nkeys)
 
1385
                        continue;
 
1386
                HeapKeyTest(&cl->tuple,
 
1387
                                        cache->cc_tupdesc,
 
1388
                                        nkeys,
 
1389
                                        cur_skey,
 
1390
                                        res);
 
1391
                if (!res)
 
1392
                        continue;
 
1393
 
 
1394
                /*
 
1395
                 * we found a matching list: move each of its members to the front
 
1396
                 * of the global LRU list.  Also move the list itself to the front
 
1397
                 * of the cache's list-of-lists, to speed subsequent searches. (We
 
1398
                 * do not move the members to the fronts of their hashbucket
 
1399
                 * lists, however, since there's no point in that unless they are
 
1400
                 * searched for individually.)  Also bump the members' refcounts.
 
1401
                 * (member refcounts are NOT registered separately with the
 
1402
                 * resource owner.)
 
1403
                 */
 
1404
                ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
 
1405
                for (i = 0; i < cl->n_members; i++)
 
1406
                {
 
1407
                        cl->members[i]->refcount++;
 
1408
                        DLMoveToFront(&cl->members[i]->lrulist_elem);
 
1409
                }
 
1410
                DLMoveToFront(&cl->cache_elem);
 
1411
 
 
1412
                /* Bump the list's refcount and return it */
 
1413
                cl->refcount++;
 
1414
                ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
 
1415
 
 
1416
                CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
 
1417
                                        cache->cc_relname);
 
1418
 
 
1419
#ifdef CATCACHE_STATS
 
1420
                cache->cc_lhits++;
 
1421
#endif
 
1422
 
 
1423
                return cl;
 
1424
        }
 
1425
 
 
1426
        /*
 
1427
         * List was not found in cache, so we have to build it by reading the
 
1428
         * relation.  For each matching tuple found in the relation, use an
 
1429
         * existing cache entry if possible, else build a new one.
 
1430
         */
 
1431
        relation = heap_open(cache->cc_reloid, AccessShareLock);
 
1432
 
 
1433
        scandesc = systable_beginscan(relation,
 
1434
                                                                  cache->cc_indname,
 
1435
                                                                  true,
 
1436
                                                                  SnapshotNow,
 
1437
                                                                  nkeys,
 
1438
                                                                  cur_skey);
 
1439
 
 
1440
        /* The list will be ordered iff we are doing an index scan */
 
1441
        ordered = (scandesc->irel != NULL);
 
1442
 
 
1443
        ctlist = NIL;
 
1444
        nmembers = 0;
 
1445
 
 
1446
        while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 
1447
        {
 
1448
                uint32          hashValue;
 
1449
                Index           hashIndex;
 
1450
 
 
1451
                /*
 
1452
                 * See if there's an entry for this tuple already.
 
1453
                 */
 
1454
                ct = NULL;
 
1455
                hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
 
1456
                hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
 
1457
 
 
1458
                for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
 
1459
                         elt;
 
1460
                         elt = DLGetSucc(elt))
 
1461
                {
 
1462
                        ct = (CatCTup *) DLE_VAL(elt);
 
1463
 
 
1464
                        if (ct->dead || ct->negative)
 
1465
                                continue;               /* ignore dead and negative entries */
 
1466
 
 
1467
                        if (ct->hash_value != hashValue)
 
1468
                                continue;               /* quickly skip entry if wrong hash val */
 
1469
 
 
1470
                        if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
 
1471
                                continue;               /* not same tuple */
 
1472
 
 
1473
                        /*
 
1474
                         * Found a match, but can't use it if it belongs to another
 
1475
                         * list already
 
1476
                         */
 
1477
                        if (ct->c_list)
 
1478
                                continue;
 
1479
 
 
1480
                        /* Found a match, so move it to front */
 
1481
                        DLMoveToFront(&ct->lrulist_elem);
 
1482
 
 
1483
                        break;
 
1484
                }
 
1485
 
 
1486
                if (elt == NULL)
 
1487
                {
 
1488
                        /* We didn't find a usable entry, so make a new one */
 
1489
                        ct = CatalogCacheCreateEntry(cache, ntp,
 
1490
                                                                                 hashValue, hashIndex,
 
1491
                                                                                 false);
 
1492
                }
 
1493
 
 
1494
                /*
 
1495
                 * We have to bump the member refcounts immediately to ensure they
 
1496
                 * won't get dropped from the cache while loading other members.
 
1497
                 * If we get an error before we finish constructing the CatCList
 
1498
                 * then we will leak those reference counts.  This is annoying but
 
1499
                 * it has no real consequence beyond possibly generating some
 
1500
                 * warning messages at the next transaction commit, so it's not
 
1501
                 * worth fixing.
 
1502
                 */
 
1503
                ct->refcount++;
 
1504
                ctlist = lappend(ctlist, ct);
 
1505
                nmembers++;
 
1506
        }
 
1507
 
 
1508
        systable_endscan(scandesc);
 
1509
 
 
1510
        heap_close(relation, AccessShareLock);
 
1511
 
 
1512
        /*
 
1513
         * Now we can build the CatCList entry.  First we need a dummy tuple
 
1514
         * containing the key values...
 
1515
         */
 
1516
        ntp = build_dummy_tuple(cache, nkeys, cur_skey);
 
1517
        oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 
1518
        cl = (CatCList *) palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
 
1519
        heap_copytuple_with_tuple(ntp, &cl->tuple);
 
1520
        MemoryContextSwitchTo(oldcxt);
 
1521
        heap_freetuple(ntp);
 
1522
 
 
1523
        cl->cl_magic = CL_MAGIC;
 
1524
        cl->my_cache = cache;
 
1525
        DLInitElem(&cl->cache_elem, cl);
 
1526
        cl->refcount = 0;                       /* for the moment */
 
1527
        cl->dead = false;
 
1528
        cl->ordered = ordered;
 
1529
        cl->nkeys = nkeys;
 
1530
        cl->hash_value = lHashValue;
 
1531
        cl->n_members = nmembers;
 
1532
 
 
1533
        Assert(nmembers == list_length(ctlist));
 
1534
        ctlist_item = list_head(ctlist);
 
1535
        for (i = 0; i < nmembers; i++)
 
1536
        {
 
1537
                cl->members[i] = ct = (CatCTup *) lfirst(ctlist_item);
 
1538
                Assert(ct->c_list == NULL);
 
1539
                ct->c_list = cl;
 
1540
                /* mark list dead if any members already dead */
 
1541
                if (ct->dead)
 
1542
                        cl->dead = true;
 
1543
                ctlist_item = lnext(ctlist_item);
 
1544
        }
 
1545
 
 
1546
        DLAddHead(&cache->cc_lists, &cl->cache_elem);
 
1547
 
 
1548
        CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
 
1549
                                cache->cc_relname, nmembers);
 
1550
 
 
1551
        /* Finally, bump the list's refcount and return it */
 
1552
        ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
 
1553
        cl->refcount++;
 
1554
        ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
 
1555
 
 
1556
        return cl;
 
1557
}
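/*
 * Editor's illustrative sketch (not part of the original file): searching
 * with a partial key via SearchCatCacheList and releasing the list when
 * done.  The "cache" pointer and first-key Datum are hypothetical; only
 * the first of the cache's key columns is supplied here.
 */
#ifdef CATCACHE_USAGE_EXAMPLE
static void
example_catcache_list_scan(CatCache *cache, Datum firstKey)
{
        CatCList   *clist;
        int                     i;

        clist = SearchCatCacheList(cache, 1, firstKey, 0, 0, 0);

        for (i = 0; i < clist->n_members; i++)
        {
                HeapTuple       tup = &clist->members[i]->tuple;

                /* read-only access; member refcounts are owned by the list */
                (void) tup;
        }

        ReleaseCatCacheList(clist);
}
#endif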
 
1558
 
 
1559
/*
 
1560
 *      ReleaseCatCacheList
 
1561
 *
 
1562
 *      Decrement the reference counts of a catcache list.
 
1563
 */
 
1564
void
 
1565
ReleaseCatCacheList(CatCList *list)
 
1566
{
 
1567
        int                     i;
 
1568
 
 
1569
        /* Safety checks to ensure we were handed a cache entry */
 
1570
        Assert(list->cl_magic == CL_MAGIC);
 
1571
        Assert(list->refcount > 0);
 
1572
 
 
1573
        for (i = list->n_members; --i >= 0;)
 
1574
        {
 
1575
                CatCTup    *ct = list->members[i];
 
1576
 
 
1577
                Assert(ct->refcount > 0);
 
1578
 
 
1579
                ct->refcount--;
 
1580
 
 
1581
                if (ct->dead)
 
1582
                        list->dead = true;
 
1583
                /* can't remove tuple before list is removed */
 
1584
        }
 
1585
 
 
1586
        list->refcount--;
 
1587
        ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
 
1588
 
 
1589
        if (list->refcount == 0
 
1590
#ifndef CATCACHE_FORCE_RELEASE
 
1591
                && list->dead
 
1592
#endif
 
1593
                )
 
1594
                CatCacheRemoveCList(list->my_cache, list);
 
1595
}
 
1596
 
 
1597
 
 
1598
/*
 
1599
 * CatalogCacheCreateEntry
 
1600
 *              Create a new CatCTup entry, copying the given HeapTuple and other
 
1601
 *              supplied data into it.  The new entry initially has refcount 0.
 
1602
 */
 
1603
static CatCTup *
 
1604
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
 
1605
                                                uint32 hashValue, Index hashIndex, bool negative)
 
1606
{
 
1607
        CatCTup    *ct;
 
1608
        MemoryContext oldcxt;
 
1609
 
 
1610
        /*
 
1611
         * Allocate CatCTup header in cache memory, and copy the tuple there
 
1612
         * too.
 
1613
         */
 
1614
        oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 
1615
        ct = (CatCTup *) palloc(sizeof(CatCTup));
 
1616
        heap_copytuple_with_tuple(ntp, &ct->tuple);
 
1617
        MemoryContextSwitchTo(oldcxt);
 
1618
 
 
1619
        /*
 
1620
         * Finish initializing the CatCTup header, and add it to the cache's
 
1621
         * linked lists and counts.
 
1622
         */
 
1623
        ct->ct_magic = CT_MAGIC;
 
1624
        ct->my_cache = cache;
 
1625
        DLInitElem(&ct->lrulist_elem, (void *) ct);
 
1626
        DLInitElem(&ct->cache_elem, (void *) ct);
 
1627
        ct->c_list = NULL;
 
1628
        ct->refcount = 0;                       /* for the moment */
 
1629
        ct->dead = false;
 
1630
        ct->negative = negative;
 
1631
        ct->hash_value = hashValue;
 
1632
 
 
1633
        DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
 
1634
        DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
 
1635
 
 
1636
        cache->cc_ntup++;
 
1637
        CacheHdr->ch_ntup++;
 
1638
 
 
1639
        /*
 
1640
         * If we've exceeded the desired size of the caches, try to throw away
 
1641
         * the least recently used entry.  NB: be careful not to throw away
 
1642
         * the newly-built entry...
 
1643
         */
 
1644
        if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
 
1645
        {
 
1646
                Dlelem     *elt,
 
1647
                                   *prevelt;
 
1648
 
 
1649
                for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
 
1650
                {
 
1651
                        CatCTup    *oldct = (CatCTup *) DLE_VAL(elt);
 
1652
 
 
1653
                        prevelt = DLGetPred(elt);
 
1654
 
 
1655
                        if (oldct->refcount == 0 && oldct != ct)
 
1656
                        {
 
1657
                                CACHE2_elog(DEBUG2, "CatCacheCreateEntry(%s): Overflow, LRU removal",
 
1658
                                                        cache->cc_relname);
 
1659
#ifdef CATCACHE_STATS
 
1660
                                oldct->my_cache->cc_discards++;
 
1661
#endif
 
1662
                                CatCacheRemoveCTup(oldct->my_cache, oldct);
 
1663
                                if (CacheHdr->ch_ntup <= CacheHdr->ch_maxtup)
 
1664
                                        break;
 
1665
                        }
 
1666
                }
 
1667
        }
 
1668
 
 
1669
        return ct;
 
1670
}
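/*
 * Editor's illustrative sketch (not part of the original file): a new
 * entry's bucket is chosen the same way the lookup paths choose it, by
 * hashing the key columns and masking with HASH_INDEX.  Assumes the cache
 * has already gone through CatalogCacheInitializeCache.
 */
#ifdef CATCACHE_USAGE_EXAMPLE
static Index
example_bucket_for_tuple(CatCache *cache, HeapTuple tup)
{
        uint32          h = CatalogCacheComputeTupleHashValue(cache, tup);

        return HASH_INDEX(h, cache->cc_nbuckets);
}
#endif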
 
1671
 
 
1672
/*
 
1673
 * build_dummy_tuple
 
1674
 *              Generate a palloc'd HeapTuple that contains the specified key
 
1675
 *              columns, and NULLs for other columns.
 
1676
 *
 
1677
 * This is used to store the keys for negative cache entries and CatCList
 
1678
 * entries, which don't have real tuples associated with them.
 
1679
 */
 
1680
static HeapTuple
 
1681
build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
 
1682
{
 
1683
        HeapTuple       ntp;
 
1684
        TupleDesc       tupDesc = cache->cc_tupdesc;
 
1685
        Datum      *values;
 
1686
        char       *nulls;
 
1687
        Oid                     tupOid = InvalidOid;
 
1688
        NameData        tempNames[4];
 
1689
        int                     i;
 
1690
 
 
1691
        values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
 
1692
        nulls = (char *) palloc(tupDesc->natts * sizeof(char));
 
1693
 
 
1694
        memset(values, 0, tupDesc->natts * sizeof(Datum));
 
1695
        memset(nulls, 'n', tupDesc->natts * sizeof(char));
 
1696
 
 
1697
        for (i = 0; i < nkeys; i++)
 
1698
        {
 
1699
                int                     attindex = cache->cc_key[i];
 
1700
                Datum           keyval = skeys[i].sk_argument;
 
1701
 
 
1702
                if (attindex > 0)
 
1703
                {
 
1704
                        /*
 
1705
                         * Here we must be careful in case the caller passed a C
 
1706
                         * string where a NAME is wanted: convert the given argument
 
1707
                         * to a correctly padded NAME.  Otherwise the memcpy() done in
 
1708
                         * heap_formtuple could fall off the end of memory.
 
1709
                         */
 
1710
                        if (cache->cc_isname[i])
 
1711
                        {
 
1712
                                Name            newval = &tempNames[i];
 
1713
 
 
1714
                                namestrcpy(newval, DatumGetCString(keyval));
 
1715
                                keyval = NameGetDatum(newval);
 
1716
                        }
 
1717
                        values[attindex - 1] = keyval;
 
1718
                        nulls[attindex - 1] = ' ';
 
1719
                }
 
1720
                else
 
1721
                {
 
1722
                        Assert(attindex == ObjectIdAttributeNumber);
 
1723
                        tupOid = DatumGetObjectId(keyval);
 
1724
                }
 
1725
        }
 
1726
 
 
1727
        ntp = heap_formtuple(tupDesc, values, nulls);
 
1728
        if (tupOid != InvalidOid)
 
1729
                HeapTupleSetOid(ntp, tupOid);
 
1730
 
 
1731
        pfree(values);
 
1732
        pfree(nulls);
 
1733
 
 
1734
        return ntp;
 
1735
}
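/*
 * Editor's illustrative sketch (not part of the original file): the NAME
 * padding step that build_dummy_tuple applies to C-string keys.
 * namestrcpy zero-pads into a fixed NAMEDATALEN buffer, so the later
 * heap_formtuple copy cannot run off the end of the caller's string.  The
 * workspace buffer is hypothetical and must survive until the tuple has
 * been formed.
 */
#ifdef CATCACHE_USAGE_EXAMPLE
static Datum
example_pad_name_key(const char *key, NameData *workspace)
{
        namestrcpy(workspace, key);
        return NameGetDatum(workspace);
}
#endif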
 
1736
 
 
1737
 
 
1738
/*
 
1739
 *      PrepareToInvalidateCacheTuple()
 
1740
 *
 
1741
 *      This is part of a rather subtle chain of events, so pay attention:
 
1742
 *
 
1743
 *      When a tuple is inserted or deleted, it cannot be flushed from the
 
1744
 *      catcaches immediately, for reasons explained at the top of cache/inval.c.
 
1745
 *      Instead we have to add entries for the tuple to a list of pending tuple
 
1746
 *      invalidations that will be done at the end of the command or transaction.
 
1747
 *
 
1748
 *      The lists of tuples that need to be flushed are kept by inval.c.  This
 
1749
 *      routine is a helper routine for inval.c.  Given a tuple belonging to
 
1750
 *      the specified relation, find all catcaches it could be in, compute the
 
1751
 *      correct hash value for each such catcache, and call the specified function
 
1752
 *      to record the cache id, hash value, and tuple ItemPointer in inval.c's
 
1753
 *      lists.  CatalogCacheIdInvalidate will be called later, if appropriate,
 
1754
 *      using the recorded information.
 
1755
 *
 
1756
 *      Note that it is irrelevant whether the given tuple is actually loaded
 
1757
 *      into the catcache at the moment.  Even if it's not there now, it might
 
1758
 *      be by the end of the command, or there might be a matching negative entry
 
1759
 *      to flush --- or other backends' caches might have such entries --- so
 
1760
 *      we have to make list entries to flush it later.
 
1761
 *
 
1762
 *      Also note that it's not an error if there are no catcaches for the
 
1763
 *      specified relation.  inval.c doesn't know exactly which rels have
 
1764
 *      catcaches --- it will call this routine for any tuple that's in a
 
1765
 *      system relation.
 
1766
 */
 
1767
void
 
1768
PrepareToInvalidateCacheTuple(Relation relation,
 
1769
                                                          HeapTuple tuple,
 
1770
                                                void (*function) (int, uint32, ItemPointer, Oid))
 
1771
{
 
1772
        CatCache   *ccp;
 
1773
        Oid                     reloid;
 
1774
 
 
1775
        CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
 
1776
 
 
1777
        /*
 
1778
         * sanity checks
 
1779
         */
 
1780
        Assert(RelationIsValid(relation));
 
1781
        Assert(HeapTupleIsValid(tuple));
 
1782
        Assert(PointerIsValid(function));
 
1783
        Assert(CacheHdr != NULL);
 
1784
 
 
1785
        reloid = RelationGetRelid(relation);
 
1786
 
 
1787
        /* ----------------
 
1788
         *      for each cache
 
1789
         *         if the cache contains tuples from the specified relation
 
1790
         *                 compute the tuple's hash value in this cache,
 
1791
         *                 and call the passed function to register the information.
 
1792
         * ----------------
 
1793
         */
 
1794
 
 
1795
        for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
 
1796
        {
 
1797
                /* Just in case cache hasn't finished initialization yet... */
 
1798
                if (ccp->cc_tupdesc == NULL)
 
1799
                        CatalogCacheInitializeCache(ccp);
 
1800
 
 
1801
                if (ccp->cc_reloid != reloid)
 
1802
                        continue;
 
1803
 
 
1804
                (*function) (ccp->id,
 
1805
                                         CatalogCacheComputeTupleHashValue(ccp, tuple),
 
1806
                                         &tuple->t_self,
 
1807
                                         ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);
 
1808
        }
 
1809
}
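/*
 * Editor's illustrative sketch (not part of the original file): a callback
 * with the signature PrepareToInvalidateCacheTuple expects.  The real
 * callback is supplied by cache/inval.c; this hypothetical one only logs
 * what it would queue, and ignores the ItemPointer.
 */
#ifdef CATCACHE_USAGE_EXAMPLE
static void
example_register_invalidation(int cacheId, uint32 hashValue,
                                                          ItemPointer tuplePtr, Oid dbId)
{
        elog(DEBUG2, "would queue catcache inval: cache %d, hash %u, db %u",
                 cacheId, hashValue, dbId);
}
#endif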