1
/*-------------------------------------------------------------------------
4
* System catalog cache for tuples matching a key.
6
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
7
* Portions Copyright (c) 1994, Regents of the University of California
11
* $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.118 2004-12-31 22:01:25 pgsql Exp $
13
*-------------------------------------------------------------------------
17
#include "access/genam.h"
18
#include "access/hash.h"
19
#include "access/heapam.h"
20
#include "access/valid.h"
21
#include "catalog/pg_opclass.h"
22
#include "catalog/pg_operator.h"
23
#include "catalog/pg_type.h"
24
#include "catalog/catname.h"
25
#include "catalog/indexing.h"
26
#include "miscadmin.h"
28
#include "storage/ipc.h" /* for on_proc_exit */
30
#include "utils/builtins.h"
31
#include "utils/fmgroids.h"
32
#include "utils/catcache.h"
33
#include "utils/relcache.h"
34
#include "utils/resowner.h"
35
#include "utils/syscache.h"
38
/* #define CACHEDEBUG */ /* turns DEBUG elogs on */
41
* Constants related to size of the catcache.
43
* NCCBUCKETS must be a power of two and must be less than 64K (because
44
* SharedInvalCatcacheMsg crams hash indexes into a uint16 field). In
45
* practice it should be a lot less, anyway, to avoid chewing up too much
46
* space on hash bucket headers.
48
* MAXCCTUPLES could be as small as a few hundred, if per-backend memory
49
* consumption is at a premium.
51
#define NCCBUCKETS 256 /* Hash buckets per CatCache */
52
#define MAXCCTUPLES 5000 /* Maximum # of tuples in all caches */
55
* Given a hash value and the size of the hash table, find the bucket
56
* in which the hash value belongs. Since the hash table must contain
57
* a power-of-2 number of elements, this is a simple bitmask.
59
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
63
* variables, macros and other stuff
67
#define CACHE1_elog(a,b) elog(a,b)
68
#define CACHE2_elog(a,b,c) elog(a,b,c)
69
#define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
70
#define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
71
#define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
72
#define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
74
#define CACHE1_elog(a,b)
75
#define CACHE2_elog(a,b,c)
76
#define CACHE3_elog(a,b,c,d)
77
#define CACHE4_elog(a,b,c,d,e)
78
#define CACHE5_elog(a,b,c,d,e,f)
79
#define CACHE6_elog(a,b,c,d,e,f,g)
82
/* Cache management header --- pointer is NULL until created */
83
static CatCacheHeader *CacheHdr = NULL;
86
static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
88
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
92
static void CatCachePrintStats(void);
94
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
95
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
96
static void CatalogCacheInitializeCache(CatCache *cache);
97
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
98
uint32 hashValue, Index hashIndex,
100
static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
104
* internal support functions
108
* Look up the hash and equality functions for system types that are used
109
* as cache key fields.
111
* XXX this should be replaced by catalog lookups,
112
* but that seems to pose considerable risk of circularity...
115
GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
120
*hashfunc = hashchar;
124
*hashfunc = hashchar;
128
*hashfunc = hashname;
132
*hashfunc = hashint2;
136
*hashfunc = hashint2vector;
137
*eqfunc = F_INT2VECTOREQ;
140
*hashfunc = hashint4;
144
*hashfunc = hashtext;
149
case REGPROCEDUREOID:
158
*hashfunc = hashoidvector;
159
*eqfunc = F_OIDVECTOREQ;
162
elog(FATAL, "type %u not supported as catcache key", keytype);
168
* CatalogCacheComputeHashValue
170
* Compute the hash value associated with a given set of lookup keys
173
CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
175
uint32 hashValue = 0;
177
CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
186
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
187
cur_skey[3].sk_argument)) << 9;
191
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
192
cur_skey[2].sk_argument)) << 6;
196
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
197
cur_skey[1].sk_argument)) << 3;
201
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
202
cur_skey[0].sk_argument));
205
elog(FATAL, "wrong number of hash keys: %d", nkeys);
213
* CatalogCacheComputeTupleHashValue
215
* Compute the hash value associated with a given tuple to be cached
218
CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
220
ScanKeyData cur_skey[4];
223
/* Copy pre-initialized overhead data for scankey */
224
memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
226
/* Now extract key fields from tuple, insert into scankey */
227
switch (cache->cc_nkeys)
230
cur_skey[3].sk_argument =
231
(cache->cc_key[3] == ObjectIdAttributeNumber)
232
? ObjectIdGetDatum(HeapTupleGetOid(tuple))
240
cur_skey[2].sk_argument =
241
(cache->cc_key[2] == ObjectIdAttributeNumber)
242
? ObjectIdGetDatum(HeapTupleGetOid(tuple))
250
cur_skey[1].sk_argument =
251
(cache->cc_key[1] == ObjectIdAttributeNumber)
252
? ObjectIdGetDatum(HeapTupleGetOid(tuple))
260
cur_skey[0].sk_argument =
261
(cache->cc_key[0] == ObjectIdAttributeNumber)
262
? ObjectIdGetDatum(HeapTupleGetOid(tuple))
270
elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
274
return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
278
#ifdef CATCACHE_STATS
281
CatCachePrintStats(void)
284
long cc_searches = 0;
286
long cc_neg_hits = 0;
287
long cc_newloads = 0;
289
long cc_discards = 0;
290
long cc_lsearches = 0;
293
elog(DEBUG2, "catcache stats dump: %d/%d tuples in catcaches",
294
CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
296
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
298
if (cache->cc_ntup == 0 && cache->cc_searches == 0)
299
continue; /* don't print unused caches */
300
elog(DEBUG2, "catcache %s/%s: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
307
cache->cc_hits + cache->cc_neg_hits,
309
cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
310
cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
315
cc_searches += cache->cc_searches;
316
cc_hits += cache->cc_hits;
317
cc_neg_hits += cache->cc_neg_hits;
318
cc_newloads += cache->cc_newloads;
319
cc_invals += cache->cc_invals;
320
cc_discards += cache->cc_discards;
321
cc_lsearches += cache->cc_lsearches;
322
cc_lhits += cache->cc_lhits;
324
elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
329
cc_hits + cc_neg_hits,
331
cc_searches - cc_hits - cc_neg_hits - cc_newloads,
332
cc_searches - cc_hits - cc_neg_hits,
338
#endif /* CATCACHE_STATS */
344
* Unlink and delete the given cache entry
346
* NB: if it is a member of a CatCList, the CatCList is deleted too.
349
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
351
Assert(ct->refcount == 0);
352
Assert(ct->my_cache == cache);
355
CatCacheRemoveCList(cache, ct->c_list);
357
/* delink from linked lists */
358
DLRemove(&ct->lrulist_elem);
359
DLRemove(&ct->cache_elem);
361
/* free associated tuple data */
362
if (ct->tuple.t_data != NULL)
363
pfree(ct->tuple.t_data);
371
* CatCacheRemoveCList
373
* Unlink and delete the given cache list entry
376
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
380
Assert(cl->refcount == 0);
381
Assert(cl->my_cache == cache);
383
/* delink from member tuples */
384
for (i = cl->n_members; --i >= 0;)
386
CatCTup *ct = cl->members[i];
388
Assert(ct->c_list == cl);
392
/* delink from linked list */
393
DLRemove(&cl->cache_elem);
395
/* free associated tuple data */
396
if (cl->tuple.t_data != NULL)
397
pfree(cl->tuple.t_data);
403
* CatalogCacheIdInvalidate
405
* Invalidate entries in the specified cache, given a hash value and
406
* item pointer. Positive entries are deleted if they match the item
407
* pointer. Negative entries must be deleted if they match the hash
408
* value (since we do not have the exact key of the tuple that's being
409
* inserted). But this should only rarely result in loss of a cache
410
* entry that could have been kept.
412
* Note that it's not very relevant whether the tuple identified by
413
* the item pointer is being inserted or deleted. We don't expect to
414
* find matching positive entries in the one case, and we don't expect
415
* to find matching negative entries in the other; but we will do the
416
* right things in any case.
418
* This routine is only quasi-public: it should only be used by inval.c.
421
CatalogCacheIdInvalidate(int cacheId,
430
Assert(ItemPointerIsValid(pointer));
431
CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");
434
* inspect caches to find the proper cache
436
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
442
if (cacheId != ccp->id)
446
* We don't bother to check whether the cache has finished
447
* initialization yet; if not, there will be no entries in it so
452
* Invalidate *all* CatCLists in this cache; it's too hard to tell
453
* which searches might still be correct, so just zap 'em all.
455
for (elt = DLGetHead(&ccp->cc_lists); elt; elt = nextelt)
457
CatCList *cl = (CatCList *) DLE_VAL(elt);
459
nextelt = DLGetSucc(elt);
461
if (cl->refcount > 0)
464
CatCacheRemoveCList(ccp, cl);
468
* inspect the proper hash bucket for tuple matches
470
hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);
472
for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
474
CatCTup *ct = (CatCTup *) DLE_VAL(elt);
476
nextelt = DLGetSucc(elt);
478
if (hashValue != ct->hash_value)
479
continue; /* ignore non-matching hash values */
482
ItemPointerEquals(pointer, &ct->tuple.t_self))
484
if (ct->refcount > 0)
487
CatCacheRemoveCTup(ccp, ct);
488
CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
489
#ifdef CATCACHE_STATS
492
/* could be multiple matches, so keep looking! */
495
break; /* need only search this one cache */
499
/* ----------------------------------------------------------------
501
* ----------------------------------------------------------------
506
* Standard routine for creating cache context if it doesn't exist yet
508
* There are a lot of places (probably far more than necessary) that check
509
* whether CacheMemoryContext exists yet and want to create it if not.
510
* We centralize knowledge of exactly how to create it here.
513
CreateCacheMemoryContext(void)
516
* Purely for paranoia, check that context doesn't exist; caller
517
* probably did so already.
519
if (!CacheMemoryContext)
520
CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
521
"CacheMemoryContext",
522
ALLOCSET_DEFAULT_MINSIZE,
523
ALLOCSET_DEFAULT_INITSIZE,
524
ALLOCSET_DEFAULT_MAXSIZE);
531
* Clean up catcaches at end of main transaction (either commit or abort)
533
* We scan the caches to reset refcounts to zero. This is of course
534
* necessary in the abort case, since elog() may have interrupted routines.
535
* In the commit case, any nonzero counts indicate failure to call
536
* ReleaseSysCache, so we put out a notice for debugging purposes.
539
AtEOXact_CatCache(bool isCommit)
546
* First clean up CatCLists
548
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
550
for (elt = DLGetHead(&ccp->cc_lists); elt; elt = nextelt)
552
CatCList *cl = (CatCList *) DLE_VAL(elt);
554
nextelt = DLGetSucc(elt);
556
if (cl->refcount != 0)
559
elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
560
ccp->cc_relname, ccp->id, cl, cl->refcount);
564
/* Clean up any now-deletable dead entries */
566
CatCacheRemoveCList(ccp, cl);
571
* Now clean up tuples; we can scan them all using the global LRU list
573
for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = nextelt)
575
CatCTup *ct = (CatCTup *) DLE_VAL(elt);
577
nextelt = DLGetSucc(elt);
579
if (ct->refcount != 0)
582
elog(WARNING, "cache reference leak: cache %s (%d), tuple %u has count %d",
583
ct->my_cache->cc_relname, ct->my_cache->id,
584
HeapTupleGetOid(&ct->tuple),
589
/* Clean up any now-deletable dead entries */
591
CatCacheRemoveCTup(ct->my_cache, ct);
598
* Reset one catalog cache to empty.
600
* This is not very efficient if the target cache is nearly empty.
601
* However, it shouldn't need to be efficient; we don't invoke it often.
604
ResetCatalogCache(CatCache *cache)
610
/* Remove each list in this cache, or at least mark it dead */
611
for (elt = DLGetHead(&cache->cc_lists); elt; elt = nextelt)
613
CatCList *cl = (CatCList *) DLE_VAL(elt);
615
nextelt = DLGetSucc(elt);
617
if (cl->refcount > 0)
620
CatCacheRemoveCList(cache, cl);
623
/* Remove each tuple in this cache, or at least mark it dead */
624
for (i = 0; i < cache->cc_nbuckets; i++)
626
for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
628
CatCTup *ct = (CatCTup *) DLE_VAL(elt);
630
nextelt = DLGetSucc(elt);
632
if (ct->refcount > 0)
635
CatCacheRemoveCTup(cache, ct);
636
#ifdef CATCACHE_STATS
646
* Reset all caches when a shared cache inval event forces it
649
ResetCatalogCaches(void)
653
CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
655
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
656
ResetCatalogCache(cache);
658
CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
662
* CatalogCacheFlushRelation
664
* This is called by RelationFlushRelation() to clear out cached information
665
* about a relation being dropped. (This could be a DROP TABLE command,
666
* or a temp table being dropped at end of transaction, or a table created
667
* during the current transaction that is being dropped because of abort.)
668
* Remove all cache entries relevant to the specified relation OID.
670
* A special case occurs when relId is itself one of the cacheable system
671
* tables --- although those'll never be dropped, they can get flushed from
672
* the relcache (VACUUM causes this, for example). In that case we need
673
* to flush all cache entries that came from that table. (At one point we
674
* also tried to force re-execution of CatalogCacheInitializeCache for
675
* the cache(s) on that table. This is a bad idea since it leads to all
676
* kinds of trouble if a cache flush occurs while loading cache entries.
677
* We now avoid the need to do it by copying cc_tupdesc out of the relcache,
678
* rather than relying on the relcache to keep a tupdesc for us. Of course
679
* this assumes the tupdesc of a cachable system table will not change...)
682
CatalogCacheFlushRelation(Oid relId)
686
CACHE2_elog(DEBUG2, "CatalogCacheFlushRelation called for %u", relId);
688
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
692
/* We can ignore uninitialized caches, since they must be empty */
693
if (cache->cc_tupdesc == NULL)
696
/* Does this cache store tuples of the target relation itself? */
697
if (cache->cc_tupdesc->attrs[0]->attrelid == relId)
699
/* Yes, so flush all its contents */
700
ResetCatalogCache(cache);
704
/* Does this cache store tuples associated with relations at all? */
705
if (cache->cc_reloidattr == 0)
706
continue; /* nope, leave it alone */
708
/* Yes, scan the tuples and remove those related to relId */
709
for (i = 0; i < cache->cc_nbuckets; i++)
714
for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
716
CatCTup *ct = (CatCTup *) DLE_VAL(elt);
719
nextelt = DLGetSucc(elt);
722
* Negative entries are never considered related to a rel,
723
* even if the rel is part of their lookup key.
728
if (cache->cc_reloidattr == ObjectIdAttributeNumber)
729
tupRelid = HeapTupleGetOid(&ct->tuple);
735
DatumGetObjectId(fastgetattr(&ct->tuple,
736
cache->cc_reloidattr,
742
if (tupRelid == relId)
744
if (ct->refcount > 0)
747
CatCacheRemoveCTup(cache, ct);
748
#ifdef CATCACHE_STATS
756
CACHE1_elog(DEBUG2, "end of CatalogCacheFlushRelation call");
762
* This allocates and initializes a cache for a system catalog relation.
763
* Actually, the cache is only partially initialized to avoid opening the
764
* relation. The relation will be opened and the rest of the cache
765
* structure initialized on the first access.
768
#define InitCatCache_DEBUG2 \
770
elog(DEBUG2, "InitCatCache: rel=%s id=%d nkeys=%d size=%d", \
771
cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_nbuckets); \
775
#define InitCatCache_DEBUG2
787
MemoryContext oldcxt;
791
* first switch to the cache context so our allocations do not vanish
792
* at the end of a transaction
794
if (!CacheMemoryContext)
795
CreateCacheMemoryContext();
797
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
800
* if first time through, initialize the cache group header, including
801
* global LRU list header
803
if (CacheHdr == NULL)
805
CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
806
CacheHdr->ch_caches = NULL;
807
CacheHdr->ch_ntup = 0;
808
CacheHdr->ch_maxtup = MAXCCTUPLES;
809
DLInitList(&CacheHdr->ch_lrulist);
810
#ifdef CATCACHE_STATS
811
on_proc_exit(CatCachePrintStats, 0);
816
* allocate a new cache structure
818
* Note: we assume zeroing initializes the Dllist headers correctly
820
cp = (CatCache *) palloc0(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
823
* initialize the cache's relation information for the relation
824
* corresponding to this cache, and initialize some of the new cache's
825
* other internal fields. But don't open the relation yet.
828
cp->cc_relname = relname;
829
cp->cc_indname = indname;
830
cp->cc_reloid = InvalidOid; /* temporary */
831
cp->cc_relisshared = false; /* temporary */
832
cp->cc_tupdesc = (TupleDesc) NULL;
833
cp->cc_reloidattr = reloidattr;
835
cp->cc_nbuckets = NCCBUCKETS;
836
cp->cc_nkeys = nkeys;
837
for (i = 0; i < nkeys; ++i)
838
cp->cc_key[i] = key[i];
841
* new cache is initialized as far as we can go for now. print some
842
* debugging information, if appropriate.
847
* add completed cache to top of group header's list
849
cp->cc_next = CacheHdr->ch_caches;
850
CacheHdr->ch_caches = cp;
853
* back to the old context before we return...
855
MemoryContextSwitchTo(oldcxt);
861
* CatalogCacheInitializeCache
863
* This function does final initialization of a catcache: obtain the tuple
864
* descriptor and set up the hash and equality function links. We assume
865
* that the relcache entry can be opened at this point!
868
#define CatalogCacheInitializeCache_DEBUG2 \
869
elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p %s", cache, \
872
#define CatalogCacheInitializeCache_DEBUG2 \
874
if (cache->cc_key[i] > 0) { \
875
elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
876
i+1, cache->cc_nkeys, cache->cc_key[i], \
877
tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
879
elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
880
i+1, cache->cc_nkeys, cache->cc_key[i]); \
885
#define CatalogCacheInitializeCache_DEBUG2
886
#define CatalogCacheInitializeCache_DEBUG2
890
CatalogCacheInitializeCache(CatCache *cache)
893
MemoryContext oldcxt;
897
CatalogCacheInitializeCache_DEBUG2;
900
* Open the relation without locking --- we only need the tupdesc,
901
* which we assume will never change ...
903
relation = heap_openr(cache->cc_relname, NoLock);
904
Assert(RelationIsValid(relation));
907
* switch to the cache context so our allocations do not vanish at the
908
* end of a transaction
910
Assert(CacheMemoryContext != NULL);
912
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
915
* copy the relcache's tuple descriptor to permanent cache storage
917
tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
920
* get the relation's OID and relisshared flag, too
922
cache->cc_reloid = RelationGetRelid(relation);
923
cache->cc_relisshared = RelationGetForm(relation)->relisshared;
926
* return to the caller's memory context and close the rel
928
MemoryContextSwitchTo(oldcxt);
930
heap_close(relation, NoLock);
932
CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
933
cache->cc_relname, cache->cc_nkeys);
936
* initialize cache's key information
938
for (i = 0; i < cache->cc_nkeys; ++i)
943
CatalogCacheInitializeCache_DEBUG2;
945
if (cache->cc_key[i] > 0)
946
keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
949
if (cache->cc_key[i] != ObjectIdAttributeNumber)
950
elog(FATAL, "only sys attr supported in caches is OID");
954
GetCCHashEqFuncs(keytype,
955
&cache->cc_hashfunc[i],
958
cache->cc_isname[i] = (keytype == NAMEOID);
961
* Do equality-function lookup (we assume this won't need a
962
* catalog lookup for any supported type)
964
fmgr_info_cxt(eqfunc,
965
&cache->cc_skey[i].sk_func,
968
/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
969
cache->cc_skey[i].sk_attno = cache->cc_key[i];
971
/* Fill in sk_strategy as well --- always standard equality */
972
cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
973
cache->cc_skey[i].sk_subtype = InvalidOid;
975
CACHE4_elog(DEBUG2, "CatalogCacheInit %s %d %p",
982
* mark this cache fully initialized
984
cache->cc_tupdesc = tupdesc;
988
* InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
990
* The only reason to call this routine is to ensure that the relcache
991
* has created entries for all the catalogs and indexes referenced by
992
* catcaches. Therefore, open the index too. An exception is the indexes
993
* on pg_am, which we don't use (cf. IndexScanOK).
996
InitCatCachePhase2(CatCache *cache)
998
if (cache->cc_tupdesc == NULL)
999
CatalogCacheInitializeCache(cache);
1001
if (cache->id != AMOID &&
1002
cache->id != AMNAME)
1006
idesc = index_openr(cache->cc_indname);
1015
* This function checks for tuples that will be fetched by
1016
* IndexSupportInitialize() during relcache initialization for
1017
* certain system indexes that support critical syscaches.
1018
* We can't use an indexscan to fetch these, else we'll get into
1019
* infinite recursion. A plain heap scan will work, however.
1021
* Once we have completed relcache initialization (signaled by
1022
* criticalRelcachesBuilt), we don't have to worry anymore.
1025
IndexScanOK(CatCache *cache, ScanKey cur_skey)
1027
if (cache->id == INDEXRELID)
1030
* Since the OIDs of indexes aren't hardwired, it's painful to
1031
* figure out which is which. Just force all pg_index searches to
1032
* be heap scans while building the relcaches.
1034
if (!criticalRelcachesBuilt)
1037
else if (cache->id == AMOID ||
1038
cache->id == AMNAME)
1041
* Always do heap scans in pg_am, because it's so small there's
1042
* not much point in an indexscan anyway. We *must* do this when
1043
* initially building critical relcache entries, but we might as
1044
* well just always do it.
1048
else if (cache->id == OPEROID)
1050
if (!criticalRelcachesBuilt)
1052
/* Looking for an OID comparison function? */
1053
Oid lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);
1055
if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
1060
/* Normal case, allow index scan */
1067
* This call searches a system cache for a tuple, opening the relation
1068
* if necessary (on the first access to a particular cache).
1070
* The result is NULL if not found, or a pointer to a HeapTuple in
1071
* the cache. The caller must not modify the tuple, and must call
1072
* ReleaseCatCache() when done with it.
1074
* The search key values should be expressed as Datums of the key columns'
1075
* datatype(s). (Pass zeroes for any unused parameters.) As a special
1076
* exception, the passed-in key for a NAME column can be just a C string;
1077
* the caller need not go to the trouble of converting it to a fully
1081
SearchCatCache(CatCache *cache,
1087
ScanKeyData cur_skey[4];
1093
SysScanDesc scandesc;
1097
* one-time startup overhead for each cache
1099
if (cache->cc_tupdesc == NULL)
1100
CatalogCacheInitializeCache(cache);
1102
#ifdef CATCACHE_STATS
1103
cache->cc_searches++;
1107
* initialize the search key information
1109
memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1110
cur_skey[0].sk_argument = v1;
1111
cur_skey[1].sk_argument = v2;
1112
cur_skey[2].sk_argument = v3;
1113
cur_skey[3].sk_argument = v4;
1116
* find the hash bucket in which to look for the tuple
1118
hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1119
hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1122
* scan the hash bucket until we find a match or exhaust our tuples
1124
for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1126
elt = DLGetSucc(elt))
1130
ct = (CatCTup *) DLE_VAL(elt);
1133
continue; /* ignore dead entries */
1135
if (ct->hash_value != hashValue)
1136
continue; /* quickly skip entry if wrong hash val */
1139
* see if the cached tuple matches our key.
1141
HeapKeyTest(&ct->tuple,
1150
* we found a match in the cache: move it to the front of the
1151
* global LRU list. We also move it to the front of the list for
1152
* its hashbucket, in order to speed subsequent searches. (The
1153
* most frequently accessed elements in any hashbucket will tend
1154
* to be near the front of the hashbucket's list.)
1156
DLMoveToFront(&ct->lrulist_elem);
1157
DLMoveToFront(&ct->cache_elem);
1160
* If it's a positive entry, bump its refcount and return it. If
1161
* it's negative, we can report failure to the caller.
1165
ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1167
ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1169
CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1170
cache->cc_relname, hashIndex);
1172
#ifdef CATCACHE_STATS
1180
CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1181
cache->cc_relname, hashIndex);
1183
#ifdef CATCACHE_STATS
1184
cache->cc_neg_hits++;
1192
* Tuple was not found in cache, so we have to try to retrieve it
1193
* directly from the relation. If found, we will add it to the cache;
1194
* if not found, we will add a negative cache entry instead.
1196
* NOTE: it is possible for recursive cache lookups to occur while
1197
* reading the relation --- for example, due to shared-cache-inval
1198
* messages being processed during heap_open(). This is OK. It's
1199
* even possible for one of those lookups to find and enter the very
1200
* same tuple we are trying to fetch here. If that happens, we will
1201
* enter a second copy of the tuple into the cache. The first copy
1202
* will never be referenced again, and will eventually age out of the
1203
* cache, so there's no functional problem. This case is rare enough
1204
* that it's not worth expending extra cycles to detect.
1206
relation = heap_open(cache->cc_reloid, AccessShareLock);
1208
scandesc = systable_beginscan(relation,
1210
IndexScanOK(cache, cur_skey),
1217
while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1219
ct = CatalogCacheCreateEntry(cache, ntp,
1220
hashValue, hashIndex,
1222
/* immediately set the refcount to 1 */
1223
ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1225
ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1226
break; /* assume only one match */
1229
systable_endscan(scandesc);
1231
heap_close(relation, AccessShareLock);
1234
* If tuple was not found, we need to build a negative cache entry
1235
* containing a fake tuple. The fake tuple has the correct key
1236
* columns, but nulls everywhere else.
1240
ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
1241
ct = CatalogCacheCreateEntry(cache, ntp,
1242
hashValue, hashIndex,
1244
heap_freetuple(ntp);
1246
CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1247
cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1248
CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1249
cache->cc_relname, hashIndex);
1252
* We are not returning the negative entry to the caller, so leave
1253
* its refcount zero.
1259
CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1260
cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1261
CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1262
cache->cc_relname, hashIndex);
1264
#ifdef CATCACHE_STATS
1265
cache->cc_newloads++;
1274
* Decrement the reference count of a catcache entry (releasing the
1275
* hold grabbed by a successful SearchCatCache).
1277
* NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1278
* will be freed as soon as their refcount goes to zero. In combination
1279
* with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1280
* to catch references to already-released catcache entries.
1283
ReleaseCatCache(HeapTuple tuple)
1285
CatCTup *ct = (CatCTup *) (((char *) tuple) -
1286
offsetof(CatCTup, tuple));
1288
/* Safety checks to ensure we were handed a cache entry */
1289
Assert(ct->ct_magic == CT_MAGIC);
1290
Assert(ct->refcount > 0);
1293
ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1295
if (ct->refcount == 0
1296
#ifndef CATCACHE_FORCE_RELEASE
1300
CatCacheRemoveCTup(ct->my_cache, ct);
1305
* SearchCatCacheList
1307
* Generate a list of all tuples matching a partial key (that is,
1308
* a key specifying just the first K of the cache's N key columns).
1310
* The caller must not modify the list object or the pointed-to tuples,
1311
* and must call ReleaseCatCacheList() when done with the list.
1314
SearchCatCacheList(CatCache *cache,
1321
ScanKeyData cur_skey[4];
1327
ListCell *ctlist_item;
1330
SysScanDesc scandesc;
1333
MemoryContext oldcxt;
1337
* one-time startup overhead for each cache
1339
if (cache->cc_tupdesc == NULL)
1340
CatalogCacheInitializeCache(cache);
1342
Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1344
#ifdef CATCACHE_STATS
1345
cache->cc_lsearches++;
1349
* initialize the search key information
1351
memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1352
cur_skey[0].sk_argument = v1;
1353
cur_skey[1].sk_argument = v2;
1354
cur_skey[2].sk_argument = v3;
1355
cur_skey[3].sk_argument = v4;
1358
* compute a hash value of the given keys for faster search. We don't
1359
* presently divide the CatCList items into buckets, but this still
1360
* lets us skip non-matching items quickly most of the time.
1362
lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
1365
* scan the items until we find a match or exhaust our list
1367
for (elt = DLGetHead(&cache->cc_lists);
1369
elt = DLGetSucc(elt))
1373
cl = (CatCList *) DLE_VAL(elt);
1376
continue; /* ignore dead entries */
1378
if (cl->hash_value != lHashValue)
1379
continue; /* quickly skip entry if wrong hash val */
1382
* see if the cached list matches our key.
1384
if (cl->nkeys != nkeys)
1386
HeapKeyTest(&cl->tuple,
1395
* we found a matching list: move each of its members to the front
1396
* of the global LRU list. Also move the list itself to the front
1397
* of the cache's list-of-lists, to speed subsequent searches. (We
1398
* do not move the members to the fronts of their hashbucket
1399
* lists, however, since there's no point in that unless they are
1400
* searched for individually.) Also bump the members' refcounts.
1401
* (member refcounts are NOT registered separately with the
1404
ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1405
for (i = 0; i < cl->n_members; i++)
1407
cl->members[i]->refcount++;
1408
DLMoveToFront(&cl->members[i]->lrulist_elem);
1410
DLMoveToFront(&cl->cache_elem);
1412
/* Bump the list's refcount and return it */
1414
ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1416
CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1419
#ifdef CATCACHE_STATS
1427
* List was not found in cache, so we have to build it by reading the
1428
* relation. For each matching tuple found in the relation, use an
1429
* existing cache entry if possible, else build a new one.
1431
relation = heap_open(cache->cc_reloid, AccessShareLock);
1433
scandesc = systable_beginscan(relation,
1440
/* The list will be ordered iff we are doing an index scan */
1441
ordered = (scandesc->irel != NULL);
1446
while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1452
* See if there's an entry for this tuple already.
1455
hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
1456
hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1458
for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1460
elt = DLGetSucc(elt))
1462
ct = (CatCTup *) DLE_VAL(elt);
1464
if (ct->dead || ct->negative)
1465
continue; /* ignore dead and negative entries */
1467
if (ct->hash_value != hashValue)
1468
continue; /* quickly skip entry if wrong hash val */
1470
if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1471
continue; /* not same tuple */
1474
* Found a match, but can't use it if it belongs to another
1480
/* Found a match, so move it to front */
1481
DLMoveToFront(&ct->lrulist_elem);
1488
/* We didn't find a usable entry, so make a new one */
1489
ct = CatalogCacheCreateEntry(cache, ntp,
1490
hashValue, hashIndex,
1495
* We have to bump the member refcounts immediately to ensure they
1496
* won't get dropped from the cache while loading other members.
1497
* If we get an error before we finish constructing the CatCList
1498
* then we will leak those reference counts. This is annoying but
1499
* it has no real consequence beyond possibly generating some
1500
* warning messages at the next transaction commit, so it's not
1504
ctlist = lappend(ctlist, ct);
1508
systable_endscan(scandesc);
1510
heap_close(relation, AccessShareLock);
1513
* Now we can build the CatCList entry. First we need a dummy tuple
1514
* containing the key values...
1516
ntp = build_dummy_tuple(cache, nkeys, cur_skey);
1517
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1518
cl = (CatCList *) palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
1519
heap_copytuple_with_tuple(ntp, &cl->tuple);
1520
MemoryContextSwitchTo(oldcxt);
1521
heap_freetuple(ntp);
1523
cl->cl_magic = CL_MAGIC;
1524
cl->my_cache = cache;
1525
DLInitElem(&cl->cache_elem, cl);
1526
cl->refcount = 0; /* for the moment */
1528
cl->ordered = ordered;
1530
cl->hash_value = lHashValue;
1531
cl->n_members = nmembers;
1533
Assert(nmembers == list_length(ctlist));
1534
ctlist_item = list_head(ctlist);
1535
for (i = 0; i < nmembers; i++)
1537
cl->members[i] = ct = (CatCTup *) lfirst(ctlist_item);
1538
Assert(ct->c_list == NULL);
1540
/* mark list dead if any members already dead */
1543
ctlist_item = lnext(ctlist_item);
1546
DLAddHead(&cache->cc_lists, &cl->cache_elem);
1548
CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1549
cache->cc_relname, nmembers);
1551
/* Finally, bump the list's refcount and return it */
1552
ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1554
ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1560
* ReleaseCatCacheList
1562
* Decrement the reference counts of a catcache list.
1565
/*
 * Release one reference on a CatCList previously returned by
 * SearchCatCacheList: drop each member tuple's refcount, forget the
 * reference with the current ResourceOwner, and physically remove the
 * list if it is no longer referenced.
 * NOTE(review): this extraction is elided in places (the per-member
 * refcount decrement and several closing braces are missing from view);
 * comments below describe only what the visible lines establish.
 */
ReleaseCatCacheList(CatCList *list)
1569
/* Safety checks to ensure we were handed a cache entry */
1570
Assert(list->cl_magic == CL_MAGIC);
1571
Assert(list->refcount > 0);
1573
/* Walk the member array back-to-front, releasing each member tuple. */
for (i = list->n_members; --i >= 0;)
1575
CatCTup *ct = list->members[i];
1577
Assert(ct->refcount > 0);
1583
/* can't remove tuple before list is removed */
1587
/* Un-register this list reference from the current resource owner. */
ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
1589
/*
 * If nobody else holds the list, remove it from the cache.
 * NOTE(review): the if-condition continues on lines elided from this
 * view (presumably a list->dead test); only its first clause is visible.
 */
if (list->refcount == 0
1590
#ifndef CATCACHE_FORCE_RELEASE
1594
CatCacheRemoveCList(list->my_cache, list);
1599
1599
* CatalogCacheCreateEntry
1600
* Create a new CatCTup entry, copying the given HeapTuple and other
1601
* supplied data into it. The new entry initially has refcount 0.
1604
/*
 * Build a new CatCTup cache entry (positive or negative, per 'negative')
 * from the given heap tuple, link it into the cache's hash bucket and the
 * global LRU list, then enforce the cache size limit by evicting
 * unreferenced LRU entries.  The new entry starts with refcount 0.
 * NOTE(review): this extraction is elided in places (the loop's early-exit
 * and the function's return statement are missing from view).
 */
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
1605
uint32 hashValue, Index hashIndex, bool negative)
1608
MemoryContext oldcxt;
1611
* Allocate CatCTup header in cache memory, and copy the tuple there
1614
/* Switch to long-lived cache context so the entry survives the caller. */
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1615
ct = (CatCTup *) palloc(sizeof(CatCTup));
1616
heap_copytuple_with_tuple(ntp, &ct->tuple);
1617
MemoryContextSwitchTo(oldcxt);
1620
* Finish initializing the CatCTup header, and add it to the cache's
1621
* linked lists and counts.
1623
ct->ct_magic = CT_MAGIC;
1624
ct->my_cache = cache;
1625
DLInitElem(&ct->lrulist_elem, (void *) ct);
1626
DLInitElem(&ct->cache_elem, (void *) ct);
1628
ct->refcount = 0; /* for the moment */
1630
ct->negative = negative;
1631
ct->hash_value = hashValue;
1633
/* Link into the global LRU list and this cache's hash bucket. */
DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
1634
DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1637
CacheHdr->ch_ntup++;
1640
* If we've exceeded the desired size of the caches, try to throw away
* the least recently used entry. NB: be careful not to throw away
1641
1642
* the newly-built entry...
1644
if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
1649
/* Scan from the LRU tail; save predecessor before possibly freeing elt. */
for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
1651
CatCTup *oldct = (CatCTup *) DLE_VAL(elt);
1653
prevelt = DLGetPred(elt);
1655
/* Only evict entries no one holds a reference to, and never 'ct'. */
if (oldct->refcount == 0 && oldct != ct)
1657
CACHE2_elog(DEBUG2, "CatCacheCreateEntry(%s): Overflow, LRU removal",
1659
#ifdef CATCACHE_STATS
1660
oldct->my_cache->cc_discards++;
1662
CatCacheRemoveCTup(oldct->my_cache, oldct);
1663
/* Stop evicting once we are back under the global tuple limit. */
if (CacheHdr->ch_ntup <= CacheHdr->ch_maxtup)
1674
* Generate a palloc'd HeapTuple that contains the specified key
1675
* columns, and NULLs for other columns.
1677
* This is used to store the keys for negative cache entries and CatCList
1678
* entries, which don't have real tuples associated with them.
1681
/*
 * Construct a palloc'd HeapTuple holding the given search-key values in
 * their catalog columns and NULLs everywhere else; used as the key-holder
 * for negative cache entries and CatCList headers, which have no real
 * tuple.  An OID key (attindex == ObjectIdAttributeNumber) is stored via
 * HeapTupleSetOid rather than as a regular column.
 * NOTE(review): this extraction is elided in places (the pfree cleanup
 * and return statement are missing from view).
 */
build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1684
TupleDesc tupDesc = cache->cc_tupdesc;
1687
Oid tupOid = InvalidOid;
1688
NameData tempNames[4];
1691
values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1692
nulls = (char *) palloc(tupDesc->natts * sizeof(char));
1694
/* Start with every column NULL ('n'); keys overwrite theirs with ' '. */
memset(values, 0, tupDesc->natts * sizeof(Datum));
1695
memset(nulls, 'n', tupDesc->natts * sizeof(char));
1697
for (i = 0; i < nkeys; i++)
1699
/* cc_key[] maps key position to 1-based catalog attribute number. */
int attindex = cache->cc_key[i];
1700
Datum keyval = skeys[i].sk_argument;
1705
* Here we must be careful in case the caller passed a C
1706
* string where a NAME is wanted: convert the given argument
1707
* to a correctly padded NAME. Otherwise the memcpy() done in
1708
* heap_formtuple could fall off the end of memory.
1710
if (cache->cc_isname[i])
1712
/* tempNames[] is stack space for up to 4 padded NAME copies. */
Name newval = &tempNames[i];
1714
namestrcpy(newval, DatumGetCString(keyval));
1715
keyval = NameGetDatum(newval);
1717
values[attindex - 1] = keyval;
1718
nulls[attindex - 1] = ' ';
1722
/* Non-positive attindex: only the OID system column is expected here. */
Assert(attindex == ObjectIdAttributeNumber);
1723
tupOid = DatumGetObjectId(keyval);
1727
ntp = heap_formtuple(tupDesc, values, nulls);
1728
/* The OID travels in the tuple header, not in the user columns. */
if (tupOid != InvalidOid)
1729
HeapTupleSetOid(ntp, tupOid);
1739
* PrepareToInvalidateCacheTuple()
1741
* This is part of a rather subtle chain of events, so pay attention:
1743
* When a tuple is inserted or deleted, it cannot be flushed from the
1744
* catcaches immediately, for reasons explained at the top of cache/inval.c.
1745
* Instead we have to add entry(s) for the tuple to a list of pending tuple
1746
* invalidations that will be done at the end of the command or transaction.
1748
* The lists of tuples that need to be flushed are kept by inval.c. This
1749
* routine is a helper routine for inval.c. Given a tuple belonging to
1750
* the specified relation, find all catcaches it could be in, compute the
1751
* correct hash value for each such catcache, and call the specified function
1752
* to record the cache id, hash value, and tuple ItemPointer in inval.c's
1753
* lists. CatalogCacheIdInvalidate will be called later, if appropriate,
1754
* using the recorded information.
1756
* Note that it is irrelevant whether the given tuple is actually loaded
1757
* into the catcache at the moment. Even if it's not there now, it might
1758
* be by the end of the command, or there might be a matching negative entry
1759
* to flush --- or other backends' caches might have such entries --- so
1760
* we have to make list entries to flush it later.
1762
* Also note that it's not an error if there are no catcaches for the
1763
* specified relation. inval.c doesn't know exactly which rels have
1764
* catcaches --- it will call this routine for any tuple that's in a
1768
PrepareToInvalidateCacheTuple(Relation relation,
1770
void (*function) (int, uint32, ItemPointer, Oid))
1775
CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
1780
Assert(RelationIsValid(relation));
1781
Assert(HeapTupleIsValid(tuple));
1782
Assert(PointerIsValid(function));
1783
Assert(CacheHdr != NULL);
1785
reloid = RelationGetRelid(relation);
1789
* if the cache contains tuples from the specified relation
1790
* compute the tuple's hash value in this cache,
1791
* and call the passed function to register the information.
1795
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
1797
/* Just in case cache hasn't finished initialization yet... */
1798
if (ccp->cc_tupdesc == NULL)
1799
CatalogCacheInitializeCache(ccp);
1801
if (ccp->cc_reloid != reloid)
1804
(*function) (ccp->id,
1805
CatalogCacheComputeTupleHashValue(ccp, tuple),
1807
ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);