/*-------------------------------------------------------------------------
 *
 * costsize.c
 *	  Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in arbitrary units established by these basic
 * parameters:
 *
 *	seq_page_cost		Cost of a sequential page fetch
 *	random_page_cost	Cost of a non-sequential page fetch
 *	cpu_tuple_cost		Cost of typical CPU time to process a tuple
 *	cpu_index_tuple_cost	Cost of typical CPU time to process an index tuple
 *	cpu_operator_cost	Cost of CPU time to execute an operator or function
 *
 * We expect that the kernel will typically do some amount of read-ahead
 * optimization; this in conjunction with seek costs means that seq_page_cost
 * is normally considerably less than random_page_cost.  (However, if the
 * database is fully cached in RAM, it is reasonable to set them equal.)
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * We compute two separate costs for each path:
 *		total_cost: total estimated cost to fetch all tuples
 *		startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *		actual_cost = startup_cost +
 *			(total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
 * Note that a base relation's rows count (and, by extension, plan_rows for
 * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
 * that this equation works properly.  (Also, these routines guarantee not to
 * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
 * applied as a top-level plan node.
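 *
 * For example, if a path has startup_cost = 10 and total_cost = 1010, and
 * its parent rel is expected to yield 1000 rows, then fetching only the
 * first 100 tuples should cost about
 *		10 + (1010 - 10) * 100 / 1000 = 110.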
 *
 * For largely historical reasons, most of the routines in this module use
 * the passed result Path only to store their startup_cost and total_cost
 * results into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the Path.
 * An exception is made for the cost_XXXjoin() routines, which expect all
 * the non-cost fields of the passed XXXPath to be filled in.
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <math.h>

#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/pathnode.h"
#include "optimizer/placeholder.h"
#include "optimizer/planmain.h"
#include "parser/parsetree.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"
#include "utils/tuplesort.h"


/* Divide by ln(2) to convert the natural log to log base 2 */
#define LOG2(x)  (log(x) / 0.693147180559945)

/*
 * Some Paths return fewer rows than the nominal rowcount of their parent
 * relations; join nodes need to use the smaller number to get the correct
 * input count:
 */
#define PATH_ROWS(path) \
	(IsA(path, UniquePath) ? \
	 ((UniquePath *) (path))->rows : \
	 (path)->parent->rows)

double		seq_page_cost = DEFAULT_SEQ_PAGE_COST;
double		random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double		cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double		cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double		cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;

int			effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;

Cost		disable_cost = 100000000.0;

bool		enable_seqscan = true;
bool		enable_indexscan = true;
bool		enable_bitmapscan = true;
bool		enable_tidscan = true;
bool		enable_sort = true;
bool		enable_hashagg = true;
bool		enable_nestloop = true;
bool		enable_mergejoin = true;
bool		enable_hashjoin = true;

typedef struct
{
	PlannerInfo *root;
	QualCost	total;
} cost_qual_eval_context;

static MergeScanSelCache *cached_scansel(PlannerInfo *root,
			   RestrictInfo *rinfo,
			   PathKey *pathkey);
static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
				   List *quals);
static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);

/*
 * clamp_row_est
 *		Force a row-count estimate to a sane value.
 */
double
clamp_row_est(double nrows)
{
	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating costs.
	 * Make it an integer, too.
	 */
	if (nrows <= 1.0)
		nrows = 1.0;
	else
		nrows = rint(nrows);

	return nrows;
}

/*
 * cost_seqscan
 *	  Determines and returns the cost of scanning a relation sequentially.
 */
void
cost_seqscan(Path *path, PlannerInfo *root,
			 RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	if (!enable_seqscan)
		startup_cost += disable_cost;

	/* disk costs */
	run_cost += seq_page_cost * baserel->pages;

	/* CPU costs */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
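
/*
 * For illustration, with the default cost parameters (seq_page_cost = 1.0,
 * cpu_tuple_cost = 0.01) and no restriction quals, a sequential scan of a
 * 10000-page table holding 1000000 tuples is costed at about
 *		1.0 * 10000 + 0.01 * 1000000 = 20000
 * units, all of it run cost (startup_cost stays zero).
 */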

/*
 * cost_index
 *	  Determines and returns the cost of scanning a relation using an index.
 *
 * 'index' is the index to be used
 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
 * 'outer_rel' is the outer relation when we are considering using the index
 *		scan as the inside of a nestloop join (hence, some of the indexQuals
 *		are join clauses, and we should expect repeated scans of the index);
 *		NULL for a plain index scan
 *
 * cost_index() takes an IndexPath not just a Path, because it sets a few
 * additional fields of the IndexPath besides startup_cost and total_cost.
 * These fields are needed if the IndexPath is used in a BitmapIndexScan.
 *
 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
 * Any additional quals evaluated as qpquals may reduce the number of returned
 * tuples, but they won't reduce the number of tuples we have to fetch from
 * the table, so they don't reduce the scan cost.
 *
 * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
 * it was a list of bare clause expressions.
 */
void
cost_index(IndexPath *path, PlannerInfo *root,
		   IndexOptInfo *index,
		   List *indexQuals,
		   RelOptInfo *outer_rel)
{
	RelOptInfo *baserel = index->rel;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		indexStartupCost;
	Cost		indexTotalCost;
	Selectivity indexSelectivity;
	double		indexCorrelation,
				csquared;
	Cost		min_IO_cost,
				max_IO_cost;
	Cost		cpu_per_tuple;
	double		tuples_fetched;
	double		pages_fetched;

	/* Should only be applied to base relations */
	Assert(IsA(baserel, RelOptInfo) &&
		   IsA(index, IndexOptInfo));
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	if (!enable_indexscan)
		startup_cost += disable_cost;

	/*
	 * Call index-access-method-specific code to estimate the processing cost
	 * for scanning the index, as well as the selectivity of the index (ie,
	 * the fraction of main-table tuples we will have to retrieve) and its
	 * correlation to the main-table tuple order.
	 */
	OidFunctionCall8(index->amcostestimate,
					 PointerGetDatum(root),
					 PointerGetDatum(index),
					 PointerGetDatum(indexQuals),
					 PointerGetDatum(outer_rel),
					 PointerGetDatum(&indexStartupCost),
					 PointerGetDatum(&indexTotalCost),
					 PointerGetDatum(&indexSelectivity),
					 PointerGetDatum(&indexCorrelation));

	/*
	 * Save amcostestimate's results for possible use in bitmap scan planning.
	 * We don't bother to save indexStartupCost or indexCorrelation, because a
	 * bitmap scan doesn't care about either.
	 */
	path->indextotalcost = indexTotalCost;
	path->indexselectivity = indexSelectivity;

	/* all costs for touching index itself included here */
	startup_cost += indexStartupCost;
	run_cost += indexTotalCost - indexStartupCost;

	/* estimate number of main-table tuples fetched */
	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

	/*
	 * Estimate number of main-table pages fetched, and compute I/O cost.
	 *
	 * When the index ordering is uncorrelated with the table ordering,
	 * we use an approximation proposed by Mackert and Lohman (see
	 * index_pages_fetched() for details) to compute the number of pages
	 * fetched, and then charge random_page_cost per page fetched.
	 *
	 * When the index ordering is exactly correlated with the table ordering
	 * (just after a CLUSTER, for example), the number of pages fetched should
	 * be exactly selectivity * table_size.  What's more, all but the first
	 * will be sequential fetches, not the random fetches that occur in the
	 * uncorrelated case.  So if the number of pages is more than 1, we
	 * ought to charge
	 *		random_page_cost + (pages_fetched - 1) * seq_page_cost
	 * For partially-correlated indexes, we ought to charge somewhere between
	 * these two estimates.  We currently interpolate linearly between the
	 * estimates based on the correlation squared (XXX is that appropriate?).
	 */
	if (outer_rel != NULL && outer_rel->rows > 1)
	{
		/*
		 * For repeated indexscans, the appropriate estimate for the
		 * uncorrelated case is to scale up the number of tuples fetched in
		 * the Mackert and Lohman formula by the number of scans, so that we
		 * estimate the number of pages fetched by all the scans; then
		 * pro-rate the costs for one scan.  In this case we assume all the
		 * fetches are random accesses.
		 */
		double		num_scans = outer_rel->rows;

		pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
											baserel->pages,
											(double) index->pages,
											root);

		max_IO_cost = (pages_fetched * random_page_cost) / num_scans;

		/*
		 * In the perfectly correlated case, the number of pages touched by
		 * each scan is selectivity * table_size, and we can use the Mackert
		 * and Lohman formula at the page level to estimate how much work is
		 * saved by caching across scans.  We still assume all the fetches are
		 * random, though, which is an overestimate that's hard to correct for
		 * without double-counting the cache effects.  (But in most cases
		 * where such a plan is actually interesting, only one page would get
		 * fetched per scan anyway, so it shouldn't matter much.)
		 */
		pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

		pages_fetched = index_pages_fetched(pages_fetched * num_scans,
											baserel->pages,
											(double) index->pages,
											root);

		min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
	}
	else
	{
		/*
		 * Normal case: apply the Mackert and Lohman formula, and then
		 * interpolate between that and the correlation-derived result.
		 */
		pages_fetched = index_pages_fetched(tuples_fetched,
											baserel->pages,
											(double) index->pages,
											root);

		/* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
		max_IO_cost = pages_fetched * random_page_cost;

		/* min_IO_cost is for the perfectly correlated case (csquared=1) */
		pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
		min_IO_cost = random_page_cost;
		if (pages_fetched > 1)
			min_IO_cost += (pages_fetched - 1) * seq_page_cost;
	}

	/*
	 * Now interpolate based on estimated index order correlation to get total
	 * disk I/O cost for main table accesses.
	 */
	csquared = indexCorrelation * indexCorrelation;

	run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);

	/*
	 * Estimate CPU costs per tuple.
	 *
	 * Normally the indexquals will be removed from the list of restriction
	 * clauses that we have to evaluate as qpquals, so we should subtract
	 * their costs from baserestrictcost.  But if we are doing a join then
	 * some of the indexquals are join clauses and shouldn't be subtracted.
	 * Rather than work out exactly how much to subtract, we don't subtract
	 * anything.
	 */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;

	if (outer_rel == NULL)
	{
		QualCost	index_qual_cost;

		cost_qual_eval(&index_qual_cost, indexQuals, root);
		/* any startup cost still has to be paid ... */
		cpu_per_tuple -= index_qual_cost.per_tuple;
	}

	run_cost += cpu_per_tuple * tuples_fetched;

	path->path.startup_cost = startup_cost;
	path->path.total_cost = startup_cost + run_cost;
}
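
/*
 * For illustration, if the Mackert-Lohman estimate says 100 pages would be
 * fetched in the uncorrelated case (max_IO_cost = 100 * 4.0 = 400 with the
 * default random_page_cost) while a perfectly correlated index would touch
 * 10 pages (min_IO_cost = 4.0 + 9 * 1.0 = 13), then an index with measured
 * correlation 0.5 (csquared = 0.25) is charged
 *		400 + 0.25 * (13 - 400) = 303.25
 * for main-table I/O.
 */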

/*
 * index_pages_fetched
 *	  Estimate the number of pages actually fetched after accounting for
 *	  cache effects.
 *
 * We use an approximation proposed by Mackert and Lohman, "Index Scans
 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
 * The Mackert and Lohman approximation is that the number of pages
 * fetched is
 *	PF =
 *		min(2TNs/(2T+Ns), T)			when T <= b
 *		2TNs/(2T+Ns)					when T > b and Ns <= 2Tb/(2T-b)
 *		b + (Ns - 2Tb/(2T-b))*(T-b)/T	when T > b and Ns > 2Tb/(2T-b)
 * where
 *		T = # pages in table
 *		N = # tuples in table
 *		s = selectivity = fraction of table to be scanned
 *		b = # buffer pages available (we include kernel space here)
 *
 * We assume that effective_cache_size is the total number of buffer pages
 * available for the whole query, and pro-rate that space across all the
 * tables in the query and the index currently under consideration.  (This
 * ignores space needed for other indexes used by the query, but since we
 * don't know which indexes will get used, we can't estimate that very well;
 * and in any case counting all the tables may well be an overestimate, since
 * depending on the join plan not all the tables may be scanned concurrently.)
 *
 * The product Ns is the number of tuples fetched; we pass in that
 * product rather than calculating it here.  "pages" is the number of pages
 * in the object under consideration (either an index or a table).
 * "index_pages" is the amount to add to the total table space, which was
 * computed for us by query_planner.
 *
 * Caller is expected to have ensured that tuples_fetched is greater than zero
 * and rounded to integer (see clamp_row_est).  The result will likewise be
 * greater than zero and integral.
 */
double
index_pages_fetched(double tuples_fetched, BlockNumber pages,
					double index_pages, PlannerInfo *root)
{
	double		pages_fetched;
	double		total_pages;
	double		T,
				b;

	/* T is # pages in table, but don't allow it to be zero */
	T = (pages > 1) ? (double) pages : 1.0;

	/* Compute number of pages assumed to be competing for cache space */
	total_pages = root->total_table_pages + index_pages;
	total_pages = Max(total_pages, 1.0);
	Assert(T <= total_pages);

	/* b is pro-rated share of effective_cache_size */
	b = (double) effective_cache_size * T / total_pages;

	/* force it positive and integral */
	if (b <= 1.0)
		b = 1.0;
	else
		b = ceil(b);

	/* This part is the Mackert and Lohman formula */
	if (T <= b)
	{
		pages_fetched =
			(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
		if (pages_fetched >= T)
			pages_fetched = T;
		else
			pages_fetched = ceil(pages_fetched);
	}
	else
	{
		double		lim;

		lim = (2.0 * T * b) / (2.0 * T - b);
		if (tuples_fetched <= lim)
		{
			pages_fetched =
				(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
		}
		else
		{
			pages_fetched =
				b + (tuples_fetched - lim) * (T - b) / T;
		}
		pages_fetched = ceil(pages_fetched);
	}
	return pages_fetched;
}
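
/*
 * For illustration, take a table with T = 1000 pages and a fetch of
 * Ns = 100 tuples.  If the pro-rated cache share b is at least 1000 pages
 * (the T <= b case), the formula gives
 *		2 * 1000 * 100 / (2 * 1000 + 100) = 200000 / 2100 ~= 95.3,
 * i.e. about 96 pages after rounding up: with so few tuples fetched,
 * nearly every tuple is expected to land on a different page.
 */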

/*
 * get_indexpath_pages
 *		Determine the total size of the indexes used in a bitmap index path.
 *
 * Note: if the same index is used more than once in a bitmap tree, we will
 * count it multiple times, which perhaps is the wrong thing ... but it's
 * not completely clear, and detecting duplicates is difficult, so ignore it
 * for now.
 */
static double
get_indexpath_pages(Path *bitmapqual)
{
	double		result = 0;
	ListCell   *l;

	if (IsA(bitmapqual, BitmapAndPath))
	{
		BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;

		foreach(l, apath->bitmapquals)
			result += get_indexpath_pages((Path *) lfirst(l));
	}
	else if (IsA(bitmapqual, BitmapOrPath))
	{
		BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;

		foreach(l, opath->bitmapquals)
			result += get_indexpath_pages((Path *) lfirst(l));
	}
	else if (IsA(bitmapqual, IndexPath))
	{
		IndexPath  *ipath = (IndexPath *) bitmapqual;

		result = (double) ipath->indexinfo->pages;
	}
	else
		elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));

	return result;
}

/*
 * cost_bitmap_heap_scan
 *	  Determines and returns the cost of scanning a relation using a bitmap
 *	  index-then-heap plan.
 *
 * 'baserel' is the relation to be scanned
 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
 * 'outer_rel' is the outer relation when we are considering using the bitmap
 *		scan as the inside of a nestloop join (hence, some of the indexQuals
 *		are join clauses, and we should expect repeated scans of the table);
 *		NULL for a plain bitmap scan
 *
 * Note: if this is a join inner path, the component IndexPaths in bitmapqual
 * should have been costed accordingly.
 */
void
cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
					  Path *bitmapqual, RelOptInfo *outer_rel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		indexTotalCost;
	Selectivity indexSelectivity;
	Cost		cpu_per_tuple;
	Cost		cost_per_page;
	double		tuples_fetched;
	double		pages_fetched;
	double		T;

	/* Should only be applied to base relations */
	Assert(IsA(baserel, RelOptInfo));
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	if (!enable_bitmapscan)
		startup_cost += disable_cost;

	/*
	 * Fetch total cost of obtaining the bitmap, as well as its total
	 * selectivity.
	 */
	cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);

	startup_cost += indexTotalCost;

	/*
	 * Estimate number of main-table pages fetched.
	 */
	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

	T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;

	if (outer_rel != NULL && outer_rel->rows > 1)
	{
		/*
		 * For repeated bitmap scans, scale up the number of tuples fetched in
		 * the Mackert and Lohman formula by the number of scans, so that we
		 * estimate the number of pages fetched by all the scans.  Then
		 * pro-rate for one scan.
		 */
		double		num_scans = outer_rel->rows;

		pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
											baserel->pages,
											get_indexpath_pages(bitmapqual),
											root);
		pages_fetched /= num_scans;
	}
	else
	{
		/*
		 * For a single scan, the number of heap pages that need to be fetched
		 * is the same as the Mackert and Lohman formula for the case T <= b
		 * (ie, no re-reads needed).
		 */
		pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
	}
	if (pages_fetched >= T)
		pages_fetched = T;
	else
		pages_fetched = ceil(pages_fetched);

	/*
	 * For small numbers of pages we should charge random_page_cost apiece,
	 * while if nearly all the table's pages are being read, it's more
	 * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
	 * too.  For lack of a better idea, interpolate like this to determine the
	 * cost per page.
	 */
	if (pages_fetched >= 2.0)
		cost_per_page = random_page_cost -
			(random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
	else
		cost_per_page = random_page_cost;

	run_cost += pages_fetched * cost_per_page;

	/*
	 * Estimate CPU costs per tuple.
	 *
	 * Often the indexquals don't need to be rechecked at each tuple ... but
	 * not always, especially not if there are enough tuples involved that the
	 * bitmaps become lossy.  For the moment, just assume they will be
	 * rechecked always.
	 */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;

	run_cost += cpu_per_tuple * tuples_fetched;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
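
/*
 * For illustration, with the defaults (random_page_cost = 4.0,
 * seq_page_cost = 1.0), fetching 96 pages of a 1000-page table gives
 *		cost_per_page = 4.0 - (4.0 - 1.0) * sqrt(96 / 1000) ~= 3.07,
 * while fetching nearly all 1000 pages would drive the charge down toward
 * seq_page_cost.
 */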

/*
 * cost_bitmap_tree_node
 *		Extract cost and selectivity from a bitmap tree node (index/and/or)
 */
void
cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
{
	if (IsA(path, IndexPath))
	{
		*cost = ((IndexPath *) path)->indextotalcost;
		*selec = ((IndexPath *) path)->indexselectivity;

		/*
		 * Charge a small amount per retrieved tuple to reflect the costs of
		 * manipulating the bitmap.  This is mostly to make sure that a bitmap
		 * scan doesn't look to be the same cost as an indexscan to retrieve a
		 * single tuple.
		 */
		*cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
	}
	else if (IsA(path, BitmapAndPath))
	{
		*cost = path->total_cost;
		*selec = ((BitmapAndPath *) path)->bitmapselectivity;
	}
	else if (IsA(path, BitmapOrPath))
	{
		*cost = path->total_cost;
		*selec = ((BitmapOrPath *) path)->bitmapselectivity;
	}
	else
	{
		elog(ERROR, "unrecognized node type: %d", nodeTag(path));
		*cost = *selec = 0;		/* keep compiler quiet */
	}
}

/*
 * cost_bitmap_and_node
 *		Estimate the cost of a BitmapAnd node
 *
 * Note that this considers only the costs of index scanning and bitmap
 * creation, not the eventual heap access.  In that sense the object isn't
 * truly a Path, but it has enough path-like properties (costs in particular)
 * to warrant treating it as one.
 */
void
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
	Cost		totalCost;
	Selectivity selec;
	ListCell   *l;

	/*
	 * We estimate AND selectivity on the assumption that the inputs are
	 * independent.  This is probably often wrong, but we don't have the info
	 * to do better.
	 *
	 * The runtime cost of the BitmapAnd itself is estimated at 100x
	 * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
	 * definitely too simplistic?
	 */
	totalCost = 0.0;
	selec = 1.0;
	foreach(l, path->bitmapquals)
	{
		Path	   *subpath = (Path *) lfirst(l);
		Cost		subCost;
		Selectivity subselec;

		cost_bitmap_tree_node(subpath, &subCost, &subselec);

		selec *= subselec;

		totalCost += subCost;
		if (l != list_head(path->bitmapquals))
			totalCost += 100.0 * cpu_operator_cost;
	}
	path->bitmapselectivity = selec;
	path->path.startup_cost = totalCost;
	path->path.total_cost = totalCost;
}
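
/*
 * For illustration, ANDing two bitmaps with selectivities 0.1 and 0.2
 * under the independence assumption yields bitmapselectivity
 * 0.1 * 0.2 = 0.02, and the node's cost is the two input costs plus
 * 100 * cpu_operator_cost (0.25 with the default setting) for the one
 * tbm_intersect.
 */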

/*
 * cost_bitmap_or_node
 *		Estimate the cost of a BitmapOr node
 *
 * See comments for cost_bitmap_and_node.
 */
void
cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
{
	Cost		totalCost;
	Selectivity selec;
	ListCell   *l;

	/*
	 * We estimate OR selectivity on the assumption that the inputs are
	 * non-overlapping, since that's often the case in "x IN (list)" type
	 * situations.  Of course, we clamp to 1.0 at the end.
	 *
	 * The runtime cost of the BitmapOr itself is estimated at 100x
	 * cpu_operator_cost for each tbm_union needed.  Probably too small,
	 * definitely too simplistic?  We are aware that the tbm_unions are
	 * optimized out when the inputs are BitmapIndexScans.
	 */
	totalCost = 0.0;
	selec = 0.0;
	foreach(l, path->bitmapquals)
	{
		Path	   *subpath = (Path *) lfirst(l);
		Cost		subCost;
		Selectivity subselec;

		cost_bitmap_tree_node(subpath, &subCost, &subselec);

		selec += subselec;

		totalCost += subCost;
		if (l != list_head(path->bitmapquals) &&
			!IsA(subpath, IndexPath))
			totalCost += 100.0 * cpu_operator_cost;
	}
	path->bitmapselectivity = Min(selec, 1.0);
	path->path.startup_cost = totalCost;
	path->path.total_cost = totalCost;
}

/*
 * cost_tidscan
 *	  Determines and returns the cost of scanning a relation using TIDs.
 */
void
cost_tidscan(Path *path, PlannerInfo *root,
			 RelOptInfo *baserel, List *tidquals)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	bool		isCurrentOf = false;
	Cost		cpu_per_tuple;
	QualCost	tid_qual_cost;
	int			ntuples;
	ListCell   *l;

	/* Should only be applied to base relations */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	/* Count how many tuples we expect to retrieve */
	ntuples = 0;
	foreach(l, tidquals)
	{
		if (IsA(lfirst(l), ScalarArrayOpExpr))
		{
			/* Each element of the array yields 1 tuple */
			ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
			Node	   *arraynode = (Node *) lsecond(saop->args);

			ntuples += estimate_array_length(arraynode);
		}
		else if (IsA(lfirst(l), CurrentOfExpr))
		{
			/* CURRENT OF yields 1 tuple */
			isCurrentOf = true;
			ntuples++;
		}
		else
		{
			/* It's just CTID = something, count 1 tuple */
			ntuples++;
		}
	}

	/*
	 * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
	 * understands how to do it correctly.  Therefore, honor enable_tidscan
	 * only when CURRENT OF isn't present.  Also note that cost_qual_eval
	 * counts a CurrentOfExpr as having startup cost disable_cost, which we
	 * subtract off here; that's to prevent other plan types such as seqscan
	 * from winning.
	 */
	if (isCurrentOf)
	{
		Assert(baserel->baserestrictcost.startup >= disable_cost);
		startup_cost -= disable_cost;
	}
	else if (!enable_tidscan)
		startup_cost += disable_cost;

	/*
	 * The TID qual expressions will be computed once, any other baserestrict
	 * quals once per retrieved tuple.
	 */
	cost_qual_eval(&tid_qual_cost, tidquals, root);

	/* disk costs --- assume each tuple on a different page */
	run_cost += random_page_cost * ntuples;

	/* CPU costs */
	startup_cost += baserel->baserestrictcost.startup +
		tid_qual_cost.per_tuple;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple -
		tid_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
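
/*
 * For illustration, a scan on "WHERE ctid = ANY (array of 5 TIDs)" expects
 * ntuples = 5 and, with the default random_page_cost = 4.0, is charged
 * 4.0 * 5 = 20 units of disk cost, on the assumption that each tuple lives
 * on a different page.
 */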

/*
 * cost_subqueryscan
 *	  Determines and returns the cost of scanning a subquery RTE.
 */
void
cost_subqueryscan(Path *path, RelOptInfo *baserel)
{
	Cost		startup_cost;
	Cost		run_cost;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations that are subqueries */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_SUBQUERY);

	/*
	 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
	 * any restriction clauses that will be attached to the SubqueryScan node,
	 * plus cpu_tuple_cost to account for selection and projection overhead.
	 */
	path->startup_cost = baserel->subplan->startup_cost;
	path->total_cost = baserel->subplan->total_cost;

	startup_cost = baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost = cpu_per_tuple * baserel->tuples;

	path->startup_cost += startup_cost;
	path->total_cost += startup_cost + run_cost;
}

/*
 * cost_functionscan
 *	  Determines and returns the cost of scanning a function RTE.
 */
void
cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	RangeTblEntry *rte;
	QualCost	exprcost;

	/* Should only be applied to base relations that are functions */
	Assert(baserel->relid > 0);
	rte = planner_rt_fetch(baserel->relid, root);
	Assert(rte->rtekind == RTE_FUNCTION);

	/* Estimate costs of executing the function expression */
	cost_qual_eval_node(&exprcost, rte->funcexpr, root);

	startup_cost += exprcost.startup;
	cpu_per_tuple = exprcost.per_tuple;

	/* Add scanning CPU costs */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_valuesscan
 *	  Determines and returns the cost of scanning a VALUES RTE.
 */
void
cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations that are values lists */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_VALUES);

	/*
	 * For now, estimate list evaluation cost at one operator eval per list
	 * (probably pretty bogus, but is it worth being smarter?)
	 */
	cpu_per_tuple = cpu_operator_cost;

	/* Add scanning CPU costs */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_ctescan
 *	  Determines and returns the cost of scanning a CTE RTE.
 *
 * Note: this is used for both self-reference and regular CTEs; the
 * possible cost differences are below the threshold of what we could
 * estimate accurately anyway.  Note that the costs of evaluating the
 * referenced CTE query are added into the final plan as initplan costs,
 * and should NOT be counted here.
 */
void
cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations that are CTEs */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_CTE);

	/* Charge one CPU tuple cost per row for tuplestore manipulation */
	cpu_per_tuple = cpu_tuple_cost;

	/* Add scanning CPU costs */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_recursive_union
 *	  Determines and returns the cost of performing a recursive union,
 *	  and also the estimated output size.
 *
 * We are given Plans for the nonrecursive and recursive terms.
 *
 * Note that the arguments and output are Plans, not Paths as in most of
 * the rest of this module.  That's because we don't bother setting up a
 * Path representation for recursive union --- we have only one way to do it.
 */
void
cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
{
	Cost		startup_cost;
	Cost		total_cost;
	double		total_rows;

	/* We probably have decent estimates for the non-recursive term */
	startup_cost = nrterm->startup_cost;
	total_cost = nrterm->total_cost;
	total_rows = nrterm->plan_rows;

	/*
	 * We arbitrarily assume that about 10 recursive iterations will be
	 * needed, and that we've managed to get a good fix on the cost and
	 * output size of each one of them.  These are mighty shaky assumptions
	 * but it's hard to see how to do better.
	 */
	total_cost += 10 * rterm->total_cost;
	total_rows += 10 * rterm->plan_rows;

	/*
	 * Also charge cpu_tuple_cost per row to account for the costs of
	 * manipulating the tuplestores.  (We don't worry about possible
	 * spill-to-disk costs.)
	 */
	total_cost += cpu_tuple_cost * total_rows;

	runion->startup_cost = startup_cost;
	runion->total_cost = total_cost;
	runion->plan_rows = total_rows;
	runion->plan_width = Max(nrterm->plan_width, rterm->plan_width);
}
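
/*
 * For illustration, if the non-recursive term costs 100 and returns 1000
 * rows while each recursive iteration costs 20 and returns 100 rows, the
 * union is costed at about
 *		100 + 10 * 20 + 0.01 * (1000 + 10 * 100) = 320
 * with the default cpu_tuple_cost, for an estimated 2000 output rows.
 */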

/*
 * cost_sort
 *	  Determines and returns the cost of sorting a relation, including
 *	  the cost of reading the input data.
 *
 * If the total volume of data to sort is less than work_mem, we will do
 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
 * comparisons for t tuples.
 *
 * If the total volume exceeds work_mem, we switch to a tape-style merge
 * algorithm.  There will still be about t*log2(t) tuple comparisons in
 * total, but we will also need to write and read each tuple once per
 * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
 * number of initial runs formed and M is the merge order used by tuplesort.c.
 * Since the average initial run should be about twice work_mem, we have
 *		disk traffic = 2 * relsize * ceil(logM(p / (2*work_mem)))
 *		cpu = comparison_cost * t * log2(t)
 *
 * If the sort is bounded (i.e., only the first k result tuples are needed)
 * and k tuples can fit into work_mem, we use a heap method that keeps only
 * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
 *
 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
 * accesses (XXX can't we refine that guess?)
 *
 * We charge two operator evals per tuple comparison, which should be in
 * the right ballpark in most cases.
 *
 * 'pathkeys' is a list of sort keys
 * 'input_cost' is the total cost for reading the input data
 * 'tuples' is the number of tuples in the relation
 * 'width' is the average tuple width in bytes
 * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
 *
 * NOTE: some callers currently pass NIL for pathkeys because they
 * can't conveniently supply the sort keys.  Since this routine doesn't
 * currently do anything with pathkeys anyway, that doesn't matter...
 * but if it ever does, it should react gracefully to lack of key data.
 * (Actually, the thing we'd most likely be interested in is just the number
 * of sort keys, which all callers *could* supply.)
 */
void
cost_sort(Path *path, PlannerInfo *root,
		  List *pathkeys, Cost input_cost, double tuples, int width,
		  double limit_tuples)
{
	Cost		startup_cost = input_cost;
	Cost		run_cost = 0;
	double		input_bytes = relation_byte_size(tuples, width);
	double		output_bytes;
	double		output_tuples;
	long		work_mem_bytes = work_mem * 1024L;

	if (!enable_sort)
		startup_cost += disable_cost;

	/*
	 * We want to be sure the cost of a sort is never estimated as zero, even
	 * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
	 */
	if (tuples < 2.0)
		tuples = 2.0;

	/* Do we have a useful LIMIT? */
	if (limit_tuples > 0 && limit_tuples < tuples)
	{
		output_tuples = limit_tuples;
		output_bytes = relation_byte_size(output_tuples, width);
	}
	else
	{
		output_tuples = tuples;
		output_bytes = input_bytes;
	}

	if (output_bytes > work_mem_bytes)
	{
		/*
		 * We'll have to use a disk-based sort of all the tuples
		 */
		double		npages = ceil(input_bytes / BLCKSZ);
		double		nruns = (input_bytes / work_mem_bytes) * 0.5;
		double		mergeorder = tuplesort_merge_order(work_mem_bytes);
		double		log_runs;
		double		npageaccesses;

		/*
		 * CPU costs
		 *
		 * Assume about two operator evals per tuple comparison and N log2 N
		 * comparisons
		 */
		startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);

		/* Disk costs */

		/* Compute logM(r) as log(r) / log(M) */
		if (nruns > mergeorder)
			log_runs = ceil(log(nruns) / log(mergeorder));
		else
			log_runs = 1.0;
		npageaccesses = 2.0 * npages * log_runs;
		/* Assume 3/4ths of accesses are sequential, 1/4th are not */
		startup_cost += npageaccesses *
			(seq_page_cost * 0.75 + random_page_cost * 0.25);
	}
	else if (tuples > 2 * output_tuples || input_bytes > work_mem_bytes)
	{
		/*
		 * We'll use a bounded heap-sort keeping just K tuples in memory, for
		 * a total number of tuple comparisons of N log2 K; but the constant
		 * factor is a bit higher than for quicksort.  Tweak it so that the
		 * cost curve is continuous at the crossover point.
		 */
		startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(2.0 * output_tuples);
	}
	else
	{
		/* We'll use plain quicksort on all the input tuples */
		startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
	}

	/*
	 * Also charge a small amount (arbitrarily set equal to operator cost) per
	 * extracted tuple.  Note it's correct to use tuples not output_tuples
	 * here --- the upper LIMIT will pro-rate the run cost so we'd be double
	 * counting the LIMIT otherwise.
	 */
	run_cost += cpu_operator_cost * tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
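
/*
 * For illustration, an in-memory quicksort of 1000000 tuples is charged
 *		2.0 * 0.0025 * 1000000 * log2(1000000) ~= 99658
 * units of startup cost with the default cpu_operator_cost, plus
 * 0.0025 * 1000000 = 2500 of run cost for extracting the tuples.
 */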

/*
 * sort_exceeds_work_mem
 *	  Given a finished Sort plan node, detect whether it is expected to
 *	  spill to disk (ie, will need more than work_mem workspace)
 *
 * This assumes there will be no available LIMIT.
 */
bool
sort_exceeds_work_mem(Sort *sort)
{
	double		input_bytes = relation_byte_size(sort->plan.plan_rows,
												 sort->plan.plan_width);
	long		work_mem_bytes = work_mem * 1024L;

	return (input_bytes > work_mem_bytes);
}

/*
 * cost_material
 *	  Determines and returns the cost of materializing a relation, including
 *	  the cost of reading the input data.
 *
 * If the total volume of data to materialize exceeds work_mem, we will need
 * to write it to disk, so the cost is much higher in that case.
 */
void
cost_material(Path *path,
			  Cost input_cost, double tuples, int width)
{
	Cost		startup_cost = input_cost;
	Cost		run_cost = 0;
	double		nbytes = relation_byte_size(tuples, width);
	long		work_mem_bytes = work_mem * 1024L;

	/* disk costs */
	if (nbytes > work_mem_bytes)
	{
		double		npages = ceil(nbytes / BLCKSZ);

		/* We'll write during startup and read during retrieval */
		startup_cost += seq_page_cost * npages;
		run_cost += seq_page_cost * npages;
	}

	/*
	 * Charge a very small amount per inserted tuple, to reflect bookkeeping
	 * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
	 * tie that would otherwise exist between nestloop with A outer,
	 * materialized B inner and nestloop with B outer, materialized A inner.
	 * The extra cost ensures we'll prefer materializing the smaller rel.
	 */
	startup_cost += cpu_tuple_cost * 0.1 * tuples;

	/*
	 * Also charge a small amount per extracted tuple.  We use cpu_tuple_cost
	 * so that it doesn't appear worthwhile to materialize a bare seqscan.
	 */
	run_cost += cpu_tuple_cost * tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
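
/*
 * For illustration, materializing 100000 tuples of width 100 amounts to
 * over 1200 pages at the standard 8kB BLCKSZ (more once per-tuple overhead
 * is counted by relation_byte_size).  If that exceeds work_mem, the node
 * adds seq_page_cost * npages to startup cost (the write) and the same
 * again to run cost (the read), plus the per-tuple charges above.
 */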

/*
 * cost_agg
 *	  Determines and returns the cost of performing an Agg plan node,
 *	  including the cost of its input.
 *
 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
 * are for appropriately-sorted input.
 */
void
cost_agg(Path *path, PlannerInfo *root,
		 AggStrategy aggstrategy, int numAggs,
		 int numGroupCols, double numGroups,
		 Cost input_startup_cost, Cost input_total_cost,
		 double input_tuples)
{
	Cost		startup_cost;
	Cost		total_cost;

	/*
	 * We charge one cpu_operator_cost per aggregate function per input tuple,
	 * and another one per output tuple (corresponding to transfn and finalfn
	 * calls respectively).  If we are grouping, we charge an additional
	 * cpu_operator_cost per grouping column per input tuple for grouping
	 * comparisons.
	 *
	 * We will produce a single output tuple if not grouping, and a tuple per
	 * group otherwise.  We charge cpu_tuple_cost for each output tuple.
	 *
	 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
	 * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
	 * input path is already sorted appropriately, AGG_SORTED should be
	 * preferred (since it has no risk of memory overflow).  This will happen
	 * as long as the computed total costs are indeed exactly equal --- but if
	 * there's roundoff error we might do the wrong thing.  So be sure that
	 * the computations below form the same intermediate values in the same
	 * order.
	 *
	 * Note: ideally we should use the pg_proc.procost costs of each
	 * aggregate's component functions, but for now that seems like an
	 * excessive amount of work.
	 */
	if (aggstrategy == AGG_PLAIN)
	{
		startup_cost = input_total_cost;
		startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
		/* we aren't grouping */
		total_cost = startup_cost + cpu_tuple_cost;
	}
	else if (aggstrategy == AGG_SORTED)
	{
		/* Here we are able to deliver output on-the-fly */
		startup_cost = input_startup_cost;
		total_cost = input_total_cost;
		/* calcs phrased this way to match HASHED case, see note above */
		total_cost += cpu_operator_cost * input_tuples * numGroupCols;
		total_cost += cpu_operator_cost * input_tuples * numAggs;
		total_cost += cpu_operator_cost * numGroups * numAggs;
		total_cost += cpu_tuple_cost * numGroups;
	}
	else
	{
		/* must be AGG_HASHED */
		startup_cost = input_total_cost;
		startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
		startup_cost += cpu_operator_cost * input_tuples * numAggs;
		total_cost = startup_cost;
		total_cost += cpu_operator_cost * numGroups * numAggs;
		total_cost += cpu_tuple_cost * numGroups;
	}

	path->startup_cost = startup_cost;
	path->total_cost = total_cost;
}
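
/*
 * For illustration, grouping 1000000 input tuples by one column into 1000
 * groups with a single aggregate adds, with the default parameters,
 *		0.0025 * 1000000		(grouping comparisons)
 *	  + 0.0025 * 1000000		(transfn calls)
 *	  + 0.0025 * 1000			(finalfn calls)
 *	  + 0.01 * 1000				(output tuples)
 * = about 5012.5 on top of the input cost; AGG_SORTED and AGG_HASHED get
 * the same total, differing only in how much of it counts as startup.
 */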

/*
 * cost_windowagg
 *	  Determines and returns the cost of performing a WindowAgg plan node,
 *	  including the cost of its input.
 *
 * Input is assumed already properly sorted.
 */
void
cost_windowagg(Path *path, PlannerInfo *root,
			   int numWindowFuncs, int numPartCols, int numOrderCols,
			   Cost input_startup_cost, Cost input_total_cost,
			   double input_tuples)
{
	Cost		startup_cost;
	Cost		total_cost;

	startup_cost = input_startup_cost;
	total_cost = input_total_cost;

	/*
	 * We charge one cpu_operator_cost per window function per tuple (often a
	 * drastic underestimate, but without a way to gauge how many tuples the
	 * window function will fetch, it's hard to do better).  We also charge
	 * cpu_operator_cost per grouping column per tuple for grouping
	 * comparisons, plus cpu_tuple_cost per tuple for general overhead.
	 */
	total_cost += cpu_operator_cost * input_tuples * numWindowFuncs;
	total_cost += cpu_operator_cost * input_tuples * (numPartCols + numOrderCols);
	total_cost += cpu_tuple_cost * input_tuples;

	path->startup_cost = startup_cost;
	path->total_cost = total_cost;
}

/*
 * cost_group
 *	  Determines and returns the cost of performing a Group plan node,
 *	  including the cost of its input.
 *
 * Note: caller must ensure that input costs are for appropriately-sorted
 * input.
 */
void
cost_group(Path *path, PlannerInfo *root,
		   int numGroupCols, double numGroups,
		   Cost input_startup_cost, Cost input_total_cost,
		   double input_tuples)
{
	Cost		startup_cost;
	Cost		total_cost;

	startup_cost = input_startup_cost;
	total_cost = input_total_cost;

	/*
	 * Charge one cpu_operator_cost per comparison per input tuple.  We assume
	 * all columns get compared at most of the tuples.
	 */
	total_cost += cpu_operator_cost * input_tuples * numGroupCols;

	path->startup_cost = startup_cost;
	path->total_cost = total_cost;
}

/*
 * If a nestloop's inner path is an indexscan, be sure to use its estimated
 * output row count, which may be lower than the restriction-clause-only row
 * count of its parent.  (We don't include this case in the PATH_ROWS macro
 * because it applies *only* to a nestloop's inner relation.)  We have to
 * be prepared to recurse through Append nodes in case of an appendrel.
 */
static double
nestloop_inner_path_rows(Path *path)
{
	double		result;

	if (IsA(path, IndexPath))
		result = ((IndexPath *) path)->rows;
	else if (IsA(path, BitmapHeapPath))
		result = ((BitmapHeapPath *) path)->rows;
	else if (IsA(path, AppendPath))
	{
		ListCell   *l;

		result = 0;
		foreach(l, ((AppendPath *) path)->subpaths)
			result += nestloop_inner_path_rows((Path *) lfirst(l));
	}
	else
		result = PATH_ROWS(path);

	return result;
}

/*
 * cost_nestloop
 *	  Determines and returns the cost of joining two relations using the
 *	  nested loop algorithm.
 *
 * 'path' is already filled in except for the cost fields
 * 'sjinfo' is extra info about the join for selectivity estimation
 */
void
cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
{
	Path	   *outer_path = path->outerjoinpath;
	Path	   *inner_path = path->innerjoinpath;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	QualCost	restrict_qual_cost;
	double		outer_path_rows = PATH_ROWS(outer_path);
	double		inner_path_rows = nestloop_inner_path_rows(inner_path);
	double		ntuples;

	if (!enable_nestloop)
		startup_cost += disable_cost;

	/* cost of source data */

	/*
	 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
	 * before we can start returning tuples, so the join's startup cost is
	 * their sum.  What's not so clear is whether the inner path's
	 * startup_cost must be paid again on each rescan of the inner path.  This
	 * is not true if the inner path is materialized or is a hashjoin, but
	 * probably is true otherwise.
	 */
	startup_cost += outer_path->startup_cost + inner_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	if (IsA(inner_path, MaterialPath) ||
		IsA(inner_path, HashPath))
	{
		/* charge only run cost for each iteration of inner path */
	}
	else
	{
		/*
		 * charge startup cost for each iteration of inner path, except we
		 * already charged the first startup_cost in our own startup
		 */
		run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
	}
	run_cost += outer_path_rows *
		(inner_path->total_cost - inner_path->startup_cost);

	/*
	 * Compute number of tuples processed (not number emitted!)
	 */
	ntuples = outer_path_rows * inner_path_rows;

	/* CPU costs */
	cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
	startup_cost += restrict_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * ntuples;

	path->path.startup_cost = startup_cost;
	path->path.total_cost = startup_cost + run_cost;
}
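
/*
 * For illustration, take a hypothetical nestloop with 1000 outer rows whose
 * inner side is an index probe costing 0.30 per scan (startup 0.25) and
 * returning one row.  It is charged roughly 1000 * (0.30 - 0.25) of inner
 * run cost plus 999 * 0.25 for the repeated inner startups, and processes
 * about 1000 * 1 = 1000 tuples through the join quals.
 */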

/*
 * cost_mergejoin
 *	  Determines and returns the cost of joining two relations using the
 *	  merge join algorithm.
 *
 * 'path' is already filled in except for the cost fields
 * 'sjinfo' is extra info about the join for selectivity estimation
 *
 * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
 * outersortkeys and innersortkeys are lists of the keys to be used
 * to sort the outer and inner relations, or NIL if no explicit
 * sort is needed because the source path is already ordered.
 */
void
cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
{
	Path	   *outer_path = path->jpath.outerjoinpath;
	Path	   *inner_path = path->jpath.innerjoinpath;
	List	   *mergeclauses = path->path_mergeclauses;
	List	   *outersortkeys = path->outersortkeys;
	List	   *innersortkeys = path->innersortkeys;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	QualCost	merge_qual_cost;
	QualCost	qp_qual_cost;
	double		outer_path_rows = PATH_ROWS(outer_path);
	double		inner_path_rows = PATH_ROWS(inner_path);
	double		outer_rows,
				inner_rows,
				outer_skip_rows,
				inner_skip_rows;
	double		mergejointuples,
				rescannedtuples;
	double		rescanratio;
	Selectivity outerstartsel,
				outerendsel,
				innerstartsel,
				innerendsel;
	Path		sort_path;		/* dummy for result of cost_sort */

	/* Protect some assumptions below that rowcounts aren't zero */
	if (outer_path_rows <= 0)
		outer_path_rows = 1;
	if (inner_path_rows <= 0)
		inner_path_rows = 1;

	if (!enable_mergejoin)
		startup_cost += disable_cost;

	/*
	 * Compute cost of the mergequals and qpquals (other restriction clauses)
	 * separately.
	 */
	cost_qual_eval(&merge_qual_cost, mergeclauses, root);
	cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
	qp_qual_cost.startup -= merge_qual_cost.startup;
	qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;

	/*
	 * Get approx # tuples passing the mergequals.  We use approx_tuple_count
	 * here because we need an estimate done with JOIN_INNER semantics.
	 */
	mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);

	/*
	 * When there are equal merge keys in the outer relation, the mergejoin
	 * must rescan any matching tuples in the inner relation.  This means
	 * re-fetching inner tuples.  Our cost model for this is that a re-fetch
	 * costs the same as an original fetch, which is probably an overestimate;
	 * but on the other hand we ignore the bookkeeping costs of mark/restore.
	 * Not clear if it's worth developing a more refined model.
	 *
	 * For regular inner and outer joins, the number of re-fetches can be
	 * estimated approximately as size of merge join output minus size of
	 * inner relation.  Assume that the distinct key values are 1, 2, ..., and
	 * denote the number of values of each key in the outer relation as m1,
	 * m2, ...; in the inner relation, n1, n2, ...  Then we have
	 *
	 *		size of join = m1 * n1 + m2 * n2 + ...
	 *
	 *		number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
	 *			= m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
	 *			= size of join - size of inner relation
	 *
	 * This equation works correctly for outer tuples having no inner match
	 * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
	 * are effectively subtracting those from the number of rescanned tuples,
	 * when we should not.  Can we do better without expensive selectivity
	 * recomputations?
	 *
	 * The whole issue is moot if we are working from a unique-ified outer
	 * input.
	 */
	if (IsA(outer_path, UniquePath))
		rescannedtuples = 0;
	else
	{
		rescannedtuples = mergejointuples - inner_path_rows;
		/* Must clamp because of possible underestimate */
		if (rescannedtuples < 0)
			rescannedtuples = 0;
	}
	/* We'll inflate inner run cost this much to account for rescanning */
	rescanratio = 1.0 + (rescannedtuples / inner_path_rows);

	/*
	 * A merge join will stop as soon as it exhausts either input stream
	 * (unless it's an outer join, in which case the outer side has to be
	 * scanned all the way anyway).  Estimate fraction of the left and right
	 * inputs that will actually need to be scanned.  Likewise, we can
	 * estimate the number of rows that will be skipped before the first
	 * join pair is found, which should be factored into startup cost.
	 * We use only the first (most significant) merge clause for this purpose.
	 * Since mergejoinscansel() is a fairly expensive computation, we cache
	 * the results in the merge clause RestrictInfo.
	 */
	if (mergeclauses && path->jpath.jointype != JOIN_FULL)
	{
		RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
		List	   *opathkeys;
		List	   *ipathkeys;
		PathKey    *opathkey;
		PathKey    *ipathkey;
		MergeScanSelCache *cache;

		/* Get the input pathkeys to determine the sort-order details */
		opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
		ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
		opathkey = (PathKey *) linitial(opathkeys);
		ipathkey = (PathKey *) linitial(ipathkeys);
		/* debugging check */
		if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
			opathkey->pk_strategy != ipathkey->pk_strategy ||
			opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
			elog(ERROR, "left and right pathkeys do not match in mergejoin");

		/* Get the selectivity with caching */
		cache = cached_scansel(root, firstclause, opathkey);

		if (bms_is_subset(firstclause->left_relids,
						  outer_path->parent->relids))
		{
			/* left side of clause is outer */
			outerstartsel = cache->leftstartsel;
			outerendsel = cache->leftendsel;
			innerstartsel = cache->rightstartsel;
			innerendsel = cache->rightendsel;
		}
		else
		{
			/* left side of clause is inner */
			outerstartsel = cache->rightstartsel;
			outerendsel = cache->rightendsel;
			innerstartsel = cache->leftstartsel;
			innerendsel = cache->leftendsel;
		}
		if (path->jpath.jointype == JOIN_LEFT ||
			path->jpath.jointype == JOIN_ANTI)
		{
			outerstartsel = 0.0;
			outerendsel = 1.0;
		}
		else if (path->jpath.jointype == JOIN_RIGHT)
		{
			innerstartsel = 0.0;
			innerendsel = 1.0;
		}
	}
	else
	{
		/* cope with clauseless or full mergejoin */
		outerstartsel = innerstartsel = 0.0;
		outerendsel = innerendsel = 1.0;
	}

	/*
	 * Convert selectivities to row counts.  We force outer_rows and
	 * inner_rows to be at least 1, but the skip_rows estimates can be zero.
	 */
	outer_skip_rows = rint(outer_path_rows * outerstartsel);
	inner_skip_rows = rint(inner_path_rows * innerstartsel);
	outer_rows = clamp_row_est(outer_path_rows * outerendsel);
	inner_rows = clamp_row_est(inner_path_rows * innerendsel);

	Assert(outer_skip_rows <= outer_rows);
	Assert(inner_skip_rows <= inner_rows);

	/*
	 * Readjust scan selectivities to account for above rounding.  This is
	 * normally an insignificant effect, but when there are only a few rows in
	 * the inputs, failing to do this makes for a large percentage error.
	 */
	outerstartsel = outer_skip_rows / outer_path_rows;
	innerstartsel = inner_skip_rows / inner_path_rows;
	outerendsel = outer_rows / outer_path_rows;
	innerendsel = inner_rows / inner_path_rows;

	Assert(outerstartsel <= outerendsel);
	Assert(innerstartsel <= innerendsel);

	/* cost of source data */

	if (outersortkeys)			/* do we need to sort outer? */
	{
		cost_sort(&sort_path,
				  root,
				  outersortkeys,
				  outer_path->total_cost,
				  outer_path_rows,
				  outer_path->parent->width,
				  -1.0);
		startup_cost += sort_path.startup_cost;
		startup_cost += (sort_path.total_cost - sort_path.startup_cost)
			* outerstartsel;
		run_cost += (sort_path.total_cost - sort_path.startup_cost)
			* (outerendsel - outerstartsel);
	}
	else
	{
		startup_cost += outer_path->startup_cost;
		startup_cost += (outer_path->total_cost - outer_path->startup_cost)
			* outerstartsel;
		run_cost += (outer_path->total_cost - outer_path->startup_cost)
			* (outerendsel - outerstartsel);
	}

	if (innersortkeys)			/* do we need to sort inner? */
	{
		cost_sort(&sort_path,
				  root,
				  innersortkeys,
				  inner_path->total_cost,
				  inner_path_rows,
				  inner_path->parent->width,
				  -1.0);
		startup_cost += sort_path.startup_cost;
		startup_cost += (sort_path.total_cost - sort_path.startup_cost)
			* innerstartsel * rescanratio;
		run_cost += (sort_path.total_cost - sort_path.startup_cost)
			* (innerendsel - innerstartsel) * rescanratio;

		/*
		 * If the inner sort is expected to spill to disk, we want to add a
		 * materialize node to shield it from the need to handle mark/restore.
		 * This will allow it to perform the last merge pass on-the-fly, while
		 * in most cases not requiring the materialize to spill to disk.
		 * Charge an extra cpu_tuple_cost per tuple to account for the
		 * materialize node.  (Keep this estimate in sync with similar ones in
		 * create_mergejoin_path and create_mergejoin_plan.)
		 */
		if (relation_byte_size(inner_path_rows, inner_path->parent->width) >
			(work_mem * 1024L))
			run_cost += cpu_tuple_cost * inner_path_rows;
	}
	else
	{
		startup_cost += inner_path->startup_cost;
		startup_cost += (inner_path->total_cost - inner_path->startup_cost)
			* innerstartsel * rescanratio;
		run_cost += (inner_path->total_cost - inner_path->startup_cost)
			* (innerendsel - innerstartsel) * rescanratio;
	}

	/* CPU costs */

	/*
	 * The number of tuple comparisons needed is approximately number of outer
	 * rows plus number of inner rows plus number of rescanned tuples (can we
	 * refine this?).  At each one, we need to evaluate the mergejoin quals.
	 */
	startup_cost += merge_qual_cost.startup;
	startup_cost += merge_qual_cost.per_tuple *
		(outer_skip_rows + inner_skip_rows * rescanratio);
	run_cost += merge_qual_cost.per_tuple *
		((outer_rows - outer_skip_rows) +
		 (inner_rows - inner_skip_rows) * rescanratio);

	/*
	 * For each tuple that gets through the mergejoin proper, we charge
	 * cpu_tuple_cost plus the cost of evaluating additional restriction
	 * clauses that are to be applied at the join.  (This is pessimistic since
	 * not all of the quals may get evaluated at each tuple.)
	 */
	startup_cost += qp_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * mergejointuples;

	path->jpath.path.startup_cost = startup_cost;
	path->jpath.path.total_cost = startup_cost + run_cost;
}
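
/*
 * For illustration, if the join is expected to emit 5000 tuples while the
 * inner input has 4000 rows, about 5000 - 4000 = 1000 inner tuples are
 * expected to be rescanned, giving rescanratio = 1.25; the inner input's
 * run cost and the per-comparison merge qual cost are inflated by that
 * factor.
 */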

/*
 * run mergejoinscansel() with caching
 */
static MergeScanSelCache *
cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
{
	MergeScanSelCache *cache;
	ListCell   *lc;
	Selectivity leftstartsel,
				leftendsel,
				rightstartsel,
				rightendsel;
	MemoryContext oldcontext;

	/* Do we have this result already? */
	foreach(lc, rinfo->scansel_cache)
	{
		cache = (MergeScanSelCache *) lfirst(lc);
		if (cache->opfamily == pathkey->pk_opfamily &&
			cache->strategy == pathkey->pk_strategy &&
			cache->nulls_first == pathkey->pk_nulls_first)
			return cache;
	}

	/* Nope, do the computation */
	mergejoinscansel(root,
					 (Node *) rinfo->clause,
					 pathkey->pk_opfamily,
					 pathkey->pk_strategy,
					 pathkey->pk_nulls_first,
					 &leftstartsel,
					 &leftendsel,
					 &rightstartsel,
					 &rightendsel);

	/* Cache the result in suitably long-lived workspace */
	oldcontext = MemoryContextSwitchTo(root->planner_cxt);

	cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
	cache->opfamily = pathkey->pk_opfamily;
	cache->strategy = pathkey->pk_strategy;
	cache->nulls_first = pathkey->pk_nulls_first;
	cache->leftstartsel = leftstartsel;
	cache->leftendsel = leftendsel;
	cache->rightstartsel = rightstartsel;
	cache->rightendsel = rightendsel;

	rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);

	MemoryContextSwitchTo(oldcontext);

	return cache;
}

/*
 * cost_hashjoin
 *	  Determines and returns the cost of joining two relations using the
 *	  hash join algorithm.
 *
 * 'path' is already filled in except for the cost fields
 * 'sjinfo' is extra info about the join for selectivity estimation
 *
 * Note: path's hashclauses should be a subset of the joinrestrictinfo list
 */
void
cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
{
	Path	   *outer_path = path->jpath.outerjoinpath;
	Path	   *inner_path = path->jpath.innerjoinpath;
	List	   *hashclauses = path->path_hashclauses;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	QualCost	hash_qual_cost;
	QualCost	qp_qual_cost;
	double		hashjointuples;
	double		outer_path_rows = PATH_ROWS(outer_path);
	double		inner_path_rows = PATH_ROWS(inner_path);
	int			num_hashclauses = list_length(hashclauses);
	int			numbuckets;
	int			numbatches;
	int			num_skew_mcvs;
	double		virtualbuckets;
	Selectivity innerbucketsize;
	ListCell   *hcl;

	if (!enable_hashjoin)
		startup_cost += disable_cost;
	/*
	 * Compute cost of the hashquals and qpquals (other restriction clauses)
	 * separately.
	 */
	cost_qual_eval(&hash_qual_cost, hashclauses, root);
	cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
	qp_qual_cost.startup -= hash_qual_cost.startup;
	qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;

	/*
	 * Get approx # tuples passing the hashquals.  We use approx_tuple_count
	 * here because we need an estimate done with JOIN_INNER semantics.
	 */
	hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);

	/* cost of source data */
	startup_cost += outer_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	startup_cost += inner_path->total_cost;

	/*
	 * Cost of computing hash function: must do it once per input tuple.  We
	 * charge one cpu_operator_cost for each column's hash function.  Also,
	 * tack on one cpu_tuple_cost per inner row, to model the costs of
	 * inserting the row into the hashtable.
	 *
	 * XXX when a hashclause is more complex than a single operator, we really
	 * should charge the extra eval costs of the left or right side, as
	 * appropriate, here.  This seems more work than it's worth at the moment.
	 */
	startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
		* inner_path_rows;
	run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
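	/*
	 * Illustrative example (numbers hypothetical): with the default
	 * cpu_operator_cost of 0.0025 and cpu_tuple_cost of 0.01, a two-column
	 * hash key over 100,000 inner and 1,000,000 outer rows charges
	 * (0.0025 * 2 + 0.01) * 100000 = 1500 units to startup and
	 * 0.0025 * 2 * 1000000 = 5000 units to run cost.
	 */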
	/*
	 * Get hash table size that executor would use for inner relation.
	 *
	 * XXX for the moment, always assume that skew optimization will be
	 * performed.  As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
	 * trying to determine that for sure.
	 *
	 * XXX at some point it might be interesting to try to account for skew
	 * optimization in the cost estimate, but for now, we don't.
	 */
	ExecChooseHashTableSize(inner_path_rows,
							inner_path->parent->width,
							true,		/* useskew */
							&numbuckets,
							&numbatches,
							&num_skew_mcvs);
	virtualbuckets = (double) numbuckets * (double) numbatches;
	/* mark the path with estimated # of batches */
	path->num_batches = numbatches;
	/*
	 * Determine bucketsize fraction for inner relation.  We use the smallest
	 * bucketsize estimated for any individual hashclause; this is undoubtedly
	 * conservative.
	 *
	 * BUT: if inner relation has been unique-ified, we can assume it's good
	 * for hashing.  This is important both because it's the right answer, and
	 * because we avoid contaminating the cache with a value that's wrong for
	 * non-unique-ified paths.
	 */
	if (IsA(inner_path, UniquePath))
		innerbucketsize = 1.0 / virtualbuckets;
	else
	{
		innerbucketsize = 1.0;
		foreach(hcl, hashclauses)
		{
			RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
			Selectivity thisbucketsize;

			Assert(IsA(restrictinfo, RestrictInfo));

			/*
			 * First we have to figure out which side of the hashjoin clause
			 * is the inner side.
			 *
			 * Since we tend to visit the same clauses over and over when
			 * planning a large query, we cache the bucketsize estimate in the
			 * RestrictInfo node to avoid repeated lookups of statistics.
			 */
			if (bms_is_subset(restrictinfo->right_relids,
							  inner_path->parent->relids))
			{
				/* righthand side is inner */
				thisbucketsize = restrictinfo->right_bucketsize;
				if (thisbucketsize < 0)
				{
					/* not cached yet */
					thisbucketsize =
						estimate_hash_bucketsize(root,
										   get_rightop(restrictinfo->clause),
												 virtualbuckets);
					restrictinfo->right_bucketsize = thisbucketsize;
				}
			}
			else
			{
				Assert(bms_is_subset(restrictinfo->left_relids,
									 inner_path->parent->relids));
				/* lefthand side is inner */
				thisbucketsize = restrictinfo->left_bucketsize;
				if (thisbucketsize < 0)
				{
					/* not cached yet */
					thisbucketsize =
						estimate_hash_bucketsize(root,
											get_leftop(restrictinfo->clause),
												 virtualbuckets);
					restrictinfo->left_bucketsize = thisbucketsize;
				}
			}

			if (innerbucketsize > thisbucketsize)
				innerbucketsize = thisbucketsize;
		}
	}
	/*
	 * If inner relation is too big then we will need to "batch" the join,
	 * which implies writing and reading most of the tuples to disk an extra
	 * time.  Charge seq_page_cost per page, since the I/O should be nice and
	 * sequential.  Writing the inner rel counts as startup cost, all the rest
	 * as run cost.
	 */
	if (numbatches > 1)
	{
		double		outerpages = page_size(outer_path_rows,
										   outer_path->parent->width);
		double		innerpages = page_size(inner_path_rows,
										   inner_path->parent->width);

		startup_cost += seq_page_cost * innerpages;
		run_cost += seq_page_cost * (innerpages + 2 * outerpages);
	}
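	/*
	 * Illustrative example (numbers hypothetical): if the inner rel spans
	 * 1000 pages and the outer rel 2000 pages, batching at the default
	 * seq_page_cost of 1.0 adds 1000 units to startup cost (writing the
	 * inner rel out) and 1000 + 2 * 2000 = 5000 units to run cost
	 * (re-reading the inner rel, plus writing and re-reading the outer).
	 */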
	/* CPU costs */

	/*
	 * The number of tuple comparisons needed is the number of outer tuples
	 * times the typical number of tuples in a hash bucket, which is the inner
	 * relation size times its bucketsize fraction.  At each one, we need to
	 * evaluate the hashjoin quals.  But actually, charging the full qual eval
	 * cost at each tuple is pessimistic, since we don't evaluate the quals
	 * unless the hash values match exactly.  For lack of a better idea, halve
	 * the cost estimate to allow for that.
	 */
	startup_cost += hash_qual_cost.startup;
	run_cost += hash_qual_cost.per_tuple *
		outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
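	/*
	 * Illustrative example (numbers hypothetical): with 1,000,000 outer
	 * rows, 100,000 inner rows, and innerbucketsize = 0.001, each outer
	 * tuple is compared against clamp_row_est(100) = 100 bucket entries,
	 * so we charge 1000000 * 100 * 0.5 = 50 million half-weighted qual
	 * evaluations at hash_qual_cost.per_tuple apiece.
	 */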
	/*
	 * For each tuple that gets through the hashjoin proper, we charge
	 * cpu_tuple_cost plus the cost of evaluating additional restriction
	 * clauses that are to be applied at the join.  (This is pessimistic since
	 * not all of the quals may get evaluated at each tuple.)
	 */
	startup_cost += qp_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * hashjointuples;

	path->jpath.path.startup_cost = startup_cost;
	path->jpath.path.total_cost = startup_cost + run_cost;
}
/*
 * cost_subplan
 *		Figure the costs for a SubPlan (or initplan).
 *
 * Note: we could dig the subplan's Plan out of the root list, but in practice
 * all callers have it handy already, so we make them pass it.
 */
void
cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
{
	QualCost	sp_cost;

	/* Figure any cost for evaluating the testexpr */
	cost_qual_eval(&sp_cost,
				   make_ands_implicit((Expr *) subplan->testexpr),
				   root);

	if (subplan->useHashTable)
	{
		/*
		 * If we are using a hash table for the subquery outputs, then the
		 * cost of evaluating the query is a one-time cost.  We charge one
		 * cpu_operator_cost per tuple for the work of loading the hashtable,
		 * too.
		 */
		sp_cost.startup += plan->total_cost +
			cpu_operator_cost * plan->plan_rows;

		/*
		 * The per-tuple costs include the cost of evaluating the lefthand
		 * expressions, plus the cost of probing the hashtable.  We already
		 * accounted for the lefthand expressions as part of the testexpr,
		 * and will also have counted one cpu_operator_cost for each
		 * comparison operator.  That is probably too low for the probing
		 * cost, but it's hard to make a better estimate, so live with it for
		 * now.
		 */
	}
	else
	{
		/*
		 * Otherwise we will be rescanning the subplan output on each
		 * evaluation.  We need to estimate how much of the output we will
		 * actually need to scan.  NOTE: this logic should agree with the
		 * tuple_fraction estimates used by make_subplan() in
		 * plan/subselect.c.
		 */
		Cost		plan_run_cost = plan->total_cost - plan->startup_cost;

		if (subplan->subLinkType == EXISTS_SUBLINK)
		{
			/* we only need to fetch 1 tuple */
			sp_cost.per_tuple += plan_run_cost / plan->plan_rows;
		}
		else if (subplan->subLinkType == ALL_SUBLINK ||
				 subplan->subLinkType == ANY_SUBLINK)
		{
			/* assume we need 50% of the tuples */
			sp_cost.per_tuple += 0.50 * plan_run_cost;
			/* also charge a cpu_operator_cost per row examined */
			sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
		}
		else
		{
			/* assume we need all tuples */
			sp_cost.per_tuple += plan_run_cost;
		}

		/*
		 * Also account for subplan's startup cost.  If the subplan is
		 * uncorrelated or undirect correlated, AND its topmost node is a Sort
		 * or Material node, assume that we'll only need to pay its startup
		 * cost once; otherwise assume we pay the startup cost every time.
		 */
		if (subplan->parParam == NIL &&
			(IsA(plan, Sort) ||
			 IsA(plan, Material)))
			sp_cost.startup += plan->startup_cost;
		else
			sp_cost.per_tuple += plan->startup_cost;
	}

	subplan->startup_cost = sp_cost.startup;
	subplan->per_call_cost = sp_cost.per_tuple;
}
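/*
 * Illustrative note on the tuple fractions above (numbers hypothetical):
 * for a non-hashed subplan with plan_rows = 10000 and plan_run_cost = 100,
 * an EXISTS sublink is charged 100 / 10000 = 0.01 per call (one tuple
 * fetched), while an ANY sublink is charged 0.50 * 100 = 50 plus
 * 0.50 * 10000 * cpu_operator_cost (12.5 at the default 0.0025) per call.
 */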
/*
 * cost_qual_eval
 *		Estimate the CPU costs of evaluating a WHERE clause.
 *		The input can be either an implicitly-ANDed list of boolean
 *		expressions, or a list of RestrictInfo nodes.  (The latter is
 *		preferred since it allows caching of the results.)
 *		The result includes both a one-time (startup) component,
 *		and a per-evaluation component.
 */
void
cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
{
	cost_qual_eval_context context;
	ListCell   *l;

	context.root = root;
	context.total.startup = 0;
	context.total.per_tuple = 0;

	/* We don't charge any cost for the implicit ANDing at top level ... */

	foreach(l, quals)
	{
		Node	   *qual = (Node *) lfirst(l);

		cost_qual_eval_walker(qual, &context);
	}

	*cost = context.total;
}
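/*
 * Illustrative note: a qual list containing a single OpExpr whose underlying
 * function has pg_proc.procost = 1 yields a startup cost of 0 and a
 * per-tuple cost of 1 * cpu_operator_cost (0.0025 by default).
 */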
/*
 * cost_qual_eval_node
 *		As above, for a single RestrictInfo or expression.
 */
void
cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
{
	cost_qual_eval_context context;

	context.root = root;
	context.total.startup = 0;
	context.total.per_tuple = 0;

	cost_qual_eval_walker(qual, &context);

	*cost = context.total;
}
static bool
cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
{
	if (node == NULL)
		return false;

	/*
	 * RestrictInfo nodes contain an eval_cost field reserved for this
	 * routine's use, so that it's not necessary to evaluate the qual clause's
	 * cost more than once.  If the clause's cost hasn't been computed yet,
	 * the field's startup value will contain -1.
	 */
	if (IsA(node, RestrictInfo))
	{
		RestrictInfo *rinfo = (RestrictInfo *) node;

		if (rinfo->eval_cost.startup < 0)
		{
			cost_qual_eval_context locContext;

			locContext.root = context->root;
			locContext.total.startup = 0;
			locContext.total.per_tuple = 0;

			/*
			 * For an OR clause, recurse into the marked-up tree so that we
			 * set the eval_cost for contained RestrictInfos too.
			 */
			if (rinfo->orclause)
				cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
			else
				cost_qual_eval_walker((Node *) rinfo->clause, &locContext);

			/*
			 * If the RestrictInfo is marked pseudoconstant, it will be tested
			 * only once, so treat its cost as all startup cost.
			 */
			if (rinfo->pseudoconstant)
			{
				/* count one execution during startup */
				locContext.total.startup += locContext.total.per_tuple;
				locContext.total.per_tuple = 0;
			}
			rinfo->eval_cost = locContext.total;
		}
		context->total.startup += rinfo->eval_cost.startup;
		context->total.per_tuple += rinfo->eval_cost.per_tuple;
		/* do NOT recurse into children */
		return false;
	}
	/*
	 * For each operator or function node in the given tree, we charge the
	 * estimated execution cost given by pg_proc.procost (remember to multiply
	 * this by cpu_operator_cost).
	 *
	 * Vars and Consts are charged zero, and so are boolean operators (AND,
	 * OR, NOT).  Simplistic, but a lot better than no model at all.
	 *
	 * Note that Aggref and WindowFunc nodes are (and should be) treated like
	 * Vars --- whatever execution cost they have is absorbed into
	 * plan-node-specific costing.  As far as expression evaluation is
	 * concerned they're just like Vars.
	 *
	 * Should we try to account for the possibility of short-circuit
	 * evaluation of AND/OR?  Probably *not*, because that would make the
	 * results depend on the clause ordering, and we are not in any position
	 * to expect that the current ordering of the clauses is the one that's
	 * going to end up being used.  (Is it worth applying order_qual_clauses
	 * much earlier in the planning process to fix this?)
	 */
	if (IsA(node, FuncExpr))
	{
		context->total.per_tuple +=
			get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
	}
	else if (IsA(node, OpExpr) ||
			 IsA(node, DistinctExpr) ||
			 IsA(node, NullIfExpr))
	{
		/* rely on struct equivalence to treat these all alike */
		set_opfuncid((OpExpr *) node);
		context->total.per_tuple +=
			get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
	}
	else if (IsA(node, ScalarArrayOpExpr))
	{
		/*
		 * Estimate that the operator will be applied to about half of the
		 * array elements before the answer is determined.
		 */
		ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
		Node	   *arraynode = (Node *) lsecond(saop->args);

		set_sa_opfuncid(saop);
		context->total.per_tuple += get_func_cost(saop->opfuncid) *
			cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
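		/*
		 * Illustrative example (hypothetical): "x = ANY(arr)" over a
		 * 10-element array is charged 10 * 0.5 = 5 operator evaluations
		 * per tuple, each at procost * cpu_operator_cost.
		 */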
	}
	else if (IsA(node, CoerceViaIO))
	{
		CoerceViaIO *iocoerce = (CoerceViaIO *) node;
		Oid			iofunc;
		Oid			typioparam;
		bool		typisvarlena;

		/* check the result type's input function */
		getTypeInputInfo(iocoerce->resulttype,
						 &iofunc, &typioparam);
		context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
		/* check the input type's output function */
		getTypeOutputInfo(exprType((Node *) iocoerce->arg),
						  &iofunc, &typisvarlena);
		context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
	}
	else if (IsA(node, ArrayCoerceExpr))
	{
		ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
		Node	   *arraynode = (Node *) acoerce->arg;

		if (OidIsValid(acoerce->elemfuncid))
			context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
				cpu_operator_cost * estimate_array_length(arraynode);
	}
	else if (IsA(node, RowCompareExpr))
	{
		/* Conservatively assume we will check all the columns */
		RowCompareExpr *rcexpr = (RowCompareExpr *) node;
		ListCell   *lc;

		foreach(lc, rcexpr->opnos)
		{
			Oid			opid = lfirst_oid(lc);

			context->total.per_tuple += get_func_cost(get_opcode(opid)) *
				cpu_operator_cost;
		}
	}
	else if (IsA(node, CurrentOfExpr))
	{
		/* Report high cost to prevent selection of anything but TID scan */
		context->total.startup += disable_cost;
	}
	else if (IsA(node, SubLink))
	{
		/* This routine should not be applied to un-planned expressions */
		elog(ERROR, "cannot handle unplanned sub-select");
	}
	else if (IsA(node, SubPlan))
	{
		/*
		 * A subplan node in an expression typically indicates that the
		 * subplan will be executed on each evaluation, so charge accordingly.
		 * (Sub-selects that can be executed as InitPlans have already been
		 * removed from the expression.)
		 */
		SubPlan    *subplan = (SubPlan *) node;

		context->total.startup += subplan->startup_cost;
		context->total.per_tuple += subplan->per_call_cost;

		/*
		 * We don't want to recurse into the testexpr, because it was already
		 * counted in the SubPlan node's costs.  So we're done.
		 */
		return false;
	}
	else if (IsA(node, AlternativeSubPlan))
	{
		/*
		 * Arbitrarily use the first alternative plan for costing.  (We should
		 * certainly only include one alternative, and we don't yet have
		 * enough information to know which one the executor is most likely
		 * to use.)
		 */
		AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;

		return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
									 context);
	}

	/* recurse into children */
	return expression_tree_walker(node, cost_qual_eval_walker,
								  (void *) context);
}
/*
 * approx_tuple_count
 *		Quick-and-dirty estimation of the number of join rows passing
 *		a set of qual conditions.
 *
 * The quals can be either an implicitly-ANDed list of boolean expressions,
 * or a list of RestrictInfo nodes (typically the latter).
 *
 * We intentionally compute the selectivity under JOIN_INNER rules, even
 * if it's some type of outer join.  This is appropriate because we are
 * trying to figure out how many tuples pass the initial merge or hash
 * join step.
 *
 * This is quick-and-dirty because we bypass clauselist_selectivity, and
 * simply multiply the independent clause selectivities together.  Now
 * clauselist_selectivity often can't do any better than that anyhow, but
 * for some situations (such as range constraints) it is smarter.  However,
 * we can't effectively cache the results of clauselist_selectivity, whereas
 * the individual clause selectivities can be and are cached.
 *
 * Since we are only using the results to estimate how many potential
 * output tuples are generated and passed through qpqual checking, it
 * seems OK to live with the approximation.
 */
static double
approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
{
	double		tuples;
	double		outer_tuples = path->outerjoinpath->parent->rows;
	double		inner_tuples = path->innerjoinpath->parent->rows;
	SpecialJoinInfo sjinfo;
	Selectivity selec = 1.0;
	ListCell   *l;

	/*
	 * Make up a SpecialJoinInfo for JOIN_INNER semantics.
	 */
	sjinfo.type = T_SpecialJoinInfo;
	sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
	sjinfo.min_righthand = path->innerjoinpath->parent->relids;
	sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
	sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
	sjinfo.jointype = JOIN_INNER;
	/* we don't bother trying to make the remaining fields valid */
	sjinfo.lhs_strict = false;
	sjinfo.delay_upper_joins = false;
	sjinfo.join_quals = NIL;

	/* Get the approximate selectivity */
	foreach(l, quals)
	{
		Node	   *qual = (Node *) lfirst(l);

		/* Note that clause_selectivity will be able to cache its result */
		selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
	}

	/* Apply it to the input relation sizes */
	tuples = selec * outer_tuples * inner_tuples;

	return clamp_row_est(tuples);
}
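/*
 * Illustrative note (numbers hypothetical): two quals with cached
 * selectivities 0.1 and 0.02 yield selec = 0.002, so joining 10000 outer
 * rows with 50000 inner rows is estimated at 0.002 * 10000 * 50000 =
 * 1,000,000 tuples before clamping.
 */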
/*
 * set_baserel_size_estimates
 *		Set the size estimates for the given base relation.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the following fields of the rel node:
 *	rows: the estimated number of output tuples (after applying
 *		  restriction clauses).
 *	width: the estimated average output tuple width in bytes.
 *	baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
 */
void
set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	double		nrows;

	/* Should only be applied to base relations */
	Assert(rel->relid > 0);

	nrows = rel->tuples *
		clauselist_selectivity(root,
							   rel->baserestrictinfo,
							   0,
							   JOIN_INNER,
							   NULL);

	rel->rows = clamp_row_est(nrows);

	cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);

	set_rel_width(root, rel);
}
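/*
 * Illustrative note (numbers hypothetical): a relation with rel->tuples =
 * 100000 and a restriction list of combined selectivity 0.05 gets
 * rel->rows = clamp_row_est(5000) = 5000.
 */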
/*
 * set_joinrel_size_estimates
 *		Set the size estimates for the given join relation.
 *
 * The rel's targetlist must have been constructed already, and a
 * restriction clause list that matches the given component rels must
 * be provided.
 *
 * Since there is more than one way to make a joinrel for more than two
 * base relations, the results we get here could depend on which component
 * rel pair is provided.  In theory we should get the same answers no matter
 * which pair is provided; in practice, since the selectivity estimation
 * routines don't handle all cases equally well, we might not.  But there's
 * not much to be done about it.  (Would it make sense to repeat the
 * calculations for each pair of input rels that's encountered, and somehow
 * average the results?  Probably way more trouble than it's worth.)
 *
 * We set only the rows field here.  The width field was already set by
 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
 */
void
set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
						   RelOptInfo *outer_rel,
						   RelOptInfo *inner_rel,
						   SpecialJoinInfo *sjinfo,
						   List *restrictlist)
{
	JoinType	jointype = sjinfo->jointype;
	Selectivity jselec;
	Selectivity pselec;
	double		nrows;

	/*
	 * Compute joinclause selectivity.  Note that we are only considering
	 * clauses that become restriction clauses at this join level; we are not
	 * double-counting them because they were not considered in estimating the
	 * sizes of the component rels.
	 *
	 * For an outer join, we have to distinguish the selectivity of the join's
	 * own clauses (JOIN/ON conditions) from any clauses that were "pushed
	 * down".  For inner joins we just count them all as joinclauses.
	 */
	if (IS_OUTER_JOIN(jointype))
	{
		List	   *joinquals = NIL;
		List	   *pushedquals = NIL;
		ListCell   *l;

		/* Grovel through the clauses to separate into two lists */
		foreach(l, restrictlist)
		{
			RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);

			Assert(IsA(rinfo, RestrictInfo));
			if (rinfo->is_pushed_down)
				pushedquals = lappend(pushedquals, rinfo);
			else
				joinquals = lappend(joinquals, rinfo);
		}

		/* Get the separate selectivities */
		jselec = clauselist_selectivity(root,
										joinquals,
										0,
										jointype,
										sjinfo);
		pselec = clauselist_selectivity(root,
										pushedquals,
										0,
										jointype,
										sjinfo);

		/* Avoid leaking a lot of ListCells */
		list_free(joinquals);
		list_free(pushedquals);
	}
	else
	{
		jselec = clauselist_selectivity(root,
										restrictlist,
										0,
										jointype,
										sjinfo);
		pselec = 0.0;			/* not used, keep compiler quiet */
	}
	/*
	 * Basically, we multiply size of Cartesian product by selectivity.
	 *
	 * If we are doing an outer join, take that into account: the joinqual
	 * selectivity has to be clamped using the knowledge that the output must
	 * be at least as large as the non-nullable input.  However, any
	 * pushed-down quals are applied after the outer join, so their
	 * selectivity applies fully.
	 *
	 * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
	 * of LHS rows that have matches, and we apply that straightforwardly.
	 */
	switch (jointype)
	{
		case JOIN_INNER:
			nrows = outer_rel->rows * inner_rel->rows * jselec;
			break;
		case JOIN_LEFT:
			nrows = outer_rel->rows * inner_rel->rows * jselec;
			if (nrows < outer_rel->rows)
				nrows = outer_rel->rows;
			nrows *= pselec;
			break;
		case JOIN_FULL:
			nrows = outer_rel->rows * inner_rel->rows * jselec;
			if (nrows < outer_rel->rows)
				nrows = outer_rel->rows;
			if (nrows < inner_rel->rows)
				nrows = inner_rel->rows;
			nrows *= pselec;
			break;
		case JOIN_SEMI:
			nrows = outer_rel->rows * jselec;
			/* pselec not used */
			break;
		case JOIN_ANTI:
			nrows = outer_rel->rows * (1.0 - jselec);
			nrows *= pselec;
			break;
		default:
			/* other values not expected here */
			elog(ERROR, "unrecognized join type: %d", (int) jointype);
			nrows = 0;			/* keep compiler quiet */
			break;
	}

	rel->rows = clamp_row_est(nrows);
}
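/*
 * Illustrative note on the outer-join clamp above (numbers hypothetical):
 * for a LEFT join of 1000 outer rows against 100 inner rows with
 * jselec = 0.0001, the raw product is 10 rows, which is clamped up to the
 * 1000 outer rows before the pushed-down selectivity (pselec) is applied.
 */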
/*
 * set_function_size_estimates
 *		Set the size estimates for a base relation that is a function call.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	RangeTblEntry *rte;

	/* Should only be applied to base relations that are functions */
	Assert(rel->relid > 0);
	rte = planner_rt_fetch(rel->relid, root);
	Assert(rte->rtekind == RTE_FUNCTION);

	/* Estimate number of rows the function itself will return */
	rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));

	/* Now estimate number of output rows, etc */
	set_baserel_size_estimates(root, rel);
}
/*
 * set_values_size_estimates
 *		Set the size estimates for a base relation that is a values list.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	RangeTblEntry *rte;

	/* Should only be applied to base relations that are values lists */
	Assert(rel->relid > 0);
	rte = planner_rt_fetch(rel->relid, root);
	Assert(rte->rtekind == RTE_VALUES);

	/*
	 * Estimate number of rows the values list will return.  We know this
	 * precisely based on the list length (well, barring set-returning
	 * functions in list items, but that's a refinement not catered for
	 * anywhere else either).
	 */
	rel->tuples = list_length(rte->values_lists);

	/* Now estimate number of output rows, etc */
	set_baserel_size_estimates(root, rel);
}
/*
 * set_cte_size_estimates
 *		Set the size estimates for a base relation that is a CTE reference.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already, and we need the completed plan for the CTE (if a regular CTE)
 * or the non-recursive term (if a self-reference).
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
{
	RangeTblEntry *rte;

	/* Should only be applied to base relations that are CTE references */
	Assert(rel->relid > 0);
	rte = planner_rt_fetch(rel->relid, root);
	Assert(rte->rtekind == RTE_CTE);

	if (rte->self_reference)
	{
		/*
		 * In a self-reference, arbitrarily assume the average worktable size
		 * is about 10 times the nonrecursive term's size.
		 */
		rel->tuples = 10 * cteplan->plan_rows;
	}
	else
	{
		/* Otherwise just believe the CTE plan's output estimate */
		rel->tuples = cteplan->plan_rows;
	}

	/* Now estimate number of output rows, etc */
	set_baserel_size_estimates(root, rel);
}
/*
 * set_rel_width
 *		Set the estimated output width of a base relation.
 *
 * NB: this works best on plain relations because it prefers to look at
 * real Vars.  It will fail to make use of pg_statistic info when applied
 * to a subquery relation, even if the subquery outputs are simple vars
 * that we could have gotten info for.  Is it worth trying to be smarter
 * about subqueries?
 *
 * The per-attribute width estimates are cached for possible re-use while
 * building join relations.
 */
static void
set_rel_width(PlannerInfo *root, RelOptInfo *rel)
{
	Oid			reloid = planner_rt_fetch(rel->relid, root)->relid;
	int32		tuple_width = 0;
	ListCell   *lc;

	foreach(lc, rel->reltargetlist)
	{
		Node	   *node = (Node *) lfirst(lc);

		if (IsA(node, Var))
		{
			Var		   *var = (Var *) node;
			int			ndx;
			int32		item_width;

			Assert(var->varno == rel->relid);
			Assert(var->varattno >= rel->min_attr);
			Assert(var->varattno <= rel->max_attr);

			ndx = var->varattno - rel->min_attr;

			/*
			 * The width probably hasn't been cached yet, but may as well
			 * check.
			 */
			if (rel->attr_widths[ndx] > 0)
			{
				tuple_width += rel->attr_widths[ndx];
				continue;
			}

			/* Try to get column width from statistics */
			if (reloid != InvalidOid)
			{
				item_width = get_attavgwidth(reloid, var->varattno);
				if (item_width > 0)
				{
					rel->attr_widths[ndx] = item_width;
					tuple_width += item_width;
					continue;
				}
			}

			/*
			 * Not a plain relation, or can't find statistics for it.
			 * Estimate using just the type info.
			 */
			item_width = get_typavgwidth(var->vartype, var->vartypmod);
			Assert(item_width > 0);
			rel->attr_widths[ndx] = item_width;
			tuple_width += item_width;
		}
		else if (IsA(node, PlaceHolderVar))
		{
			PlaceHolderVar *phv = (PlaceHolderVar *) node;
			PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);

			tuple_width += phinfo->ph_width;
		}
		else
		{
			/* For now, punt on whole-row child Vars */
			tuple_width += 32;	/* arbitrary */
		}
	}
	Assert(tuple_width >= 0);
	rel->width = tuple_width;
}
/*
 * relation_byte_size
 *	  Estimate the storage space in bytes for a given number of tuples
 *	  of a given width (size in bytes).
 */
static double
relation_byte_size(double tuples, int width)
{
	return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
}

/*
 * page_size
 *	  Returns an estimate of the number of pages covered by a given
 *	  number of tuples of a given width (size in bytes).
 */
static double
page_size(double tuples, int width)
{
	return ceil(relation_byte_size(tuples, width) / BLCKSZ);
}
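/*
 * Illustrative note (assuming a typical 64-bit build, where MAXALIGN
 * rounds to 8 bytes and the 23-byte heap tuple header rounds to 24):
 * a width of 40 costs 40 + 24 = 64 bytes per tuple, so 10000 such tuples
 * occupy 640000 bytes, and with the default BLCKSZ of 8192 page_size()
 * returns ceil(640000 / 8192) = 79 pages.
 */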