/*-------------------------------------------------------------------------
 *
 * costsize.c
 *	  Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in units of disk accesses: one sequential page
 * fetch has cost 1.  All else is scaled relative to a page fetch, using
 * the scaling parameters
 *
 *	random_page_cost	Cost of a non-sequential page fetch
 *	cpu_tuple_cost		Cost of typical CPU time to process a tuple
 *	cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
 *	cpu_operator_cost	Cost of CPU time to process a typical WHERE operator
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * We compute two separate costs for each path:
 *		total_cost: total estimated cost to fetch all tuples
 *		startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *		actual_cost = startup_cost +
 *			(total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
 * Note that a base relation's rows count (and, by extension, plan_rows for
 * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
 * that this equation works properly.  (Also, these routines guarantee not to
 * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
 * applied as a top-level plan node.
 *
 * For largely historical reasons, most of the routines in this module use
 * the passed result Path only to store their startup_cost and total_cost
 * results into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the Path.
 * An exception is made for the cost_XXXjoin() routines, which expect all
 * the non-cost fields of the passed XXXPath to be filled in.
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.137.4.1 2005-04-04 01:43:23 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <math.h>

#include "catalog/pg_statistic.h"
#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/pathnode.h"
#include "optimizer/plancat.h"
#include "parser/parsetree.h"
#include "utils/selfuncs.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
/* Constants for fast computation of log2(x) and log6(x) via natural log. */
#define LOG2(x)  (log(x) / 0.693147180559945)
#define LOG6(x)  (log(x) / 1.79175946922805)

/*
 * Some Paths return less than the nominal number of rows of their parent
 * relations; join nodes need to do this to get the correct input count:
 */
#define PATH_ROWS(path) \
	(IsA(path, UniquePath) ? \
	 ((UniquePath *) (path))->rows : \
	 (path)->parent->rows)
87
double effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
88
double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
89
double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
90
double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
91
double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
93
Cost disable_cost = 100000000.0;
95
bool enable_seqscan = true;
96
bool enable_indexscan = true;
97
bool enable_tidscan = true;
98
bool enable_sort = true;
99
bool enable_hashagg = true;
100
bool enable_nestloop = true;
101
bool enable_mergejoin = true;
102
bool enable_hashjoin = true;
105
static bool cost_qual_eval_walker(Node *node, QualCost *total);
106
static Selectivity approx_selectivity(Query *root, List *quals,
108
static Selectivity join_in_selectivity(JoinPath *path, Query *root);
109
static void set_rel_width(Query *root, RelOptInfo *rel);
110
static double relation_byte_size(double tuples, int width);
111
static double page_size(double tuples, int width);
116
* Force a row-count estimate to a sane value.
119
clamp_row_est(double nrows)
122
* Force estimate to be at least one row, to make explain output look
123
* better and to avoid possible divide-by-zero when interpolating
124
* costs. Make it an integer, too.
137
* Determines and returns the cost of scanning a relation sequentially.
140
cost_seqscan(Path *path, Query *root,
143
Cost startup_cost = 0;
147
/* Should only be applied to base relations */
148
Assert(baserel->relid > 0);
149
Assert(baserel->rtekind == RTE_RELATION);
152
startup_cost += disable_cost;
157
* The cost of reading a page sequentially is 1.0, by definition. Note
158
* that the Unix kernel will typically do some amount of read-ahead
159
* optimization, so that this cost is less than the true cost of
160
* reading a page from disk. We ignore that issue here, but must take
161
* it into account when estimating the cost of non-sequential
164
run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
167
startup_cost += baserel->baserestrictcost.startup;
168
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
169
run_cost += cpu_per_tuple * baserel->tuples;
171
path->startup_cost = startup_cost;
172
path->total_cost = startup_cost + run_cost;
176
* cost_nonsequential_access
177
* Estimate the cost of accessing one page at random from a relation
178
* (or sort temp file) of the given size in pages.
180
* The simplistic model that the cost is random_page_cost is what we want
181
* to use for large relations; but for small ones that is a serious
182
* overestimate because of the effects of caching. This routine tries to
185
* Unfortunately we don't have any good way of estimating the effective cache
186
* size we are working with --- we know that Postgres itself has NBuffers
187
* internal buffers, but the size of the kernel's disk cache is uncertain,
188
* and how much of it we get to use is even less certain. We punt the problem
189
* for now by assuming we are given an effective_cache_size parameter.
191
* Given a guesstimated cache size, we estimate the actual I/O cost per page
192
* with the entirely ad-hoc equations (writing relsize for
193
* relpages/effective_cache_size):
195
* random_page_cost - (random_page_cost-1)/2 * (1/relsize)
197
* 1 + ((random_page_cost-1)/2) * relsize ** 2
198
* These give the right asymptotic behavior (=> 1.0 as relpages becomes
199
* small, => random_page_cost as it becomes large) and meet in the middle
200
* with the estimate that the cache is about 50% effective for a relation
201
* of the same size as effective_cache_size. (XXX this is probably all
202
* wrong, but I haven't been able to find any theory about how effective
203
* a disk cache should be presumed to be.)
206
cost_nonsequential_access(double relpages)
210
/* don't crash on bad input data */
211
if (relpages <= 0.0 || effective_cache_size <= 0.0)
212
return random_page_cost;
214
relsize = relpages / effective_cache_size;
217
return random_page_cost - (random_page_cost - 1.0) * 0.5 / relsize;
219
return 1.0 + (random_page_cost - 1.0) * 0.5 * relsize * relsize;
224
* Determines and returns the cost of scanning a relation using an index.
226
* NOTE: an indexscan plan node can actually represent several passes,
227
* but here we consider the cost of just one pass.
229
* 'root' is the query root
230
* 'baserel' is the base relation the index is for
231
* 'index' is the index to be used
232
* 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
233
* 'is_injoin' is T if we are considering using the index scan as the inside
234
* of a nestloop join (hence, some of the indexQuals are join clauses)
236
* NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
237
* Any additional quals evaluated as qpquals may reduce the number of returned
238
* tuples, but they won't reduce the number of tuples we have to fetch from
239
* the table, so they don't reduce the scan cost.
241
* NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
242
* it was a list of bare clause expressions.
245
cost_index(Path *path, Query *root,
251
Cost startup_cost = 0;
253
Cost indexStartupCost;
255
Selectivity indexSelectivity;
256
double indexCorrelation,
261
double tuples_fetched;
262
double pages_fetched;
266
/* Should only be applied to base relations */
267
Assert(IsA(baserel, RelOptInfo) &&
268
IsA(index, IndexOptInfo));
269
Assert(baserel->relid > 0);
270
Assert(baserel->rtekind == RTE_RELATION);
272
if (!enable_indexscan)
273
startup_cost += disable_cost;
276
* Call index-access-method-specific code to estimate the processing
277
* cost for scanning the index, as well as the selectivity of the
278
* index (ie, the fraction of main-table tuples we will have to
279
* retrieve) and its correlation to the main-table tuple order.
281
OidFunctionCall8(index->amcostestimate,
282
PointerGetDatum(root),
283
PointerGetDatum(baserel),
284
PointerGetDatum(index),
285
PointerGetDatum(indexQuals),
286
PointerGetDatum(&indexStartupCost),
287
PointerGetDatum(&indexTotalCost),
288
PointerGetDatum(&indexSelectivity),
289
PointerGetDatum(&indexCorrelation));
291
/* all costs for touching index itself included here */
292
startup_cost += indexStartupCost;
293
run_cost += indexTotalCost - indexStartupCost;
296
* Estimate number of main-table tuples and pages fetched.
298
* When the index ordering is uncorrelated with the table ordering,
299
* we use an approximation proposed by Mackert and Lohman, "Index Scans
300
* Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
301
* on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
302
* The Mackert and Lohman approximation is that the number of pages
305
* min(2TNs/(2T+Ns), T) when T <= b
306
* 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
307
* b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
309
* T = # pages in table
310
* N = # tuples in table
311
* s = selectivity = fraction of table to be scanned
312
* b = # buffer pages available (we include kernel space here)
314
* When the index ordering is exactly correlated with the table ordering
315
* (just after a CLUSTER, for example), the number of pages fetched should
316
* be just sT. What's more, these will be sequential fetches, not the
317
* random fetches that occur in the uncorrelated case. So, depending on
318
* the extent of correlation, we should estimate the actual I/O cost
319
* somewhere between s * T * 1.0 and PF * random_cost. We currently
320
* interpolate linearly between these two endpoints based on the
321
* correlation squared (XXX is that appropriate?).
323
* In any case the number of tuples fetched is Ns.
327
tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
329
/* This part is the Mackert and Lohman formula */
331
T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
332
b = (effective_cache_size > 1) ? effective_cache_size : 1.0;
337
(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
338
if (pages_fetched > T)
345
lim = (2.0 * T * b) / (2.0 * T - b);
346
if (tuples_fetched <= lim)
349
(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
354
b + (tuples_fetched - lim) * (T - b) / T;
359
* min_IO_cost corresponds to the perfectly correlated case
360
* (csquared=1), max_IO_cost to the perfectly uncorrelated case
361
* (csquared=0). Note that we just charge random_page_cost per page
362
* in the uncorrelated case, rather than using
363
* cost_nonsequential_access, since we've already accounted for
364
* caching effects by using the Mackert model.
366
min_IO_cost = ceil(indexSelectivity * T);
367
max_IO_cost = pages_fetched * random_page_cost;
370
* Now interpolate based on estimated index order correlation to get
371
* total disk I/O cost for main table accesses.
373
csquared = indexCorrelation * indexCorrelation;
375
run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
378
* Estimate CPU costs per tuple.
380
* Normally the indexquals will be removed from the list of restriction
381
* clauses that we have to evaluate as qpquals, so we should subtract
382
* their costs from baserestrictcost. But if we are doing a join then
383
* some of the indexquals are join clauses and shouldn't be
384
* subtracted. Rather than work out exactly how much to subtract, we
385
* don't subtract anything.
387
startup_cost += baserel->baserestrictcost.startup;
388
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
392
QualCost index_qual_cost;
394
cost_qual_eval(&index_qual_cost, indexQuals);
395
/* any startup cost still has to be paid ... */
396
cpu_per_tuple -= index_qual_cost.per_tuple;
399
run_cost += cpu_per_tuple * tuples_fetched;
401
path->startup_cost = startup_cost;
402
path->total_cost = startup_cost + run_cost;
407
* Determines and returns the cost of scanning a relation using TIDs.
410
cost_tidscan(Path *path, Query *root,
411
RelOptInfo *baserel, List *tideval)
413
Cost startup_cost = 0;
416
int ntuples = list_length(tideval);
418
/* Should only be applied to base relations */
419
Assert(baserel->relid > 0);
420
Assert(baserel->rtekind == RTE_RELATION);
423
startup_cost += disable_cost;
425
/* disk costs --- assume each tuple on a different page */
426
run_cost += random_page_cost * ntuples;
429
startup_cost += baserel->baserestrictcost.startup;
430
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
431
run_cost += cpu_per_tuple * ntuples;
433
path->startup_cost = startup_cost;
434
path->total_cost = startup_cost + run_cost;
439
* Determines and returns the cost of scanning a subquery RTE.
442
cost_subqueryscan(Path *path, RelOptInfo *baserel)
448
/* Should only be applied to base relations that are subqueries */
449
Assert(baserel->relid > 0);
450
Assert(baserel->rtekind == RTE_SUBQUERY);
453
* Cost of path is cost of evaluating the subplan, plus cost of
454
* evaluating any restriction clauses that will be attached to the
455
* SubqueryScan node, plus cpu_tuple_cost to account for selection and
456
* projection overhead.
458
path->startup_cost = baserel->subplan->startup_cost;
459
path->total_cost = baserel->subplan->total_cost;
461
startup_cost = baserel->baserestrictcost.startup;
462
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
463
run_cost = cpu_per_tuple * baserel->tuples;
465
path->startup_cost += startup_cost;
466
path->total_cost += startup_cost + run_cost;
471
* Determines and returns the cost of scanning a function RTE.
474
cost_functionscan(Path *path, Query *root, RelOptInfo *baserel)
476
Cost startup_cost = 0;
480
/* Should only be applied to base relations that are functions */
481
Assert(baserel->relid > 0);
482
Assert(baserel->rtekind == RTE_FUNCTION);
485
* For now, estimate function's cost at one operator eval per function
486
* call. Someday we should revive the function cost estimate columns
489
cpu_per_tuple = cpu_operator_cost;
491
/* Add scanning CPU costs */
492
startup_cost += baserel->baserestrictcost.startup;
493
cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
494
run_cost += cpu_per_tuple * baserel->tuples;
496
path->startup_cost = startup_cost;
497
path->total_cost = startup_cost + run_cost;
502
* Determines and returns the cost of sorting a relation, including
503
* the cost of reading the input data.
505
* If the total volume of data to sort is less than work_mem, we will do
506
* an in-memory sort, which requires no I/O and about t*log2(t) tuple
507
* comparisons for t tuples.
509
* If the total volume exceeds work_mem, we switch to a tape-style merge
510
* algorithm. There will still be about t*log2(t) tuple comparisons in
511
* total, but we will also need to write and read each tuple once per
512
* merge pass. We expect about ceil(log6(r)) merge passes where r is the
513
* number of initial runs formed (log6 because tuplesort.c uses six-tape
514
* merging). Since the average initial run should be about twice work_mem,
516
* disk traffic = 2 * relsize * ceil(log6(p / (2*work_mem)))
517
* cpu = comparison_cost * t * log2(t)
519
* The disk traffic is assumed to be half sequential and half random
520
* accesses (XXX can't we refine that guess?)
522
* We charge two operator evals per tuple comparison, which should be in
523
* the right ballpark in most cases.
525
* 'pathkeys' is a list of sort keys
526
* 'input_cost' is the total cost for reading the input data
527
* 'tuples' is the number of tuples in the relation
528
* 'width' is the average tuple width in bytes
530
* NOTE: some callers currently pass NIL for pathkeys because they
531
* can't conveniently supply the sort keys. Since this routine doesn't
532
* currently do anything with pathkeys anyway, that doesn't matter...
533
* but if it ever does, it should react gracefully to lack of key data.
534
* (Actually, the thing we'd most likely be interested in is just the number
535
* of sort keys, which all callers *could* supply.)
538
cost_sort(Path *path, Query *root,
539
List *pathkeys, Cost input_cost, double tuples, int width)
541
Cost startup_cost = input_cost;
543
double nbytes = relation_byte_size(tuples, width);
544
long work_mem_bytes = work_mem * 1024L;
547
startup_cost += disable_cost;
550
* We want to be sure the cost of a sort is never estimated as zero,
551
* even if passed-in tuple count is zero. Besides, mustn't do
560
* Assume about two operator evals per tuple comparison and N log2 N
563
startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
566
if (nbytes > work_mem_bytes)
568
double npages = ceil(nbytes / BLCKSZ);
569
double nruns = (nbytes / work_mem_bytes) * 0.5;
570
double log_runs = ceil(LOG6(nruns));
571
double npageaccesses;
575
npageaccesses = 2.0 * npages * log_runs;
576
/* Assume half are sequential (cost 1), half are not */
577
startup_cost += npageaccesses *
578
(1.0 + cost_nonsequential_access(npages)) * 0.5;
582
* Also charge a small amount (arbitrarily set equal to operator cost)
583
* per extracted tuple.
585
run_cost += cpu_operator_cost * tuples;
587
path->startup_cost = startup_cost;
588
path->total_cost = startup_cost + run_cost;
593
* Determines and returns the cost of materializing a relation, including
594
* the cost of reading the input data.
596
* If the total volume of data to materialize exceeds work_mem, we will need
597
* to write it to disk, so the cost is much higher in that case.
600
cost_material(Path *path,
601
Cost input_cost, double tuples, int width)
603
Cost startup_cost = input_cost;
605
double nbytes = relation_byte_size(tuples, width);
606
long work_mem_bytes = work_mem * 1024L;
609
if (nbytes > work_mem_bytes)
611
double npages = ceil(nbytes / BLCKSZ);
613
/* We'll write during startup and read during retrieval */
614
startup_cost += npages;
619
* Charge a very small amount per inserted tuple, to reflect bookkeeping
620
* costs. We use cpu_tuple_cost/10 for this. This is needed to break
621
* the tie that would otherwise exist between nestloop with A outer,
622
* materialized B inner and nestloop with B outer, materialized A inner.
623
* The extra cost ensures we'll prefer materializing the smaller rel.
625
startup_cost += cpu_tuple_cost * 0.1 * tuples;
628
* Also charge a small amount per extracted tuple. We use
629
* cpu_tuple_cost so that it doesn't appear worthwhile to materialize
632
run_cost += cpu_tuple_cost * tuples;
634
path->startup_cost = startup_cost;
635
path->total_cost = startup_cost + run_cost;
640
* Determines and returns the cost of performing an Agg plan node,
641
* including the cost of its input.
643
* Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
644
* are for appropriately-sorted input.
647
cost_agg(Path *path, Query *root,
648
AggStrategy aggstrategy, int numAggs,
649
int numGroupCols, double numGroups,
650
Cost input_startup_cost, Cost input_total_cost,
657
* We charge one cpu_operator_cost per aggregate function per input
658
* tuple, and another one per output tuple (corresponding to transfn
659
* and finalfn calls respectively). If we are grouping, we charge an
660
* additional cpu_operator_cost per grouping column per input tuple
661
* for grouping comparisons.
663
* We will produce a single output tuple if not grouping, and a tuple per
666
* Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
667
* same total CPU cost, but AGG_SORTED has lower startup cost. If the
668
* input path is already sorted appropriately, AGG_SORTED should be
669
* preferred (since it has no risk of memory overflow). This will
670
* happen as long as the computed total costs are indeed exactly equal
671
* --- but if there's roundoff error we might do the wrong thing. So
672
* be sure that the computations below form the same intermediate
673
* values in the same order.
675
if (aggstrategy == AGG_PLAIN)
677
startup_cost = input_total_cost;
678
startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
679
/* we aren't grouping */
680
total_cost = startup_cost;
682
else if (aggstrategy == AGG_SORTED)
684
/* Here we are able to deliver output on-the-fly */
685
startup_cost = input_startup_cost;
686
total_cost = input_total_cost;
687
/* calcs phrased this way to match HASHED case, see note above */
688
total_cost += cpu_operator_cost * input_tuples * numGroupCols;
689
total_cost += cpu_operator_cost * input_tuples * numAggs;
690
total_cost += cpu_operator_cost * numGroups * numAggs;
694
/* must be AGG_HASHED */
695
startup_cost = input_total_cost;
696
startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
697
startup_cost += cpu_operator_cost * input_tuples * numAggs;
698
total_cost = startup_cost;
699
total_cost += cpu_operator_cost * numGroups * numAggs;
702
path->startup_cost = startup_cost;
703
path->total_cost = total_cost;
708
* Determines and returns the cost of performing a Group plan node,
709
* including the cost of its input.
711
* Note: caller must ensure that input costs are for appropriately-sorted
715
cost_group(Path *path, Query *root,
716
int numGroupCols, double numGroups,
717
Cost input_startup_cost, Cost input_total_cost,
723
startup_cost = input_startup_cost;
724
total_cost = input_total_cost;
727
* Charge one cpu_operator_cost per comparison per input tuple. We
728
* assume all columns get compared at most of the tuples.
730
total_cost += cpu_operator_cost * input_tuples * numGroupCols;
732
path->startup_cost = startup_cost;
733
path->total_cost = total_cost;
738
* Determines and returns the cost of joining two relations using the
739
* nested loop algorithm.
741
* 'path' is already filled in except for the cost fields
744
cost_nestloop(NestPath *path, Query *root)
746
Path *outer_path = path->outerjoinpath;
747
Path *inner_path = path->innerjoinpath;
748
Cost startup_cost = 0;
751
QualCost restrict_qual_cost;
752
double outer_path_rows = PATH_ROWS(outer_path);
753
double inner_path_rows = PATH_ROWS(inner_path);
755
Selectivity joininfactor;
758
* If inner path is an indexscan, be sure to use its estimated output
759
* row count, which may be lower than the restriction-clause-only row
760
* count of its parent. (We don't include this case in the PATH_ROWS
761
* macro because it applies *only* to a nestloop's inner relation.)
763
if (IsA(inner_path, IndexPath))
764
inner_path_rows = ((IndexPath *) inner_path)->rows;
766
if (!enable_nestloop)
767
startup_cost += disable_cost;
770
* If we're doing JOIN_IN then we will stop scanning inner tuples for
771
* an outer tuple as soon as we have one match. Account for the
772
* effects of this by scaling down the cost estimates in proportion to
773
* the JOIN_IN selectivity. (This assumes that all the quals attached
774
* to the join are IN quals, which should be true.)
776
joininfactor = join_in_selectivity(path, root);
778
/* cost of source data */
781
* NOTE: clearly, we must pay both outer and inner paths' startup_cost
782
* before we can start returning tuples, so the join's startup cost is
783
* their sum. What's not so clear is whether the inner path's
784
* startup_cost must be paid again on each rescan of the inner path.
785
* This is not true if the inner path is materialized or is a
786
* hashjoin, but probably is true otherwise.
788
startup_cost += outer_path->startup_cost + inner_path->startup_cost;
789
run_cost += outer_path->total_cost - outer_path->startup_cost;
790
if (IsA(inner_path, MaterialPath) ||
791
IsA(inner_path, HashPath))
793
/* charge only run cost for each iteration of inner path */
798
* charge startup cost for each iteration of inner path, except we
799
* already charged the first startup_cost in our own startup
801
run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
803
run_cost += outer_path_rows *
804
(inner_path->total_cost - inner_path->startup_cost) * joininfactor;
807
* Compute number of tuples processed (not number emitted!)
809
ntuples = outer_path_rows * inner_path_rows * joininfactor;
812
cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo);
813
startup_cost += restrict_qual_cost.startup;
814
cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
815
run_cost += cpu_per_tuple * ntuples;
817
path->path.startup_cost = startup_cost;
818
path->path.total_cost = startup_cost + run_cost;
823
* Determines and returns the cost of joining two relations using the
824
* merge join algorithm.
826
* 'path' is already filled in except for the cost fields
828
* Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
829
* outersortkeys and innersortkeys are lists of the keys to be used
830
* to sort the outer and inner relations, or NIL if no explicit
831
* sort is needed because the source path is already ordered.
834
cost_mergejoin(MergePath *path, Query *root)
836
Path *outer_path = path->jpath.outerjoinpath;
837
Path *inner_path = path->jpath.innerjoinpath;
838
List *mergeclauses = path->path_mergeclauses;
839
List *outersortkeys = path->outersortkeys;
840
List *innersortkeys = path->innersortkeys;
841
Cost startup_cost = 0;
844
Selectivity merge_selec;
845
QualCost merge_qual_cost;
846
QualCost qp_qual_cost;
847
RestrictInfo *firstclause;
848
double outer_path_rows = PATH_ROWS(outer_path);
849
double inner_path_rows = PATH_ROWS(inner_path);
852
double mergejointuples,
855
Selectivity outerscansel,
857
Selectivity joininfactor;
858
Path sort_path; /* dummy for result of cost_sort */
860
if (!enable_mergejoin)
861
startup_cost += disable_cost;
864
* Compute cost and selectivity of the mergequals and qpquals (other
865
* restriction clauses) separately. We use approx_selectivity here
866
* for speed --- in most cases, any errors won't affect the result
869
* Note: it's probably bogus to use the normal selectivity calculation
870
* here when either the outer or inner path is a UniquePath.
872
merge_selec = approx_selectivity(root, mergeclauses,
873
path->jpath.jointype);
874
cost_qual_eval(&merge_qual_cost, mergeclauses);
875
cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
876
qp_qual_cost.startup -= merge_qual_cost.startup;
877
qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
879
/* approx # tuples passing the merge quals */
880
mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
883
* When there are equal merge keys in the outer relation, the
884
* mergejoin must rescan any matching tuples in the inner relation.
885
* This means re-fetching inner tuples. Our cost model for this is
886
* that a re-fetch costs the same as an original fetch, which is
887
* probably an overestimate; but on the other hand we ignore the
888
* bookkeeping costs of mark/restore. Not clear if it's worth
889
* developing a more refined model.
891
* The number of re-fetches can be estimated approximately as size of
892
* merge join output minus size of inner relation. Assume that the
893
* distinct key values are 1, 2, ..., and denote the number of values
894
* of each key in the outer relation as m1, m2, ...; in the inner
895
* relation, n1, n2, ... Then we have
897
* size of join = m1 * n1 + m2 * n2 + ...
899
* number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
900
* n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
903
* This equation works correctly for outer tuples having no inner match
904
* (nk = 0), but not for inner tuples having no outer match (mk = 0);
905
* we are effectively subtracting those from the number of rescanned
906
* tuples, when we should not. Can we do better without expensive
907
* selectivity computations?
909
if (IsA(outer_path, UniquePath))
913
rescannedtuples = mergejointuples - inner_path_rows;
914
/* Must clamp because of possible underestimate */
915
if (rescannedtuples < 0)
918
/* We'll inflate inner run cost this much to account for rescanning */
919
rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
922
* A merge join will stop as soon as it exhausts either input stream
923
* (unless it's an outer join, in which case the outer side has to be
924
* scanned all the way anyway). Estimate fraction of the left and right
925
* inputs that will actually need to be scanned. We use only the first
926
* (most significant) merge clause for this purpose.
928
* Since this calculation is somewhat expensive, and will be the same for
929
* all mergejoin paths associated with the merge clause, we cache the
930
* results in the RestrictInfo node.
932
if (mergeclauses && path->jpath.jointype != JOIN_FULL)
934
firstclause = (RestrictInfo *) linitial(mergeclauses);
935
if (firstclause->left_mergescansel < 0) /* not computed yet? */
936
mergejoinscansel(root, (Node *) firstclause->clause,
937
&firstclause->left_mergescansel,
938
&firstclause->right_mergescansel);
940
if (bms_is_subset(firstclause->left_relids, outer_path->parent->relids))
942
/* left side of clause is outer */
943
outerscansel = firstclause->left_mergescansel;
944
innerscansel = firstclause->right_mergescansel;
948
/* left side of clause is inner */
949
outerscansel = firstclause->right_mergescansel;
950
innerscansel = firstclause->left_mergescansel;
952
if (path->jpath.jointype == JOIN_LEFT)
954
else if (path->jpath.jointype == JOIN_RIGHT)
959
/* cope with clauseless or full mergejoin */
960
outerscansel = innerscansel = 1.0;
963
/* convert selectivity to row count; must scan at least one row */
964
outer_rows = clamp_row_est(outer_path_rows * outerscansel);
965
inner_rows = clamp_row_est(inner_path_rows * innerscansel);
968
* Readjust scan selectivities to account for above rounding. This is
969
* normally an insignificant effect, but when there are only a few
970
* rows in the inputs, failing to do this makes for a large percentage
973
outerscansel = outer_rows / outer_path_rows;
974
innerscansel = inner_rows / inner_path_rows;
976
/* cost of source data */
978
if (outersortkeys) /* do we need to sort outer? */
980
cost_sort(&sort_path,
983
outer_path->total_cost,
985
outer_path->parent->width);
986
startup_cost += sort_path.startup_cost;
987
run_cost += (sort_path.total_cost - sort_path.startup_cost)
992
startup_cost += outer_path->startup_cost;
993
run_cost += (outer_path->total_cost - outer_path->startup_cost)
997
if (innersortkeys) /* do we need to sort inner? */
999
cost_sort(&sort_path,
1002
inner_path->total_cost,
1004
inner_path->parent->width);
1005
startup_cost += sort_path.startup_cost;
1006
run_cost += (sort_path.total_cost - sort_path.startup_cost)
1007
* innerscansel * rescanratio;
1011
startup_cost += inner_path->startup_cost;
1012
run_cost += (inner_path->total_cost - inner_path->startup_cost)
1013
* innerscansel * rescanratio;
1019
* If we're doing JOIN_IN then we will stop outputting inner tuples
1020
* for an outer tuple as soon as we have one match. Account for the
1021
* effects of this by scaling down the cost estimates in proportion to
1022
* the expected output size. (This assumes that all the quals
1023
* attached to the join are IN quals, which should be true.)
1025
joininfactor = join_in_selectivity(&path->jpath, root);
1028
* The number of tuple comparisons needed is approximately number of
1029
* outer rows plus number of inner rows plus number of rescanned
1030
* tuples (can we refine this?). At each one, we need to evaluate the
1031
* mergejoin quals. NOTE: JOIN_IN mode does not save any work here,
1032
* so do NOT include joininfactor.
1034
startup_cost += merge_qual_cost.startup;
1035
run_cost += merge_qual_cost.per_tuple *
1036
(outer_rows + inner_rows * rescanratio);
1039
* For each tuple that gets through the mergejoin proper, we charge
1040
* cpu_tuple_cost plus the cost of evaluating additional restriction
1041
* clauses that are to be applied at the join. (This is pessimistic
1042
* since not all of the quals may get evaluated at each tuple.) This
1043
* work is skipped in JOIN_IN mode, so apply the factor.
1045
startup_cost += qp_qual_cost.startup;
1046
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1047
run_cost += cpu_per_tuple * mergejointuples * joininfactor;
1049
path->jpath.path.startup_cost = startup_cost;
1050
path->jpath.path.total_cost = startup_cost + run_cost;
1055
* Determines and returns the cost of joining two relations using the
1056
* hash join algorithm.
1058
* 'path' is already filled in except for the cost fields
1060
* Note: path's hashclauses should be a subset of the joinrestrictinfo list
1063
/*
 * cost_hashjoin
 *	Fills in the startup_cost / total_cost fields of a HashPath, per the
 *	comment fragment above: path is already filled in except for costs.
 *
 * NOTE(review): this chunk is a lossy extraction — interleaved original
 * line numbers (e.g. "1065") and missing statements/braces (the
 * declarations of run_cost, virtualbuckets, hcl, etc. are not visible).
 * Code below is left byte-identical; comments only were added.
 */
cost_hashjoin(HashPath *path, Query *root)
1065
Path *outer_path = path->jpath.outerjoinpath;
1066
Path *inner_path = path->jpath.innerjoinpath;
1067
List *hashclauses = path->path_hashclauses;
1068
Cost startup_cost = 0;
1071
Selectivity hash_selec;
1072
QualCost hash_qual_cost;
1073
QualCost qp_qual_cost;
1074
double hashjointuples;
1075
double outer_path_rows = PATH_ROWS(outer_path);
1076
double inner_path_rows = PATH_ROWS(inner_path);
1077
double outerbytes = relation_byte_size(outer_path_rows,
1078
outer_path->parent->width);
1079
double innerbytes = relation_byte_size(inner_path_rows,
1080
inner_path->parent->width);
1081
int num_hashclauses = list_length(hashclauses);
1083
int physicalbuckets;
1085
Selectivity innerbucketsize;
1086
Selectivity joininfactor;
1089
/* Disabling hashjoin adds a large penalty rather than forbidding it. */
if (!enable_hashjoin)
1090
startup_cost += disable_cost;
1093
* Compute cost and selectivity of the hashquals and qpquals (other
1094
* restriction clauses) separately. We use approx_selectivity here
1095
* for speed --- in most cases, any errors won't affect the result
1098
* Note: it's probably bogus to use the normal selectivity calculation
1099
* here when either the outer or inner path is a UniquePath.
1101
hash_selec = approx_selectivity(root, hashclauses,
1102
path->jpath.jointype);
1103
cost_qual_eval(&hash_qual_cost, hashclauses);
1104
cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1105
/* hashclauses are a subset of joinrestrictinfo; subtract to avoid
 * double-charging them in the qpqual costs. */
qp_qual_cost.startup -= hash_qual_cost.startup;
1106
qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1108
/* approx # tuples passing the hash quals */
1109
hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
1111
/* cost of source data */
1112
startup_cost += outer_path->startup_cost;
1113
run_cost += outer_path->total_cost - outer_path->startup_cost;
1114
/* The inner path must be read completely before the join can emit
 * anything, so its full cost counts as startup. */
startup_cost += inner_path->total_cost;
1117
* Cost of computing hash function: must do it once per input tuple.
1118
* We charge one cpu_operator_cost for each column's hash function.
1120
* XXX when a hashclause is more complex than a single operator, we
1121
* really should charge the extra eval costs of the left or right
1122
* side, as appropriate, here. This seems more work than it's worth
1125
startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
1126
run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
1128
/* Get hash table size that executor would use for inner relation */
1129
ExecChooseHashTableSize(inner_path_rows,
1130
inner_path->parent->width,
1136
* Determine bucketsize fraction for inner relation. We use the
1137
* smallest bucketsize estimated for any individual hashclause; this
1138
* is undoubtedly conservative.
1140
* BUT: if inner relation has been unique-ified, we can assume it's good
1141
* for hashing. This is important both because it's the right answer,
1142
* and because we avoid contaminating the cache with a value that's
1143
* wrong for non-unique-ified paths.
1145
if (IsA(inner_path, UniquePath))
1146
/* unique inner: assume perfectly even distribution over buckets */
innerbucketsize = 1.0 / virtualbuckets;
1149
innerbucketsize = 1.0;
1150
foreach(hcl, hashclauses)
1152
RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1153
Selectivity thisbucketsize;
1155
Assert(IsA(restrictinfo, RestrictInfo));
1158
* First we have to figure out which side of the hashjoin
1159
* clause is the inner side.
1161
* Since we tend to visit the same clauses over and over when
1162
* planning a large query, we cache the bucketsize estimate in
1163
* the RestrictInfo node to avoid repeated lookups of
1166
if (bms_is_subset(restrictinfo->right_relids,
1167
inner_path->parent->relids))
1169
/* righthand side is inner */
1170
thisbucketsize = restrictinfo->right_bucketsize;
1171
/* a negative cached value is the "not computed yet" sentinel */
if (thisbucketsize < 0)
1173
/* not cached yet */
1175
estimate_hash_bucketsize(root,
1176
get_rightop(restrictinfo->clause),
1178
restrictinfo->right_bucketsize = thisbucketsize;
1183
Assert(bms_is_subset(restrictinfo->left_relids,
1184
inner_path->parent->relids));
1185
/* lefthand side is inner */
1186
thisbucketsize = restrictinfo->left_bucketsize;
1187
if (thisbucketsize < 0)
1189
/* not cached yet */
1191
estimate_hash_bucketsize(root,
1192
get_leftop(restrictinfo->clause),
1194
restrictinfo->left_bucketsize = thisbucketsize;
1198
/* keep the smallest (most selective) bucketsize across all clauses */
if (innerbucketsize > thisbucketsize)
1199
innerbucketsize = thisbucketsize;
1204
* if inner relation is too big then we will need to "batch" the join,
1205
* which implies writing and reading most of the tuples to disk an
1206
* extra time. Charge one cost unit per page of I/O (correct since it
1207
* should be nice and sequential...). Writing the inner rel counts as
1208
* startup cost, all the rest as run cost.
1212
double outerpages = page_size(outer_path_rows,
1213
outer_path->parent->width);
1214
double innerpages = page_size(inner_path_rows,
1215
inner_path->parent->width);
1217
startup_cost += innerpages;
1218
/* inner read back once + outer written and read back: 2x */
run_cost += innerpages + 2 * outerpages;
1224
* If we're doing JOIN_IN then we will stop comparing inner tuples to
1225
* an outer tuple as soon as we have one match. Account for the
1226
* effects of this by scaling down the cost estimates in proportion to
1227
* the expected output size. (This assumes that all the quals
1228
* attached to the join are IN quals, which should be true.)
1230
joininfactor = join_in_selectivity(&path->jpath, root);
1233
* The number of tuple comparisons needed is the number of outer
1234
* tuples times the typical number of tuples in a hash bucket, which
1235
* is the inner relation size times its bucketsize fraction. At each
1236
* one, we need to evaluate the hashjoin quals.
1238
startup_cost += hash_qual_cost.startup;
1239
run_cost += hash_qual_cost.per_tuple *
1240
outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
1244
* For each tuple that gets through the hashjoin proper, we charge
1245
* cpu_tuple_cost plus the cost of evaluating additional restriction
1246
* clauses that are to be applied at the join. (This is pessimistic
1247
* since not all of the quals may get evaluated at each tuple.)
1249
startup_cost += qp_qual_cost.startup;
1250
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1251
run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1254
* Bias against putting larger relation on inside. We don't want an
1255
* absolute prohibition, though, since larger relation might have
1256
* better bucketsize --- and we can't trust the size estimates
1257
* unreservedly, anyway. Instead, inflate the run cost by the square
1258
* root of the size ratio. (Why square root? No real good reason,
1259
* but it seems reasonable...)
1261
* Note: before 7.4 we implemented this by inflating startup cost; but if
1262
* there's a disable_cost component in the input paths' startup cost,
1263
* that unfairly penalizes the hash. Probably it'd be better to keep
1264
* track of disable penalty separately from cost.
1266
if (innerbytes > outerbytes && outerbytes > 0)
1267
run_cost *= sqrt(innerbytes / outerbytes);
1269
path->jpath.path.startup_cost = startup_cost;
1270
path->jpath.path.total_cost = startup_cost + run_cost;
1276
* Estimate the CPU costs of evaluating a WHERE clause.
1277
* The input can be either an implicitly-ANDed list of boolean
1278
* expressions, or a list of RestrictInfo nodes.
1279
* The result includes both a one-time (startup) component,
1280
* and a per-evaluation component.
1283
/*
 * cost_qual_eval
 *	Per the header fragment above: estimates the CPU cost of evaluating a
 *	WHERE clause list (bare expressions or RestrictInfos), producing both a
 *	one-time (startup) and a per-evaluation component in *cost.
 *
 * NOTE(review): lossy extraction — interleaved line-number junk and some
 * missing lines (loop header, braces). Code left byte-identical.
 */
cost_qual_eval(QualCost *cost, List *quals)
1288
cost->per_tuple = 0;
1290
/* We don't charge any cost for the implicit ANDing at top level ... */
1294
Node *qual = (Node *) lfirst(l);
1297
* RestrictInfo nodes contain an eval_cost field reserved for this
1298
* routine's use, so that it's not necessary to evaluate the qual
1299
* clause's cost more than once. If the clause's cost hasn't been
1300
* computed yet, the field's startup value will contain -1.
1302
if (qual && IsA(qual, RestrictInfo))
1304
RestrictInfo *restrictinfo = (RestrictInfo *) qual;
1306
/* startup < 0 is the "not cached yet" sentinel; compute and cache */
if (restrictinfo->eval_cost.startup < 0)
1308
restrictinfo->eval_cost.startup = 0;
1309
restrictinfo->eval_cost.per_tuple = 0;
1310
cost_qual_eval_walker((Node *) restrictinfo->clause,
1311
&restrictinfo->eval_cost);
1313
cost->startup += restrictinfo->eval_cost.startup;
1314
cost->per_tuple += restrictinfo->eval_cost.per_tuple;
1318
/* If it's a bare expression, must always do it the hard way */
1319
cost_qual_eval_walker(qual, cost);
1325
/*
 * cost_qual_eval_walker
 *	Expression-tree walker that accumulates eval cost into *total:
 *	one cpu_operator_cost per function/operator node, special handling
 *	for ScalarArrayOpExpr and SubPlan nodes; errors out on unplanned
 *	SubLinks. Recurses via expression_tree_walker.
 *
 * NOTE(review): lossy extraction — junk line numbers interleaved and
 * several lines (NULL check, braces, final argument of the tail call)
 * are missing. Code left byte-identical.
 */
cost_qual_eval_walker(Node *node, QualCost *total)
1331
* Our basic strategy is to charge one cpu_operator_cost for each
1332
* operator or function node in the given tree. Vars and Consts are
1333
* charged zero, and so are boolean operators (AND, OR, NOT).
1334
* Simplistic, but a lot better than no model at all.
1336
* Should we try to account for the possibility of short-circuit
1337
* evaluation of AND/OR?
1339
if (IsA(node, FuncExpr) ||
1340
IsA(node, OpExpr) ||
1341
IsA(node, DistinctExpr) ||
1342
IsA(node, NullIfExpr))
1343
total->per_tuple += cpu_operator_cost;
1344
else if (IsA(node, ScalarArrayOpExpr))
1346
/* should charge more than 1 op cost, but how many? */
1347
total->per_tuple += cpu_operator_cost * 10;
1349
else if (IsA(node, SubLink))
1351
/* This routine should not be applied to un-planned expressions */
1352
elog(ERROR, "cannot handle unplanned sub-select");
1354
else if (IsA(node, SubPlan))
1357
* A subplan node in an expression typically indicates that the
1358
* subplan will be executed on each evaluation, so charge
1359
* accordingly. (Sub-selects that can be executed as InitPlans
1360
* have already been removed from the expression.)
1362
* An exception occurs when we have decided we can implement the
1363
* subplan by hashing.
1366
SubPlan *subplan = (SubPlan *) node;
1367
Plan *plan = subplan->plan;
1369
if (subplan->useHashTable)
1372
* If we are using a hash table for the subquery outputs, then
1373
* the cost of evaluating the query is a one-time cost. We
1374
* charge one cpu_operator_cost per tuple for the work of
1375
* loading the hashtable, too.
1377
total->startup += plan->total_cost +
1378
cpu_operator_cost * plan->plan_rows;
1381
* The per-tuple costs include the cost of evaluating the
1382
* lefthand expressions, plus the cost of probing the
1383
* hashtable. Recursion into the exprs list will handle the
1384
* lefthand expressions properly, and will count one
1385
* cpu_operator_cost for each comparison operator. That is
1386
* probably too low for the probing cost, but it's hard to
1387
* make a better estimate, so live with it for now.
1393
* Otherwise we will be rescanning the subplan output on each
1394
* evaluation. We need to estimate how much of the output we
1395
* will actually need to scan. NOTE: this logic should agree
1396
* with the estimates used by make_subplan() in
1399
Cost plan_run_cost = plan->total_cost - plan->startup_cost;
1401
if (subplan->subLinkType == EXISTS_SUBLINK)
1403
/* we only need to fetch 1 tuple */
1404
total->per_tuple += plan_run_cost / plan->plan_rows;
1406
else if (subplan->subLinkType == ALL_SUBLINK ||
1407
subplan->subLinkType == ANY_SUBLINK)
1409
/* assume we need 50% of the tuples */
1410
total->per_tuple += 0.50 * plan_run_cost;
1411
/* also charge a cpu_operator_cost per row examined */
1412
total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
1416
/* assume we need all tuples */
1417
total->per_tuple += plan_run_cost;
1421
* Also account for subplan's startup cost. If the subplan is
1422
* uncorrelated or undirect correlated, AND its topmost node
1423
* is a Sort or Material node, assume that we'll only need to
1424
* pay its startup cost once; otherwise assume we pay the
1425
* startup cost every time.
1427
if (subplan->parParam == NIL &&
1429
IsA(plan, Material)))
1430
total->startup += plan->startup_cost;
1432
total->per_tuple += plan->startup_cost;
1436
return expression_tree_walker(node, cost_qual_eval_walker,
1442
* approx_selectivity
1443
* Quick-and-dirty estimation of clause selectivities.
1444
* The input can be either an implicitly-ANDed list of boolean
1445
* expressions, or a list of RestrictInfo nodes (typically the latter).
1447
* This is quick-and-dirty because we bypass clauselist_selectivity, and
1448
* simply multiply the independent clause selectivities together. Now
1449
* clauselist_selectivity often can't do any better than that anyhow, but
1450
* for some situations (such as range constraints) it is smarter. However,
1451
* we can't effectively cache the results of clauselist_selectivity, whereas
1452
* the individual clause selectivities can be and are cached.
1454
* Since we are only using the results to estimate how many potential
1455
* output tuples are generated and passed through qpqual checking, it
1456
* seems OK to live with the approximation.
1459
/*
 * approx_selectivity
 *	Per the header fragment above: quick-and-dirty combined selectivity —
 *	multiplies the (cacheable) per-clause selectivities together instead
 *	of calling clauselist_selectivity.
 *
 * NOTE(review): lossy extraction — the loop header and closing lines are
 * missing. Code left byte-identical.
 */
approx_selectivity(Query *root, List *quals, JoinType jointype)
1461
Selectivity total = 1.0;
1466
Node *qual = (Node *) lfirst(l);
1468
/* Note that clause_selectivity will be able to cache its result */
1469
total *= clause_selectivity(root, qual, 0, jointype);
1476
* set_baserel_size_estimates
1477
* Set the size estimates for the given base relation.
1479
* The rel's targetlist and restrictinfo list must have been constructed
1482
* We set the following fields of the rel node:
1483
* rows: the estimated number of output tuples (after applying
1484
* restriction clauses).
1485
* width: the estimated average output tuple width in bytes.
1486
* baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
1489
/*
 * set_baserel_size_estimates
 *	Per the header fragment above: sets rel->rows (tuples surviving the
 *	baserestrictinfo clauses), rel->baserestrictcost, and rel->width.
 *
 * NOTE(review): lossy extraction — the trailing arguments of the
 * clauselist_selectivity call are missing. Code left byte-identical.
 */
set_baserel_size_estimates(Query *root, RelOptInfo *rel)
1493
/* Should only be applied to base relations */
1494
Assert(rel->relid > 0);
1496
nrows = rel->tuples *
1497
clauselist_selectivity(root,
1498
rel->baserestrictinfo,
1502
/* clamp_row_est guarantees a strictly positive row estimate */
rel->rows = clamp_row_est(nrows);
1504
cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);
1506
set_rel_width(root, rel);
1510
* set_joinrel_size_estimates
1511
* Set the size estimates for the given join relation.
1513
* The rel's targetlist must have been constructed already, and a
1514
* restriction clause list that matches the given component rels must
1517
* Since there is more than one way to make a joinrel for more than two
1518
* base relations, the results we get here could depend on which component
1519
* rel pair is provided. In theory we should get the same answers no matter
1520
* which pair is provided; in practice, since the selectivity estimation
1521
* routines don't handle all cases equally well, we might not. But there's
1522
* not much to be done about it. (Would it make sense to repeat the
1523
* calculations for each pair of input rels that's encountered, and somehow
1524
* average the results? Probably way more trouble than it's worth.)
1526
* It's important that the results for symmetric JoinTypes be symmetric,
1527
* eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
1528
* rel1, JOIN_RIGHT). Also, JOIN_IN should produce the same result as
1529
* JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
1531
* We set only the rows field here. The width field was already set by
1532
* build_joinrel_tlist, and baserestrictcost is not used for join rels.
1535
/*
 * set_joinrel_size_estimates
 *	Per the header fragment above: sets rel->rows for a join relation —
 *	Cartesian-product size times join-clause selectivity, with per-jointype
 *	clamping (outer joins at least as large as the non-nullable side;
 *	JOIN_UNIQUE variants clamped to the non-unique-ified input).
 *
 * NOTE(review): lossy extraction — the switch statement's opening and
 * several case labels (e.g. the JOIN_INNER / outer-join cases that the
 * bare nrows-assignments below belong to) are missing. Code left
 * byte-identical; the case ownership of each clamp is inferred only from
 * the visible labels.
 */
set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
1536
RelOptInfo *outer_rel,
1537
RelOptInfo *inner_rel,
1546
* Compute joinclause selectivity. Note that we are only considering
1547
* clauses that become restriction clauses at this join level; we are
1548
* not double-counting them because they were not considered in
1549
* estimating the sizes of the component rels.
1551
selec = clauselist_selectivity(root,
1557
* Basically, we multiply size of Cartesian product by selectivity.
1559
* If we are doing an outer join, take that into account: the output must
1560
* be at least as large as the non-nullable input. (Is there any
1561
* chance of being even smarter?)
1563
* For JOIN_IN and variants, the Cartesian product is figured with
1564
* respect to a unique-ified input, and then we can clamp to the size
1565
* of the other input.
1570
nrows = outer_rel->rows * inner_rel->rows * selec;
1573
nrows = outer_rel->rows * inner_rel->rows * selec;
1574
/* clamp up: output at least as large as the non-nullable (outer) side */
if (nrows < outer_rel->rows)
1575
nrows = outer_rel->rows;
1578
nrows = outer_rel->rows * inner_rel->rows * selec;
1579
if (nrows < inner_rel->rows)
1580
nrows = inner_rel->rows;
1583
nrows = outer_rel->rows * inner_rel->rows * selec;
1584
/* full-join style: clamp to both inputs */
if (nrows < outer_rel->rows)
1585
nrows = outer_rel->rows;
1586
if (nrows < inner_rel->rows)
1587
nrows = inner_rel->rows;
1590
case JOIN_UNIQUE_INNER:
1591
upath = create_unique_path(root, inner_rel,
1592
inner_rel->cheapest_total_path);
1593
nrows = outer_rel->rows * upath->rows * selec;
1594
/* clamp down: can't emit more rows than the outer input */
if (nrows > outer_rel->rows)
1595
nrows = outer_rel->rows;
1597
case JOIN_REVERSE_IN:
1598
case JOIN_UNIQUE_OUTER:
1599
upath = create_unique_path(root, outer_rel,
1600
outer_rel->cheapest_total_path);
1601
nrows = upath->rows * inner_rel->rows * selec;
1602
if (nrows > inner_rel->rows)
1603
nrows = inner_rel->rows;
1606
elog(ERROR, "unrecognized join type: %d", (int) jointype);
1607
nrows = 0; /* keep compiler quiet */
1611
rel->rows = clamp_row_est(nrows);
1615
* join_in_selectivity
1616
* Determines the factor by which a JOIN_IN join's result is expected
1617
* to be smaller than an ordinary inner join.
1619
* 'path' is already filled in except for the cost fields
1622
/*
 * join_in_selectivity
 *	Per the header fragment above: returns the factor by which a JOIN_IN
 *	join's result is expected to be smaller than an ordinary inner join
 *	(1.0 for non-JOIN_IN joins or when the inner side is already unique).
 *
 * NOTE(review): lossy extraction — the "return 1.0;" statements implied by
 * the visible early-exit conditions, plus braces and some call arguments,
 * are missing. Code left byte-identical.
 */
join_in_selectivity(JoinPath *path, Query *root)
1624
RelOptInfo *innerrel;
1625
UniquePath *innerunique;
1629
/* Return 1.0 whenever it's not JOIN_IN */
1630
if (path->jointype != JOIN_IN)
1634
* Return 1.0 if the inner side is already known unique. The case
1635
* where the inner path is already a UniquePath probably cannot happen
1636
* in current usage, but check it anyway for completeness. The
1637
* interesting case is where we've determined the inner relation
1638
* itself is unique, which we can check by looking at the rows
1639
* estimate for its UniquePath.
1641
if (IsA(path->innerjoinpath, UniquePath))
1643
innerrel = path->innerjoinpath->parent;
1644
innerunique = create_unique_path(root,
1646
innerrel->cheapest_total_path);
1647
/* unique-ification removes no rows => inner already unique */
if (innerunique->rows >= innerrel->rows)
1651
* Compute same result set_joinrel_size_estimates would compute for
1652
* JOIN_INNER. Note that we use the input rels' absolute size
1653
* estimates, not PATH_ROWS() which might be less; if we used
1654
* PATH_ROWS() we'd be double-counting the effects of any join clauses
1655
* used in input scans.
1657
selec = clauselist_selectivity(root,
1658
path->joinrestrictinfo,
1661
nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;
1663
nrows = clamp_row_est(nrows);
1665
/* See if it's larger than the actual JOIN_IN size estimate */
1666
if (nrows > path->path.parent->rows)
1667
/* scale costs down by the ratio of IN-join size to inner-join size */
return path->path.parent->rows / nrows;
1673
* set_function_size_estimates
1674
* Set the size estimates for a base relation that is a function call.
1676
* The rel's targetlist and restrictinfo list must have been constructed
1679
* We set the same fields as set_baserel_size_estimates.
1682
/*
 * set_function_size_estimates
 *	Per the header fragment above: size estimates for a base relation that
 *	is a function RTE — asserts the rel is a function, then delegates to
 *	set_baserel_size_estimates. The rows-returned guess itself is not
 *	visible in this extraction (a rel->tuples assignment is presumably
 *	missing — TODO confirm against the original file).
 */
set_function_size_estimates(Query *root, RelOptInfo *rel)
1684
/* Should only be applied to base relations that are functions */
1685
Assert(rel->relid > 0);
1686
Assert(rel->rtekind == RTE_FUNCTION);
1689
* Estimate number of rows the function itself will return.
1691
* XXX no idea how to do this yet; but should at least check whether
1692
* function returns set or not...
1696
/* Now estimate number of output rows, etc */
1697
set_baserel_size_estimates(root, rel);
1703
* Set the estimated output width of a base relation.
1705
* NB: this works best on plain relations because it prefers to look at
1706
* real Vars. It will fail to make use of pg_statistic info when applied
1707
* to a subquery relation, even if the subquery outputs are simple vars
1708
* that we could have gotten info for. Is it worth trying to be smarter
1711
* The per-attribute width estimates are cached for possible re-use while
1712
* building join relations.
1715
/*
 * set_rel_width
 *	Per the header fragment above: estimates rel->width by summing
 *	per-attribute width estimates over the rel's target list, preferring
 *	cached widths, then pg_statistic averages (get_attavgwidth), then
 *	type-based averages (get_typavgwidth). Per-attribute results are
 *	cached in rel->attr_widths for reuse.
 *
 * NOTE(review): lossy extraction — braces, the declarations of ndx /
 * relid / item_width, and several control-flow lines are missing.
 * Code left byte-identical.
 */
set_rel_width(Query *root, RelOptInfo *rel)
1717
int32 tuple_width = 0;
1720
foreach(tllist, rel->reltargetlist)
1722
Var *var = (Var *) lfirst(tllist);
1727
/* For now, punt on whole-row child Vars */
1730
tuple_width += 32; /* arbitrary */
1734
/* index into attr_widths cache, offset by the rel's lowest attno */
ndx = var->varattno - rel->min_attr;
1737
* The width probably hasn't been cached yet, but may as well
1740
if (rel->attr_widths[ndx] > 0)
1742
tuple_width += rel->attr_widths[ndx];
1746
relid = getrelid(var->varno, root->rtable);
1747
if (relid != InvalidOid)
1749
item_width = get_attavgwidth(relid, var->varattno);
1752
rel->attr_widths[ndx] = item_width;
1753
tuple_width += item_width;
1759
* Not a plain relation, or can't find statistics for it. Estimate
1760
* using just the type info.
1762
item_width = get_typavgwidth(var->vartype, var->vartypmod);
1763
Assert(item_width > 0);
1764
rel->attr_widths[ndx] = item_width;
1765
tuple_width += item_width;
1767
Assert(tuple_width >= 0);
1768
rel->width = tuple_width;
1772
* relation_byte_size
1773
* Estimate the storage space in bytes for a given number of tuples
1774
* of a given width (size in bytes).
1777
/*
 * relation_byte_size
 *	Per the header fragment above: estimated storage bytes for `tuples`
 *	rows of the given width — per-tuple size is the MAXALIGNed data width
 *	plus the MAXALIGNed heap tuple header.
 */
relation_byte_size(double tuples, int width)
1779
return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
1784
* Returns an estimate of the number of pages covered by a given
1785
* number of tuples of a given width (size in bytes).
1788
/*
 * page_size
 *	Per the header fragment above: estimated number of disk pages needed
 *	for the given tuple count/width — byte size divided by BLCKSZ,
 *	rounded up with ceil().
 */
page_size(double tuples, int width)
1790
return ceil(relation_byte_size(tuples, width) / BLCKSZ);