/*------------------------------------------------------------------------
 *
 * geqo_eval.c
 *    Routines to evaluate query trees
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/backend/optimizer/geqo/geqo_eval.c
 *
 *-------------------------------------------------------------------------
 */

/* contributed by:
   =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
   *  Martin Utesch              * Institute of Automatic Control       *
   =                             = University of Mining and Technology  =
   *  utesch@aut.tu-freiberg.de  * Freiberg, Germany                    *
   =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
 */
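
/*
 * (Reconstructed include list: a sketch of what this file plausibly needs
 * -- <float.h> for DBL_MAX, utils/memutils.h for the memory-context calls;
 * the exact set is an assumption.)
 */
#include "postgres.h"

#include <float.h>

#include "optimizer/geqo.h"
#include "optimizer/joininfo.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "utils/memutils.h"
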
35/* A "clump" of already-joined relations within gimme_tree */
39 int size;
/* number of input relations in clump */
static List *merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump,
                         int num_gene, bool force);
static bool desirable_join(PlannerInfo *root,
                           RelOptInfo *outer_rel, RelOptInfo *inner_rel);

/*
 * geqo_eval
 *
 * Returns cost of a query tree as an individual of the population.
 *
 * If no legal join order can be extracted from the proposed tour,
 * returns DBL_MAX.
 */
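/*
 * Sketch of the function opening implied by the comment above; the local
 * variable names are reconstructions.
 */
Cost
geqo_eval(PlannerInfo *root, Gene *tour, int num_gene)
{
    MemoryContext mycontext;
    MemoryContext oldcxt;
    RelOptInfo *joinrel;
    Cost        fitness;
    int         savelength;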
    struct HTAB *savehash;

    /*
     * Create a private memory context that will hold all temp storage
     * allocated inside gimme_tree().
     *
     * Since geqo_eval() will be called many times, we can't afford to let
     * all that memory go unreclaimed until end of statement.  Note we make
     * the temp context a child of the planner's normal context, so that it
     * will be freed even if we abort via ereport(ERROR).
     */
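    /* A sketch; the context name "GEQO" is an assumption. */
    mycontext = AllocSetContextCreate(CurrentMemoryContext,
                                      "GEQO",
                                      ALLOCSET_DEFAULT_SIZES);
    oldcxt = MemoryContextSwitchTo(mycontext);
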
    /*
     * gimme_tree will add entries to root->join_rel_list, which may or may
     * not already contain some entries.  The newly added entries will be
     * recycled by the MemoryContextDelete below, so we must ensure that the
     * list is restored to its former state before exiting.  We can do this
     * by truncating the list to its original length.  NOTE this assumes that
     * any added entries are appended at the end!
     *
     * We also must take care not to mess up the outer join_rel_hash, if
     * there is one.  We can do this by just temporarily setting the link to
     * NULL.  (If we are dealing with enough join rels, which we very likely
     * are, a new hash table will get built and used locally.)
     *
     * join_rel_level[] shouldn't be in use, so just Assert it isn't.
     */
    savelength = list_length(root->join_rel_list);
    savehash = root->join_rel_hash;
    Assert(root->join_rel_level == NULL);

    root->join_rel_hash = NULL;

    /* construct the best path for the given combination of relations */
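    /* Reconstructed call, per gimme_tree's signature below: */
    joinrel = gimme_tree(root, tour, num_gene);
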
    /*
     * compute fitness, if we found a valid join
     *
     * XXX geqo does not currently support optimization for partial result
     * retrieval, nor do we take any cognizance of possible use of
     * parameterized paths --- how to fix?
     */
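    /* Sketch, assuming an invalid join order is reported as DBL_MAX: */
    if (joinrel)
    {
        Path       *best_path = joinrel->cheapest_total_path;

        fitness = best_path->total_cost;
    }
    else
        fitness = DBL_MAX;
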
    /*
     * Restore join_rel_list to its former state, and put back original
     * hashtable if any.
     */
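    /* Sketch of the restore, truncating to the length saved above: */
    root->join_rel_list = list_truncate(root->join_rel_list,
                                        savelength);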
    root->join_rel_hash = savehash;

    /* release all the memory acquired within gimme_tree */
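    /* Sketch: switch back to the caller's context and drop ours. */
    MemoryContextSwitchTo(oldcxt);
    MemoryContextDelete(mycontext);

    return fitness;
}
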
/*
 * gimme_tree
 *    Form planner estimates for a join tree constructed in the specified
 *    order.
 *
 *   'tour' is the proposed join order, of length 'num_gene'
 *
 * Returns a new join relation whose cheapest path is the best plan for
 * this join order.  NB: will return NULL if join order is invalid and
 * we can't modify it into a valid order.
 *
 * The original implementation of this routine always joined in the specified
 * order, and so could only build left-sided plans (and right-sided and
 * mixtures, as a byproduct of the fact that make_join_rel() is symmetric).
 * It could never produce a "bushy" plan.  This had a couple of big problems,
 * of which the worst was that there are situations involving join order
 * restrictions where the only valid plans are bushy.
 *
 * The present implementation takes the given tour as a guideline, but
 * postpones joins that are illegal or seem unsuitable according to some
 * heuristic rules.  This allows correct bushy plans to be generated at need,
 * and as a nice side-effect it seems to materially improve the quality of
 * the generated plans.  Note however that since it's just a heuristic, it
 * can still fail in some cases.  (In particular, we might clump together
 * relations that actually mustn't be joined yet due to LATERAL restrictions;
 * since there's no provision for un-clumping, this must lead to failure.)
 */
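/*
 * Sketch of the function opening; GeqoPrivateData, join_search_private, and
 * initial_rels are assumptions about the surrounding geqo infrastructure.
 */
RelOptInfo *
gimme_tree(PlannerInfo *root, Gene *tour, int num_gene)
{
    GeqoPrivateData *private = (GeqoPrivateData *) root->join_search_private;
    List       *clumps;
    int         rel_count;
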
    /*
     * Sometimes, a relation can't yet be joined to others due to heuristics
     * or actual semantic restrictions.  We maintain a list of "clumps" of
     * successfully joined relations, with larger clumps at the front.  Each
     * new relation from the tour is added to the first clump it can be
     * joined to; if there is none then it becomes a new clump of its own.
     * When we enlarge an existing clump we check to see if it can now be
     * merged with any other clumps.  After the tour is all scanned, we
     * forget about the heuristics and try to forcibly join any remaining
     * clumps.  If we are unable to merge all the clumps into one, fail.
     */
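    /* Sketch: start with no clumps. */
    clumps = NIL;
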
    for (rel_count = 0; rel_count < num_gene; rel_count++)
    {
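        /* Per-iteration locals; the names are reconstructions. */
        int         cur_rel_index;
        RelOptInfo *cur_rel;
        Clump      *cur_clump;
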
        /* Get the next input relation */
        cur_rel_index = (int) tour[rel_count];
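        /* Sketch, assuming genes are 1-based indexes into initial_rels: */
        cur_rel = (RelOptInfo *) list_nth(private->initial_rels,
                                          cur_rel_index - 1);
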
        /* Make it into a single-rel clump */
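        /* Reconstruction of the clump setup described above: */
        cur_clump = (Clump *) palloc(sizeof(Clump));
        cur_clump->joinrel = cur_rel;
        cur_clump->size = 1;
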
        /* Merge it into the clumps list, using only desirable joins */
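        /* Sketch of the merge step and the guard on the force-join pass: */
        clumps = merge_clump(root, clumps, cur_clump, num_gene, false);
    }

    if (list_length(clumps) > 1)
    {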
        /* Force-join the remaining clumps in some legal order */
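        /* Sketch: rebuild the list, force-merging each clump in turn. */
        List       *fclumps;
        ListCell   *lc;

        fclumps = NIL;
        foreach(lc, clumps)
        {
            Clump      *clump = (Clump *) lfirst(lc);

            fclumps = merge_clump(root, fclumps, clump, num_gene, true);
        }
        clumps = fclumps;
    }
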
    /* Did we succeed in forming a single join relation? */
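    /* Sketch: exactly one clump left means a complete join tree. */
    if (list_length(clumps) != 1)
        return NULL;

    return ((Clump *) linitial(clumps))->joinrel;
}
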
/*
 * Merge a "clump" into the list of existing clumps for gimme_tree.
 *
 * We try to merge the clump into some existing clump, and repeat if
 * successful.  When no more merging is possible, insert the clump
 * into the list, preserving the list ordering rule (namely, that
 * clumps of larger size appear earlier).
 *
 * If force is true, merge anywhere a join is legal, even if it causes
 * a cartesian join to be performed.  When force is false, do only
 * "desirable" joins.
 */
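/* (Opening reconstructed per the static prototype above; locals are
 * sketches.) */
static List *
merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, int num_gene,
            bool force)
{
    ListCell   *lc;
    int         pos;
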
    /* Look for a clump that new_clump can join to */
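    /* Sketch: scan existing clumps for a legal (or desirable) partner. */
    foreach(lc, clumps)
    {
        Clump      *old_clump = (Clump *) lfirst(lc);

        if (force ||
            desirable_join(root, old_clump->joinrel, new_clump->joinrel))
        {
            RelOptInfo *joinrel;
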
            /*
             * Construct a RelOptInfo representing the join of these two
             * input relations.  Note that we expect the joinrel not to
             * exist in root->join_rel_list yet, and so the paths
             * constructed for it will only include the ones we want.
             */
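            /* Reconstructed call, per make_join_rel's signature: */
            joinrel = make_join_rel(root,
                                    old_clump->joinrel,
                                    new_clump->joinrel);
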
            /* Keep searching if join order is not valid */
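            /* Sketch: a NULL joinrel sends us on to the next clump. */
            if (joinrel)
            {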
                /* Create paths for partitionwise joins. */
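                /* Reconstructed, per generate_partitionwise_join_paths: */
                generate_partitionwise_join_paths(root, joinrel);
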
                /*
                 * Except for the topmost scan/join rel, consider gathering
                 * partial paths.  We'll do the same for the topmost
                 * scan/join rel once we know the final targetlist (see
                 * grouping_planner).
                 */
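                /* Sketch; root->all_query_rels is an assumed field name. */
                if (!bms_equal(joinrel->relids, root->all_query_rels))
                    generate_useful_gather_paths(root, joinrel, false);
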
                /* Find and save the cheapest paths for this joinrel */
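                /* Reconstructed, per set_cheapest's signature: */
                set_cheapest(joinrel);
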
                /* Absorb new clump into old */
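                /* Sketch of the absorb step: */
                old_clump->joinrel = joinrel;
                old_clump->size += new_clump->size;
                pfree(new_clump);
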
                /* Remove old_clump from list */
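                /* Reconstructed, per the foreach_delete_current macro: */
                clumps = foreach_delete_current(clumps, lc);
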
                /*
                 * Recursively try to merge the enlarged old_clump with
                 * others.  When no further merge is possible, we'll
                 * reinsert it into the list.
                 */
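                /* Sketch of the recursive tail call and block closers: */
                return merge_clump(root, clumps, old_clump, num_gene, force);
            }
        }
    }
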
    /*
     * No merging is possible, so add new_clump as an independent clump, in
     * proper order according to size.  We can be fast for the common case
     * where it has size 1 --- it should always go at the end.
     */
    if (clumps == NIL || new_clump->size == 1)
        return lappend(clumps, new_clump);

    /* Else search for the place to insert it */
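    /* Sketch of the position scan, per the size-ordering rule above: */
    for (pos = 0; pos < list_length(clumps); pos++)
    {
        Clump      *old_clump = (Clump *) list_nth(clumps, pos);
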
        if (new_clump->size > old_clump->size)
            break;              /* new_clump belongs before old_clump */
    }
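    /* Sketch: insert at the chosen position and return. */
    clumps = list_insert_nth(clumps, pos, new_clump);

    return clumps;
}
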
/*
 * Heuristics for gimme_tree: do we want to join these two relations?
 */
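/* (Opening reconstructed per the static prototype above.) */
static bool
desirable_join(PlannerInfo *root,
               RelOptInfo *outer_rel, RelOptInfo *inner_rel)
{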
    /*
     * Join if there is an applicable join clause, or if there is a join
     * order restriction forcing these rels to be joined.
     */
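    /* Sketch, assuming these two standard predicates are what's meant: */
    if (have_relevant_joinclause(root, outer_rel, inner_rel) ||
        have_join_order_restriction(root, outer_rel, inner_rel))
        return true;
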
    /* Otherwise postpone the join till later. */
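    return false;
}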