PostgreSQL Source Code: src/backend/executor/execPartition.c Source File

PostgreSQL Source Code git master
execPartition.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * execPartition.c
4 * Support routines for partitioning.
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 * IDENTIFICATION
10 * src/backend/executor/execPartition.c
11 *
12 *-------------------------------------------------------------------------
13 */
14#include "postgres.h"
15
16#include "access/table.h"
17#include "access/tableam.h"
18#include "catalog/partition.h"
19#include "executor/execPartition.h"
20#include "executor/executor.h"
21#include "executor/nodeModifyTable.h"
22#include "foreign/fdwapi.h"
23#include "mb/pg_wchar.h"
24#include "miscadmin.h"
25#include "partitioning/partbounds.h"
26#include "partitioning/partdesc.h"
27#include "partitioning/partprune.h"
28#include "rewrite/rewriteManip.h"
29#include "utils/acl.h"
30#include "utils/lsyscache.h"
31#include "utils/partcache.h"
32#include "utils/rls.h"
33#include "utils/ruleutils.h"
34
35
36/*-----------------------
37 * PartitionTupleRouting - Encapsulates all information required to
38 * route a tuple inserted into a partitioned table to one of its leaf
39 * partitions.
40 *
41 * partition_root
42 * The partitioned table that's the target of the command.
43 *
44 * partition_dispatch_info
45 * Array of 'max_dispatch' elements containing a pointer to a
46 * PartitionDispatch object for every partitioned table touched by tuple
47 * routing. The entry for the target partitioned table is *always*
48 * present in the 0th element of this array. See comment for
49 * PartitionDispatchData->indexes for details on how this array is
50 * indexed.
51 *
52 * nonleaf_partitions
53 * Array of 'max_dispatch' elements containing pointers to fake
54 * ResultRelInfo objects for nonleaf partitions, useful for checking
55 * the partition constraint.
56 *
57 * num_dispatch
58 * The current number of items stored in the 'partition_dispatch_info'
59 * array. Also serves as the index of the next free array element for
60 * new PartitionDispatch objects that need to be stored.
61 *
62 * max_dispatch
63 * The current allocated size of the 'partition_dispatch_info' array.
64 *
65 * partitions
66 * Array of 'max_partitions' elements containing a pointer to a
67 * ResultRelInfo for every leaf partition touched by tuple routing.
68 * Some of these are pointers to ResultRelInfos which are borrowed out of
69 * the owning ModifyTableState node. The remainder have been built
70 * especially for tuple routing. See comment for
71 * PartitionDispatchData->indexes for details on how this array is
72 * indexed.
73 *
74 * is_borrowed_rel
75 * Array of 'max_partitions' booleans recording whether a given entry
76 * in 'partitions' is a ResultRelInfo pointer borrowed from the owning
77 * ModifyTableState node, rather than being built here.
78 *
79 * num_partitions
80 * The current number of items stored in the 'partitions' array. Also
81 * serves as the index of the next free array element for new
82 * ResultRelInfo objects that need to be stored.
83 *
84 * max_partitions
85 * The current allocated size of the 'partitions' array.
86 *
87 * memcxt
88 * Memory context used to allocate subsidiary structs.
89 *-----------------------
90 */
91 struct PartitionTupleRouting
92{
93 Relation partition_root;
94 PartitionDispatch *partition_dispatch_info;
95 ResultRelInfo **nonleaf_partitions;
96 int num_dispatch;
97 int max_dispatch;
98 ResultRelInfo **partitions;
99 bool *is_borrowed_rel;
100 int num_partitions;
101 int max_partitions;
102 MemoryContext memcxt;
103};
104
105/*-----------------------
106 * PartitionDispatch - information about one partitioned table in a partition
107 * hierarchy required to route a tuple to any of its partitions. A
108 * PartitionDispatch is always encapsulated inside a PartitionTupleRouting
109 * struct and stored inside its 'partition_dispatch_info' array.
110 *
111 * reldesc
112 * Relation descriptor of the table
113 *
114 * key
115 * Partition key information of the table
116 *
117 * keystate
118 * Execution state required for expressions in the partition key
119 *
120 * partdesc
121 * Partition descriptor of the table
122 *
123 * tupslot
124 * A standalone TupleTableSlot initialized with this table's tuple
125 * descriptor, or NULL if no tuple conversion between the parent is
126 * required.
127 *
128 * tupmap
129 * TupleConversionMap to convert from the parent's rowtype to this table's
130 * rowtype (when extracting the partition key of a tuple just before
131 * routing it through this table). A NULL value is stored if no tuple
132 * conversion is required.
133 *
134 * indexes
135 * Array of partdesc->nparts elements. For leaf partitions the index
136 * corresponds to the partition's ResultRelInfo in the encapsulating
137 * PartitionTupleRouting's partitions array. For partitioned partitions,
138 * the index corresponds to the PartitionDispatch for it in its
139 * partition_dispatch_info array. -1 indicates we've not yet allocated
140 * anything in PartitionTupleRouting for the partition.
141 *-----------------------
142 */
143 typedef struct PartitionDispatchData
144{
145 Relation reldesc;
146 PartitionKey key;
147 List *keystate; /* list of ExprState */
148 PartitionDesc partdesc;
149 TupleTableSlot *tupslot;
150 AttrMap *tupmap;
151 int indexes[FLEXIBLE_ARRAY_MEMBER];
152 } PartitionDispatchData;
153
154
155static ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate,
156 EState *estate, PartitionTupleRouting *proute,
157 PartitionDispatch dispatch,
158 ResultRelInfo *rootResultRelInfo,
159 int partidx);
160static void ExecInitRoutingInfo(ModifyTableState *mtstate,
161 EState *estate,
162 PartitionTupleRouting *proute,
163 PartitionDispatch dispatch,
164 ResultRelInfo *partRelInfo,
165 int partidx,
166 bool is_borrowed_rel);
167static PartitionDispatch ExecInitPartitionDispatchInfo(EState *estate,
168 PartitionTupleRouting *proute,
169 Oid partoid, PartitionDispatch parent_pd,
170 int partidx, ResultRelInfo *rootResultRelInfo);
171static void FormPartitionKeyDatum(PartitionDispatch pd,
172 TupleTableSlot *slot,
173 EState *estate,
174 Datum *values,
175 bool *isnull);
176static int get_partition_for_tuple(PartitionDispatch pd, Datum *values,
177 bool *isnull);
178static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
179 Datum *values,
180 bool *isnull,
181 int maxfieldlen);
182static List *adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri);
183static List *adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap);
184static PartitionPruneState *CreatePartitionPruneState(EState *estate,
185 PartitionPruneInfo *pruneinfo,
186 Bitmapset **all_leafpart_rtis);
187static void InitPartitionPruneContext(PartitionPruneContext *context,
188 List *pruning_steps,
189 PartitionDesc partdesc,
190 PartitionKey partkey,
191 PlanState *planstate,
192 ExprContext *econtext);
193static void InitExecPartitionPruneContexts(PartitionPruneState *prunestate,
194 PlanState *parent_plan,
195 Bitmapset *initially_valid_subplans,
196 int n_total_subplans);
197static void find_matching_subplans_recurse(PartitionPruningData *prunedata,
198 PartitionedRelPruningData *pprune,
199 bool initial_prune,
200 Bitmapset **validsubplans,
201 Bitmapset **validsubplan_rtis);
202
203
204/*
205 * ExecSetupPartitionTupleRouting - sets up information needed during
206 * tuple routing for partitioned tables, encapsulates it in
207 * PartitionTupleRouting, and returns it.
208 *
209 * Callers must use the returned PartitionTupleRouting during calls to
210 * ExecFindPartition(). The actual ResultRelInfo for a partition is only
211 * allocated when the partition is found for the first time.
212 *
213 * The current memory context is used to allocate this struct and all
214 * subsidiary structs that will be allocated from it later on. Typically
215 * it should be estate->es_query_cxt.
216 */
217PartitionTupleRouting *
218 ExecSetupPartitionTupleRouting(EState *estate, Relation rel)
219{
220 PartitionTupleRouting *proute;
221
222 /*
223 * Here we attempt to expend as little effort as possible in setting up
224 * the PartitionTupleRouting. Each partition's ResultRelInfo is built on
225 * demand, only when we actually need to route a tuple to that partition.
226 * The reason for this is that a common case is for INSERT to insert a
227 * single tuple into a partitioned table and this must be fast.
228 */
229 proute = (PartitionTupleRouting *) palloc0(sizeof(PartitionTupleRouting));
230 proute->partition_root = rel;
231 proute->memcxt = CurrentMemoryContext;
232 /* Rest of members initialized by zeroing */
233
234 /*
235 * Initialize this table's PartitionDispatch object. Here we pass in the
236 * parent as NULL as we don't need to care about any parent of the target
237 * partitioned table.
238 */
239 ExecInitPartitionDispatchInfo(estate, proute, RelationGetRelid(rel),
240 NULL, 0, NULL);
241
242 return proute;
243}
244
245/*
246 * ExecFindPartition -- Return the ResultRelInfo for the leaf partition that
247 * the tuple contained in *slot should belong to.
248 *
249 * If the partition's ResultRelInfo does not yet exist in 'proute' then we set
250 * one up or reuse one from mtstate's resultRelInfo array. When reusing a
251 * ResultRelInfo from the mtstate we verify that the relation is a valid
252 * target for INSERTs and initialize tuple routing information.
253 *
254 * rootResultRelInfo is the relation named in the query.
255 *
256 * estate must be non-NULL; we'll need it to compute any expressions in the
257 * partition keys. Also, its per-tuple contexts are used as evaluation
258 * scratch space.
259 *
260 * If no leaf partition is found, this routine errors out with the appropriate
261 * error message. An error may also be raised if the found target partition
262 * is not a valid target for an INSERT.
263 */
264ResultRelInfo *
265 ExecFindPartition(ModifyTableState *mtstate,
266 ResultRelInfo *rootResultRelInfo,
267 PartitionTupleRouting *proute,
268 TupleTableSlot *slot, EState *estate)
269{
270 PartitionDispatch *pd = proute->partition_dispatch_info;
271 Datum values[PARTITION_MAX_KEYS];
272 bool isnull[PARTITION_MAX_KEYS];
273 Relation rel;
274 PartitionDispatch dispatch;
275 PartitionDesc partdesc;
276 ExprContext *ecxt = GetPerTupleExprContext(estate);
277 TupleTableSlot *ecxt_scantuple_saved = ecxt->ecxt_scantuple;
278 TupleTableSlot *rootslot = slot;
279 TupleTableSlot *myslot = NULL;
280 MemoryContext oldcxt;
281 ResultRelInfo *rri = NULL;
282
283 /* use per-tuple context here to avoid leaking memory */
284 oldcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
285
286 /*
287 * First check the root table's partition constraint, if any. No point in
288 * routing the tuple if it doesn't belong in the root table itself.
289 */
290 if (rootResultRelInfo->ri_RelationDesc->rd_rel->relispartition)
291 ExecPartitionCheck(rootResultRelInfo, slot, estate, true);
292
293 /* start with the root partitioned table */
294 dispatch = pd[0];
295 while (dispatch != NULL)
296 {
297 int partidx = -1;
298 bool is_leaf;
299
300 CHECK_FOR_INTERRUPTS();
301
302 rel = dispatch->reldesc;
303 partdesc = dispatch->partdesc;
304
305 /*
306 * Extract partition key from tuple. Expression evaluation machinery
307 * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to
308 * point to the correct tuple slot. The slot might have changed from
309 * what was used for the parent table if the table of the current
310 * partitioning level has different tuple descriptor from the parent.
311 * So update ecxt_scantuple accordingly.
312 */
313 ecxt->ecxt_scantuple = slot;
314 FormPartitionKeyDatum(dispatch, slot, estate, values, isnull);
315
316 /*
317 * If this partitioned table has no partitions or no partition for
318 * these values, error out.
319 */
320 if (partdesc->nparts == 0 ||
321 (partidx = get_partition_for_tuple(dispatch, values, isnull)) < 0)
322 {
323 char *val_desc;
324
325 val_desc = ExecBuildSlotPartitionKeyDescription(rel,
326 values, isnull, 64);
327 Assert(OidIsValid(RelationGetRelid(rel)));
328 ereport(ERROR,
329 (errcode(ERRCODE_CHECK_VIOLATION),
330 errmsg("no partition of relation \"%s\" found for row",
331 RelationGetRelationName(rel)),
332 val_desc ?
333 errdetail("Partition key of the failing row contains %s.",
334 val_desc) : 0,
335 errtable(rel)));
336 }
337
338 is_leaf = partdesc->is_leaf[partidx];
339 if (is_leaf)
340 {
341 /*
342 * We've reached the leaf -- hurray, we're done. Look to see if
343 * we've already got a ResultRelInfo for this partition.
344 */
345 if (likely(dispatch->indexes[partidx] >= 0))
346 {
347 /* ResultRelInfo already built */
348 Assert(dispatch->indexes[partidx] < proute->num_partitions);
349 rri = proute->partitions[dispatch->indexes[partidx]];
350 }
351 else
352 {
353 /*
354 * If the partition is known in the owning ModifyTableState
355 * node, we can re-use that ResultRelInfo instead of creating
356 * a new one with ExecInitPartitionInfo().
357 */
358 rri = ExecLookupResultRelByOid(mtstate,
359 partdesc->oids[partidx],
360 true, false);
361 if (rri)
362 {
363 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
364
365 /* Verify this ResultRelInfo allows INSERTs */
366 CheckValidResultRel(rri, CMD_INSERT,
367 node ? node->onConflictAction : ONCONFLICT_NONE,
368 NIL);
369
370 /*
371 * Initialize information needed to insert this and
372 * subsequent tuples routed to this partition.
373 */
374 ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
375 rri, partidx, true);
376 }
377 else
378 {
379 /* We need to create a new one. */
380 rri = ExecInitPartitionInfo(mtstate, estate, proute,
381 dispatch,
382 rootResultRelInfo, partidx);
383 }
384 }
385 Assert(rri != NULL);
386
387 /* Signal to terminate the loop */
388 dispatch = NULL;
389 }
390 else
391 {
392 /*
393 * Partition is a sub-partitioned table; get the PartitionDispatch
394 */
395 if (likely(dispatch->indexes[partidx] >= 0))
396 {
397 /* Already built. */
398 Assert(dispatch->indexes[partidx] < proute->num_dispatch);
399
400 rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
401
402 /*
403 * Move down to the next partition level and search again
404 * until we find a leaf partition that matches this tuple
405 */
406 dispatch = pd[dispatch->indexes[partidx]];
407 }
408 else
409 {
410 /* Not yet built. Do that now. */
411 PartitionDispatch subdispatch;
412
413 /*
414 * Create the new PartitionDispatch. We pass the current one
415 * in as the parent PartitionDispatch
416 */
417 subdispatch = ExecInitPartitionDispatchInfo(estate,
418 proute,
419 partdesc->oids[partidx],
420 dispatch, partidx,
421 mtstate->rootResultRelInfo);
422 Assert(dispatch->indexes[partidx] >= 0 &&
423 dispatch->indexes[partidx] < proute->num_dispatch);
424
425 rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
426 dispatch = subdispatch;
427 }
428
429 /*
430 * Convert the tuple to the new parent's layout, if different from
431 * the previous parent.
432 */
433 if (dispatch->tupslot)
434 {
435 AttrMap *map = dispatch->tupmap;
436 TupleTableSlot *tempslot = myslot;
437
438 myslot = dispatch->tupslot;
439 slot = execute_attr_map_slot(map, slot, myslot);
440
441 if (tempslot != NULL)
442 ExecClearTuple(tempslot);
443 }
444 }
445
446 /*
447 * If this partition is the default one, we must check its partition
448 * constraint now, which may have changed concurrently due to
449 * partitions being added to the parent.
450 *
451 * (We do this here, and do not rely on ExecInsert doing it, because
452 * we don't want to miss doing it for non-leaf partitions.)
453 */
454 if (partidx == partdesc->boundinfo->default_index)
455 {
456 /*
457 * The tuple must match the partition's layout for the constraint
458 * expression to be evaluated successfully. If the partition is
459 * sub-partitioned, that would already be the case due to the code
460 * above, but for a leaf partition the tuple still matches the
461 * parent's layout.
462 *
463 * Note that we have a map to convert from root to current
464 * partition, but not from immediate parent to current partition.
465 * So if we have to convert, do it from the root slot; if not, use
466 * the root slot as-is.
467 */
468 if (is_leaf)
469 {
470 TupleConversionMap *map = ExecGetRootToChildMap(rri, estate);
471
472 if (map)
473 slot = execute_attr_map_slot(map->attrMap, rootslot,
474 rri->ri_PartitionTupleSlot);
475 else
476 slot = rootslot;
477 }
478
479 ExecPartitionCheck(rri, slot, estate, true);
480 }
481 }
482
483 /* Release the tuple in the lowest parent's dedicated slot. */
484 if (myslot != NULL)
485 ExecClearTuple(myslot);
486 /* and restore ecxt's scantuple */
487 ecxt->ecxt_scantuple = ecxt_scantuple_saved;
488 MemoryContextSwitchTo(oldcxt);
489
490 return rri;
491}
492
493/*
494 * ExecInitPartitionInfo
495 * Lock the partition and initialize ResultRelInfo. Also setup other
496 * information for the partition and store it in the next empty slot in
497 * the proute->partitions array.
498 *
499 * Returns the ResultRelInfo
500 */
501static ResultRelInfo *
502 ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
503 PartitionTupleRouting *proute,
504 PartitionDispatch dispatch,
505 ResultRelInfo *rootResultRelInfo,
506 int partidx)
507{
508 ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
509 Oid partOid = dispatch->partdesc->oids[partidx];
510 Relation partrel;
511 int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
512 Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
513 ResultRelInfo *leaf_part_rri;
514 MemoryContext oldcxt;
515 AttrMap *part_attmap = NULL;
516 bool found_whole_row;
517
518 oldcxt = MemoryContextSwitchTo(proute->memcxt);
519
520 partrel = table_open(partOid, RowExclusiveLock);
521
522 leaf_part_rri = makeNode(ResultRelInfo);
523 InitResultRelInfo(leaf_part_rri,
524 partrel,
525 0,
526 rootResultRelInfo,
527 estate->es_instrument);
528
529 /*
530 * Verify result relation is a valid target for an INSERT. An UPDATE of a
531 * partition-key becomes a DELETE+INSERT operation, so this check is still
532 * required when the operation is CMD_UPDATE.
533 */
534 CheckValidResultRel(leaf_part_rri, CMD_INSERT,
535 node ? node->onConflictAction : ONCONFLICT_NONE, NIL);
536
537 /*
538 * Open partition indices. The user may have asked to check for conflicts
539 * within this leaf partition and do "nothing" instead of throwing an
540 * error. Be prepared in that case by initializing the index information
541 * needed by ExecInsert() to perform speculative insertions.
542 */
543 if (partrel->rd_rel->relhasindex &&
544 leaf_part_rri->ri_IndexRelationDescs == NULL)
545 ExecOpenIndices(leaf_part_rri,
546 (node != NULL &&
547 node->onConflictAction != ONCONFLICT_NONE));
548
549 /*
550 * Build WITH CHECK OPTION constraints for the partition. Note that we
551 * didn't build the withCheckOptionList for partitions within the planner,
552 * but simple translation of varattnos will suffice. This only occurs for
553 * the INSERT case or in the case of UPDATE/MERGE tuple routing where we
554 * didn't find a result rel to reuse.
555 */
556 if (node && node->withCheckOptionLists != NIL)
557 {
558 List *wcoList;
559 List *wcoExprs = NIL;
560 ListCell *ll;
561
562 /*
563 * In the case of INSERT on a partitioned table, there is only one
564 * plan. Likewise, there is only one WCO list, not one per partition.
565 * For UPDATE/MERGE, there are as many WCO lists as there are plans.
566 */
567 Assert((node->operation == CMD_INSERT &&
568 list_length(node->withCheckOptionLists) == 1 &&
569 list_length(node->resultRelations) == 1) ||
570 (node->operation == CMD_UPDATE &&
571 list_length(node->withCheckOptionLists) ==
572 list_length(node->resultRelations)) ||
573 (node->operation == CMD_MERGE &&
574 list_length(node->withCheckOptionLists) ==
575 list_length(node->resultRelations)));
576
577 /*
578 * Use the WCO list of the first plan as a reference to calculate
579 * attno's for the WCO list of this partition. In the INSERT case,
580 * that refers to the root partitioned table, whereas in the UPDATE
581 * tuple routing case, that refers to the first partition in the
582 * mtstate->resultRelInfo array. In any case, both that relation and
583 * this partition should have the same columns, so we should be able
584 * to map attributes successfully.
585 */
586 wcoList = linitial(node->withCheckOptionLists);
587
588 /*
589 * Convert Vars in it to contain this partition's attribute numbers.
590 */
591 part_attmap =
592 build_attrmap_by_name(RelationGetDescr(partrel),
593 RelationGetDescr(firstResultRel),
594 false);
595 wcoList = (List *)
596 map_variable_attnos((Node *) wcoList,
597 firstVarno, 0,
598 part_attmap,
599 RelationGetForm(partrel)->reltype,
600 &found_whole_row);
601 /* We ignore the value of found_whole_row. */
602
603 foreach(ll, wcoList)
604 {
605 WithCheckOption *wco = lfirst_node(WithCheckOption, ll);
606 ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
607 &mtstate->ps);
608
609 wcoExprs = lappend(wcoExprs, wcoExpr);
610 }
611
612 leaf_part_rri->ri_WithCheckOptions = wcoList;
613 leaf_part_rri->ri_WithCheckOptionExprs = wcoExprs;
614 }
615
616 /*
617 * Build the RETURNING projection for the partition. Note that we didn't
618 * build the returningList for partitions within the planner, but simple
619 * translation of varattnos will suffice. This only occurs for the INSERT
620 * case or in the case of UPDATE/MERGE tuple routing where we didn't find
621 * a result rel to reuse.
622 */
623 if (node && node->returningLists != NIL)
624 {
625 TupleTableSlot *slot;
626 ExprContext *econtext;
627 List *returningList;
628
629 /* See the comment above for WCO lists. */
630 Assert((node->operation == CMD_INSERT &&
631 list_length(node->returningLists) == 1 &&
632 list_length(node->resultRelations) == 1) ||
633 (node->operation == CMD_UPDATE &&
634 list_length(node->returningLists) ==
635 list_length(node->resultRelations)) ||
636 (node->operation == CMD_MERGE &&
637 list_length(node->returningLists) ==
638 list_length(node->resultRelations)));
639
640 /*
641 * Use the RETURNING list of the first plan as a reference to
642 * calculate attno's for the RETURNING list of this partition. See
643 * the comment above for WCO lists for more details on why this is
644 * okay.
645 */
646 returningList = linitial(node->returningLists);
647
648 /*
649 * Convert Vars in it to contain this partition's attribute numbers.
650 */
651 if (part_attmap == NULL)
652 part_attmap =
653 build_attrmap_by_name(RelationGetDescr(partrel),
654 RelationGetDescr(firstResultRel),
655 false);
656 returningList = (List *)
657 map_variable_attnos((Node *) returningList,
658 firstVarno, 0,
659 part_attmap,
660 RelationGetForm(partrel)->reltype,
661 &found_whole_row);
662 /* We ignore the value of found_whole_row. */
663
664 leaf_part_rri->ri_returningList = returningList;
665
666 /*
667 * Initialize the projection itself.
668 *
669 * Use the slot and the expression context that would have been set up
670 * in ExecInitModifyTable() for projection's output.
671 */
672 Assert(mtstate->ps.ps_ResultTupleSlot != NULL);
673 slot = mtstate->ps.ps_ResultTupleSlot;
674 Assert(mtstate->ps.ps_ExprContext != NULL);
675 econtext = mtstate->ps.ps_ExprContext;
676 leaf_part_rri->ri_projectReturning =
677 ExecBuildProjectionInfo(returningList, econtext, slot,
678 &mtstate->ps, RelationGetDescr(partrel));
679 }
680
681 /* Set up information needed for routing tuples to the partition. */
682 ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
683 leaf_part_rri, partidx, false);
684
685 /*
686 * If there is an ON CONFLICT clause, initialize state for it.
687 */
688 if (node && node->onConflictAction != ONCONFLICT_NONE)
689 {
690 TupleDesc partrelDesc = RelationGetDescr(partrel);
691 ExprContext *econtext = mtstate->ps.ps_ExprContext;
692 ListCell *lc;
693 List *arbiterIndexes = NIL;
694
695 /*
696 * If there is a list of arbiter indexes, map it to a list of indexes
697 * in the partition. We do that by scanning the partition's index
698 * list and searching for ancestry relationships to each index in the
699 * ancestor table.
700 */
701 if (rootResultRelInfo->ri_onConflictArbiterIndexes != NIL)
702 {
703 List *childIdxs;
704
705 childIdxs = RelationGetIndexList(leaf_part_rri->ri_RelationDesc);
706
707 foreach(lc, childIdxs)
708 {
709 Oid childIdx = lfirst_oid(lc);
710 List *ancestors;
711 ListCell *lc2;
712
713 ancestors = get_partition_ancestors(childIdx);
714 foreach(lc2, rootResultRelInfo->ri_onConflictArbiterIndexes)
715 {
716 if (list_member_oid(ancestors, lfirst_oid(lc2)))
717 arbiterIndexes = lappend_oid(arbiterIndexes, childIdx);
718 }
719 list_free(ancestors);
720 }
721 }
722
723 /*
724 * If the resulting lists are of inequal length, something is wrong.
725 * (This shouldn't happen, since arbiter index selection should not
726 * pick up an invalid index.)
727 */
728 if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) !=
729 list_length(arbiterIndexes))
730 elog(ERROR, "invalid arbiter index list");
731 leaf_part_rri->ri_onConflictArbiterIndexes = arbiterIndexes;
732
733 /*
734 * In the DO UPDATE case, we have some more state to initialize.
735 */
736 if (node->onConflictAction == ONCONFLICT_UPDATE)
737 {
738 OnConflictSetState *onconfl = makeNode(OnConflictSetState);
739 TupleConversionMap *map;
740
741 map = ExecGetRootToChildMap(leaf_part_rri, estate);
742
743 Assert(node->onConflictSet != NIL);
744 Assert(rootResultRelInfo->ri_onConflict != NULL);
745
746 leaf_part_rri->ri_onConflict = onconfl;
747
748 /*
749 * Need a separate existing slot for each partition, as the
750 * partition could be of a different AM, even if the tuple
751 * descriptors match.
752 */
753 onconfl->oc_Existing =
754 table_slot_create(leaf_part_rri->ri_RelationDesc,
755 &mtstate->ps.state->es_tupleTable);
756
757 /*
758 * If the partition's tuple descriptor matches exactly the root
759 * parent (the common case), we can re-use most of the parent's ON
760 * CONFLICT SET state, skipping a bunch of work. Otherwise, we
761 * need to create state specific to this partition.
762 */
763 if (map == NULL)
764 {
765 /*
766 * It's safe to reuse these from the partition root, as we
767 * only process one tuple at a time (therefore we won't
768 * overwrite needed data in slots), and the results of
769 * projections are independent of the underlying storage.
770 * Projections and where clauses themselves don't store state
771 * / are independent of the underlying storage.
772 */
773 onconfl->oc_ProjSlot =
774 rootResultRelInfo->ri_onConflict->oc_ProjSlot;
775 onconfl->oc_ProjInfo =
776 rootResultRelInfo->ri_onConflict->oc_ProjInfo;
777 onconfl->oc_WhereClause =
778 rootResultRelInfo->ri_onConflict->oc_WhereClause;
779 }
780 else
781 {
782 List *onconflset;
783 List *onconflcols;
784
785 /*
786 * Translate expressions in onConflictSet to account for
787 * different attribute numbers. For that, map partition
788 * varattnos twice: first to catch the EXCLUDED
789 * pseudo-relation (INNER_VAR), and second to handle the main
790 * target relation (firstVarno).
791 */
792 onconflset = copyObject(node->onConflictSet);
793 if (part_attmap == NULL)
794 part_attmap =
795 build_attrmap_by_name(RelationGetDescr(partrel),
796 RelationGetDescr(firstResultRel),
797 false);
798 onconflset = (List *)
799 map_variable_attnos((Node *) onconflset,
800 INNER_VAR, 0,
801 part_attmap,
802 RelationGetForm(partrel)->reltype,
803 &found_whole_row);
804 /* We ignore the value of found_whole_row. */
805 onconflset = (List *)
806 map_variable_attnos((Node *) onconflset,
807 firstVarno, 0,
808 part_attmap,
809 RelationGetForm(partrel)->reltype,
810 &found_whole_row);
811 /* We ignore the value of found_whole_row. */
812
813 /* Finally, adjust the target colnos to match the partition. */
814 onconflcols = adjust_partition_colnos(node->onConflictCols,
815 leaf_part_rri);
816
817 /* create the tuple slot for the UPDATE SET projection */
818 onconfl->oc_ProjSlot =
819 table_slot_create(partrel,
820 &mtstate->ps.state->es_tupleTable);
821
822 /* build UPDATE SET projection state */
823 onconfl->oc_ProjInfo =
824 ExecBuildUpdateProjection(onconflset,
825 true,
826 onconflcols,
827 partrelDesc,
828 econtext,
829 onconfl->oc_ProjSlot,
830 &mtstate->ps);
831
832 /*
833 * If there is a WHERE clause, initialize state where it will
834 * be evaluated, mapping the attribute numbers appropriately.
835 * As with onConflictSet, we need to map partition varattnos
836 * to the partition's tupdesc.
837 */
838 if (node->onConflictWhere)
839 {
840 List *clause;
841
842 clause = copyObject((List *) node->onConflictWhere);
843 clause = (List *)
844 map_variable_attnos((Node *) clause,
845 INNER_VAR, 0,
846 part_attmap,
847 RelationGetForm(partrel)->reltype,
848 &found_whole_row);
849 /* We ignore the value of found_whole_row. */
850 clause = (List *)
851 map_variable_attnos((Node *) clause,
852 firstVarno, 0,
853 part_attmap,
854 RelationGetForm(partrel)->reltype,
855 &found_whole_row);
856 /* We ignore the value of found_whole_row. */
857 onconfl->oc_WhereClause =
858 ExecInitQual((List *) clause, &mtstate->ps);
859 }
860 }
861 }
862 }
863
864 /*
865 * Since we've just initialized this ResultRelInfo, it's not in any list
866 * attached to the estate as yet. Add it, so that it can be found later.
867 *
868 * Note that the entries in this list appear in no predetermined order,
869 * because partition result rels are initialized as and when they're
870 * needed.
871 */
872 MemoryContextSwitchTo(estate->es_query_cxt);
873 estate->es_tuple_routing_result_relations =
874 lappend(estate->es_tuple_routing_result_relations,
875 leaf_part_rri);
876
877 /*
878 * Initialize information about this partition that's needed to handle
879 * MERGE. We take the "first" result relation's mergeActionList as
880 * reference and make copy for this relation, converting stuff that
881 * references attribute numbers to match this relation's.
882 *
883 * This duplicates much of the logic in ExecInitMerge(), so if something
884 * changes there, look here too.
885 */
886 if (node && node->operation == CMD_MERGE)
887 {
888 List *firstMergeActionList = linitial(node->mergeActionLists);
889 ListCell *lc;
890 ExprContext *econtext = mtstate->ps.ps_ExprContext;
891 Node *joinCondition;
892
893 if (part_attmap == NULL)
894 part_attmap =
895 build_attrmap_by_name(RelationGetDescr(partrel),
896 RelationGetDescr(firstResultRel),
897 false);
898
899 if (unlikely(!leaf_part_rri->ri_projectNewInfoValid))
900 ExecInitMergeTupleSlots(mtstate, leaf_part_rri);
901
902 /* Initialize state for join condition checking. */
903 joinCondition =
904 map_variable_attnos(linitial(node->mergeJoinConditions),
905 firstVarno, 0,
906 part_attmap,
907 RelationGetForm(partrel)->reltype,
908 &found_whole_row);
909 /* We ignore the value of found_whole_row. */
910 leaf_part_rri->ri_MergeJoinCondition =
911 ExecInitQual((List *) joinCondition, &mtstate->ps);
912
913 foreach(lc, firstMergeActionList)
914 {
915 /* Make a copy for this relation to be safe. */
916 MergeAction *action = copyObject(lfirst(lc));
917 MergeActionState *action_state;
918
919 /* Generate the action's state for this relation */
920 action_state = makeNode(MergeActionState);
921 action_state->mas_action = action;
922
923 /* And put the action in the appropriate list */
924 leaf_part_rri->ri_MergeActions[action->matchKind] =
925 lappend(leaf_part_rri->ri_MergeActions[action->matchKind],
926 action_state);
927
928 switch (action->commandType)
929 {
930 case CMD_INSERT:
931
932 /*
933 * ExecCheckPlanOutput() already done on the targetlist
934 * when "first" result relation initialized and it is same
935 * for all result relations.
936 */
937 action_state->mas_proj =
938 ExecBuildProjectionInfo(action->targetList, econtext,
939 leaf_part_rri->ri_newTupleSlot,
940 &mtstate->ps,
941 RelationGetDescr(partrel));
942 break;
943 case CMD_UPDATE:
944
945 /*
946 * Convert updateColnos from "first" result relation
947 * attribute numbers to this result rel's.
948 */
949 if (part_attmap)
950 action->updateColnos =
951 adjust_partition_colnos_using_map(action->updateColnos,
952 part_attmap);
953 action_state->mas_proj =
954 ExecBuildUpdateProjection(action->targetList,
955 true,
956 action->updateColnos,
957 RelationGetDescr(leaf_part_rri->ri_RelationDesc),
958 econtext,
959 leaf_part_rri->ri_newTupleSlot,
960 NULL);
961 break;
962 case CMD_DELETE:
963 case CMD_NOTHING:
964 /* Nothing to do */
965 break;
966
967 default:
968 elog(ERROR, "unknown action in MERGE WHEN clause");
969 }
970
971 /* found_whole_row intentionally ignored. */
972 action->qual =
973 map_variable_attnos(action->qual,
974 firstVarno, 0,
975 part_attmap,
976 RelationGetForm(partrel)->reltype,
977 &found_whole_row);
978 action_state->mas_whenqual =
979 ExecInitQual((List *) action->qual, &mtstate->ps);
980 }
981 }
982 MemoryContextSwitchTo(oldcxt);
983
984 return leaf_part_rri;
985}
986
987/*
988 * ExecInitRoutingInfo
989 * Set up information needed for translating tuples between root
990 * partitioned table format and partition format, and keep track of it
991 * in PartitionTupleRouting.
992 */
993static void
994 ExecInitRoutingInfo(ModifyTableState *mtstate,
995 EState *estate,
996 PartitionTupleRouting *proute,
997 PartitionDispatch dispatch,
998 ResultRelInfo *partRelInfo,
999 int partidx,
1000 bool is_borrowed_rel)
1001{
1002 MemoryContext oldcxt;
1003 int rri_index;
1004
1005 oldcxt = MemoryContextSwitchTo(proute->memcxt);
1006
1007 /*
1008 * Set up tuple conversion between root parent and the partition if the
1009 * two have different rowtypes. If conversion is indeed required, also
1010 * initialize a slot dedicated to storing this partition's converted
1011 * tuples. Various operations that are applied to tuples after routing,
1012 * such as checking constraints, will refer to this slot.
1013 */
1014 if (ExecGetRootToChildMap(partRelInfo, estate) != NULL)
1015 {
1016 Relation partrel = partRelInfo->ri_RelationDesc;
1017
1018 /*
1019 * This pins the partition's TupleDesc, which will be released at the
1020 * end of the command.
1021 */
1022 partRelInfo->ri_PartitionTupleSlot =
1023 table_slot_create(partrel, &estate->es_tupleTable);
1024 }
1025 else
1026 partRelInfo->ri_PartitionTupleSlot = NULL;
1027
1028 /*
1029 * If the partition is a foreign table, let the FDW init itself for
1030 * routing tuples to the partition.
1031 */
1032 if (partRelInfo->ri_FdwRoutine != NULL &&
1033 partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL)
1034 partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
1035
1036 /*
1037 * Determine if the FDW supports batch insert and determine the batch size
1038 * (a FDW may support batching, but it may be disabled for the
1039 * server/table or for this particular query).
1040 *
1041 * If the FDW does not support batching, we set the batch size to 1.
1042 */
1043 if (partRelInfo->ri_FdwRoutine != NULL &&
1044 partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
1045 partRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
1046 partRelInfo->ri_BatchSize =
1047 partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(partRelInfo);
1048 else
1049 partRelInfo->ri_BatchSize = 1;
1050
1051 Assert(partRelInfo->ri_BatchSize >= 1);
1052
1053 partRelInfo->ri_CopyMultiInsertBuffer = NULL;
1054
1055 /*
1056 * Keep track of it in the PartitionTupleRouting->partitions array.
1057 */
1058 Assert(dispatch->indexes[partidx] == -1);
1059
1060 rri_index = proute->num_partitions++;
1061
1062 /* Allocate or enlarge the array, as needed */
1063 if (proute->num_partitions >= proute->max_partitions)
1064 {
1065 if (proute->max_partitions == 0)
1066 {
1067 proute->max_partitions = 8;
1068 proute->partitions = (ResultRelInfo **)
1069 palloc(sizeof(ResultRelInfo *) * proute->max_partitions);
1070 proute->is_borrowed_rel = (bool *)
1071 palloc(sizeof(bool) * proute->max_partitions);
1072 }
1073 else
1074 {
1075 proute->max_partitions *= 2;
1076 proute->partitions = (ResultRelInfo **)
1077 repalloc(proute->partitions, sizeof(ResultRelInfo *) *
1078 proute->max_partitions);
1079 proute->is_borrowed_rel = (bool *)
1080 repalloc(proute->is_borrowed_rel, sizeof(bool) *
1081 proute->max_partitions);
1082 }
1083 }
1084
1085 proute->partitions[rri_index] = partRelInfo;
1086 proute->is_borrowed_rel[rri_index] = is_borrowed_rel;
1087 dispatch->indexes[partidx] = rri_index;
1088
1089 MemoryContextSwitchTo(oldcxt);
1090}
1091
1092/*
1093 * ExecInitPartitionDispatchInfo
1094 * Lock the partitioned table (if not locked already) and initialize
1095 * PartitionDispatch for a partitioned table and store it in the next
1096 * available slot in the proute->partition_dispatch_info array. Also,
1097 * record the index into this array in the parent_pd->indexes[] array in
1098 * the partidx element so that we can properly retrieve the newly created
1099 * PartitionDispatch later.
1100 */
1101static PartitionDispatch
1102 ExecInitPartitionDispatchInfo(EState *estate,
1103 PartitionTupleRouting *proute, Oid partoid,
1104 PartitionDispatch parent_pd, int partidx,
1105 ResultRelInfo *rootResultRelInfo)
1106{
1107 Relation rel;
1108 PartitionDesc partdesc;
1109 PartitionDispatch pd;
1110 int dispatchidx;
1111 MemoryContext oldcxt;
1112
1113 /*
1114 * For data modification, it is better that executor does not include
1115 * partitions being detached, except when running in snapshot-isolation
1116 * mode. This means that a read-committed transaction immediately gets a
1117 * "no partition for tuple" error when a tuple is inserted into a
1118 * partition that's being detached concurrently, but a transaction in
1119 * repeatable-read mode can still use such a partition.
1120 */
1121 if (estate->es_partition_directory == NULL)
1122 estate->es_partition_directory =
1123 CreatePartitionDirectory(estate->es_query_cxt,
1124 !IsolationUsesXactSnapshot());
1125
1126 oldcxt = MemoryContextSwitchTo(proute->memcxt);
1127
1128 /*
1129 * Only sub-partitioned tables need to be locked here. The root
1130 * partitioned table will already have been locked as it's referenced in
1131 * the query's rtable.
1132 */
1133 if (partoid != RelationGetRelid(proute->partition_root))
1134 rel = table_open(partoid, RowExclusiveLock);
1135 else
1136 rel = proute->partition_root;
1137 partdesc = PartitionDirectoryLookup(estate->es_partition_directory, rel);
1138
1139 pd = (PartitionDispatch) palloc(offsetof(PartitionDispatchData, indexes) +
1140 partdesc->nparts * sizeof(int));
1141 pd->reldesc = rel;
1142 pd->key = RelationGetPartitionKey(rel);
1143 pd->keystate = NIL;
1144 pd->partdesc = partdesc;
1145 if (parent_pd != NULL)
1146 {
1147 TupleDesc tupdesc = RelationGetDescr(rel);
1148
1149 /*
1150 * For sub-partitioned tables where the column order differs from its
1151 * direct parent partitioned table, we must store a tuple table slot
1152 * initialized with its tuple descriptor and a tuple conversion map to
1153 * convert a tuple from its parent's rowtype to its own. This is to
1154 * make sure that we are looking at the correct row using the correct
1155 * tuple descriptor when computing its partition key for tuple
1156 * routing.
1157 */
1158 pd->tupmap = build_attrmap_by_name_if_req(RelationGetDescr(parent_pd->reldesc),
1159 tupdesc,
1160 false);
1161 pd->tupslot = pd->tupmap ?
1162 MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual) : NULL;
1163 }
1164 else
1165 {
1166 /* Not required for the root partitioned table */
1167 pd->tupmap = NULL;
1168 pd->tupslot = NULL;
1169 }
1170
1171 /*
1172 * Initialize with -1 to signify that the corresponding partition's
1173 * ResultRelInfo or PartitionDispatch has not been created yet.
1174 */
1175 memset(pd->indexes, -1, sizeof(int) * partdesc->nparts);
1176
1177 /* Track in PartitionTupleRouting for later use */
1178 dispatchidx = proute->num_dispatch++;
1179
1180 /* Allocate or enlarge the array, as needed */
1181 if (proute->num_dispatch >= proute->max_dispatch)
1182 {
1183 if (proute->max_dispatch == 0)
1184 {
1185 proute->max_dispatch = 4;
1186 proute->partition_dispatch_info = (PartitionDispatch *)
1187 palloc(sizeof(PartitionDispatch) * proute->max_dispatch);
1188 proute->nonleaf_partitions = (ResultRelInfo **)
1189 palloc(sizeof(ResultRelInfo *) * proute->max_dispatch);
1190 }
1191 else
1192 {
1193 proute->max_dispatch *= 2;
1194 proute->partition_dispatch_info = (PartitionDispatch *)
1195 repalloc(proute->partition_dispatch_info,
1196 sizeof(PartitionDispatch) * proute->max_dispatch);
1197 proute->nonleaf_partitions = (ResultRelInfo **)
1198 repalloc(proute->nonleaf_partitions,
1199 sizeof(ResultRelInfo *) * proute->max_dispatch);
1200 }
1201 }
1202 proute->partition_dispatch_info[dispatchidx] = pd;
1203
1204 /*
1205 * If setting up a PartitionDispatch for a sub-partitioned table, we may
1206 * also need a minimally valid ResultRelInfo for checking the partition
1207 * constraint later; set that up now.
1208 */
1209 if (parent_pd)
1210 {
1211 ResultRelInfo *rri = makeNode(ResultRelInfo);
1212
1213 InitResultRelInfo(rri, rel, 0, rootResultRelInfo, 0);
1214 proute->nonleaf_partitions[dispatchidx] = rri;
1215 }
1216 else
1217 proute->nonleaf_partitions[dispatchidx] = NULL;
1218
1219 /*
1220 * Finally, if setting up a PartitionDispatch for a sub-partitioned table,
1221 * install a downlink in the parent to allow quick descent.
1222 */
1223 if (parent_pd)
1224 {
1225 Assert(parent_pd->indexes[partidx] == -1);
1226 parent_pd->indexes[partidx] = dispatchidx;
1227 }
1228
1229 MemoryContextSwitchTo(oldcxt);
1230
1231 return pd;
1232}
1233
1234/*
1235 * ExecCleanupTupleRouting -- Clean up objects allocated for partition tuple
1236 * routing.
1237 *
1238 * Close all the partitioned tables, leaf partitions, and their indices.
1239 */
1240void
1241 ExecCleanupTupleRouting(ModifyTableState *mtstate,
1242 PartitionTupleRouting *proute)
1243{
1244 int i;
1245
1246 /*
1247 * Remember, proute->partition_dispatch_info[0] corresponds to the root
1248 * partitioned table, which we must not try to close, because it is the
1249 * main target table of the query that will be closed by callers such as
1250 * ExecEndPlan() or DoCopy(). Also, tupslot is NULL for the root
1251 * partitioned table.
1252 */
1253 for (i = 1; i < proute->num_dispatch; i++)
1254 {
1255 PartitionDispatch pd = proute->partition_dispatch_info[i];
1256
1257 table_close(pd->reldesc, NoLock);
1258
1259 if (pd->tupslot)
1260 ExecDropSingleTupleTableSlot(pd->tupslot);
1261 }
1262
1263 for (i = 0; i < proute->num_partitions; i++)
1264 {
1265 ResultRelInfo *resultRelInfo = proute->partitions[i];
1266
1267 /* Allow any FDWs to shut down */
1268 if (resultRelInfo->ri_FdwRoutine != NULL &&
1269 resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL)
1270 resultRelInfo->ri_FdwRoutine->EndForeignInsert(mtstate->ps.state,
1271 resultRelInfo);
1272
1273 /*
1274 * Close it if it's not one of the result relations borrowed from the
1275 * owning ModifyTableState; those will be closed by ExecEndPlan().
1276 */
1277 if (proute->is_borrowed_rel[i])
1278 continue;
1279
1280 ExecCloseIndices(resultRelInfo);
1281 table_close(resultRelInfo->ri_RelationDesc, NoLock);
1282 }
1283}
1284
1285/* ----------------
1286 * FormPartitionKeyDatum
1287 * Construct values[] and isnull[] arrays for the partition key
1288 * of a tuple.
1289 *
1290 * pd Partition dispatch object of the partitioned table
1291 * slot Heap tuple from which to extract partition key
1292 * estate executor state for evaluating any partition key
1293 * expressions (must be non-NULL)
1294 * values Array of partition key Datums (output area)
1295 * isnull Array of is-null indicators (output area)
1296 *
1297 * the ecxt_scantuple slot of estate's per-tuple expr context must point to
1298 * the heap tuple passed in.
1299 * ----------------
1300 */
1301static void
1302 FormPartitionKeyDatum(PartitionDispatch pd,
1303 TupleTableSlot *slot,
1304 EState *estate,
1305 Datum *values,
1306 bool *isnull)
1307{
1308 ListCell *partexpr_item;
1309 int i;
1310
1311 if (pd->key->partexprs != NIL && pd->keystate == NIL)
1312 {
1313 /* Check caller has set up context correctly */
1314 Assert(estate != NULL &&
1315 GetPerTupleExprContext(estate)->ecxt_scantuple == slot);
1316
1317 /* First time through, set up expression evaluation state */
1318 pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate);
1319 }
1320
1321 partexpr_item = list_head(pd->keystate);
1322 for (i = 0; i < pd->key->partnatts; i++)
1323 {
1324 AttrNumber keycol = pd->key->partattrs[i];
1325 Datum datum;
1326 bool isNull;
1327
1328 if (keycol != 0)
1329 {
1330 /* Plain column; get the value directly from the heap tuple */
1331 datum = slot_getattr(slot, keycol, &isNull);
1332 }
1333 else
1334 {
1335 /* Expression; need to evaluate it */
1336 if (partexpr_item == NULL)
1337 elog(ERROR, "wrong number of partition key expressions");
1338 datum = ExecEvalExprSwitchContext((ExprState *) lfirst(partexpr_item),
1339 GetPerTupleExprContext(estate),
1340 &isNull);
1341 partexpr_item = lnext(pd->keystate, partexpr_item);
1342 }
1343 values[i] = datum;
1344 isnull[i] = isNull;
1345 }
1346
1347 if (partexpr_item != NULL)
1348 elog(ERROR, "wrong number of partition key expressions");
1349}
1350
1351/*
1352 * The number of times the same partition must be found in a row before we
1353 * switch from a binary search for the given values to just checking if the
1354 * values belong to the last found partition. This must be above 0.
1355 */
1356 #define PARTITION_CACHED_FIND_THRESHOLD 16
1357
1358/*
1359 * get_partition_for_tuple
1360 * Finds partition of relation which accepts the partition key specified
1361 * in values and isnull.
1362 *
1363 * Calling this function can be quite expensive when LIST and RANGE
1364 * partitioned tables have many partitions. This is due to the binary search
1365 * that's done to find the correct partition. Many of the use cases for LIST
1366 * and RANGE partitioned tables make it likely that the same partition is
1367 * found in subsequent ExecFindPartition() calls. This is especially true for
1368 * cases such as RANGE partitioned tables on a TIMESTAMP column where the
1369 * partition key is the current time. When asked to find a partition for a
1370 * RANGE or LIST partitioned table, we record the partition index and datum
1371 * offset we've found for the given 'values' in the PartitionDesc (which is
1372 * stored in relcache), and if we keep finding the same partition
1373 * PARTITION_CACHED_FIND_THRESHOLD times in a row, then we'll enable caching
1374 * logic and instead of performing a binary search to find the correct
1375 * partition, we'll just double-check that 'values' still belong to the last
1376 * found partition, and if so, we'll return that partition index, thus
1377 * skipping the need for the binary search. If we fail to match the last
1378 * partition when double checking, then we fall back on doing a binary search.
1379 * In this case, unless we find 'values' belong to the DEFAULT partition,
1380 * we'll reset the number of times we've hit the same partition so that we
1381 * don't attempt to use the cache again until we've found that partition at
1382 * least PARTITION_CACHED_FIND_THRESHOLD times in a row.
1383 *
1384 * For cases where the partition changes on each lookup, the amount of
1385 * additional work required just amounts to recording the last found partition
1386 * and bound offset then resetting the found counter. This is cheap and does
1387 * not appear to cause any meaningful slowdowns for such cases.
1388 *
1389 * No caching of partitions is done when the last found partition is the
1390 * DEFAULT or NULL partition. For the case of the DEFAULT partition, there
1391 * is no bound offset storing the matching datum, so we cannot confirm the
1392 * indexes match. For the NULL partition, this is just so cheap, there's no
1393 * sense in caching.
1394 *
1395 * Return value is index of the partition (>= 0 and < partdesc->nparts) if one
1396 * found or -1 if none found.
1397 */
1398static int
1399 get_partition_for_tuple(PartitionDispatch pd, Datum *values, bool *isnull)
1400{
1401 int bound_offset = -1;
1402 int part_index = -1;
1403 PartitionKey key = pd->key;
1404 PartitionDesc partdesc = pd->partdesc;
1405 PartitionBoundInfo boundinfo = partdesc->boundinfo;
1406
1407 /*
1408 * In the switch statement below, when we perform a cached lookup for
1409 * RANGE and LIST partitioned tables, if we find that the last found
1410 * partition matches the 'values', we return the partition index right
1411 * away. We do this instead of breaking out of the switch as we don't
1412 * want to execute the code about the DEFAULT partition or do any updates
1413 * for any of the cache-related fields. That would be a waste of effort
1414 * as we already know it's not the DEFAULT partition and have no need to
1415 * increment the number of times we found the same partition any higher
1416 * than PARTITION_CACHED_FIND_THRESHOLD.
1417 */
1418
1419 /* Route as appropriate based on partitioning strategy. */
1420 switch (key->strategy)
1421 {
1422 case PARTITION_STRATEGY_HASH:
1423 {
1424 uint64 rowHash;
1425
1426 /* hash partitioning is too cheap to bother caching */
1427 rowHash = compute_partition_hash_value(key->partnatts,
1428 key->partsupfunc,
1429 key->partcollation,
1430 values, isnull);
1431
1432 /*
1433 * HASH partitions can't have a DEFAULT partition and we don't
1434 * do any caching work for them, so just return the part index
1435 */
1436 return boundinfo->indexes[rowHash % boundinfo->nindexes];
1437 }
1438
1439 case PARTITION_STRATEGY_LIST:
1440 if (isnull[0])
1441 {
1442 /* this is far too cheap to bother doing any caching */
1443 if (partition_bound_accepts_nulls(boundinfo))
1444 {
1445 /*
1446 * When there is a NULL partition we just return that
1447 * directly. We don't have a bound_offset so it's not
1448 * valid to drop into the code after the switch which
1449 * checks and updates the cache fields. We perhaps should
1450 * be invalidating the details of the last cached
1451 * partition but there's no real need to. Keeping those
1452 * fields set gives a chance at matching to the cached
1453 * partition on the next lookup.
1454 */
1455 return boundinfo->null_index;
1456 }
1457 }
1458 else
1459 {
1460 bool equal;
1461
1462 if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
1463 {
1464 int last_datum_offset = partdesc->last_found_datum_index;
1465 Datum lastDatum = boundinfo->datums[last_datum_offset][0];
1466 int32 cmpval;
1467
1468 /* does the last found datum index match this datum? */
1469 cmpval = DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[0],
1470 key->partcollation[0],
1471 lastDatum,
1472 values[0]));
1473
1474 if (cmpval == 0)
1475 return boundinfo->indexes[last_datum_offset];
1476
1477 /* fall-through and do a manual lookup */
1478 }
1479
1480 bound_offset = partition_list_bsearch(key->partsupfunc,
1481 key->partcollation,
1482 boundinfo,
1483 values[0], &equal);
1484 if (bound_offset >= 0 && equal)
1485 part_index = boundinfo->indexes[bound_offset];
1486 }
1487 break;
1488
1489 case PARTITION_STRATEGY_RANGE:
1490 {
1491 bool equal = false,
1492 range_partkey_has_null = false;
1493 int i;
1494
1495 /*
1496 * No range includes NULL, so this will be accepted by the
1497 * default partition if there is one, and otherwise rejected.
1498 */
1499 for (i = 0; i < key->partnatts; i++)
1500 {
1501 if (isnull[i])
1502 {
1503 range_partkey_has_null = true;
1504 break;
1505 }
1506 }
1507
1508 /* NULLs belong in the DEFAULT partition */
1509 if (range_partkey_has_null)
1510 break;
1511
1512 if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
1513 {
1514 int last_datum_offset = partdesc->last_found_datum_index;
1515 Datum *lastDatums = boundinfo->datums[last_datum_offset];
1516 PartitionRangeDatumKind *kind = boundinfo->kind[last_datum_offset];
1517 int32 cmpval;
1518
1519 /* check if the value is >= to the lower bound */
1520 cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1521 key->partcollation,
1522 lastDatums,
1523 kind,
1524 values,
1525 key->partnatts);
1526
1527 /*
1528 * If it's equal to the lower bound then no need to check
1529 * the upper bound.
1530 */
1531 if (cmpval == 0)
1532 return boundinfo->indexes[last_datum_offset + 1];
1533
1534 if (cmpval < 0 && last_datum_offset + 1 < boundinfo->ndatums)
1535 {
1536 /* check if the value is below the upper bound */
1537 lastDatums = boundinfo->datums[last_datum_offset + 1];
1538 kind = boundinfo->kind[last_datum_offset + 1];
1539 cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1540 key->partcollation,
1541 lastDatums,
1542 kind,
1543 values,
1544 key->partnatts);
1545
1546 if (cmpval > 0)
1547 return boundinfo->indexes[last_datum_offset + 1];
1548 }
1549 /* fall-through and do a manual lookup */
1550 }
1551
1552 bound_offset = partition_range_datum_bsearch(key->partsupfunc,
1553 key->partcollation,
1554 boundinfo,
1555 key->partnatts,
1556 values,
1557 &equal);
1558
1559 /*
1560 * The bound at bound_offset is less than or equal to the
1561 * tuple value, so the bound at offset+1 is the upper bound of
1562 * the partition we're looking for, if there actually exists
1563 * one.
1564 */
1565 part_index = boundinfo->indexes[bound_offset + 1];
1566 }
1567 break;
1568
1569 default:
1570 elog(ERROR, "unexpected partition strategy: %d",
1571 (int) key->strategy);
1572 }
1573
1574 /*
1575 * part_index < 0 means we failed to find a partition of this parent. Use
1576 * the default partition, if there is one.
1577 */
1578 if (part_index < 0)
1579 {
1580 /*
1581 * No need to reset the cache fields here. The next set of values
1582 * might end up belonging to the cached partition, so leaving the
1583 * cache alone improves the chances of a cache hit on the next lookup.
1584 */
1585 return boundinfo->default_index;
1586 }
1587
1588 /* we should only make it here when the code above set bound_offset */
1589 Assert(bound_offset >= 0);
1590
1591 /*
1592 * Attend to the cache fields. If the bound_offset matches the last
1593 * cached bound offset then we've found the same partition as last time,
1594 * so bump the count by one. If all goes well, we'll eventually reach
1595 * PARTITION_CACHED_FIND_THRESHOLD and try the cache path next time
1596 * around. Otherwise, we'll reset the cache count back to 1 to mark that
1597 * we've found this partition for the first time.
1598 */
1599 if (bound_offset == partdesc->last_found_datum_index)
1600 partdesc->last_found_count++;
1601 else
1602 {
1603 partdesc->last_found_count = 1;
1604 partdesc->last_found_part_index = part_index;
1605 partdesc->last_found_datum_index = bound_offset;
1606 }
1607
1608 return part_index;
1609}
1610
1611/*
1612 * ExecBuildSlotPartitionKeyDescription
1613 *
1614 * This works very much like BuildIndexValueDescription() and is currently
1615 * used for building error messages when ExecFindPartition() fails to find
1616 * partition for a row.
1617 */
1618static char *
1619 ExecBuildSlotPartitionKeyDescription(Relation rel,
1620 Datum *values,
1621 bool *isnull,
1622 int maxfieldlen)
1623{
1624 StringInfoData buf;
1625 PartitionKey key = RelationGetPartitionKey(rel);
1626 int partnatts = get_partition_natts(key);
1627 int i;
1628 Oid relid = RelationGetRelid(rel);
1629 AclResult aclresult;
1630
1631 if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
1632 return NULL;
1633
1634 /* If the user has table-level access, just go build the description. */
1635 aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
1636 if (aclresult != ACLCHECK_OK)
1637 {
1638 /*
1639 * Step through the columns of the partition key and make sure the
1640 * user has SELECT rights on all of them.
1641 */
1642 for (i = 0; i < partnatts; i++)
1643 {
1644 AttrNumber attnum = get_partition_col_attnum(key, i);
1645
1646 /*
1647 * If this partition key column is an expression, we return no
1648 * detail rather than try to figure out what column(s) the
1649 * expression includes and if the user has SELECT rights on them.
1650 */
1651 if (attnum == InvalidAttrNumber ||
1652 pg_attribute_aclcheck(relid, attnum, GetUserId(),
1653 ACL_SELECT) != ACLCHECK_OK)
1654 return NULL;
1655 }
1656 }
1657
1658 initStringInfo(&buf);
1659 appendStringInfo(&buf, "(%s) = (",
1660 pg_get_partkeydef_columns(relid, true));
1661
1662 for (i = 0; i < partnatts; i++)
1663 {
1664 char *val;
1665 int vallen;
1666
1667 if (isnull[i])
1668 val = "null";
1669 else
1670 {
1671 Oid foutoid;
1672 bool typisvarlena;
1673
1674 getTypeOutputInfo(get_partition_col_typid(key, i),
1675 &foutoid, &typisvarlena);
1676 val = OidOutputFunctionCall(foutoid, values[i]);
1677 }
1678
1679 if (i > 0)
1680 appendStringInfoString(&buf, ", ");
1681
1682 /* truncate if needed */
1683 vallen = strlen(val);
1684 if (vallen <= maxfieldlen)
1685 appendBinaryStringInfo(&buf, val, vallen);
1686 else
1687 {
1688 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1689 appendBinaryStringInfo(&buf, val, vallen);
1690 appendStringInfoString(&buf, "...");
1691 }
1692 }
1693
1694 appendStringInfoChar(&buf, ')');
1695
1696 return buf.data;
1697}
1698
1699/*
1700 * adjust_partition_colnos
1701 * Adjust the list of UPDATE target column numbers to account for
1702 * attribute differences between the parent and the partition.
1703 *
1704 * Note: mustn't be called if no adjustment is required.
1705 */
1706static List *
1707 adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri)
1708{
1709 TupleConversionMap *map = ExecGetChildToRootMap(leaf_part_rri);
1710
1711 Assert(map != NULL);
1712
1713 return adjust_partition_colnos_using_map(colnos, map->attrMap);
1714}
1715
1716/*
1717 * adjust_partition_colnos_using_map
1718 * Like adjust_partition_colnos, but uses a caller-supplied map instead
1719 * of assuming to map from the "root" result relation.
1720 *
1721 * Note: mustn't be called if no adjustment is required.
1722 */
1723static List *
1724 adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
1725{
1726 List *new_colnos = NIL;
1727 ListCell *lc;
1728
1729 Assert(attrMap != NULL); /* else we shouldn't be here */
1730
1731 foreach(lc, colnos)
1732 {
1733 AttrNumber parentattrno = lfirst_int(lc);
1734
1735 if (parentattrno <= 0 ||
1736 parentattrno > attrMap->maplen ||
1737 attrMap->attnums[parentattrno - 1] == 0)
1738 elog(ERROR, "unexpected attno %d in target column list",
1739 parentattrno);
1740 new_colnos = lappend_int(new_colnos,
1741 attrMap->attnums[parentattrno - 1]);
1742 }
1743
1744 return new_colnos;
1745}
1746
1747/*-------------------------------------------------------------------------
1748 * Run-Time Partition Pruning Support.
1749 *
1750 * The following series of functions exist to support the removal of unneeded
1751 * subplans for queries against partitioned tables. The supporting functions
1752 * here are designed to work with any plan type which supports an arbitrary
1753 * number of subplans, e.g. Append, MergeAppend.
1754 *
1755 * When pruning involves comparison of a partition key to a constant, it's
1756 * done by the planner. However, if we have a comparison to a non-constant
1757 * but not volatile expression, that presents an opportunity for run-time
1758 * pruning by the executor, allowing irrelevant partitions to be skipped
1759 * dynamically.
1760 *
1761 * We must distinguish expressions containing PARAM_EXEC Params from
1762 * expressions that don't contain those. Even though a PARAM_EXEC Param is
1763 * considered to be a stable expression, it can change value from one plan
1764 * node scan to the next during query execution. Stable comparison
1765 * expressions that don't involve such Params allow partition pruning to be
1766 * done once during executor startup. Expressions that do involve such Params
1767 * require us to prune separately for each scan of the parent plan node.
1768 *
1769 * Note that pruning away unneeded subplans during executor startup has the
1770 * added benefit of not having to initialize the unneeded subplans at all.
1771 *
1772 *
1773 * Functions:
1774 *
1775 * ExecDoInitialPruning:
1776 * Perform runtime "initial" pruning, if necessary, to determine the set
1777 * of child subnodes that need to be initialized during ExecInitNode() for
1778 * all plan nodes that contain a PartitionPruneInfo.
1779 *
1780 * ExecInitPartitionExecPruning:
1781 * Updates the PartitionPruneState found at given part_prune_index in
1782 * EState.es_part_prune_states for use during "exec" pruning if required.
1783 * Also returns the set of subplans to initialize that would be stored at
1784 * part_prune_index in EState.es_part_prune_results by
1785 * ExecDoInitialPruning(). Maps in PartitionPruneState are updated to
1786 * account for initial pruning possibly having eliminated some of the
1787 * subplans.
1788 *
1789 * ExecFindMatchingSubPlans:
1790 * Returns indexes of matching subplans after evaluating the expressions
1791 * that are safe to evaluate at a given point. This function is first
1792 * called during ExecDoInitialPruning() to find the initially matching
1793 * subplans based on performing the initial pruning steps and then must be
1794 * called again each time the value of a Param listed in
1795 * PartitionPruneState's 'execparamids' changes.
1796 *-------------------------------------------------------------------------
1797 */
1798
1799
1800/*
1801 * ExecDoInitialPruning
1802 * Perform runtime "initial" pruning, if necessary, to determine the set
1803 * of child subnodes that need to be initialized during ExecInitNode() for
1804 * plan nodes that support partition pruning.
1805 *
1806 * This function iterates over each PartitionPruneInfo entry in
1807 * estate->es_part_prune_infos. For each entry, it creates a PartitionPruneState
1808 * and adds it to es_part_prune_states. ExecInitPartitionExecPruning() accesses
1809 * these states through their corresponding indexes in es_part_prune_states and
1810 * assign each state to the parent node's PlanState, from where it will be used
1811 * for "exec" pruning.
1812 *
1813 * If initial pruning steps exist for a PartitionPruneInfo entry, this function
1814 * executes those pruning steps and stores the result as a bitmapset of valid
1815 * child subplans, identifying which subplans should be initialized for
1816 * execution. The results are saved in estate->es_part_prune_results.
1817 *
1818 * If no initial pruning is performed for a given PartitionPruneInfo, a NULL
1819 * entry is still added to es_part_prune_results to maintain alignment with
1820 * es_part_prune_infos. This ensures that ExecInitPartitionExecPruning() can
1821 * use the same index to retrieve the pruning results.
1822 */
1823void
1824 ExecDoInitialPruning(EState *estate)
1825{
1826 ListCell *lc;
1827
1828 foreach(lc, estate->es_part_prune_infos)
1829 {
1830 PartitionPruneInfo *pruneinfo = lfirst_node(PartitionPruneInfo, lc);
1831 PartitionPruneState *prunestate;
1832 Bitmapset *validsubplans = NULL;
1833 Bitmapset *all_leafpart_rtis = NULL;
1834 Bitmapset *validsubplan_rtis = NULL;
1835
1836 /* Create and save the PartitionPruneState. */
1837 prunestate = CreatePartitionPruneState(estate, pruneinfo,
1838 &all_leafpart_rtis);
1839 estate->es_part_prune_states = lappend(estate->es_part_prune_states,
1840 prunestate);
1841
1842 /*
1843 * Perform initial pruning steps, if any, and save the result
1844 * bitmapset or NULL as described in the header comment.
1845 */
1846 if (prunestate->do_initial_prune)
1847 validsubplans = ExecFindMatchingSubPlans(prunestate, true,
1848 &validsubplan_rtis);
1849 else
1850 validsubplan_rtis = all_leafpart_rtis;
1851
1852 estate->es_unpruned_relids = bms_add_members(estate->es_unpruned_relids,
1853 validsubplan_rtis);
1854 estate->es_part_prune_results = lappend(estate->es_part_prune_results,
1855 validsubplans);
1856 }
1857}
1858
1859/*
1860 * ExecInitPartitionExecPruning
1861 * Initialize the data structures needed for runtime "exec" partition
1862 * pruning and return the result of initial pruning, if available.
1863 *
1864 * 'relids' identifies the relation to which both the parent plan and the
1865 * PartitionPruneInfo given by 'part_prune_index' belong.
1866 *
1867 * On return, *initially_valid_subplans is assigned the set of indexes of
1868 * child subplans that must be initialized along with the parent plan node.
1869 * Initial pruning would have been performed by ExecDoInitialPruning(), if
1870 * necessary, and the bitmapset of surviving subplans' indexes would have
1871 * been stored as the part_prune_index'th element of
1872 * EState.es_part_prune_results.
1873 *
1874 * If subplans were indeed pruned during initial pruning, the subplan_map
1875 * arrays in the returned PartitionPruneState are re-sequenced to exclude those
1876 * subplans, but only if the maps will be needed for subsequent execution
1877 * pruning passes.
1878 */
1879PartitionPruneState *
1880 ExecInitPartitionExecPruning(PlanState *planstate,
1881 int n_total_subplans,
1882 int part_prune_index,
1883 Bitmapset *relids,
1884 Bitmapset **initially_valid_subplans)
1885{
1886 PartitionPruneState *prunestate;
1887 EState *estate = planstate->state;
1888 PartitionPruneInfo *pruneinfo;
1889
1890 /* Obtain the pruneinfo we need. */
1891 pruneinfo = list_nth_node(PartitionPruneInfo, estate->es_part_prune_infos,
1892 part_prune_index);
1893
1894 /* Its relids better match the plan node's or the planner messed up. */
1895 if (!bms_equal(relids, pruneinfo->relids))
1896 elog(ERROR, "wrong pruneinfo with relids=%s found at part_prune_index=%d contained in plan node with relids=%s",
1897 bmsToString(pruneinfo->relids), part_prune_index,
1898 bmsToString(relids));
1899
1900 /*
1901 * The PartitionPruneState would have been created by
1902 * ExecDoInitialPruning() and stored as the part_prune_index'th element of
1903 * EState.es_part_prune_states.
1904 */
1905 prunestate = list_nth(estate->es_part_prune_states, part_prune_index);
1906 Assert(prunestate != NULL);
1907
1908 /* Use the result of initial pruning done by ExecDoInitialPruning(). */
1909 if (prunestate->do_initial_prune)
1910 *initially_valid_subplans = list_nth_node(Bitmapset,
1911 estate->es_part_prune_results,
1912 part_prune_index);
1913 else
1914 {
1915 /* No pruning, so we'll need to initialize all subplans */
1916 Assert(n_total_subplans > 0);
1917 *initially_valid_subplans = bms_add_range(NULL, 0,
1918 n_total_subplans - 1);
1919 }
1920
1921 /*
1922 * The exec pruning state must also be initialized, if needed, before it
1923 * can be used for pruning during execution.
1924 *
1925 * This also re-sequences subplan indexes contained in prunestate to
1926 * account for any that were removed due to initial pruning; refer to the
1927 * condition in InitExecPartitionPruneContexts() that is used to determine
1928 * whether to do this. If no exec pruning needs to be done, we would thus
1929 * leave the maps to be in an invalid state, but that's ok since that data
1930 * won't be consulted again (cf initial Assert in
1931 * ExecFindMatchingSubPlans).
1932 */
1933 if (prunestate->do_exec_prune)
1934 InitExecPartitionPruneContexts(prunestate, planstate,
1935 *initially_valid_subplans,
1936 n_total_subplans);
1937
1938 return prunestate;
1939}
1940
1941/*
1942 * CreatePartitionPruneState
1943 * Build the data structure required for calling ExecFindMatchingSubPlans
1944 *
1945 * This includes PartitionPruneContexts (stored in each
1946 * PartitionedRelPruningData corresponding to a PartitionedRelPruneInfo),
1947 * which hold the ExprStates needed to evaluate pruning expressions, and
1948 * mapping arrays to convert partition indexes from the pruning logic
1949 * into subplan indexes in the parent plan node's list of child subplans.
1950 *
1951 * 'pruneinfo' is a PartitionPruneInfo as generated by
1952 * make_partition_pruneinfo. Here we build a PartitionPruneState containing a
1953 * PartitionPruningData for each partitioning hierarchy (i.e., each sublist of
1954 * pruneinfo->prune_infos), each of which contains a PartitionedRelPruningData
1955 * for each PartitionedRelPruneInfo appearing in that sublist. This two-level
1956 * system is needed to keep from confusing the different hierarchies when a
1957 * UNION ALL contains multiple partitioned tables as children. The data
1958 * stored in each PartitionedRelPruningData can be re-used each time we
1959 * re-evaluate which partitions match the pruning steps provided in each
1960 * PartitionedRelPruneInfo.
1961 *
1962 * Note that only the PartitionPruneContexts for initial pruning are
1963 * initialized here. Those required for exec pruning are initialized later in
1964 * ExecInitPartitionExecPruning(), as they depend on the availability of the
1965 * parent plan node's PlanState.
1966 *
1967 * If initial pruning steps are to be skipped (e.g., during EXPLAIN
1968 * (GENERIC_PLAN)), *all_leafpart_rtis will be populated with the RT indexes of
1969 * all leaf partitions whose scanning subnode is included in the parent plan
1970 * node's list of child plans. The caller must add these RT indexes to
1971 * estate->es_unpruned_relids.
1972 */
1973static PartitionPruneState *
1974 CreatePartitionPruneState(EState *estate, PartitionPruneInfo *pruneinfo,
1975 Bitmapset **all_leafpart_rtis)
1976{
1977 PartitionPruneState *prunestate;
1978 int n_part_hierarchies;
1979 ListCell *lc;
1980 int i;
1981
1982 /*
1983 * Expression context that will be used by partkey_datum_from_expr() to
1984 * evaluate expressions for comparison against partition bounds.
1985 */
1986 ExprContext *econtext = CreateExprContext(estate);
1987
1988 /* For data reading, executor always includes detached partitions */
1989 if (estate->es_partition_directory == NULL)
1990 estate->es_partition_directory =
1991 CreatePartitionDirectory(estate->es_query_cxt, false);
1992
1993 n_part_hierarchies = list_length(pruneinfo->prune_infos);
1994 Assert(n_part_hierarchies > 0);
1995
1996 /*
1997 * Allocate the data structure
1998 */
1999 prunestate = (PartitionPruneState *)
2000 palloc(offsetof(PartitionPruneState, partprunedata) +
2001 sizeof(PartitionPruningData *) * n_part_hierarchies);
2002
2003 /* Save ExprContext for use during InitExecPartitionPruneContexts(). */
2004 prunestate->econtext = econtext;
2005 prunestate->execparamids = NULL;
2006 /* other_subplans can change at runtime, so we need our own copy */
2007 prunestate->other_subplans = bms_copy(pruneinfo->other_subplans);
2008 prunestate->do_initial_prune = false; /* may be set below */
2009 prunestate->do_exec_prune = false; /* may be set below */
2010 prunestate->num_partprunedata = n_part_hierarchies;
2011
2012 /*
2013 * Create a short-term memory context which we'll use when making calls to
2014 * the partition pruning functions. This avoids possible memory leaks,
2015 * since the pruning functions call comparison functions that aren't under
2016 * our control.
2017 */
2018 prunestate->prune_context =
2019 AllocSetContextCreate(CurrentMemoryContext,
2020 "Partition Prune",
2021 ALLOCSET_DEFAULT_SIZES);
2022
2023 i = 0;
2024 foreach(lc, pruneinfo->prune_infos)
2025 {
2026 List *partrelpruneinfos = lfirst_node(List, lc);
2027 int npartrelpruneinfos = list_length(partrelpruneinfos);
2028 PartitionPruningData *prunedata;
2029 ListCell *lc2;
2030 int j;
2031
2032 prunedata = (PartitionPruningData *)
2033 palloc(offsetof(PartitionPruningData, partrelprunedata) +
2034 npartrelpruneinfos * sizeof(PartitionedRelPruningData));
2035 prunestate->partprunedata[i] = prunedata;
2036 prunedata->num_partrelprunedata = npartrelpruneinfos;
2037
2038 j = 0;
2039 foreach(lc2, partrelpruneinfos)
2040 {
2041 PartitionedRelPruneInfo *pinfo = lfirst_node(PartitionedRelPruneInfo, lc2);
2042 PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
2043 Relation partrel;
2044 PartitionDesc partdesc;
2045 PartitionKey partkey;
2046
2047 /*
2048 * We can rely on the copies of the partitioned table's partition
2049 * key and partition descriptor appearing in its relcache entry,
2050 * because that entry will be held open and locked for the
2051 * duration of this executor run.
2052 */
2053 partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex, false);
2054
2055 /* Remember for InitExecPartitionPruneContexts(). */
2056 pprune->partrel = partrel;
2057
2058 partkey = RelationGetPartitionKey(partrel);
2059 partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
2060 partrel);
2061
2062 /*
2063 * Initialize the subplan_map and subpart_map.
2064 *
2065 * The set of partitions that exist now might not be the same that
2066 * existed when the plan was made. The normal case is that it is;
2067 * optimize for that case with a quick comparison, and just copy
2068 * the subplan_map and make subpart_map, leafpart_rti_map point to
2069 * the ones in PruneInfo.
2070 *
2071 * For the case where they aren't identical, we could have more
2072 * partitions on either side; or even exactly the same number of
2073 * them on both but the set of OIDs doesn't match fully. Handle
2074 * this by creating new subplan_map and subpart_map arrays that
2075 * corresponds to the ones in the PruneInfo where the new
2076 * partition descriptor's OIDs match. Any that don't match can be
2077 * set to -1, as if they were pruned. By construction, both
2078 * arrays are in partition bounds order.
2079 */
2080 pprune->nparts = partdesc->nparts;
2081 pprune->subplan_map = palloc(sizeof(int) * partdesc->nparts);
2082
2083 if (partdesc->nparts == pinfo->nparts &&
2084 memcmp(partdesc->oids, pinfo->relid_map,
2085 sizeof(int) * partdesc->nparts) == 0)
2086 {
2087 pprune->subpart_map = pinfo->subpart_map;
2088 pprune->leafpart_rti_map = pinfo->leafpart_rti_map;
2089 memcpy(pprune->subplan_map, pinfo->subplan_map,
2090 sizeof(int) * pinfo->nparts);
2091 }
2092 else
2093 {
2094 int pd_idx = 0;
2095 int pp_idx;
2096
2097 /*
2098 * When the partition arrays are not identical, there could be
2099 * some new ones but it's also possible that one was removed;
2100 * we cope with both situations by walking the arrays and
2101 * discarding those that don't match.
2102 *
2103 * If the number of partitions on both sides match, it's still
2104 * possible that one partition has been detached and another
2105 * attached. Cope with that by creating a map that skips any
2106 * mismatches.
2107 */
2108 pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
2109 pprune->leafpart_rti_map = palloc(sizeof(int) * partdesc->nparts);
2110
2111 for (pp_idx = 0; pp_idx < partdesc->nparts; pp_idx++)
2112 {
2113 /* Skip any InvalidOid relid_map entries */
2114 while (pd_idx < pinfo->nparts &&
2115 !OidIsValid(pinfo->relid_map[pd_idx]))
2116 pd_idx++;
2117
2118 recheck:
2119 if (pd_idx < pinfo->nparts &&
2120 pinfo->relid_map[pd_idx] == partdesc->oids[pp_idx])
2121 {
2122 /* match... */
2123 pprune->subplan_map[pp_idx] =
2124 pinfo->subplan_map[pd_idx];
2125 pprune->subpart_map[pp_idx] =
2126 pinfo->subpart_map[pd_idx];
2127 pprune->leafpart_rti_map[pp_idx] =
2128 pinfo->leafpart_rti_map[pd_idx];
2129 pd_idx++;
2130 continue;
2131 }
2132
2133 /*
2134 * There isn't an exact match in the corresponding
2135 * positions of both arrays. Peek ahead in
2136 * pinfo->relid_map to see if we have a match for the
2137 * current partition in partdesc. Normally if a match
2138 * exists it's just one element ahead, and it means the
2139 * planner saw one extra partition that we no longer see
2140 * now (its concurrent detach finished just in between);
2141 * so we skip that one by updating pd_idx to the new
2142 * location and jumping above. We can then continue to
2143 * match the rest of the elements after skipping the OID
2144 * with no match; no future matches are tried for the
2145 * element that was skipped, because we know the arrays to
2146 * be in the same order.
2147 *
2148 * If we don't see a match anywhere in the rest of the
2149 * pinfo->relid_map array, that means we see an element
2150 * now that the planner didn't see, so mark that one as
2151 * pruned and move on.
2152 */
2153 for (int pd_idx2 = pd_idx + 1; pd_idx2 < pinfo->nparts; pd_idx2++)
2154 {
2155 if (pd_idx2 >= pinfo->nparts)
2156 break;
2157 if (pinfo->relid_map[pd_idx2] == partdesc->oids[pp_idx])
2158 {
2159 pd_idx = pd_idx2;
2160 goto recheck;
2161 }
2162 }
2163
2164 pprune->subpart_map[pp_idx] = -1;
2165 pprune->subplan_map[pp_idx] = -1;
2166 pprune->leafpart_rti_map[pp_idx] = 0;
2167 }
2168 }
2169
2170 /* present_parts is also subject to later modification */
2171 pprune->present_parts = bms_copy(pinfo->present_parts);
2172
2173 /*
2174 * Only initial_context is initialized here. exec_context is
2175 * initialized during ExecInitPartitionExecPruning() when the
2176 * parent plan's PlanState is available.
2177 *
2178 * Note that we must skip execution-time (both "init" and "exec")
2179 * partition pruning in EXPLAIN (GENERIC_PLAN), since parameter
2180 * values may be missing.
2181 */
2182 pprune->initial_pruning_steps = pinfo->initial_pruning_steps;
2183 if (pinfo->initial_pruning_steps &&
2184 !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
2185 {
2186 InitPartitionPruneContext(&pprune->initial_context,
2187 pprune->initial_pruning_steps,
2188 partdesc, partkey, NULL,
2189 econtext);
2190 /* Record whether initial pruning is needed at any level */
2191 prunestate->do_initial_prune = true;
2192 }
2193 pprune->exec_pruning_steps = pinfo->exec_pruning_steps;
2194 if (pinfo->exec_pruning_steps &&
2195 !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
2196 {
2197 /* Record whether exec pruning is needed at any level */
2198 prunestate->do_exec_prune = true;
2199 }
2200
2201 /*
2202 * Accumulate the IDs of all PARAM_EXEC Params affecting the
2203 * partitioning decisions at this plan node.
2204 */
2205 prunestate->execparamids = bms_add_members(prunestate->execparamids,
2206 pinfo->execparamids);
2207
2208 /*
2209 * Return all leaf partition indexes if we're skipping pruning in
2210 * the EXPLAIN (GENERIC_PLAN) case.
2211 */
2212 if (pinfo->initial_pruning_steps && !prunestate->do_initial_prune)
2213 {
2214 int part_index = -1;
2215
2216 while ((part_index = bms_next_member(pprune->present_parts,
2217 part_index)) >= 0)
2218 {
2219 Index rtindex = pprune->leafpart_rti_map[part_index];
2220
2221 if (rtindex)
2222 *all_leafpart_rtis = bms_add_member(*all_leafpart_rtis,
2223 rtindex);
2224 }
2225 }
2226
2227 j++;
2228 }
2229 i++;
2230 }
2231
2232 return prunestate;
2233}
2234
2235/*
2236 * Initialize a PartitionPruneContext for the given list of pruning steps.
2237 */
2238static void
2239 InitPartitionPruneContext(PartitionPruneContext *context,
2240 List *pruning_steps,
2241 PartitionDesc partdesc,
2242 PartitionKey partkey,
2243 PlanState *planstate,
2244 ExprContext *econtext)
2245{
2246 int n_steps;
2247 int partnatts;
2248 ListCell *lc;
2249
2250 n_steps = list_length(pruning_steps);
2251
2252 context->strategy = partkey->strategy;
2253 context->partnatts = partnatts = partkey->partnatts;
2254 context->nparts = partdesc->nparts;
2255 context->boundinfo = partdesc->boundinfo;
2256 context->partcollation = partkey->partcollation;
2257 context->partsupfunc = partkey->partsupfunc;
2258
2259 /* We'll look up type-specific support functions as needed */
2260 context->stepcmpfuncs = (FmgrInfo *)
2261 palloc0(sizeof(FmgrInfo) * n_steps * partnatts);
2262
2263 context->ppccontext = CurrentMemoryContext;
2264 context->planstate = planstate;
2265 context->exprcontext = econtext;
2266
2267 /* Initialize expression state for each expression we need */
2268 context->exprstates = (ExprState **)
2269 palloc0(sizeof(ExprState *) * n_steps * partnatts);
2270 foreach(lc, pruning_steps)
2271 {
2272 PartitionPruneStepOp *step = (PartitionPruneStepOp *) lfirst(lc);
2273 ListCell *lc2 = list_head(step->exprs);
2274 int keyno;
2275
2276 /* not needed for other step kinds */
2277 if (!IsA(step, PartitionPruneStepOp))
2278 continue;
2279
2280 Assert(list_length(step->exprs) <= partnatts);
2281
2282 for (keyno = 0; keyno < partnatts; keyno++)
2283 {
2284 if (bms_is_member(keyno, step->nullkeys))
2285 continue;
2286
2287 if (lc2 != NULL)
2288 {
2289 Expr *expr = lfirst(lc2);
2290
2291 /* not needed for Consts */
2292 if (!IsA(expr, Const))
2293 {
2294 int stateidx = PruneCxtStateIdx(partnatts,
2295 step->step.step_id,
2296 keyno);
2297
2298 /*
2299 * When planstate is NULL, pruning_steps is known not to
2300 * contain any expressions that depend on the parent plan.
2301 * Information of any available EXTERN parameters must be
2302 * passed explicitly in that case, which the caller must
2303 * have made available via econtext.
2304 */
2305 if (planstate == NULL)
2306 context->exprstates[stateidx] =
2307 ExecInitExprWithParams(expr,
2308 econtext->ecxt_param_list_info);
2309 else
2310 context->exprstates[stateidx] =
2311 ExecInitExpr(expr, context->planstate);
2312 }
2313 lc2 = lnext(step->exprs, lc2);
2314 }
2315 }
2316 }
2317}
2318
2319/*
2320 * InitExecPartitionPruneContexts
2321 * Initialize exec pruning contexts deferred by CreatePartitionPruneState()
2322 *
2323 * This function finalizes exec pruning setup for a PartitionPruneState by
2324 * initializing contexts for pruning steps that require the parent plan's
2325 * PlanState. It iterates over PartitionPruningData entries and sets up the
2326 * necessary execution contexts for pruning during query execution.
2327 *
2328 * Also fix the mapping of partition indexes to subplan indexes contained in
2329 * prunestate by considering the new list of subplans that survived initial
2330 * pruning.
2331 *
2332 * Current values of the indexes present in PartitionPruneState count all the
2333 * subplans that would be present before initial pruning was done. If initial
2334 * pruning got rid of some of the subplans, any subsequent pruning passes will
2335 * be looking at a different set of target subplans to choose from than those
2336 * in the pre-initial-pruning set, so the maps in PartitionPruneState
2337 * containing those indexes must be updated to reflect the new indexes of
2338 * subplans in the post-initial-pruning set.
2339 */
2340static void
2341 InitExecPartitionPruneContexts(PartitionPruneState *prunestate,
2342 PlanState *parent_plan,
2343 Bitmapset *initially_valid_subplans,
2344 int n_total_subplans)
2345{
2346 EState *estate;
2347 int *new_subplan_indexes = NULL;
2348 Bitmapset *new_other_subplans;
2349 int i;
2350 int newidx;
2351 bool fix_subplan_map = false;
2352
2353 Assert(prunestate->do_exec_prune);
2354 Assert(parent_plan != NULL);
2355 estate = parent_plan->state;
2356
2357 /*
2358 * No need to fix subplans maps if initial pruning didn't eliminate any
2359 * subplans.
2360 */
2361 if (bms_num_members(initially_valid_subplans) < n_total_subplans)
2362 {
2363 fix_subplan_map = true;
2364
2365 /*
2366 * First we must build a temporary array which maps old subplan
2367 * indexes to new ones. For convenience of initialization, we use
2368 * 1-based indexes in this array and leave pruned items as 0.
2369 */
2370 new_subplan_indexes = (int *) palloc0(sizeof(int) * n_total_subplans);
2371 newidx = 1;
2372 i = -1;
2373 while ((i = bms_next_member(initially_valid_subplans, i)) >= 0)
2374 {
2375 Assert(i < n_total_subplans);
2376 new_subplan_indexes[i] = newidx++;
2377 }
2378 }
2379
2380 /*
2381 * Now we can update each PartitionedRelPruneInfo's subplan_map with new
2382 * subplan indexes. We must also recompute its present_parts bitmap.
2383 */
2384 for (i = 0; i < prunestate->num_partprunedata; i++)
2385 {
2386 PartitionPruningData *prunedata = prunestate->partprunedata[i];
2387 int j;
2388
2389 /*
2390 * Within each hierarchy, we perform this loop in back-to-front order
2391 * so that we determine present_parts for the lowest-level partitioned
2392 * tables first. This way we can tell whether a sub-partitioned
2393 * table's partitions were entirely pruned so we can exclude it from
2394 * the current level's present_parts.
2395 */
2396 for (j = prunedata->num_partrelprunedata - 1; j >= 0; j--)
2397 {
2398 PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
2399 int nparts = pprune->nparts;
2400 int k;
2401
2402 /* Initialize PartitionPruneContext for exec pruning, if needed. */
2403 if (pprune->exec_pruning_steps != NIL)
2404 {
2405 PartitionKey partkey;
2406 PartitionDesc partdesc;
2407
2408 /*
2409 * See the comment in CreatePartitionPruneState() regarding
2410 * the usage of partdesc and partkey.
2411 */
2412 partkey = RelationGetPartitionKey(pprune->partrel);
2413 partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
2414 pprune->partrel);
2415
2416 InitPartitionPruneContext(&pprune->exec_context,
2417 pprune->exec_pruning_steps,
2418 partdesc, partkey, parent_plan,
2419 prunestate->econtext);
2420 }
2421
2422 if (!fix_subplan_map)
2423 continue;
2424
2425 /* We just rebuild present_parts from scratch */
2426 bms_free(pprune->present_parts);
2427 pprune->present_parts = NULL;
2428
2429 for (k = 0; k < nparts; k++)
2430 {
2431 int oldidx = pprune->subplan_map[k];
2432 int subidx;
2433
2434 /*
2435 * If this partition existed as a subplan then change the old
2436 * subplan index to the new subplan index. The new index may
2437 * become -1 if the partition was pruned above, or it may just
2438 * come earlier in the subplan list due to some subplans being
2439 * removed earlier in the list. If it's a subpartition, add
2440 * it to present_parts unless it's entirely pruned.
2441 */
2442 if (oldidx >= 0)
2443 {
2444 Assert(oldidx < n_total_subplans);
2445 pprune->subplan_map[k] = new_subplan_indexes[oldidx] - 1;
2446
2447 if (new_subplan_indexes[oldidx] > 0)
2448 pprune->present_parts =
2449 bms_add_member(pprune->present_parts, k);
2450 }
2451 else if ((subidx = pprune->subpart_map[k]) >= 0)
2452 {
2453 PartitionedRelPruningData *subprune;
2454
2455 subprune = &prunedata->partrelprunedata[subidx];
2456
2457 if (!bms_is_empty(subprune->present_parts))
2458 pprune->present_parts =
2459 bms_add_member(pprune->present_parts, k);
2460 }
2461 }
2462 }
2463 }
2464
2465 /*
2466 * If we fixed subplan maps, we must also recompute the other_subplans
2467 * set, since indexes in it may change.
2468 */
2469 if (fix_subplan_map)
2470 {
2471 new_other_subplans = NULL;
2472 i = -1;
2473 while ((i = bms_next_member(prunestate->other_subplans, i)) >= 0)
2474 new_other_subplans = bms_add_member(new_other_subplans,
2475 new_subplan_indexes[i] - 1);
2476
2477 bms_free(prunestate->other_subplans);
2478 prunestate->other_subplans = new_other_subplans;
2479
2480 pfree(new_subplan_indexes);
2481 }
2482}
2483
2484/*
2485 * ExecFindMatchingSubPlans
2486 * Determine which subplans match the pruning steps detailed in
2487 * 'prunestate' for the current comparison expression values.
2488 *
2489 * Pass initial_prune if PARAM_EXEC Params cannot yet be evaluated. This
2490 * differentiates the initial executor-time pruning step from later
2491 * runtime pruning.
2492 *
2493 * The caller must pass a non-NULL validsubplan_rtis during initial pruning
2494 * to collect the RT indexes of leaf partitions whose subnodes will be
2495 * executed. These RT indexes are later added to EState.es_unpruned_relids.
2496 */
2497Bitmapset *
2498 ExecFindMatchingSubPlans(PartitionPruneState *prunestate,
2499 bool initial_prune,
2500 Bitmapset **validsubplan_rtis)
2501{
2502 Bitmapset *result = NULL;
2503 MemoryContext oldcontext;
2504 int i;
2505
2506 /*
2507 * Either we're here on the initial prune done during pruning
2508 * initialization, or we're at a point where PARAM_EXEC Params can be
2509 * evaluated *and* there are steps in which to do so.
2510 */
2511 Assert(initial_prune || prunestate->do_exec_prune);
2512 Assert(validsubplan_rtis != NULL || !initial_prune);
2513
2514 /*
2515 * Switch to a temp context to avoid leaking memory in the executor's
2516 * query-lifespan memory context.
2517 */
2518 oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
2519
2520 /*
2521 * For each hierarchy, do the pruning tests, and add nondeletable
2522 * subplans' indexes to "result".
2523 */
2524 for (i = 0; i < prunestate->num_partprunedata; i++)
2525 {
2526 PartitionPruningData *prunedata = prunestate->partprunedata[i];
2527 PartitionedRelPruningData *pprune;
2528
2529 /*
2530 * We pass the zeroth item, belonging to the root table of the
2531 * hierarchy, and find_matching_subplans_recurse() takes care of
2532 * recursing to other (lower-level) parents as needed.
2533 */
2534 pprune = &prunedata->partrelprunedata[0];
2535 find_matching_subplans_recurse(prunedata, pprune, initial_prune,
2536 &result, validsubplan_rtis);
2537
2538 /*
2539 * Expression eval may have used space in ExprContext too. Avoid
2540 * accessing exec_context during initial pruning, as it is not valid
2541 * at that stage.
2542 */
2543 if (!initial_prune && pprune->exec_pruning_steps)
2544 ResetExprContext(pprune->exec_context.exprcontext);
2545 }
2546
2547 /* Add in any subplans that partition pruning didn't account for */
2548 result = bms_add_members(result, prunestate->other_subplans);
2549
2550 MemoryContextSwitchTo(oldcontext);
2551
2552 /* Copy result out of the temp context before we reset it */
2553 result = bms_copy(result);
2554 if (validsubplan_rtis)
2555 *validsubplan_rtis = bms_copy(*validsubplan_rtis);
2556
2557 MemoryContextReset(prunestate->prune_context);
2558
2559 return result;
2560}
2561
2562/*
2563 * find_matching_subplans_recurse
2564 * Recursive worker function for ExecFindMatchingSubPlans
2565 *
2566 * Adds valid (non-prunable) subplan IDs to *validsubplans. If
2567 * *validsubplan_rtis is non-NULL, it also adds the RT indexes of their
2568 * corresponding partitions, but only if they are leaf partitions.
2569 */
2570static void
2571 find_matching_subplans_recurse(PartitionPruningData *prunedata,
2572 PartitionedRelPruningData *pprune,
2573 bool initial_prune,
2574 Bitmapset **validsubplans,
2575 Bitmapset **validsubplan_rtis)
2576{
2577 Bitmapset *partset;
2578 int i;
2579
2580 /* Guard against stack overflow due to overly deep partition hierarchy. */
2581 check_stack_depth();
2582
2583 /*
2584 * Prune as appropriate, if we have pruning steps matching the current
2585 * execution context. Otherwise just include all partitions at this
2586 * level.
2587 */
2588 if (initial_prune && pprune->initial_pruning_steps)
2589 partset = get_matching_partitions(&pprune->initial_context,
2590 pprune->initial_pruning_steps);
2591 else if (!initial_prune && pprune->exec_pruning_steps)
2592 partset = get_matching_partitions(&pprune->exec_context,
2593 pprune->exec_pruning_steps);
2594 else
2595 partset = pprune->present_parts;
2596
2597 /* Translate partset into subplan indexes */
2598 i = -1;
2599 while ((i = bms_next_member(partset, i)) >= 0)
2600 {
2601 if (pprune->subplan_map[i] >= 0)
2602 {
2603 *validsubplans = bms_add_member(*validsubplans,
2604 pprune->subplan_map[i]);
2605
2606 /*
2607 * Only report leaf partitions. Non-leaf partitions may appear
2608 * here when they use an unflattened Append or MergeAppend.
2609 */
2610 if (validsubplan_rtis && pprune->leafpart_rti_map[i])
2611 *validsubplan_rtis = bms_add_member(*validsubplan_rtis,
2612 pprune->leafpart_rti_map[i]);
2613 }
2614 else
2615 {
2616 int partidx = pprune->subpart_map[i];
2617
2618 if (partidx >= 0)
2619 find_matching_subplans_recurse(prunedata,
2620 &prunedata->partrelprunedata[partidx],
2621 initial_prune, validsubplans,
2622 validsubplan_rtis);
2623 else
2624 {
2625 /*
2626 * We get here if the planner already pruned all the sub-
2627 * partitions for this partition. Silently ignore this
2628 * partition in this case. The end result is the same: we
2629 * would have pruned all partitions just the same, but we
2630 * don't have any pruning steps to execute to verify this.
2631 */
2632 }
2633 }
2634 }
2635}
AclResult
Definition: acl.h:182
@ ACLCHECK_OK
Definition: acl.h:183
AclResult pg_attribute_aclcheck(Oid table_oid, AttrNumber attnum, Oid roleid, AclMode mode)
Definition: aclchk.c:3866
AclResult pg_class_aclcheck(Oid table_oid, Oid roleid, AclMode mode)
Definition: aclchk.c:4037
AttrMap * build_attrmap_by_name(TupleDesc indesc, TupleDesc outdesc, bool missing_ok)
Definition: attmap.c:175
AttrMap * build_attrmap_by_name_if_req(TupleDesc indesc, TupleDesc outdesc, bool missing_ok)
Definition: attmap.c:261
int16 AttrNumber
Definition: attnum.h:21
#define InvalidAttrNumber
Definition: attnum.h:23
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1306
Bitmapset * bms_add_range(Bitmapset *a, int lower, int upper)
Definition: bitmapset.c:1019
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:917
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
#define bms_is_empty(a)
Definition: bitmapset.h:118
static Datum values[MAXATTR]
Definition: bootstrap.c:153
#define likely(x)
Definition: c.h:401
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:470
int32_t int32
Definition: c.h:534
uint64_t uint64
Definition: c.h:539
#define unlikely(x)
Definition: c.h:402
unsigned int Index
Definition: c.h:619
#define OidIsValid(objectId)
Definition: c.h:774
int errdetail(const char *fmt,...)
Definition: elog.c:1207
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
#define ereport(elevel,...)
Definition: elog.h:150
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition: execExpr.c:143
ProjectionInfo * ExecBuildProjectionInfo(List *targetList, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent, TupleDesc inputDesc)
Definition: execExpr.c:370
ExprState * ExecInitQual(List *qual, PlanState *parent)
Definition: execExpr.c:229
ExprState * ExecInitExprWithParams(Expr *node, ParamListInfo ext_params)
Definition: execExpr.c:180
ProjectionInfo * ExecBuildUpdateProjection(List *targetList, bool evalTargetList, List *targetColnos, TupleDesc relDesc, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent)
Definition: execExpr.c:547
List * ExecPrepareExprList(List *nodes, EState *estate)
Definition: execExpr.c:839
void ExecCloseIndices(ResultRelInfo *resultRelInfo)
Definition: execIndexing.c:238
void ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
Definition: execIndexing.c:160
void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation, OnConflictAction onConflictAction, List *mergeActions)
Definition: execMain.c:1050
bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, bool emitError)
Definition: execMain.c:1846
void InitResultRelInfo(ResultRelInfo *resultRelInfo, Relation resultRelationDesc, Index resultRelationIndex, ResultRelInfo *partition_root_rri, int instrument_options)
Definition: execMain.c:1243
static void InitExecPartitionPruneContexts(PartitionPruneState *prunestate, PlanState *parent_plan, Bitmapset *initially_valid_subplans, int n_total_subplans)
static PartitionDispatch ExecInitPartitionDispatchInfo(EState *estate, PartitionTupleRouting *proute, Oid partoid, PartitionDispatch parent_pd, int partidx, ResultRelInfo *rootResultRelInfo)
void ExecDoInitialPruning(EState *estate)
static ResultRelInfo * ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, PartitionDispatch dispatch, ResultRelInfo *rootResultRelInfo, int partidx)
Definition: execPartition.c:502
PartitionPruneState * ExecInitPartitionExecPruning(PlanState *planstate, int n_total_subplans, int part_prune_index, Bitmapset *relids, Bitmapset **initially_valid_subplans)
Bitmapset * ExecFindMatchingSubPlans(PartitionPruneState *prunestate, bool initial_prune, Bitmapset **validsubplan_rtis)
static void ExecInitRoutingInfo(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, PartitionDispatch dispatch, ResultRelInfo *partRelInfo, int partidx, bool is_borrowed_rel)
Definition: execPartition.c:994
static char * ExecBuildSlotPartitionKeyDescription(Relation rel, Datum *values, bool *isnull, int maxfieldlen)
static void FormPartitionKeyDatum(PartitionDispatch pd, TupleTableSlot *slot, EState *estate, Datum *values, bool *isnull)
#define PARTITION_CACHED_FIND_THRESHOLD
PartitionTupleRouting * ExecSetupPartitionTupleRouting(EState *estate, Relation rel)
Definition: execPartition.c:218
static List * adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri)
static List * adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
ResultRelInfo * ExecFindPartition(ModifyTableState *mtstate, ResultRelInfo *rootResultRelInfo, PartitionTupleRouting *proute, TupleTableSlot *slot, EState *estate)
Definition: execPartition.c:265
static void InitPartitionPruneContext(PartitionPruneContext *context, List *pruning_steps, PartitionDesc partdesc, PartitionKey partkey, PlanState *planstate, ExprContext *econtext)
struct PartitionDispatchData PartitionDispatchData
static int get_partition_for_tuple(PartitionDispatch pd, Datum *values, bool *isnull)
static void find_matching_subplans_recurse(PartitionPruningData *prunedata, PartitionedRelPruningData *pprune, bool initial_prune, Bitmapset **validsubplans, Bitmapset **validsubplan_rtis)
static PartitionPruneState * CreatePartitionPruneState(EState *estate, PartitionPruneInfo *pruneinfo, Bitmapset **all_leafpart_rtis)
void ExecCleanupTupleRouting(ModifyTableState *mtstate, PartitionTupleRouting *proute)
struct PartitionDispatchData * PartitionDispatch
Definition: execPartition.h:22
struct PartitionedRelPruningData PartitionedRelPruningData
TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1427
const TupleTableSlotOps TTSOpsVirtual
Definition: execTuples.c:84
void ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
Definition: execTuples.c:1443
Relation ExecGetRangeTableRelation(EState *estate, Index rti, bool isResultRel)
Definition: execUtils.c:825
TupleConversionMap * ExecGetRootToChildMap(ResultRelInfo *resultRelInfo, EState *estate)
Definition: execUtils.c:1326
ExprContext * CreateExprContext(EState *estate)
Definition: execUtils.c:307
TupleConversionMap * ExecGetChildToRootMap(ResultRelInfo *resultRelInfo)
Definition: execUtils.c:1300
#define GetPerTupleExprContext(estate)
Definition: executor.h:653
#define EXEC_FLAG_EXPLAIN_GENERIC
Definition: executor.h:67
#define ResetExprContext(econtext)
Definition: executor.h:647
#define GetPerTupleMemoryContext(estate)
Definition: executor.h:658
static Datum ExecEvalExprSwitchContext(ExprState *state, ExprContext *econtext, bool *isNull)
Definition: executor.h:433
Datum FunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2)
Definition: fmgr.c:1150
char * OidOutputFunctionCall(Oid functionId, Datum val)
Definition: fmgr.c:1763
Assert(PointerIsAligned(start, uint64))
long val
Definition: informix.c:689
j
int j
Definition: isn.c:78
i
int i
Definition: isn.c:77
List * lappend(List *list, void *datum)
Definition: list.c:339
List * lappend_int(List *list, int datum)
Definition: list.c:357
List * lappend_oid(List *list, Oid datum)
Definition: list.c:375
void list_free(List *list)
Definition: list.c:1546
bool list_member_oid(const List *list, Oid datum)
Definition: list.c:722
#define NoLock
Definition: lockdefs.h:34
#define RowExclusiveLock
Definition: lockdefs.h:38
void getTypeOutputInfo(Oid type, Oid *typOutput, bool *typIsVarlena)
Definition: lsyscache.c:3074
int pg_mbcliplen(const char *mbstr, int len, int limit)
Definition: mbutils.c:1084
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:400
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1610
void pfree(void *pointer)
Definition: mcxt.c:1594
void * palloc0(Size size)
Definition: mcxt.c:1395
void * palloc(Size size)
Definition: mcxt.c:1365
MemoryContext CurrentMemoryContext
Definition: mcxt.c:160
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
Oid GetUserId(void)
Definition: miscinit.c:469
ResultRelInfo * ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid, bool missing_ok, bool update_cache)
void ExecInitMergeTupleSlots(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo)
#define IsA(nodeptr, _type_)
Definition: nodes.h:164
#define copyObject(obj)
Definition: nodes.h:232
@ ONCONFLICT_NONE
Definition: nodes.h:428
@ ONCONFLICT_UPDATE
Definition: nodes.h:430
@ CMD_MERGE
Definition: nodes.h:279
@ CMD_INSERT
Definition: nodes.h:277
@ CMD_DELETE
Definition: nodes.h:278
@ CMD_UPDATE
Definition: nodes.h:276
@ CMD_NOTHING
Definition: nodes.h:282
#define makeNode(_type_)
Definition: nodes.h:161
#define castNode(_type_, nodeptr)
Definition: nodes.h:182
char * bmsToString(const Bitmapset *bms)
Definition: outfuncs.c:822
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
@ PARTITION_STRATEGY_HASH
Definition: parsenodes.h:902
@ PARTITION_STRATEGY_LIST
Definition: parsenodes.h:900
@ PARTITION_STRATEGY_RANGE
Definition: parsenodes.h:901
PartitionRangeDatumKind
Definition: parsenodes.h:951
#define ACL_SELECT
Definition: parsenodes.h:77
int32 partition_rbound_datum_cmp(FmgrInfo *partsupfunc, Oid *partcollation, Datum *rb_datums, PartitionRangeDatumKind *rb_kind, Datum *tuple_datums, int n_tuple_datums)
Definition: partbounds.c:3557
uint64 compute_partition_hash_value(int partnatts, FmgrInfo *partsupfunc, const Oid *partcollation, const Datum *values, const bool *isnull)
Definition: partbounds.c:4723
int partition_range_datum_bsearch(FmgrInfo *partsupfunc, Oid *partcollation, PartitionBoundInfo boundinfo, int nvalues, Datum *values, bool *is_equal)
Definition: partbounds.c:3696
int partition_list_bsearch(FmgrInfo *partsupfunc, Oid *partcollation, PartitionBoundInfo boundinfo, Datum value, bool *is_equal)
Definition: partbounds.c:3608
#define partition_bound_accepts_nulls(bi)
Definition: partbounds.h:98
PartitionKey RelationGetPartitionKey(Relation rel)
Definition: partcache.c:51
static int16 get_partition_col_attnum(PartitionKey key, int col)
Definition: partcache.h:80
static int get_partition_natts(PartitionKey key)
Definition: partcache.h:65
static Oid get_partition_col_typid(PartitionKey key, int col)
Definition: partcache.h:86
PartitionDirectory CreatePartitionDirectory(MemoryContext mcxt, bool omit_detached)
Definition: partdesc.c:423
PartitionDesc PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel)
Definition: partdesc.c:456
List * get_partition_ancestors(Oid relid)
Definition: partition.c:134
Bitmapset * get_matching_partitions(PartitionPruneContext *context, List *pruning_steps)
Definition: partprune.c:846
#define PruneCxtStateIdx(partnatts, step_id, keyno)
Definition: partprune.h:70
int16 attnum
Definition: pg_attribute.h:74
#define PARTITION_MAX_KEYS
#define lfirst(lc)
Definition: pg_list.h:172
#define lfirst_node(type, lc)
Definition: pg_list.h:176
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
#define lfirst_int(lc)
Definition: pg_list.h:173
static void * list_nth(const List *list, int n)
Definition: pg_list.h:299
#define linitial(l)
Definition: pg_list.h:178
static ListCell * list_head(const List *l)
Definition: pg_list.h:128
#define list_nth_node(type, list, n)
Definition: pg_list.h:327
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:343
#define lfirst_oid(lc)
Definition: pg_list.h:174
static char * buf
Definition: pg_test_fsync.c:72
uint64_t Datum
Definition: postgres.h:70
static int32 DatumGetInt32(Datum X)
Definition: postgres.h:212
#define InvalidOid
Definition: postgres_ext.h:37
unsigned int Oid
Definition: postgres_ext.h:32
#define INNER_VAR
Definition: primnodes.h:242
#define RelationGetForm(relation)
Definition: rel.h:508
#define RelationGetRelid(relation)
Definition: rel.h:514
#define RelationGetDescr(relation)
Definition: rel.h:540
#define RelationGetRelationName(relation)
Definition: rel.h:548
List * RelationGetIndexList(Relation relation)
Definition: relcache.c:4836
int errtable(Relation rel)
Definition: relcache.c:6049
Node * map_variable_attnos(Node *node, int target_varno, int sublevels_up, const AttrMap *attno_map, Oid to_rowtype, bool *found_whole_row)
Definition: rewriteManip.c:1705
int check_enable_rls(Oid relid, Oid checkAsUser, bool noError)
Definition: rls.c:52
@ RLS_ENABLED
Definition: rls.h:45
char * pg_get_partkeydef_columns(Oid relid, bool pretty)
Definition: ruleutils.c:1923
void check_stack_depth(void)
Definition: stack_depth.c:95
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:145
void appendBinaryStringInfo(StringInfo str, const void *data, int datalen)
Definition: stringinfo.c:281
void appendStringInfoString(StringInfo str, const char *s)
Definition: stringinfo.c:230
void appendStringInfoChar(StringInfo str, char ch)
Definition: stringinfo.c:242
void initStringInfo(StringInfo str)
Definition: stringinfo.c:97
Definition: attmap.h:35
int maplen
Definition: attmap.h:37
AttrNumber * attnums
Definition: attmap.h:36
Definition: primnodes.h:324
Definition: execnodes.h:655
List * es_part_prune_infos
Definition: execnodes.h:670
List * es_tuple_routing_result_relations
Definition: execnodes.h:698
int es_top_eflags
Definition: execnodes.h:719
int es_instrument
Definition: execnodes.h:720
Bitmapset * es_unpruned_relids
Definition: execnodes.h:673
List * es_part_prune_states
Definition: execnodes.h:671
MemoryContext es_query_cxt
Definition: execnodes.h:710
List * es_tupleTable
Definition: execnodes.h:712
PartitionDirectory es_partition_directory
Definition: execnodes.h:692
List * es_part_prune_results
Definition: execnodes.h:672
ParamListInfo ecxt_param_list_info
Definition: execnodes.h:285
TupleTableSlot * ecxt_scantuple
Definition: execnodes.h:273
struct EState * ecxt_estate
Definition: execnodes.h:315
Definition: primnodes.h:189
EndForeignInsert_function EndForeignInsert
Definition: fdwapi.h:239
BeginForeignInsert_function BeginForeignInsert
Definition: fdwapi.h:238
ExecForeignBatchInsert_function ExecForeignBatchInsert
Definition: fdwapi.h:233
GetForeignModifyBatchSize_function GetForeignModifyBatchSize
Definition: fdwapi.h:234
Definition: fmgr.h:57
Definition: pg_list.h:54
MergeAction * mas_action
Definition: execnodes.h:449
ProjectionInfo * mas_proj
Definition: execnodes.h:450
ExprState * mas_whenqual
Definition: execnodes.h:452
ResultRelInfo * resultRelInfo
Definition: execnodes.h:1402
PlanState ps
Definition: execnodes.h:1397
ResultRelInfo * rootResultRelInfo
Definition: execnodes.h:1410
List * onConflictCols
Definition: plannodes.h:361
List * mergeJoinConditions
Definition: plannodes.h:371
CmdType operation
Definition: plannodes.h:325
List * resultRelations
Definition: plannodes.h:335
List * onConflictSet
Definition: plannodes.h:359
List * mergeActionLists
Definition: plannodes.h:369
List * returningLists
Definition: plannodes.h:345
List * withCheckOptionLists
Definition: plannodes.h:339
Node * onConflictWhere
Definition: plannodes.h:363
OnConflictAction onConflictAction
Definition: plannodes.h:355
Definition: nodes.h:135
TupleTableSlot * oc_ProjSlot
Definition: execnodes.h:434
TupleTableSlot * oc_Existing
Definition: execnodes.h:433
ExprState * oc_WhereClause
Definition: execnodes.h:436
ProjectionInfo * oc_ProjInfo
Definition: execnodes.h:435
PartitionRangeDatumKind ** kind
Definition: partbounds.h:84
int last_found_datum_index
Definition: partdesc.h:46
PartitionBoundInfo boundinfo
Definition: partdesc.h:38
int last_found_count
Definition: partdesc.h:63
Oid * oids
Definition: partdesc.h:33
bool * is_leaf
Definition: partdesc.h:35
int last_found_part_index
Definition: partdesc.h:52
TupleTableSlot * tupslot
Definition: execPartition.c:149
PartitionKey key
Definition: execPartition.c:146
PartitionDesc partdesc
Definition: execPartition.c:148
int indexes[FLEXIBLE_ARRAY_MEMBER]
Definition: execPartition.c:151
int16 partnatts
Definition: partcache.h:28
Oid * partcollation
Definition: partcache.h:39
PartitionStrategy strategy
Definition: partcache.h:27
List * partexprs
Definition: partcache.h:31
FmgrInfo * partsupfunc
Definition: partcache.h:36
AttrNumber * partattrs
Definition: partcache.h:29
FmgrInfo * partsupfunc
Definition: partprune.h:56
ExprContext * exprcontext
Definition: partprune.h:60
MemoryContext ppccontext
Definition: partprune.h:58
PartitionBoundInfo boundinfo
Definition: partprune.h:54
PlanState * planstate
Definition: partprune.h:59
FmgrInfo * stepcmpfuncs
Definition: partprune.h:57
ExprState ** exprstates
Definition: partprune.h:61
Bitmapset * other_subplans
Definition: plannodes.h:1642
Bitmapset * relids
Definition: plannodes.h:1640
List * prune_infos
Definition: plannodes.h:1641
PartitionPruningData * partprunedata[FLEXIBLE_ARRAY_MEMBER]
Definition: execPartition.h:130
Bitmapset * execparamids
Definition: execPartition.h:124
ExprContext * econtext
Definition: execPartition.h:123
Bitmapset * other_subplans
Definition: execPartition.h:125
MemoryContext prune_context
Definition: execPartition.h:126
PartitionPruneStep step
Definition: plannodes.h:1751
Bitmapset * nullkeys
Definition: plannodes.h:1756
PartitionedRelPruningData partrelprunedata[FLEXIBLE_ARRAY_MEMBER]
Definition: execPartition.h:87
PartitionDispatch * partition_dispatch_info
Definition: execPartition.c:94
ResultRelInfo ** partitions
Definition: execPartition.c:98
Relation partition_root
Definition: execPartition.c:93
MemoryContext memcxt
Definition: execPartition.c:102
ResultRelInfo ** nonleaf_partitions
Definition: execPartition.c:95
Bitmapset * present_parts
Definition: plannodes.h:1676
Bitmapset * execparamids
Definition: plannodes.h:1705
List * initial_pruning_steps
Definition: plannodes.h:1700
PartitionPruneContext exec_context
Definition: execPartition.h:74
PartitionPruneContext initial_context
Definition: execPartition.h:73
Bitmapset * present_parts
Definition: execPartition.h:70
Plan * plan
Definition: execnodes.h:1159
EState * state
Definition: execnodes.h:1161
ExprContext * ps_ExprContext
Definition: execnodes.h:1198
TupleTableSlot * ps_ResultTupleSlot
Definition: execnodes.h:1197
Definition: rel.h:56
Form_pg_class rd_rel
Definition: rel.h:111
TupleTableSlot * ri_PartitionTupleSlot
Definition: execnodes.h:619
OnConflictSetState * ri_onConflict
Definition: execnodes.h:583
List * ri_onConflictArbiterIndexes
Definition: execnodes.h:580
Relation ri_RelationDesc
Definition: execnodes.h:480
struct CopyMultiInsertBuffer * ri_CopyMultiInsertBuffer
Definition: execnodes.h:622
Index ri_RangeTableIndex
Definition: execnodes.h:477
struct FdwRoutine * ri_FdwRoutine
Definition: execnodes.h:533
int ri_BatchSize
Definition: execnodes.h:544
AttrMap * attrMap
Definition: tupconvert.h:28
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
TupleTableSlot * table_slot_create(Relation relation, List **reglist)
Definition: tableam.c:92
TupleTableSlot * execute_attr_map_slot(AttrMap *attrMap, TupleTableSlot *in_slot, TupleTableSlot *out_slot)
Definition: tupconvert.c:193
static Datum slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
Definition: tuptable.h:398
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:457
Definition: pg_list.h:46
#define IsolationUsesXactSnapshot()
Definition: xact.h:52

AltStyle によって変換されたページ (->オリジナル) /