/*-------------------------------------------------------------------------
 *
 * nodeMemoize.c
 *	  Routines to handle caching of results from parameterized nodes
 *
 * Portions Copyright (c) 2021-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeMemoize.c
 *
 * Memoize nodes are intended to sit above parameterized nodes in the plan
 * tree in order to cache results from them.  The intention here is that a
 * repeat scan with a parameter value that has already been seen by the node
 * can fetch tuples from the cache rather than having to re-scan the inner
 * node all over again.  The query planner may choose to make use of one of
 * these when it thinks rescans for previously seen values are likely enough
 * to warrant adding the additional node.
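 *
 * For example, the planner might place a Memoize node between a nested loop
 * and a parameterized index scan.  A sketch of such a plan shape
 * (illustrative, not output from any particular query):
 *
 *		Nested Loop
 *		  ->  Seq Scan on t1
 *		  ->  Memoize
 *				Cache Key: t1.x
 *				->  Index Scan using t2_x_idx on t2
 *					  Index Cond: (x = t1.x)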
 *
 * The method of cache we use is a hash table.  When the cache fills, we never
 * spill tuples to disk; instead, we choose to evict the least recently used
 * cache entry from the cache.  We remember the least recently used entry by
 * always pushing new entries and entries we look for onto the tail of a
 * doubly linked list.  This means that older items always bubble to the head
 * of this LRU list, making them the first candidates for eviction.
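 *
 * In terms of PostgreSQL's dlist API, the LRU discipline amounts to the
 * following (a sketch; 'lru_list' and 'lru_node' are the fields this file
 * uses for that purpose):
 *
 *		dlist_push_tail(&mstate->lru_list, &key->lru_node);	on insert
 *		dlist_move_tail(&mstate->lru_list, &key->lru_node);	on cache hit
 *
 * with evictions walking the list from the head.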
 *
 * Sometimes our callers won't run their scans to completion.  For example, a
 * semi-join only needs to run until it finds a matching tuple, and once it
 * does, the join operator skips to the next outer tuple and does not execute
 * the inner side again on that scan.  Because of this, we must keep track of
 * when a cache entry is complete, and by default, we know it is when we run
 * out of tuples to read during the scan.  However, there are cases where we
 * can mark the cache entry as complete without exhausting the scan of all
 * tuples.  One case is unique joins, where the join operator knows that there
 * will only be at most one match for any given outer tuple.  In order to
 * support such cases we allow the "singlerow" option to be set for the cache.
 * This option marks the cache entry as complete after we read the first tuple
 * from the subnode.
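 *
 * In code terms, a cache miss simply initializes the new entry's
 * completeness from that flag (a sketch of how this shows up later in this
 * file):
 *
 *		entry->complete = mstate->singlerow;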
 *
 * It's possible when we're filling the cache for a given set of parameters
 * that we're unable to free enough memory to store any more tuples.  If this
 * happens then we'll have already evicted all other cache entries.  When
 * caching another tuple would cause us to exceed our memory budget, we must
 * free the entry that we're currently populating and move the state machine
 * into MEMO_CACHE_BYPASS_MODE.  This means that we'll not attempt to cache
 * any further tuples for this particular scan.  We don't have the memory for
 * it.  The state machine will be reset again on the next rescan.  If the
 * memory requirements to cache the next parameter's tuples are less
 * demanding, then that may allow us to start putting useful entries back
 * into the cache again.
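 *
 * The over-budget check and the switch into bypass mode look roughly like
 * this (a sketch of the pattern used when storing tuples, not a verbatim
 * excerpt):
 *
 *		if (mstate->mem_used > mstate->mem_limit)
 *		{
 *			if (!cache_reduce_memory(mstate, entry->key))
 *				mstate->mstatus = MEMO_CACHE_BYPASS_MODE;
 *		}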
 *
 *
 * INTERFACE ROUTINES
 *		ExecMemoize			- lookup cache, exec subplan when not found
 *		ExecInitMemoize		- initialize node and subnodes
 *		ExecEndMemoize		- shutdown node and subnodes
 *		ExecReScanMemoize	- rescan the memoize node
 *
 *		ExecMemoizeEstimate		estimates DSM space needed for parallel plan
 *		ExecMemoizeInitializeDSM	initialize DSM for parallel plan
 *		ExecMemoizeInitializeWorker	attach to DSM info in parallel worker
 *		ExecMemoizeRetrieveInstrumentation	get instrumentation from worker
 *-------------------------------------------------------------------------
 */
/* States of the ExecMemoize state machine */
#define MEMO_CACHE_LOOKUP			1	/* Attempt to perform a cache lookup */
#define MEMO_CACHE_FETCH_NEXT_TUPLE	2	/* Get another tuple from the cache */
#define MEMO_FILLING_CACHE			3	/* Read outer node to fill cache */
#define MEMO_CACHE_BYPASS_MODE		4	/* Bypass mode.  Just read from our
										 * subplan without caching anything */
#define MEMO_END_OF_SCAN			5	/* Ready for rescan */
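/*
 * A sketch of how ExecMemoize() drives these states (an illustrative
 * outline, not a verbatim excerpt of the function defined later in this
 * file):
 *
 *		switch (node->mstatus)
 *		{
 *			case MEMO_CACHE_LOOKUP:				probe cache; hit or miss
 *			case MEMO_CACHE_FETCH_NEXT_TUPLE:	return next cached tuple
 *			case MEMO_FILLING_CACHE:			read subplan, cache tuples
 *			case MEMO_CACHE_BYPASS_MODE:		read subplan, cache nothing
 *			case MEMO_END_OF_SCAN:				return NULL
 *		}
 */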
/* Helper macros for memory accounting */
#define EMPTY_ENTRY_MEMORY_BYTES(e)	(sizeof(MemoizeEntry) + \
									 sizeof(MemoizeKey) + \
									 (e)->key->params->t_len)
#define CACHE_TUPLE_BYTES(t)	(sizeof(MemoizeTuple) + \
								 (t)->mintuple->t_len)
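/*
 * These macros feed the cache's memory accounting; a sketch of how they are
 * applied (mem_used is the MemoizeState's running total):
 *
 *		mstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);	new entry
 *		mstate->mem_used += CACHE_TUPLE_BYTES(tuple);			tuple cached
 *
 * with matching subtractions when entries or tuples are removed.
 */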
/* The MemoizeTuple stores an individually cached tuple */
typedef struct MemoizeTuple
{
	MinimalTuple mintuple;		/* Cached tuple */
	struct MemoizeTuple *next;	/* The next tuple with the same parameter
								 * values, or NULL if it's the last one */
} MemoizeTuple;

/*
 * MemoizeKey
 *		The hash table key for cached entries plus the LRU list link
 */

/*
 * MemoizeEntry
 *		The data struct that the cache hash table stores
 */
typedef struct MemoizeEntry
{
	MemoizeKey *key;			/* Hash key for hash table lookups */
	MemoizeTuple *tuples;		/* Head of tuple list.  NULL if no tuples are
								 * cached for this entry */
	uint32		hash;			/* Hash value (cached) */
	char		status;			/* Hash status */
	bool		complete;		/* Did we read the outer plan to completion? */
} MemoizeEntry;
/* First, declare the hash table type ... */
#define SH_PREFIX memoize
#define SH_ELEMENT_TYPE MemoizeEntry
#define SH_KEY_TYPE MemoizeKey *
#define SH_SCOPE static inline
#define SH_DECLARE
#include "lib/simplehash.h"

static uint32 MemoizeHash_hash(struct memoize_hash *tb,
							   const MemoizeKey *key);
static bool MemoizeHash_equal(struct memoize_hash *tb,
							  const MemoizeKey *key1,
							  const MemoizeKey *key2);

/* ... then define its implementation */
#define SH_PREFIX memoize
#define SH_ELEMENT_TYPE MemoizeEntry
#define SH_KEY_TYPE MemoizeKey *
#define SH_KEY key
#define SH_HASH_KEY(tb, key) MemoizeHash_hash(tb, key)
#define SH_EQUAL(tb, a, b) MemoizeHash_equal(tb, a, b)
#define SH_SCOPE static inline
#define SH_STORE_HASH
#define SH_GET_HASH(tb, a) a->hash
#define SH_DEFINE
#include "lib/simplehash.h"
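/*
 * The template inclusion above generates the hash table API used throughout
 * this file; the calls of interest here are, roughly:
 *
 *		memoize_create(MemoryContext ctx, uint32 nelements, void *private);
 *		memoize_insert(table, key, &found);
 *		memoize_lookup(table, key);
 *		memoize_delete_item(table, entry);
 *		memoize_iterate(table, &iterator);
 */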
/*
 * MemoizeHash_hash
 *		Hash function for simplehash hashtable.  'key' is unused here as we
 *		require that all table lookups first populate the MemoizeState's
 *		probeslot with the key values to be looked up.
 */
static uint32
MemoizeHash_hash(struct memoize_hash *tb, const MemoizeKey *key)
{
	MemoizeState *mstate = (MemoizeState *) tb->private_data;
	TupleTableSlot *pslot = mstate->probeslot;
	uint32		hashkey = 0;
	int			numkeys = mstate->nkeys;

	if (mstate->binary_mode)
	{
		for (int i = 0; i < numkeys; i++)
		{
			/* combine successive hashkeys by rotating */
			hashkey = pg_rotate_left32(hashkey, 1);

			if (!pslot->tts_isnull[i])	/* treat nulls as having hash key 0 */
			{
				CompactAttribute *attr;

				attr = TupleDescCompactAttr(pslot->tts_tupleDescriptor, i);
				hashkey ^= datum_image_hash(pslot->tts_values[i],
											attr->attbyval, attr->attlen);
			}
		}
	}
	else
	{
		FmgrInfo   *hashfunctions = mstate->hashfunctions;
		Oid		   *collations = mstate->collations;

		for (int i = 0; i < numkeys; i++)
		{
			/* combine successive hashkeys by rotating */
			hashkey = pg_rotate_left32(hashkey, 1);

			if (!pslot->tts_isnull[i])	/* treat nulls as having hash key 0 */
				hashkey ^= DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i],
															collations[i],
															pslot->tts_values[i]));
		}
	}

	return murmurhash32(hashkey);
}
/*
 * MemoizeHash_equal
 *		Equality function for confirming hash value matches during a hash
 *		table lookup.  'key2' is never used.  Instead the MemoizeState's
 *		probeslot is always populated with details of what's being looked up.
 */
static bool
MemoizeHash_equal(struct memoize_hash *tb, const MemoizeKey *key1,
				  const MemoizeKey *key2)
{
	MemoizeState *mstate = (MemoizeState *) tb->private_data;
	ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
	TupleTableSlot *tslot = mstate->tableslot;
	TupleTableSlot *pslot = mstate->probeslot;

	/* probeslot should have already been prepared by prepare_probe_slot() */
	ExecStoreMinimalTuple(key1->params, tslot, false);

	if (mstate->binary_mode)
	{
		int			numkeys = mstate->nkeys;

		slot_getallattrs(tslot);
		slot_getallattrs(pslot);

		for (int i = 0; i < numkeys; i++)
		{
			CompactAttribute *attr;

			if (tslot->tts_isnull[i] != pslot->tts_isnull[i])
				return false;

			/* both NULL? they're equal */
			if (tslot->tts_isnull[i])
				continue;

			/* perform binary comparison on the two datums */
			attr = TupleDescCompactAttr(tslot->tts_tupleDescriptor, i);
			if (!datum_image_eq(tslot->tts_values[i], pslot->tts_values[i],
								attr->attbyval, attr->attlen))
				return false;
		}
		return true;
	}
	else
	{
		econtext->ecxt_innertuple = tslot;
		econtext->ecxt_outertuple = pslot;
		return ExecQual(castNode(ExprState, mstate->cache_eq_expr), econtext);
	}
}
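/*
 * Both callbacks rely on the same calling convention: fill the probeslot
 * first, then probe with a NULL key.  A sketch of a lookup as performed
 * elsewhere in this file:
 *
 *		prepare_probe_slot(mstate, NULL);
 *		entry = memoize_lookup(mstate->hashtable, NULL);
 */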
/*
 * build_hash_table
 *		Initialize the hash table to empty.  The MemoizeState's hashtable
 *		field must point to NULL.
 */
static void
build_hash_table(MemoizeState *mstate, uint32 size)
{
	/* Make a guess at a good size when we're not given a valid size. */
	if (size == 0)
		size = 1024;

	/* memoize_create will convert the size to a power of 2 */
	mstate->hashtable = memoize_create(mstate->tableContext, size, mstate);
}
/*
 * prepare_probe_slot
 *		Populate mstate's probeslot with the values from the tuple stored
 *		in 'key'.  If 'key' is NULL, then perform the population by
 *		evaluating mstate's param_exprs.
 */
static void
prepare_probe_slot(MemoizeState *mstate, MemoizeKey *key)
{
	TupleTableSlot *pslot = mstate->probeslot;
	TupleTableSlot *tslot = mstate->tableslot;
	int			numKeys = mstate->nkeys;

	ExecClearTuple(pslot);

	if (key == NULL)
	{
		ExprContext *econtext = mstate->ss.ps.ps_ExprContext;

		/* Set the probeslot's values based on the current parameter values */
		for (int i = 0; i < numKeys; i++)
			pslot->tts_values[i] = ExecEvalExpr(mstate->param_exprs[i],
												econtext,
												&pslot->tts_isnull[i]);
	}
	else
	{
		/* Process the key's MinimalTuple and store the values in probeslot */
		ExecStoreMinimalTuple(key->params, tslot, false);
		slot_getallattrs(tslot);
		memcpy(pslot->tts_values, tslot->tts_values, sizeof(Datum) * numKeys);
		memcpy(pslot->tts_isnull, tslot->tts_isnull, sizeof(bool) * numKeys);
	}

	ExecStoreVirtualTuple(pslot);
}
/*
 * entry_purge_tuples
 *		Remove all tuples from the cache entry pointed to by 'entry'.  This
 *		leaves an empty cache entry.  Also, update the memory accounting to
 *		reflect the removal of the tuples.
 */
static void
entry_purge_tuples(MemoizeState *mstate, MemoizeEntry *entry)
{
	MemoizeTuple *tuple = entry->tuples;
	uint64		freed_mem = 0;

	while (tuple != NULL)
	{
		MemoizeTuple *next = tuple->next;

		freed_mem += CACHE_TUPLE_BYTES(tuple);

		/* Free memory used for this tuple */
		pfree(tuple->mintuple);
		pfree(tuple);
		tuple = next;
	}

	entry->complete = false;
	entry->tuples = NULL;

	/* Update the memory accounting */
	mstate->mem_used -= freed_mem;
}
/*
 * remove_cache_entry
 *		Remove 'entry' from the cache and free memory used by it.
 */
static void
remove_cache_entry(MemoizeState *mstate, MemoizeEntry *entry)
{
	MemoizeKey *key = entry->key;

	dlist_delete(&entry->key->lru_node);

	/* Remove all of the tuples from this entry */
	entry_purge_tuples(mstate, entry);

	/*
	 * Update memory accounting.  entry_purge_tuples should have already
	 * subtracted the memory used for each cached tuple.  Here we just update
	 * the amount used by the entry itself.
	 */
	mstate->mem_used -= EMPTY_ENTRY_MEMORY_BYTES(entry);

	/* Remove the entry from the cache */
	memoize_delete_item(mstate->hashtable, entry);

	pfree(key->params);
	pfree(key);
}
/*
 * cache_purge_all
 *		Remove all items from the cache
 */
static void
cache_purge_all(MemoizeState *mstate)
{
	/*
	 * Likely the most efficient way to remove all items is to just reset the
	 * memory context for the cache and then rebuild a fresh hash table.  This
	 * saves having to remove each item one by one and pfree each cached tuple.
	 */
	MemoryContextReset(mstate->tableContext);

	/* NULLify so we recreate the table on the next call */
	mstate->hashtable = NULL;

	/* reset the LRU list */
	dlist_init(&mstate->lru_list);

	mstate->entry = NULL;
	mstate->last_tuple = NULL;
	mstate->mem_used = 0;

	/* XXX should we add something new to track these purges? */
}
/*
 * cache_reduce_memory
 *		Evict older and less recently used items from the cache in order to
 *		reduce the memory consumption back to something below the
 *		MemoizeState's mem_limit.
 *
 * 'specialkey', if not NULL, causes the function to return false if the
 * entry which the key belongs to is removed from the cache.
 */
static bool
cache_reduce_memory(MemoizeState *mstate, MemoizeKey *specialkey)
{
	bool		specialkey_intact = true;	/* for now */
	dlist_mutable_iter iter;

	/* Update peak memory usage */
	if (mstate->mem_used > mstate->stats.mem_peak)
		mstate->stats.mem_peak = mstate->mem_used;

	/* We expect only to be called when we've gone over budget on memory */
	Assert(mstate->mem_used > mstate->mem_limit);

	/* Start the eviction process starting at the head of the LRU list. */
	dlist_foreach_modify(iter, &mstate->lru_list)
	{
		MemoizeKey *key = dlist_container(MemoizeKey, lru_node, iter.cur);
		MemoizeEntry *entry;

		/*
		 * Populate the hash probe slot in preparation for looking up this
		 * LRU entry.
		 */
		prepare_probe_slot(mstate, key);

		/*
		 * Ideally the LRU list pointers would be stored in the entry itself
		 * rather than in the key.  Unfortunately, we can't do that as the
		 * simplehash.h code may resize the table and allocate new memory for
		 * entries which would result in those pointers pointing to the old
		 * buckets.  However, it's fine to use the key to store this as that's
		 * only referenced by a pointer in the entry, which of course follows
		 * the entry whenever the hash table is resized.  Since we only have a
		 * pointer to the key here, we must perform a hash table lookup to
		 * find the entry that the key belongs to.
		 */
		entry = memoize_lookup(mstate->hashtable, NULL);

		/*
		 * Sanity check that we found the entry belonging to the LRU list
		 * item.  A misbehaving hash or equality function could cause the
		 * entry not to be found or the wrong entry to be found.
		 */
		if (unlikely(entry == NULL || entry->key != key))
			elog(ERROR, "could not find memoization table entry");

		/*
		 * If we're being called to free memory while the cache is being
		 * populated with new tuples, then we'd better take some care as we
		 * could end up freeing the entry which 'specialkey' belongs to.
		 * Generally callers will pass 'specialkey' as the key for the cache
		 * entry which is currently being populated, so we must set
		 * 'specialkey_intact' to false to inform the caller the specialkey
		 * entry has been removed.
		 */
		if (key == specialkey)
			specialkey_intact = false;

		/*
		 * Finally remove the entry.  This will remove from the LRU list too.
		 */
		remove_cache_entry(mstate, entry);

		/* Exit if we've freed enough memory */
		if (mstate->mem_used <= mstate->mem_limit)
			break;
	}

	return specialkey_intact;
}
/*
 * cache_lookup
 *		Perform a lookup to see if we've already cached tuples based on the
 *		scan's current parameters.  If we find an existing entry we move it
 *		to the end of the LRU list, set *found to true then return it.  If
 *		we don't find an entry then we create a new one and add it to the
 *		end of the LRU list.  We also update cache memory accounting and
 *		remove older entries if we go over the memory budget.  If we managed
 *		to free enough memory we return the new entry, else we return NULL.
 *
 * Callers can assume we'll never return NULL when *found is true.
 */
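/*
 * A sketch of a typical call from the scan's first tuple fetch:
 *
 *		entry = cache_lookup(mstate, &found);
 *		if (found && entry->complete)
 *			... return tuples straight from the cache ...
 *		else if (entry != NULL)
 *			... fill the entry from the subplan ...
 *		else
 *			... not enough memory; read the subplan without caching ...
 */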
static MemoizeEntry *
cache_lookup(MemoizeState *mstate, bool *found)
{
	MemoizeKey *key;
	MemoizeEntry *entry;
	MemoryContext oldcontext;

	/* prepare the probe slot with the current scan parameters */
	prepare_probe_slot(mstate, NULL);

	/*
	 * Add the new entry to the cache.  No need to pass a valid key since the
	 * hash function uses mstate's probeslot, which we populated above.
	 */
	entry = memoize_insert(mstate->hashtable, NULL, found);

	if (*found)
	{
		/*
		 * Move existing entry to the tail of the LRU list to mark it as the
		 * most recently used item.
		 */
		dlist_move_tail(&mstate->lru_list, &entry->key->lru_node);

		return entry;
	}

	oldcontext = MemoryContextSwitchTo(mstate->tableContext);

	/* Allocate a new key */
	entry->key = key = (MemoizeKey *) palloc(sizeof(MemoizeKey));
	key->params = ExecCopySlotMinimalTuple(mstate->probeslot);

	/* Update the total cache memory utilization */
	mstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);

	/* Initialize this entry */
	entry->complete = false;
	entry->tuples = NULL;

	/*
	 * Since this is the most recently used entry, push this entry onto the
	 * end of the LRU list.
	 */
	dlist_push_tail(&mstate->lru_list, &key->lru_node);

	mstate->last_tuple = NULL;

	MemoryContextSwitchTo(oldcontext);

	/*
	 * If we've gone over our memory budget, then we'll free up some space in
	 * the cache.
	 */
	if (mstate->mem_used > mstate->mem_limit)
	{
		/*
		 * Try to free up some memory.  It's highly unlikely that we'll fail
		 * to do so here since the entry we've just added is yet to contain
		 * any tuples and we're able to remove any other entry to reduce the
		 * memory consumption.
		 */
		if (unlikely(!cache_reduce_memory(mstate, key)))
			return NULL;

		/*
		 * The process of removing entries from the cache may have caused the
		 * code in simplehash.h to shuffle elements to earlier buckets in the
		 * hash table.  If it has, we'll need to find the entry again by
		 * performing a lookup.  Fortunately, we can detect if this has
		 * happened by seeing if the entry is still in use and that the key
		 * pointer matches our expected key.
		 */
		if (entry->status != memoize_SH_IN_USE || entry->key != key)
		{
			/*
			 * We need to repopulate the probeslot as lookups performed during
			 * the cache evictions above will have stored some other key.
			 */
			prepare_probe_slot(mstate, key);

			/* Re-find the newly added entry */
			entry = memoize_lookup(mstate->hashtable, NULL);
			Assert(entry != NULL);
		}
	}

	return entry;
}
/*
 * cache_store_tuple
 *		Add the tuple stored in 'slot' to the mstate's current cache entry.
 *		The cache entry must have already been made with cache_lookup().
 *		mstate's last_tuple field must point to the tail of mstate->entry's
 *		list of tuples.
 */
static bool
cache_store_tuple(MemoizeState *mstate, TupleTableSlot *slot)
{
	MemoizeTuple *tuple;
	MemoizeEntry *entry = mstate->entry;

	tuple = (MemoizeTuple *) palloc(sizeof(MemoizeTuple));
	tuple->mintuple = ExecCopySlotMinimalTuple(slot);
	tuple->next = NULL;

	/* Account for the memory we just consumed */
	mstate->mem_used += CACHE_TUPLE_BYTES(tuple);

	if (entry->tuples == NULL)
	{
		/*
		 * This is the first tuple for this entry, so just point the list
		 * head to it.
		 */
		entry->tuples = tuple;
	}
	else
	{
		/* push this tuple onto the tail of the list */
		mstate->last_tuple->next = tuple;
	}

	mstate->last_tuple = tuple;

	/*
	 * If we've gone over our memory budget then free up some space in the
	 * cache.
	 */
	if (mstate->mem_used > mstate->mem_limit)
	{
		MemoizeKey *key = entry->key;

		if (!cache_reduce_memory(mstate, key))
			return false;

		/*
		 * The process of removing entries from the cache may have caused the
		 * code in simplehash.h to shuffle elements to earlier buckets in the
		 * hash table.  If it has, we'll need to find the entry again by
		 * performing a lookup.  Fortunately, we can detect if this has
		 * happened by seeing if the entry is still in use and that the key
		 * pointer matches our expected key.
		 */
		if (entry->status != memoize_SH_IN_USE || entry->key != key)
		{
			/*
			 * We need to repopulate the probeslot as lookups performed during
			 * the cache evictions above will have stored some other key.
			 */
			prepare_probe_slot(mstate, key);

			/* Re-find the entry */
			mstate->entry = entry = memoize_lookup(mstate->hashtable, NULL);
			Assert(entry != NULL);
		}
	}

	return true;
}
static TupleTableSlot *
ExecMemoize(PlanState *pstate)
{
	MemoizeState *node = castNode(MemoizeState, pstate);
	ExprContext *econtext = node->ss.ps.ps_ExprContext;
	TupleTableSlot *slot;
	TupleTableSlot *outerslot;
	MemoizeEntry *entry;
	bool		found;

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous tuple cycle.
	 */
	ResetExprContext(econtext);

	/* first call?  we'll need a hash table. */
	if (unlikely(node->hashtable == NULL))
		build_hash_table(node, ((Memoize *) pstate->plan)->est_entries);

	switch (node->mstatus)
	{
		case MEMO_CACHE_LOOKUP:

			/*
			 * We're only ever in this state for the first call of the
			 * scan.  Here we have a look to see if we've already seen the
			 * current parameters before and if we have already cached a
			 * complete set of records that the outer plan will return for
			 * these parameters.
			 *
			 * When we find a valid cache entry, we'll return the first
			 * tuple from it.  If not found, we'll create a cache entry and
			 * then try to fetch a tuple from the outer scan.  If we find
			 * one there, we'll try to cache it.
			 */

			/* see if we've got anything cached for the current parameters */
			entry = cache_lookup(node, &found);

			if (found && entry->complete)
			{
				/*
				 * Set last_tuple and entry so that the state
				 * MEMO_CACHE_FETCH_NEXT_TUPLE can easily find the next
				 * tuple for these parameters.
				 */
				node->last_tuple = entry->tuples;
				node->entry = entry;

				/* Fetch the first cached tuple, if there is one */
				if (entry->tuples != NULL)
				{
					node->mstatus = MEMO_CACHE_FETCH_NEXT_TUPLE;
					slot = node->ss.ps.ps_ResultTupleSlot;
					ExecStoreMinimalTuple(entry->tuples->mintuple, slot,
										  false);
					return slot;
				}

				/* The cache entry is void of any tuples. */
				node->mstatus = MEMO_END_OF_SCAN;
				return NULL;
			}

			/* Handle cache miss */
			if (found)
			{
				/*
				 * A cache entry was found, but the scan for that entry
				 * did not run to completion.  We'll just remove all
				 * tuples and start again.  It might be tempting to
				 * continue where we left off, but there's no guarantee
				 * the outer node will produce the tuples in the same
				 * order as it did last time.
				 */
				entry_purge_tuples(node, entry);
			}

			/* Scan the outer node for a tuple to cache */
			outerslot = ExecProcNode(outerPlanState(node));
			if (TupIsNull(outerslot))
			{
				/*
				 * cache_lookup may have returned NULL due to failure to
				 * free enough cache space, so ensure we don't do anything
				 * here that assumes it worked.  There's no need to go into
				 * bypass mode here as we're setting mstatus to end of
				 * scan.
				 */
				if (likely(entry))
					entry->complete = true;

				node->mstatus = MEMO_END_OF_SCAN;
				return NULL;
			}

			node->entry = entry;

			/*
			 * If we failed to create the entry or failed to store the
			 * tuple in the entry, then go into bypass mode.
			 */
			if (unlikely(entry == NULL ||
						 !cache_store_tuple(node, outerslot)))
			{
				node->mstatus = MEMO_CACHE_BYPASS_MODE;

				/*
				 * No need to clear out last_tuple as we'll stay in bypass
				 * mode until the end of the scan.
				 */
			}
			else
			{
				/*
				 * If we only expect a single row from this scan then we
				 * can mark that we're not expecting more.  This allows
				 * cache lookups to work even when the scan has not been
				 * executed to completion.
				 */
				entry->complete = node->singlerow;
				node->mstatus = MEMO_FILLING_CACHE;
			}

			slot = node->ss.ps.ps_ResultTupleSlot;
			ExecCopySlot(slot, outerslot);
			return slot;
		case MEMO_CACHE_FETCH_NEXT_TUPLE:

			/* We shouldn't be in this state if these are not set */
			Assert(node->entry != NULL);
			Assert(node->last_tuple != NULL);

			/* Skip to the next tuple to output */
			node->last_tuple = node->last_tuple->next;

			/* No more tuples in the cache */
			if (node->last_tuple == NULL)
			{
				node->mstatus = MEMO_END_OF_SCAN;
				return NULL;
			}

			slot = node->ss.ps.ps_ResultTupleSlot;
			ExecStoreMinimalTuple(node->last_tuple->mintuple, slot, false);
			return slot;

		case MEMO_FILLING_CACHE:

			/* entry should already have been set by MEMO_CACHE_LOOKUP */
			Assert(node->entry != NULL);

			/*
			 * When in the MEMO_FILLING_CACHE state, we've just had a
			 * cache miss and are populating the cache with the current
			 * scan's tuples.
			 */
			outerslot = ExecProcNode(outerPlanState(node));
			if (TupIsNull(outerslot))
			{
				/* No more tuples.  Mark it as complete */
				node->entry->complete = true;
				node->mstatus = MEMO_END_OF_SCAN;
				return NULL;
			}

			/*
			 * Validate if the planner properly set the singlerow flag.  It
			 * should only set that if each cache entry can, at most,
			 * return one row.
			 */
			if (unlikely(node->entry->complete))
				elog(ERROR, "cache entry already complete");

			/* Record the tuple in the current cache entry */
			if (unlikely(!cache_store_tuple(node, outerslot)))
			{
				/* Couldn't store it?  Handle overflow */
				node->mstatus = MEMO_CACHE_BYPASS_MODE;

				/*
				 * No need to clear out entry or last_tuple as we'll stay
				 * in bypass mode until the end of the scan.
				 */
			}

			slot = node->ss.ps.ps_ResultTupleSlot;
			ExecCopySlot(slot, outerslot);
			return slot;

		case MEMO_CACHE_BYPASS_MODE:

			/*
			 * When in bypass mode we just continue to read tuples without
			 * caching.  We need to wait until the next rescan before we
			 * can come out of this mode.
			 */
			outerslot = ExecProcNode(outerPlanState(node));
			if (TupIsNull(outerslot))
			{
				node->mstatus = MEMO_END_OF_SCAN;
				return NULL;
			}

			slot = node->ss.ps.ps_ResultTupleSlot;
			ExecCopySlot(slot, outerslot);
			return slot;

		case MEMO_END_OF_SCAN:

			/*
			 * We've already returned NULL for this scan, but just in case
			 * something calls us again by mistake.
			 */
			return NULL;

		default:
			elog(ERROR, "unrecognized memoize state: %d",
				 (int) node->mstatus);
			return NULL;
	}
}
MemoizeState *
ExecInitMemoize(Memoize *node, EState *estate, int eflags)
{
	MemoizeState *mstate = makeNode(MemoizeState);
	int			nkeys;
	int			i;
	Oid		   *eqfuncoids;

	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

	mstate->ss.ps.plan = (Plan *) node;
	mstate->ss.ps.state = estate;
	mstate->ss.ps.ExecProcNode = ExecMemoize;

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &mstate->ss.ps);

	outerPlanState(mstate) = ExecInitNode(outerPlan(node), estate, eflags);

	/*
	 * Initialize return slot and type.  No need to initialize projection
	 * info because this node doesn't do projections.
	 */
	ExecInitResultTupleSlotTL(&mstate->ss.ps, &TTSOpsMinimalTuple);
	mstate->ss.ps.ps_ProjInfo = NULL;

	/*
	 * Initialize scan slot and type.
	 */
	ExecCreateScanSlotFromOuterPlan(estate, &mstate->ss, &TTSOpsMinimalTuple);

	/*
	 * Set the state machine to lookup the cache.  We won't find anything
	 * until we cache something, but this saves a special case to create the
	 * first entry.
	 */
	mstate->mstatus = MEMO_CACHE_LOOKUP;
	mstate->nkeys = nkeys = node->numKeys;
	mstate->hashkeydesc = ExecTypeFromExprList(node->param_exprs);
	mstate->tableslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
												 &TTSOpsMinimalTuple);
	mstate->probeslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
												 &TTSOpsVirtual);

	mstate->param_exprs = (ExprState **) palloc(nkeys * sizeof(ExprState *));
	mstate->collations = node->collations; /* Just point directly to the plan
											* data */
	mstate->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));

	eqfuncoids = palloc(nkeys * sizeof(Oid));

	for (i = 0; i < nkeys; i++)
	{
		Oid			hashop = node->hashOperators[i];
		Oid			left_hashfn;
		Oid			right_hashfn;
		Expr	   *param_expr = (Expr *) list_nth(node->param_exprs, i);

		if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
			elog(ERROR, "could not find hash function for hash operator %u",
				 hashop);

		fmgr_info(left_hashfn, &mstate->hashfunctions[i]);

		mstate->param_exprs[i] = ExecInitExpr(param_expr,
											  (PlanState *) mstate);
		eqfuncoids[i] = get_opcode(hashop);
	}

	mstate->cache_eq_expr = ExecBuildParamSetEqual(mstate->hashkeydesc,
												   &TTSOpsMinimalTuple,
												   &TTSOpsVirtual,
												   eqfuncoids,
												   node->collations,
												   node->param_exprs,
												   (PlanState *) mstate);
	/* Limit the total memory consumed by the cache to this */
	mstate->mem_limit = get_hash_memory_limit();

	/* A memory context dedicated for the cache */
	mstate->tableContext = AllocSetContextCreate(CurrentMemoryContext,
												 "MemoizeHashTable",
												 ALLOCSET_DEFAULT_SIZES);

	dlist_init(&mstate->lru_list);
	mstate->last_tuple = NULL;
	mstate->entry = NULL;

	/*
	 * Mark if we can assume the cache entry is completed after we get the
	 * first record for it.  Some callers might not call us again after
	 * getting the first match.  e.g. A join operator performing a unique
	 * join is able to skip to the next outer tuple after getting the first
	 * matching inner tuple.  In this case, the cache entry is complete after
	 * getting the first tuple.  This allows us to mark it as so.
	 */
	mstate->singlerow = node->singlerow;
	mstate->keyparamids = node->keyparamids;

	/*
	 * Record if the cache keys should be compared bit by bit, or logically
	 * using the type's hash equality operator
	 */
	mstate->binary_mode = node->binary_mode;

	/* Zero the statistics counters */
	memset(&mstate->stats, 0, sizeof(mstate->stats));

	/*
	 * Because it may require a large allocation, we delay building of the
	 * hash table until executor run.
	 */
	mstate->hashtable = NULL;

	return mstate;
}
void
ExecEndMemoize(MemoizeState *node)
{
#ifdef USE_ASSERT_CHECKING
	/* Validate the memory accounting code is correct in assert builds. */
	{
		int			count = 0;
		uint64		mem = 0;
		memoize_iterator i;
		MemoizeEntry *entry;

		memoize_start_iterate(node->hashtable, &i);
		while ((entry = memoize_iterate(node->hashtable, &i)) != NULL)
		{
			MemoizeTuple *tuple = entry->tuples;

			mem += EMPTY_ENTRY_MEMORY_BYTES(entry);
			while (tuple != NULL)
			{
				mem += CACHE_TUPLE_BYTES(tuple);
				tuple = tuple->next;
			}
			count++;
		}

		Assert(count == node->hashtable->members);
		Assert(mem == node->mem_used);
	}
#endif
	/*
	 * When ending a parallel worker, copy the statistics gathered by the
	 * worker back into shared memory so that it can be picked up by the main
	 * process to report in EXPLAIN ANALYZE.
	 */
	if (node->shared_info != NULL && IsParallelWorker())
	{
		MemoizeInstrumentation *si;

		/* Make mem_peak available for EXPLAIN */
		if (node->stats.mem_peak == 0)
			node->stats.mem_peak = node->mem_used;

		Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
		si = &node->shared_info->sinstrument[ParallelWorkerNumber];
		memcpy(si, &node->stats, sizeof(MemoizeInstrumentation));
	}
	/* Remove the cache context */
	MemoryContextDelete(node->tableContext);

	/*
	 * shut down the subplan
	 */
	ExecEndNode(outerPlanState(node));
}
void
ExecReScanMemoize(MemoizeState *node)
{
	PlanState  *outerPlan = outerPlanState(node);

	/* Mark that we must lookup the cache for a new set of parameters */
	node->mstatus = MEMO_CACHE_LOOKUP;

	/* nullify pointers used for the last scan */
	node->entry = NULL;
	node->last_tuple = NULL;

	/*
	 * if chgParam of subnode is not null then plan will be re-scanned by
	 * first ExecProcNode.
	 */
	if (outerPlan->chgParam == NULL)
		ExecReScan(outerPlan);

	/*
	 * Purge the entire cache if a parameter changed that is not part of the
	 * cache key.
	 */
	if (bms_nonempty_difference(outerPlan->chgParam, node->keyparamids))
		cache_purge_all(node);
}
/*
 * ExecEstimateCacheEntryOverheadBytes
 *		For use in the query planner to help it estimate the amount of memory
 *		required to store a single entry in the cache.
 */
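double
ExecEstimateCacheEntryOverheadBytes(double ntuples)
{
	/*
	 * A sketch of the computation: the fixed per-entry overhead plus the
	 * per-tuple list overhead, based on the structs this file allocates for
	 * each cached entry and tuple.
	 */
	return sizeof(MemoizeEntry) + sizeof(MemoizeKey) +
		sizeof(MemoizeTuple) * ntuples;
}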
/* ----------------------------------------------------------------
 *						Parallel Query Support
 * ----------------------------------------------------------------
 */
/* ----------------------------------------------------------------
 *		ExecMemoizeEstimate
 *
 *		Estimate space required to propagate memoize statistics.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeEstimate(MemoizeState *node, ParallelContext *pcxt)
{
	Size		size;
	/* don't need this if not instrumenting or no workers */
	if (!node->ss.ps.instrument || pcxt->nworkers == 0)
		return;

	size = mul_size(pcxt->nworkers, sizeof(MemoizeInstrumentation));
	size = add_size(size, offsetof(SharedMemoizeInfo, sinstrument));
	shm_toc_estimate_chunk(&pcxt->estimator, size);
	shm_toc_estimate_keys(&pcxt->estimator, 1);
}
/* ----------------------------------------------------------------
 *		ExecMemoizeInitializeDSM
 *
 *		Initialize DSM space for memoize statistics.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeInitializeDSM(MemoizeState *node, ParallelContext *pcxt)
{
	Size		size;
	/* don't need this if not instrumenting or no workers */
	if (!node->ss.ps.instrument || pcxt->nworkers == 0)
		return;

	size = offsetof(SharedMemoizeInfo, sinstrument)
		+ pcxt->nworkers * sizeof(MemoizeInstrumentation);
	node->shared_info = shm_toc_allocate(pcxt->toc, size);
	/* ensure any unfilled slots will contain zeroes */
	memset(node->shared_info, 0, size);
	node->shared_info->num_workers = pcxt->nworkers;
	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
				   node->shared_info);
}
/* ----------------------------------------------------------------
 *		ExecMemoizeInitializeWorker
 *
 *		Attach worker to DSM space for memoize statistics.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeInitializeWorker(MemoizeState *node, ParallelWorkerContext *pwcxt)
{
	node->shared_info =
		shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);
}
/* ----------------------------------------------------------------
 *		ExecMemoizeRetrieveInstrumentation
 *
 *		Transfer memoize statistics from DSM to private memory.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeRetrieveInstrumentation(MemoizeState *node)
{
	Size		size;
	SharedMemoizeInfo *si;

	if (node->shared_info == NULL)
		return;

	size = offsetof(SharedMemoizeInfo, sinstrument)
		+ node->shared_info->num_workers * sizeof(MemoizeInstrumentation);
	si = palloc(size);
	memcpy(si, node->shared_info, size);
	node->shared_info = si;
}