/*-------------------------------------------------------------------------
 *
 * portalmem.c
 *	  backend portal memory management
 *
 * Portals are objects representing the execution state of a query.
 * This module provides memory management services for portals, but it
 * doesn't actually run the executor for them.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/portalmem.c
 *
 *-------------------------------------------------------------------------
 */
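/*
 * (Include set reconstructed for context -- the original list is elided
 * here; these are the obvious dependencies of the code below.)
 */
#include "postgres.h"

#include "access/xact.h"
#include "commands/portalcmds.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/timestamp.h"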
/*
 * Estimate of the maximum number of open portals a user would have,
 * used in initially sizing the PortalHashTable in EnablePortalManager().
 * Since the hash table can expand, there's no need to make this overly
 * generous, and keeping it small avoids unnecessary overhead in the
 * hash_seq_search() calls executed during transaction end.
 */
#define PORTALS_PER_USER	16
/* define hash table for use with portals */
#define MAX_PORTALNAME_LEN		NAMEDATALEN

/*
 * Hash entry for the portal table; the layout matches what the macros
 * below assume (name key first, then the Portal pointer).
 */
typedef struct portalhashent
{
	char		portalname[MAX_PORTALNAME_LEN];
	Portal		portal;
} PortalHashEnt;

static HTAB *PortalHashTable = NULL;

/* Top-level memory context that all portal contexts live under */
static MemoryContext TopPortalContext = NULL;
#define PortalHashTableLookup(NAME, PORTAL) \
do { \
	PortalHashEnt *hentry; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   (NAME), HASH_FIND, NULL); \
	if (hentry) \
		PORTAL = hentry->portal; \
	else \
		PORTAL = NULL; \
} while(0)
#define PortalHashTableInsert(PORTAL, NAME) \
do { \
	PortalHashEnt *hentry; \
	bool		found; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   (NAME), HASH_ENTER, &found); \
	if (found) \
		elog(ERROR, "duplicate portal name"); \
	hentry->portal = PORTAL; \
	/* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
	PORTAL->name = hentry->portalname; \
} while(0)
#define PortalHashTableDelete(PORTAL) \
do { \
	PortalHashEnt *hentry; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   PORTAL->name, HASH_REMOVE, NULL); \
	if (hentry == NULL) \
		elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)
/* ----------------------------------------------------------------
 *				   public portal interface functions
 * ----------------------------------------------------------------
 */
/*
 * EnablePortalManager
 *		Enables the portal management module at backend startup.
 */
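/*
 * A minimal sketch of the setup this function performs (the original body
 * is elided above): create TopPortalContext and the portal hash table.
 * The HASH_ELEM | HASH_STRINGS flags are an assumption based on the
 * string-keyed table the lookup macros imply.
 */
void
EnablePortalManager(void)
{
	HASHCTL		ctl;

	Assert(TopPortalContext == NULL);

	TopPortalContext = AllocSetContextCreate(TopMemoryContext,
											 "TopPortalContext",
											 ALLOCSET_DEFAULT_SIZES);

	ctl.keysize = MAX_PORTALNAME_LEN;
	ctl.entrysize = sizeof(PortalHashEnt);

	/*
	 * use PORTALS_PER_USER as a guess of how many hash table entries to
	 * create, initially
	 */
	PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
								  &ctl, HASH_ELEM | HASH_STRINGS);
}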
/*
 * GetPortalByName
 *		Returns a portal given a portal name, or NULL if name not found.
 */
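/*
 * A minimal sketch of the lookup (original body elided): defer to the
 * PortalHashTableLookup macro defined above.
 */
Portal
GetPortalByName(const char *name)
{
	Portal		portal;

	if (PointerIsValid(name))
		PortalHashTableLookup(name, portal);
	else
		portal = NULL;

	return portal;
}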
/*
 * PortalGetPrimaryStmt
 *		Get the "primary" stmt within a portal, ie, the one marked canSetTag.
 *
 * Returns NULL if no such stmt.  If multiple PlannedStmt structs within the
 * portal are marked canSetTag, returns the first one.  Neither of these
 * cases should occur in present usages of this function.
 */
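/*
 * A sketch of the scan the comment describes (original body elided): walk
 * portal->stmts and return the first canSetTag entry.
 */
PlannedStmt *
PortalGetPrimaryStmt(Portal portal)
{
	ListCell   *lc;

	foreach(lc, portal->stmts)
	{
		PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);

		if (stmt->canSetTag)
			return stmt;
	}
	return NULL;
}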
/*
 * CreatePortal
 *		Returns a new portal given a name.
 *
 * allowDup: if true, automatically drop any pre-existing portal of the
 * same name (if false, an error is raised).
 *
 * dupSilent: if true, don't even emit a WARNING.
 */
	if (PortalIsValid(portal))
	{
		if (!allowDup)
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("cursor \"%s\" already exists", name)));
		if (!dupSilent)
			ereport(WARNING,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("closing existing cursor \"%s\"",
							name)));
		PortalDrop(portal, false);
	}
	/* make new portal structure */

	/* initialize portal context; typically it won't store much */

	/* create a resource owner for the portal */

	/* initialize portal fields that don't start off zero */
	portal->atEnd = true;		/* disallow fetches until query is set */

	/* put portal in table (sets portal->name) */

	/* for named portals reuse portal->name copy */
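	/*
	 * A condensed sketch of the steps annotated above, in order (the
	 * original body is elided; a few zero-start fields are omitted here).
	 */
	portal = (Portal) MemoryContextAllocZero(TopPortalContext,
											 sizeof *portal);

	portal->portalContext = AllocSetContextCreate(TopPortalContext,
												  "PortalContext",
												  ALLOCSET_SMALL_SIZES);

	portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
										   "Portal");

	portal->status = PORTAL_NEW;
	portal->cleanup = PortalCleanup;
	portal->createSubid = GetCurrentSubTransactionId();
	portal->activeSubid = portal->createSubid;
	portal->createLevel = GetCurrentTransactionNestLevel();
	portal->strategy = PORTAL_MULTI_QUERY;
	portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
	portal->atStart = true;
	portal->atEnd = true;
	portal->visible = true;
	portal->creation_time = GetCurrentStatementStartTimestamp();

	PortalHashTableInsert(portal, name);

	if (portal->name[0])
		MemoryContextSetIdentifier(portal->portalContext, portal->name);

	return portal;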
/*
 * CreateNewPortal
 *		Create a new portal, assigning it a random nonconflicting name.
 */
/*
 * PortalDefineQuery
 *		A simple subroutine to establish a portal's query.
 *
 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
 * allowed anymore to pass NULL.  (If you really don't have source text,
 * you can pass a constant string, perhaps "(query not available)".)
 *
 * commandTag shall be CMDTAG_UNKNOWN if and only if the original query
 * string (before rewriting) was an empty string.  (CommandTag is an enum,
 * so it is passed by value and need not be copied.)
 *
 * If cplan is provided, then it is a cached plan containing the stmts, and
 * the caller must have done GetCachedPlan(), causing a refcount increment.
 * The refcount will be released when the portal is destroyed.
 *
 * If cplan is NULL, then it is the caller's responsibility to ensure that
 * the passed plan trees have adequate lifetime.  Typically this is done by
 * copying them into the portal's context.
 *
 * The caller is also responsible for ensuring that the passed prepStmtName
 * (if not NULL) and sourceText have adequate lifetime.
 *
 * NB: this function mustn't do much beyond storing the passed values; in
 * particular don't do anything that risks elog(ERROR).  If that were to
 * happen here before storing the cplan reference, we'd leak the plancache
 * refcount that the caller is trying to hand off to us.
 */
void
PortalDefineQuery(Portal portal,
				  const char *prepStmtName,
				  const char *sourceText,
				  CommandTag commandTag,
				  List *stmts,
				  CachedPlan *cplan)
{
	Assert(sourceText != NULL);
	Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);
	portal->stmts = stmts;
	portal->cplan = cplan;
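	/*
	 * Sketch of the remaining stores (elided above): the other parameters
	 * are saved the same way, and the portal becomes DEFINED.  Field names
	 * follow PortalData in utils/portal.h.
	 */
	portal->prepStmtName = prepStmtName;
	portal->sourceText = sourceText;
	portal->commandTag = commandTag;
	portal->status = PORTAL_DEFINED;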
/*
 * PortalReleaseCachedPlan
 *		Release a portal's reference to its cached plan, if any.
 */
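/*
 * A sketch of the release (original body elided).  Passing NULL as the
 * ResourceOwner is an assumption -- that the portal's plan refcount is
 * not resowner-tracked.
 */
static void
PortalReleaseCachedPlan(Portal portal)
{
	if (portal->cplan)
	{
		ReleaseCachedPlan(portal->cplan, NULL);
		portal->cplan = NULL;

		/*
		 * We must also clear portal->stmts, which is now a dangling reference
		 * to the cached plan's plan list.  This protects any code that might
		 * try to examine the Portal later.
		 */
		portal->stmts = NIL;
	}
}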
/*
 * PortalCreateHoldStore
 *		Create the tuplestore for a portal.
 */
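/*
 * A sketch of the body (elided above); using work_mem for the store size
 * is an assumption, consistent with the XXX question below.
 */
void
PortalCreateHoldStore(Portal portal)
{
	MemoryContext oldcxt;

	Assert(portal->holdContext == NULL);
	Assert(portal->holdStore == NULL);

	/*
	 * Create the memory context that is used for storage of the tuple set.
	 * Note this is NOT a child of the portal's portalContext.
	 */
	portal->holdContext =
		AllocSetContextCreate(TopPortalContext,
							  "PortalHoldContext",
							  ALLOCSET_DEFAULT_SIZES);

	/*
	 * Create the tuple store, selecting cross-transaction temp files, and
	 * enabling random access only if cursor requires scrolling.
	 *
	 * XXX: Should maintenance_work_mem be used for the portal size?
	 */
	oldcxt = MemoryContextSwitchTo(portal->holdContext);

	portal->holdStore =
		tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
							  true, work_mem);

	MemoryContextSwitchTo(oldcxt);
}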
/*
 * PinPortal
 *		Protect a portal from dropping.
 *
 * A pinned portal is still unpinned and dropped at transaction or
 * subtransaction abort.
 */
/*
 * MarkPortalActive
 *		Transition a portal from READY to ACTIVE state.
 *
 * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
 */
void
MarkPortalActive(Portal portal)
{
	/* For safety, this is a runtime test not just an Assert */
	if (portal->status != PORTAL_READY)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("portal \"%s\" cannot be run", portal->name)));

	/* Perform the state transition */
	portal->status = PORTAL_ACTIVE;
	portal->activeSubid = GetCurrentSubTransactionId();
}
/*
 * MarkPortalDone
 *		Transition a portal from ACTIVE to DONE state.
 *
 * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
 */
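/*
 * A sketch of the transition (original body elided); MarkPortalFailed
 * below has the same shape, with PORTAL_FAILED and no status Assert.
 */
void
MarkPortalDone(Portal portal)
{
	/* Perform the state transition */
	Assert(portal->status == PORTAL_ACTIVE);
	portal->status = PORTAL_DONE;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving execution of a ROLLBACK command in an already
	 * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
	 * with the cleanup hook still unexecuted.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}
}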
/*
 * MarkPortalFailed
 *		Transition a portal into FAILED state.
 *
 * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
 */
	/* Perform the state transition */

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving cleanup of an already aborted transaction, this
	 * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
	 * still unexecuted.
	 */
/*
 * PortalDrop
 *		Destroy the portal.
 */
	/*
	 * Don't allow dropping a pinned portal, it's still needed by whoever
	 * pinned it.
	 */
	if (portal->portalPinned)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop pinned portal \"%s\"", portal->name)));

	/*
	 * Not sure if the PORTAL_ACTIVE case can validly happen or not...
	 */
	if (portal->status == PORTAL_ACTIVE)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop active portal \"%s\"", portal->name)));
	/*
	 * Allow portalcmds.c to clean up the state it knows about, in particular
	 * shutting down the executor if still active.  This step potentially runs
	 * user-defined code so failure has to be expected.  It's the cleanup
	 * hook's responsibility to not try to do that more than once, in the case
	 * that failure occurs and then we come back to drop the portal again
	 * during transaction abort.
	 *
	 * Note: in most paths of control, this will have been done already in
	 * MarkPortalDone or MarkPortalFailed.  We're just making sure.
	 */

	/* There shouldn't be an active snapshot anymore, except after error */

	/*
	 * Remove portal from hash table.  Because we do this here, we will not
	 * come back to try to remove the portal again if there's any error in the
	 * subsequent steps.  Better to leak a little memory than to get into an
	 * infinite error-recovery loop.
	 */

	/* drop cached plan reference, if any */

	/*
	 * If portal has a snapshot protecting its data, release that.  This needs
	 * a little care since the registration will be attached to the portal's
	 * resowner; if the portal failed, we will already have released the
	 * resowner (and the snapshot) during transaction abort.
	 */
	/*
	 * Release any resources still attached to the portal.  There are several
	 * cases being covered here:
	 *
	 * Top transaction commit (indicated by isTopCommit): normally we should
	 * do nothing here and let the regular end-of-transaction resource
	 * releasing mechanism handle these resources too.  However, if we have a
	 * FAILED portal (eg, a cursor that got an error), we'd better clean up
	 * its resources to avoid resource-leakage warning messages.
	 *
	 * Sub transaction commit: never comes here at all, since we don't kill
	 * any portals in AtSubCommit_Portals().
	 *
	 * Main or sub transaction abort: we will do nothing here because
	 * portal->resowner was already set NULL; the resources were already
	 * cleaned up in transaction abort.
	 *
	 * Ordinary portal drop: must release resources.  However, if the portal
	 * is not FAILED then we do not release its locks.  The locks become the
	 * responsibility of the transaction's ResourceOwner (since it is the
	 * parent of the portal's owner) and will be released when the transaction
	 * eventually ends.
	 */
	/*
	 * Delete tuplestore if present.  We should do this even under error
	 * conditions; since the tuplestore would have been using cross-
	 * transaction storage, its temp files need to be explicitly deleted.
	 */

	/* delete tuplestore storage, if any */

	/* release subsidiary storage */

	/* release portal struct (it's in TopPortalContext) */
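	/*
	 * A condensed sketch of the drop sequence those comments describe (the
	 * original body is elided): unhook from the hash table, release plan
	 * and snapshot references, release resources, then free storage.
	 */
	PortalHashTableDelete(portal);

	PortalReleaseCachedPlan(portal);

	if (portal->holdSnapshot)
	{
		if (portal->resowner)
			UnregisterSnapshotFromOwner(portal->holdSnapshot,
										portal->resowner);
		portal->holdSnapshot = NULL;
	}

	if (portal->resowner)
	{
		bool		isCommit = (portal->status != PORTAL_FAILED);

		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_BEFORE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_AFTER_LOCKS,
							 isCommit, false);
		ResourceOwnerDelete(portal->resowner);
	}
	portal->resowner = NULL;

	if (portal->holdStore)
	{
		MemoryContext oldcontext;

		oldcontext = MemoryContextSwitchTo(portal->holdContext);
		tuplestore_end(portal->holdStore);
		MemoryContextSwitchTo(oldcontext);
		portal->holdStore = NULL;
	}
	if (portal->holdContext)
		MemoryContextDelete(portal->holdContext);

	MemoryContextDelete(portal->portalContext);

	pfree(portal);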
/*
 * PortalHashTableDeleteAll
 *		Delete all declared cursors.
 *
 * Used by commands: CLOSE ALL, DISCARD ALL
 */
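/*
 * A sketch of the scan-and-drop loop (original body elided): dropping a
 * portal can run user code that drops others, so restart the seq scan
 * after each drop.
 */
void
PortalHashTableDeleteAll(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	if (PortalHashTable == NULL)
		return;

	hash_seq_init(&status, PortalHashTable);
	while ((hentry = hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* Can't close the active portal (the one running the command) */
		if (portal->status == PORTAL_ACTIVE)
			continue;

		PortalDrop(portal, false);

		/* Restart the iteration in case that led to other drops */
		hash_seq_term(&status);
		hash_seq_init(&status, PortalHashTable);
	}
}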
633 * "Hold" a portal. Prepare it for access by later transactions.
639 * Note that PersistHoldablePortal() must release all resources used by
640 * the portal that are local to the creating transaction.
645 /* drop cached plan reference, if any */
649 * Any resources belonging to the portal will be released in the upcoming
650 * transaction-wide cleanup; the portal will no longer have its own
656 * Having successfully exported the holdable cursor, mark it as not
657 * belonging to this transaction.
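/*
 * A sketch of the hold sequence (original body elided).
 */
static void
HoldPortal(Portal portal)
{
	/*
	 * Note that PersistHoldablePortal() must release all resources used by
	 * the portal that are local to the creating transaction.
	 */
	PortalCreateHoldStore(portal);
	PersistHoldablePortal(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * Any resources belonging to the portal will be released in the upcoming
	 * transaction-wide cleanup; the portal will no longer have its own
	 * resources.
	 */
	portal->resowner = NULL;

	/*
	 * Having successfully exported the holdable cursor, mark it as not
	 * belonging to this transaction.
	 */
	portal->createSubid = InvalidSubTransactionId;
	portal->activeSubid = InvalidSubTransactionId;
	portal->createLevel = 0;
}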
/*
 * PreCommit_Portals
 *		Pre-commit processing for portals.
 *
 * Holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Non-holdable portals created in this transaction are
 * simply removed.  Portals remaining from prior transactions should be
 * left untouched.
 *
 * Returns true if any portals changed state (possibly causing user-defined
 * code to be run), false if not.
 */
		/*
		 * There should be no pinned portals anymore.  Complain if someone
		 * leaked one.  Auto-held portals are allowed; we assume that whoever
		 * pinned them is managing them.
		 */
		if (portal->portalPinned && !portal->autoHeld)
			elog(ERROR, "cannot commit while a portal is pinned");
		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction utility command, such as VACUUM, or a commit in
		 * a procedure.
		 *
		 * Note however that any resource owner attached to such a portal is
		 * still going to go away, so don't leave a dangling pointer.  Also
		 * unregister any snapshots held by the portal, mainly to avoid
		 * snapshot leak warnings from ResourceOwnerRelease().
		 */

		/* Clear portalSnapshot too, for cleanliness */
		portal->portalSnapshot = NULL;
		/* Is it a holdable portal created in the current xact? */

		/*
		 * We are exiting the transaction that created a holdable cursor.
		 * Instead of dropping the portal, prepare it for access by later
		 * transactions.
		 *
		 * However, if this is PREPARE TRANSACTION rather than COMMIT,
		 * refuse PREPARE, because the semantics seem pretty unclear.
		 */
		if (isPrepare)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));

		HoldPortal(portal);

		/* Report we changed state */
		result = true;
		/*
		 * Do nothing to cursors held over from a previous transaction
		 * (including ones we just froze in a previous cycle of this loop).
		 */

		/* Zap all non-holdable portals */

		/* Report we changed state */

		/*
		 * After either freezing or dropping a portal, we have to restart the
		 * iteration, because we could have invoked user-defined code that
		 * caused a drop of the next portal in the hash chain.
		 */
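/*
 * Skeleton of the loop those fragments belong to (a sketch; most of the
 * per-portal handling is elided above).
 */
bool
PreCommit_Portals(bool isPrepare)
{
	bool		result = false;
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);
	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* ... per-portal handling as in the fragments above ... */
	}

	return result;
}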
/*
 * AtAbort_Portals
 *		Abort processing for portals.
 *
 * At this point we run the cleanup hook if present, but we can't release the
 * portal's memory until the cleanup call.
 */
	/*
	 * When elog(FATAL) is in progress, we need to set the active portal to
	 * failed, so that PortalCleanup() doesn't run the executor shutdown.
	 */

	/*
	 * Do nothing else to cursors held over from a previous transaction.
	 */

	/*
	 * Do nothing to auto-held cursors.  This is similar to the case of a
	 * cursor from a previous transaction, but it could also be that the
	 * cursor was auto-held in this transaction, so it wants to live on.
	 */

	/*
	 * If it was created in the current transaction, we can't do normal
	 * shutdown on a READY portal either; it might refer to objects
	 * created in the failed transaction.  See comments in
	 * AtSubAbort_Portals.
	 */
	/*
	 * Allow portalcmds.c to clean up the state it knows about, if we
	 * haven't already.
	 */

	/* drop cached plan reference, if any */

	/*
	 * Any resources belonging to the portal will be released in the
	 * upcoming transaction-wide cleanup; they will be gone before we run
	 * PortalDrop.
	 */

	/*
	 * Although we can't delete the portal data structure proper, we can
	 * release any memory in subsidiary contexts, such as executor state.
	 * The cleanup hook was the last thing that might have needed data
	 * there.  But leave active portals alone.
	 */
/*
 * AtCleanup_Portals
 *		Post-abort cleanup for portals.
 *
 * Delete all portals not held over from prior transactions.
 */
	/*
	 * Do not touch active portals --- this can only happen in the case of
	 * a multi-transaction command.
	 */

	/*
	 * Do nothing to cursors held over from a previous transaction or
	 * auto-held ones.
	 */

	/*
	 * If a portal is still pinned, forcibly unpin it.  PortalDrop will not
	 * let us drop the portal otherwise.  Whoever pinned the portal was
	 * interrupted by the abort too and won't try to use it anymore.
	 */

	/*
	 * We had better not call any user-defined code during cleanup, so if
	 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
	 */
/*
 * PortalErrorCleanup
 *		Portal-related cleanup when we return to the main loop on error.
 *
 * This is different from the cleanup at transaction abort.  Auto-held portals
 * are cleaned up on error but not on transaction abort.
 */
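/*
 * A sketch of the error-path cleanup (original body elided): drop only
 * auto-held portals, forcibly unpinning them first.
 */
void
PortalErrorCleanup(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);
	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->autoHeld)
		{
			portal->portalPinned = false;
			PortalDrop(portal, false);
		}
	}
}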
/*
 * AtSubCommit_Portals
 *		Pre-subcommit processing for portals.
 *
 * Reassign portals created or used in the current subtransaction to the
 * parent subtransaction.
 */
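/*
 * A sketch of the reassignment loop (original body elided).
 */
void
AtSubCommit_Portals(SubTransactionId mySubid,
					SubTransactionId parentSubid,
					int parentLevel,
					ResourceOwner parentXactOwner)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->createSubid == mySubid)
		{
			portal->createSubid = parentSubid;
			portal->createLevel = parentLevel;
			if (portal->resowner)
				ResourceOwnerNewParent(portal->resowner, parentXactOwner);
		}
		if (portal->activeSubid == mySubid)
			portal->activeSubid = parentSubid;
	}
}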
/*
 * AtSubAbort_Portals
 *		Subtransaction abort handling for portals.
 *
 * Deactivate portals created or used during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created/used
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 */
		/* Was it created in this subtransaction? */
		if (portal->createSubid != mySubid)
		{
			/* No, but maybe it was used in this subtransaction? */
			if (portal->activeSubid == mySubid)
			{
				/* Maintain activeSubid until the portal is removed */
				portal->activeSubid = parentSubid;

				/*
				 * A MarkPortalActive() caller ran an upper-level portal in
				 * this subtransaction and left the portal ACTIVE.  This can't
				 * happen, but force the portal into FAILED state for the same
				 * reasons discussed below.
				 *
				 * We assume we can get away without forcing upper-level READY
				 * portals to fail, even if they were run and then suspended.
				 * In theory a suspended upper-level portal could have
				 * acquired some references to objects that are about to be
				 * destroyed, but there should be sufficient defenses against
				 * such cases: the portal's original query cannot contain such
				 * references, and any references within, say, cached plans of
				 * PL/pgSQL functions are not from active queries and should
				 * be protected by revalidation logic.
				 */
				if (portal->status == PORTAL_ACTIVE)
					MarkPortalFailed(portal);

				/*
				 * Also, if we failed it during the current subtransaction
				 * (either just above, or earlier), reattach its resource
				 * owner to the current subtransaction's resource owner, so
				 * that any resources it still holds will be released while
				 * cleaning up this subtransaction.  This prevents some corner
				 * cases wherein we might get Asserts or worse while cleaning
				 * up objects created during the current subtransaction
				 * (because they're still referenced within this portal).
				 */
				if (portal->status == PORTAL_FAILED && portal->resowner)
				{
					ResourceOwnerNewParent(portal->resowner, myXactOwner);
					portal->resowner = NULL;
				}
			}
			/* Done if it wasn't created in this subtransaction */
			continue;
		}
		/*
		 * Force any live portals of my own subtransaction into FAILED state.
		 * We have to do this because they might refer to objects created or
		 * changed in the failed subtransaction, leading to crashes within
		 * ExecutorEnd when portalcmds.c tries to close down the portal.
		 * Currently, every MarkPortalActive() caller ensures it updates the
		 * portal status again before relinquishing control, so ACTIVE can't
		 * happen here.  If it does happen, dispose the portal like existing
		 * MarkPortalActive() callers would.
		 */
		if (portal->status == PORTAL_READY ||
			portal->status == PORTAL_ACTIVE)
			MarkPortalFailed(portal);
		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */

		/* drop cached plan reference, if any */

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.
		 */
/*
 * AtSubCleanup_Portals
 *		Post-subabort cleanup for portals.
 *
 * Drop all portals created in the failed subtransaction (but note that
 * we will not drop any that were reassigned to the parent above).
 */
	/*
	 * If a portal is still pinned, forcibly unpin it.  PortalDrop will not
	 * let us drop the portal otherwise.  Whoever pinned the portal was
	 * interrupted by the abort too and won't try to use it anymore.
	 */

	/*
	 * We had better not call any user-defined code during cleanup, so if
	 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
	 */
/* Find all available cursors */
Datum
pg_cursor(PG_FUNCTION_ARGS)

	/*
	 * We put all the tuples into a tuplestore in one scan of the hashtable.
	 * This avoids any issue of the hashtable possibly changing between calls.
	 */

		Portal		portal = hentry->portal;
		bool		nulls[6] = {0};

		/* report only "visible" entries */

		/* also ignore it if PortalDefineQuery hasn't been called yet */
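		/*
		 * Sketch of the checks and row emission that follow (elided above);
		 * assumes a local "Datum values[6]" beside nulls, and "rsinfo" as
		 * the function's ReturnSetInfo per the usual materialized-SRF setup
		 * with InitMaterializedSRF(fcinfo, 0).  Columns mirror pg_cursors.
		 */
		if (!portal->visible)
			continue;
		if (!portal->sourceText)
			continue;

		values[0] = CStringGetTextDatum(portal->name);
		values[1] = CStringGetTextDatum(portal->sourceText);
		values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
		values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
		values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
		values[5] = TimestampTzGetDatum(portal->creation_time);

		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
							 values, nulls);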
/*
 * HoldPinnedPortals
 *		Hold all pinned portals.
 *
 * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
 * called to protect internally-generated cursors from being dropped during
 * the transaction shutdown.  Currently, SPI calls this automatically; PLs
 * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
 * themselves.  (Note that we couldn't do this in, say, AtAbort_Portals
 * because we need to run user-defined code while persisting a portal.
 * It's too late to do that once transaction abort has started.)
 *
 * We protect such portals by converting them to held cursors.  We mark them
 * as "auto-held" so that exception exit knows to clean them up.  (In normal,
 * non-exception code paths, the PL needs to clean such portals itself, since
 * transaction end won't do it anymore; but that should be normal practice
 * anyway.)
 */
			/*
			 * Doing transaction control, especially abort, inside a cursor
			 * loop that is not read-only, for example using UPDATE ...
			 * RETURNING, has weird semantics issues.  Also, this
			 * implementation wouldn't work, because such portals cannot be
			 * held.  (The core grammar enforces that only SELECT statements
			 * can drive a cursor, but for example PL/pgSQL does not restrict
			 * it.)
			 */
			if (portal->strategy != PORTAL_ONE_SELECT)
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));

			/* Verify it's in a suitable state to be held */
			if (portal->status != PORTAL_READY)
				elog(ERROR, "pinned portal is not ready to be auto-held");
/*
 * ForgetPortalSnapshots
 *		Drop the outer active snapshots for all portals, so that no
 *		snapshots are referenced by them.
 *
 * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
 * ROLLBACK inside a procedure.  This has to be separate from that since it
 * should not be run until we're done with steps that are likely to fail.
 *
 * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
 * need to clean up snapshot management in VACUUM and perhaps other places.
 */
	int			numPortalSnaps = 0;
	int			numActiveSnaps = 0;

	/* First, scan PortalHashTable and clear portalSnapshot fields */

	/* portal->holdSnapshot will be cleaned up in PreCommit_Portals */

	/*
	 * Now, pop all the active snapshots, which should be just those that were
	 * portal snapshots.  Ideally we'd drive this directly off the portal
	 * scan, but there's no good way to visit the portals in the correct
	 * order.  So just cross-check after the fact.
	 */
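	/*
	 * Sketch of the two loops just described (bodies elided above),
	 * assuming the usual locals: HASH_SEQ_STATUS status and
	 * PortalHashEnt *hentry.
	 */
	hash_seq_init(&status, PortalHashTable);
	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		if (portal->portalSnapshot != NULL)
		{
			portal->portalSnapshot = NULL;
			numPortalSnaps++;
		}
	}

	while (ActiveSnapshotSet())
	{
		PopActiveSnapshot();
		numActiveSnaps++;
	}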
	if (numPortalSnaps != numActiveSnaps)
		elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
			 numPortalSnaps, numActiveSnaps);