/*----------------------------------------------------------------------
 * tableam.c
 *		Table access method routines too big to be inline functions.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *		src/backend/access/table/tableam.c
 *
 * NOTES
 *		Note that most functions in here are documented in tableam.h rather
 *		than here.  That's because there are a lot of inline functions in
 *		tableam.h and it'd be harder to understand if one constantly had to
 *		switch between files.
 *----------------------------------------------------------------------
 */
/*
 * Constants to control the behavior of block allocation to parallel workers
 * during a parallel seqscan.  Technically these values do not need to be
 * powers of 2, but having them as powers of 2 makes the math cheaper and
 * makes the ramp-down stepping more even.
 */

/* The number of I/O chunks we try to break a parallel seqscan down into */
#define PARALLEL_SEQSCAN_NCHUNKS			2048
/* Ramp down the allocation size once only this many chunks remain */
#define PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS	64
/* Cap the size of parallel I/O chunks to this number of blocks */
#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE		8192
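/*
 * Worked example (illustrative, not from the original source): an 8GB
 * relation has 1,048,576 8kB blocks, so the target chunk size is
 * 1048576 / PARALLEL_SEQSCAN_NCHUNKS = 512 blocks, which is already a power
 * of 2 and well below PARALLEL_SEQSCAN_MAX_CHUNK_SIZE.  A 256GB relation
 * would want 16384-block chunks and gets capped at 8192, while a 100-block
 * relation ends up with 1-block chunks.
 */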
/* ----------------------------------------------------------------------------
 * Slot functions.
 * ----------------------------------------------------------------------------
 */
    else if (relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
    {
        /*
         * Historically FDWs expect to store heap tuples in slots.  Continue
         * handing them one, to make it less painful to adapt FDWs to new
         * versions.  The cost of a heap slot over a virtual slot is pretty
         * small.
         */
        tts_cb = &TTSOpsHeapTuple;
    }
    else
    {
        /*
         * These need to be supported, as some parts of the code (like COPY)
         * need to create slots for such relations too.  It seems better to
         * centralize the knowledge that a heap slot is the right thing in
         * that case here.
         */
        Assert(relation->rd_rel->relkind == RELKIND_VIEW ||
               relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE);
        tts_cb = &TTSOpsVirtual;
    }
        *reglist = lappend(*reglist, slot);
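/*
 * Hedged usage sketch, not part of the original file: a caller that needs a
 * slot whose ops match the relation's table AM typically pairs
 * table_slot_create() with a scan, roughly like this.  The function name is
 * illustrative only, and an active snapshot is assumed to have been pushed.
 */
static void
example_fetch_first_tuple(Relation relation)
{
    TupleTableSlot *slot = table_slot_create(relation, NULL);
    TableScanDesc scan = table_beginscan(relation, GetActiveSnapshot(),
                                         0, NULL);

    if (table_scan_getnextslot(scan, ForwardScanDirection, slot))
    {
        /* the first tuple is now stored in "slot" */
    }

    table_endscan(scan);
    ExecDropSingleTupleTableSlot(slot);
}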
/* ----------------------------------------------------------------------------
 * Table scan functions.
 * ----------------------------------------------------------------------------
 */
/* ----------------------------------------------------------------------------
 * Parallel table scan related functions.
 * ----------------------------------------------------------------------------
 */
        /* Snapshot was serialized -- restore it */

        /* SnapshotAny passed by caller (not serialized) */
/* ----------------------------------------------------------------------------
 * Index scan related functions.
 * ----------------------------------------------------------------------------
 */
/*
 * To perform that check simply start an index scan, create the necessary
 * slot, do the heap lookup, and shut everything down again.  This could be
 * optimized, but is unlikely to matter from a performance POV.  If there
 * frequently are live index pointers also matching a unique index key, the
 * CPU overhead of this routine is unlikely to matter.
 *
 * Note that *tid may be modified when we return true if the AM supports
 * storing multiple row versions reachable via a single index entry (like
 * heap's HOT).
 */
    bool        call_again = false;
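    /*
     * Sketch of the sequence described in the comment above; the rest of the
     * body is elided in this extract, and "slot", "scan" and "found" are
     * locals declared alongside call_again.
     */
    slot = table_slot_create(rel, NULL);
    scan = table_index_fetch_begin(rel);
    found = table_index_fetch_tuple(scan, tid, snapshot, slot,
                                    &call_again, all_dead);
    table_index_fetch_end(scan);
    ExecDropSingleTupleTableSlot(slot);

    return found;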
/* ------------------------------------------------------------------------
 * Functions for non-modifying operations on individual tuples
 * ------------------------------------------------------------------------
 */
    /*
     * We don't expect direct calls to table_tuple_get_latest_tid with valid
     * CheckXidAlive for catalog or regular tables.  See detailed comments in
     * xact.c where these variables are declared.
     */
    if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
        elog(ERROR, "unexpected table_tuple_get_latest_tid call during logical decoding");
    /*
     * Since this can be called with user-supplied TID, don't trust the input
     * too much.
     */
    if (!tableam->tuple_tid_valid(scan, tid))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("tid (%u, %u) is not valid for relation \"%s\"",
                        ItemPointerGetBlockNumberNoCheck(tid),
                        ItemPointerGetOffsetNumberNoCheck(tid),
                        RelationGetRelationName(rel))));
/* ----------------------------------------------------------------------------
 * Functions to make modifications a bit simpler.
 * ----------------------------------------------------------------------------
 */
/*
 * simple_table_tuple_insert - insert a tuple
 *
 * Currently, this routine differs from table_tuple_insert only in supplying a
 * default command ID and not allowing access to the speedup options.
 */
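/*
 * Minimal sketch of what the comment above implies the body looks like (the
 * actual definition is elided in this extract): insert with the current
 * command ID, no options, and no bulk-insert state.
 */
void
simple_table_tuple_insert(Relation rel, TupleTableSlot *slot)
{
    table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
}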
/*
 * simple_table_tuple_delete - delete a tuple
 *
 * This routine may be used to delete a tuple when concurrent updates of
 * the target tuple are not expected (for example, because we have a lock
 * on the relation associated with the tuple).  Any failure is reported
 * via ereport().
 */
    result = table_tuple_delete(rel, tid,
                                GetCurrentCommandId(true),
                                snapshot, InvalidSnapshot,
                                true /* wait for commit */ ,
                                &tmfd, false /* changingPart */ );

    switch (result)
    {
        case TM_SelfModified:
            /* Tuple was already updated in current command? */
            elog(ERROR, "tuple already updated by self");
            break;

        case TM_Ok:
            /* done successfully */
            break;

        case TM_Updated:
            elog(ERROR, "tuple concurrently updated");
            break;

        case TM_Deleted:
            elog(ERROR, "tuple concurrently deleted");
            break;

        default:
            elog(ERROR, "unrecognized table_tuple_delete status: %u", result);
            break;
    }
/*
 * simple_table_tuple_update - replace a tuple
 *
 * This routine may be used to update a tuple when concurrent updates of
 * the target tuple are not expected (for example, because we have a lock
 * on the relation associated with the tuple).  Any failure is reported
 * via ereport().
 */
    result = table_tuple_update(rel, otid, slot,
                                GetCurrentCommandId(true),
                                snapshot, InvalidSnapshot,
                                true /* wait for commit */ ,
                                &tmfd, &lockmode, update_indexes);

    switch (result)
    {
        case TM_SelfModified:
            /* Tuple was already updated in current command? */
            elog(ERROR, "tuple already updated by self");
            break;

        case TM_Ok:
            /* done successfully */
            break;

        case TM_Updated:
            elog(ERROR, "tuple concurrently updated");
            break;

        case TM_Deleted:
            elog(ERROR, "tuple concurrently deleted");
            break;

        default:
            elog(ERROR, "unrecognized table_tuple_update status: %u", result);
            break;
    }
/* ----------------------------------------------------------------------------
 * Helper functions to implement parallel scans for block oriented AMs.
 * ----------------------------------------------------------------------------
 */

    /* compare phs_syncscan initialization to similar logic in initscan */
/*
 * find and set the scan's startblock
 *
 * Determine where the parallel seq scan should start.  This function may be
 * called many times, once by each parallel worker.  We must be careful only
 * to set the startblock once.
 */
    /* Reset the state we use for controlling allocation size. */
    memset(pbscanwork, 0, sizeof(*pbscanwork));

    StaticAssertStmt(MaxBlockNumber <= 0xFFFFFFFE,
                     "pg_nextpower2_32 may be too small for non-standard BlockNumber width");
    /*
     * We determine the chunk size based on the size of the relation.  First
     * we split the relation into PARALLEL_SEQSCAN_NCHUNKS chunks and then
     * round the chunk size up to the next power of 2.  This means we split
     * the relation into somewhere between PARALLEL_SEQSCAN_NCHUNKS and
     * PARALLEL_SEQSCAN_NCHUNKS / 2 chunks.
     */

    /*
     * Ensure we don't go over the maximum chunk size with larger tables.
     * This means we may get much more than PARALLEL_SEQSCAN_NCHUNKS for
     * larger tables.  Too large a chunk size has been shown to be
     * detrimental to synchronous scan performance.
     */
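    /*
     * Sketch of the two assignments the comments above describe (elided in
     * this extract); phsw_chunk_size lives in the worker-local state.
     */
    pbscanwork->phsw_chunk_size = pg_nextpower2_32(Max(pbscan->phs_nblocks /
                                                       PARALLEL_SEQSCAN_NCHUNKS, 1));
    pbscanwork->phsw_chunk_size = Min(pbscanwork->phsw_chunk_size,
                                      PARALLEL_SEQSCAN_MAX_CHUNK_SIZE);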
retry:
    /* Grab the spinlock. */
    SpinLockAcquire(&pbscan->phs_mutex);

    /*
     * If the scan's startblock has not yet been initialized, we must do so
     * now.  If this is not a synchronized scan, we just start at block 0, but
     * if it is a synchronized scan, we must get the starting position from
     * the synchronized scan machinery.  We can't hold the spinlock while
     * doing that, though, so release the spinlock, get the information we
     * need, and retry.  If nobody else has initialized the scan in the
     * meantime, we'll fill in the value we fetched on the second time
     * through.
     */
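    /*
     * Sketch of the branch described above (the original statements are
     * elided in this extract); "sync_startpage" is a local BlockNumber
     * initialized to InvalidBlockNumber.
     */
    if (pbscan->phs_startblock == InvalidBlockNumber)
    {
        if (!pbscan->base.phs_syncscan)
            pbscan->phs_startblock = 0;
        else if (sync_startpage != InvalidBlockNumber)
            pbscan->phs_startblock = sync_startpage;
        else
        {
            SpinLockRelease(&pbscan->phs_mutex);
            sync_startpage = ss_get_location(rel, pbscan->phs_nblocks);
            goto retry;
        }
    }
    SpinLockRelease(&pbscan->phs_mutex);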
/*
 * get the next page to scan
 *
 * Get the next page to scan.  Even if there are no pages left to scan,
 * another backend could have grabbed a page to scan and not yet finished
 * looking at it, so it doesn't follow that the scan is done when the first
 * backend gets an InvalidBlockNumber return.
 */
    /*
     * The logic below allocates block numbers out to parallel workers in a
     * way that each worker will receive a set of consecutive block numbers to
     * scan.  Earlier versions of this would allocate the next highest block
     * number to the next worker to call this function.  This would generally
     * result in workers never receiving consecutive block numbers.  Some
     * operating systems would not detect the sequential I/O pattern due to
     * each backend being a different process, which could result in poor
     * performance due to inefficient or no readahead.  To work around this
     * issue, we now allocate a range of block numbers for each worker and
     * when they come back for another block, we give them the next one in
     * that range until the range is complete.  When the worker completes the
     * range of blocks we then allocate another range for it and return the
     * first block number from that range.
     *
     * Here we name these ranges of blocks "chunks".  The initial size of
     * these chunks is determined in table_block_parallelscan_startblock_init
     * based on the size of the relation.  Towards the end of the scan, we
     * start making reductions in the size of the chunks in order to attempt
     * to divide the remaining work over all the workers as evenly as
     * possible.
     *
     * Here pbscanwork is local worker memory.  phsw_chunk_remaining tracks
     * the number of blocks remaining in the chunk.  When that reaches 0 then
     * we must allocate a new chunk for the worker.
     *
     * phs_nallocated tracks how many blocks have been allocated to workers
     * already.  When phs_nallocated >= rs_nblocks, all blocks have been
     * allocated.
     *
     * Because we use an atomic fetch-and-add to fetch the current value, the
     * phs_nallocated counter will exceed rs_nblocks: workers will still
     * increment the value when they try to allocate the next block even
     * after all blocks have been allocated.  The counter must be 64 bits
     * wide because of that, to avoid wrapping around when rs_nblocks is
     * close to 2^32.
     *
     * The actual block to return is calculated by adding the counter to the
     * starting block number, modulo nblocks.
     */
    /*
     * First check if we have any remaining blocks in a previous chunk for
     * this worker.  We must consume all of the blocks from that before we
     * allocate a new chunk to the worker.
     */

        /*
         * Give them the next block in the range and update the remaining
         * number of blocks.
         */
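        /*
         * Sketch of the statements the comment above describes (elided in
         * this extract); "nallocated" is a local of the elided body.
         */
        nallocated = ++pbscanwork->phsw_nallocated;
        pbscanwork->phsw_chunk_remaining--;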
        /*
         * When we've only got PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS chunks
         * remaining in the scan, we halve the chunk size.  Since we reduce
         * the chunk size here, we'll hit this again after doing
         * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS at the new size.  After a few
         * iterations of this, we'll end up doing the last few blocks with the
         * chunk size set to 1.
         */
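        /*
         * Sketch of the ramp-down and chunk allocation described above
         * (elided in this extract): halve the chunk size once we are inside
         * the ramp-down zone, then atomically claim the next chunk.
         */
        if (pbscanwork->phsw_chunk_size > 1 &&
            pbscanwork->phsw_nallocated > pbscan->phs_nblocks -
            (pbscanwork->phsw_chunk_size * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS))
            pbscanwork->phsw_chunk_size >>= 1;

        nallocated = pbscanwork->phsw_nallocated =
            pg_atomic_fetch_add_u64(&pbscan->phs_nallocated,
                                    pbscanwork->phsw_chunk_size);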
        /*
         * Set the remaining number of blocks in this chunk so that subsequent
         * calls from this worker continue on with this chunk until it's done.
         */
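        /* as sketched: remember how much of the new chunk is still to go */
        pbscanwork->phsw_chunk_remaining = pbscanwork->phsw_chunk_size - 1;

    /*
     * Sketch of the final mapping (elided in this extract), per the "modulo
     * nblocks" note in the function's opening comment; "page" is a local
     * BlockNumber.
     */
    if (nallocated >= pbscan->phs_nblocks)
        page = InvalidBlockNumber;  /* all blocks have been allocated */
    else
        page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;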
    /*
     * Report scan location.  Normally, we report the current page number.
     * When we reach the end of the scan, though, we report the starting page,
     * not the ending page, just so the starting positions for later scans
     * don't slew backwards.  We only report the position at the end of the
     * scan once, though: subsequent callers will report nothing.
     */
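    /*
     * Sketch of the reporting conditional described above (elided in this
     * extract).
     */
    if (pbscan->base.phs_syncscan)
    {
        if (page != InvalidBlockNumber)
            ss_report_location(rel, page);
        else if (nallocated == pbscan->phs_nblocks)
            ss_report_location(rel, pbscan->phs_startblock);
    }

    return page;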
/* ----------------------------------------------------------------------------
 * Helper functions to implement relation sizing for block oriented AMs.
 * ----------------------------------------------------------------------------
 */
/*
 * table_block_relation_size
 *
 * If a table AM uses the various relation forks as the sole place where data
 * is stored, and if it uses them in the expected manner (e.g. the actual data
 * is in the main fork rather than some other fork), it can use this
 * implementation of the relation_size callback rather than implementing its
 * own.
 */

    /* InvalidForkNumber indicates returning the size for all forks */
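    /*
     * Sketch of the branch the comment above describes (the exact statements
     * are elided in this extract): sum every fork, or just the one that was
     * requested; "nblocks" is a local initialized to 0.
     */
    if (forkNumber == InvalidForkNumber)
    {
        for (int i = 0; i <= MAX_FORKNUM; i++)
            nblocks += smgrnblocks(RelationGetSmgr(rel), i);
    }
    else
        nblocks = smgrnblocks(RelationGetSmgr(rel), forkNumber);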
    return nblocks * BLCKSZ;
/*
 * table_block_relation_estimate_size
 *
 * This function can't be directly used as the implementation of the
 * relation_estimate_size callback, because it has a few additional
 * parameters.  Instead, it is intended to be used as a helper function; the
 * caller can pass through the arguments to its relation_estimate_size
 * function plus the additional values required here.
 *
 * overhead_bytes_per_tuple should contain the approximate number of bytes
 * of storage required to store a tuple above and beyond what is required for
 * the tuple data proper.  Typically, this would include things like the
 * size of the tuple header and item pointer.  This is only used for query
 * planning, so a table AM where the value is not constant could choose to
 * pass a "best guess".
 *
 * usable_bytes_per_page should contain the approximate number of bytes per
 * page usable for tuple data, excluding the page header and any anticipated
 * special space.
 */
void
table_block_relation_estimate_size(Relation rel, int32 *attr_widths,
                                   BlockNumber *pages, double *tuples,
                                   double *allvisfrac,
                                   Size overhead_bytes_per_tuple,
                                   Size usable_bytes_per_page)
    /* it should have storage, so we can call the smgr */
    curpages = RelationGetNumberOfBlocks(rel);

    /* coerce values in pg_class to more desirable types */
    relpages = (BlockNumber) rel->rd_rel->relpages;
    reltuples = (double) rel->rd_rel->reltuples;
    relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
    /*
     * HACK: if the relation has never yet been vacuumed, use a minimum size
     * estimate of 10 pages.  The idea here is to avoid assuming a
     * newly-created table is really small, even if it currently is, because
     * that may not be true once some data gets loaded into it.  Once a vacuum
     * or analyze cycle has been done on it, it's more reasonable to believe
     * the size is somewhat stable.
     *
     * (Note that this is only an issue if the plan gets cached and used again
     * after the table has been filled.  What we're trying to avoid is using a
     * nestloop-type plan on a table that has grown substantially since the
     * plan was made.  Normally, autovacuum/autoanalyze will occur once enough
     * inserts have happened and cause cached-plan invalidation; but that
     * doesn't happen instantaneously, and it won't happen at all for cases
     * such as temporary tables.)
     *
     * We test "never vacuumed" by seeing whether reltuples < 0.
     *
     * If the table has inheritance children, we don't apply this heuristic.
     * Totally empty parent tables are quite common, so we should be willing
     * to believe that they are empty.
     */
    if (curpages < 10 &&
        reltuples < 0 &&
        !rel->rd_rel->relhassubclass)
        curpages = 10;

    /* report estimated # pages */
    *pages = curpages;

    /* quick exit if rel is clearly empty */
    if (curpages == 0)
    {
        *tuples = 0;
        *allvisfrac = 0;
        return;
    }

    /* estimate number of tuples from previous tuple density */
    if (reltuples >= 0 && relpages > 0)
        density = reltuples / (double) relpages;
    else
    {
        /*
         * When we have no data because the relation was never yet vacuumed,
         * estimate tuple width from attribute datatypes.  We assume here that
         * the pages are completely full, which is OK for tables but is
         * probably an overestimate for indexes.  Fortunately
         * get_relation_info() can clamp the overestimate to the parent
         * table's size.
         *
         * Note: this code intentionally disregards alignment considerations,
         * because (a) that would be gilding the lily considering how crude
         * the estimate is, (b) it creates platform dependencies in the
         * default plans which are kind of a headache for regression testing,
         * and (c) different table AMs might use different padding schemes.
         *
         * Without reltuples/relpages, we also need to consider fillfactor.
         * The other branch considers it implicitly by calculating density
         * from actual relpages/reltuples statistics.
         */
        fillfactor = RelationGetFillFactor(rel, HEAP_DEFAULT_FILLFACTOR);
        tuple_width = get_rel_data_width(rel, attr_widths);
        tuple_width += overhead_bytes_per_tuple;
        /* note: integer division is intentional here */
        density = (usable_bytes_per_page * fillfactor / 100) / tuple_width;
        /* There's at least one row on the page, even with low fillfactor. */
        density = Max(density, 1.0);
    }
    *tuples = rint(density * (double) curpages);
    /*
     * We use relallvisible as-is, rather than scaling it up like we do for
     * the pages and tuples counts, on the theory that any pages added since
     * the last VACUUM are most likely not marked all-visible.  But costsize.c
     * wants it converted to a fraction.
     */
    if (relallvisible == 0 || curpages <= 0)
        *allvisfrac = 0;
    else if ((double) relallvisible >= curpages)
        *allvisfrac = 1;
    else
        *allvisfrac = (double) relallvisible / curpages;
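/*
 * Hedged wiring sketch, not part of the original file: a block-based AM can
 * point its relation_size callback straight at table_block_relation_size and
 * implement relation_estimate_size as a thin wrapper around the helper
 * above.  The callback name and the heap-like overhead/usable-space values
 * are illustrative assumptions.
 */
static void
exampleam_relation_estimate_size(Relation rel, int32 *attr_widths,
                                 BlockNumber *pages, double *tuples,
                                 double *allvisfrac)
{
    /* ~24 bytes of tuple header plus a 4-byte line pointer, heap-style */
    table_block_relation_estimate_size(rel, attr_widths, pages, tuples,
                                       allvisfrac, 28,
                                       BLCKSZ - SizeOfPageHeaderData);
}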