1/*-------------------------------------------------------------------------
4 * Functions for constructing WAL records
6 * Constructing a WAL record begins with a call to XLogBeginInsert,
7 * followed by a number of XLogRegister* calls. The registered data is
8 * collected in private working memory, and finally assembled into a chain
9 * of XLogRecData structs by a call to XLogRecordAssemble(). See
10 * access/transam/README for details.
12 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
13 * Portions Copyright (c) 1994, Regents of the University of California
15 * src/backend/access/transam/xloginsert.c
17 *-------------------------------------------------------------------------
44 * Guess the maximum buffer size required to store a compressed version of
48#define LZ4_MAX_BLCKSZ LZ4_COMPRESSBOUND(BLCKSZ)
50 #define LZ4_MAX_BLCKSZ 0
54#define ZSTD_MAX_BLCKSZ ZSTD_COMPRESSBOUND(BLCKSZ)
56 #define ZSTD_MAX_BLCKSZ 0
59 #define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
61/* Buffer size required to store a compressed version of backup block image */
62 #define COMPRESS_BUFSIZE Max(Max(PGLZ_MAX_BLCKSZ, LZ4_MAX_BLCKSZ), ZSTD_MAX_BLCKSZ)
65 * For each block reference registered with XLogRegisterBuffer, we fill in
66 * a registered_buffer struct.
70 bool in_use;
/* is this slot in use? */
82 XLogRecData bkp_rdatas[2];
/* temporary rdatas used to hold references to
83 * backup block data in XLogRecordAssemble() */
85 /* buffer to store a compressed version of backup block image */
95 * A chain of XLogRecDatas to hold the "main data" of a WAL record, registered
96 * with XLogRegisterData(...).
102/* flags for the in-progress insertion */
106 * These are used to hold the record header while constructing a record.
107 * 'hdr_scratch' is not a plain variable, but is palloc'd at initialization,
108 * because we want it to be MAXALIGNed and padding bytes zeroed.
110 * For simplicity, it's allocated large enough to hold the headers for any
116 #define SizeOfXlogOrigin (sizeof(RepOriginId) + sizeof(char))
117 #define SizeOfXLogTransactionId (sizeof(TransactionId) + sizeof(char))
119 #define HEADER_SCRATCH_SIZE \
120 (SizeOfXLogRecord + \
121 MaxSizeOfXLogRecordBlockHeader * (XLR_MAX_BLOCK_ID + 1) + \
122 SizeOfXLogRecordDataHeaderLong + SizeOfXlogOrigin + \
123 SizeOfXLogTransactionId)
126 * An array of XLogRecData structs, to hold registered data.
134/* Memory context to hold the registered buffer and data references. */
140 bool *topxid_included);
145 * Begin constructing a WAL record. This must be called before the
146 * XLogRegister* functions and XLogInsert().
155 /* cross-check on whether we should be here or not */
157 elog(
ERROR,
"cannot make new WAL entries during recovery");
160 elog(
ERROR,
"XLogBeginInsert was already called");
166 * Ensure that there are enough buffer and data slots in the working area,
167 * for subsequent XLogRegisterBuffer, XLogRegisterData and XLogRegisterBufData
170 * There is always space for a small number of buffers and data chunks, enough
171 * for most record types. This function is for the exceptional cases that need
180 * This must be called before entering a critical section, because
181 * allocating memory inside a critical section can fail. repalloc() will
182 * check the same, but better to check it here too so that we fail
183 * consistently even if the arrays happen to be large enough already.
187 /* the minimum values can't be decreased */
194 elog(
ERROR,
"maximum number of WAL record block references exceeded");
195 nbuffers = max_block_id + 1;
203 * At least the padding bytes in the structs must be zeroed, because
204 * they are included in WAL data, but initialize it all for tidiness.
219 * Reset WAL record construction buffers.
238 * Register a reference to a buffer with the WAL record being constructed.
239 * This must be called for every page that the WAL-logged operation modifies.
246 /* NO_IMAGE doesn't make sense with FORCE_IMAGE */
251 * Ordinarily, buffer should be exclusive-locked and marked dirty before
252 * we get here, otherwise we could end up violating one of the rules in
253 * access/transam/README.
255 * Some callers intentionally register a clean page and never update that
256 * page's LSN; in that case they can pass the flag REGBUF_NO_CHANGE to
257 * bypass these checks.
259#ifdef USE_ASSERT_CHECKING
267 elog(
ERROR,
"too many registered buffers");
275 regbuf->
flags = flags;
280 * Check that this page hasn't already been registered with some other
283#ifdef USE_ASSERT_CHECKING
291 if (
i == block_id || !regbuf_old->
in_use)
301 regbuf->in_use =
true;
305 * Like XLogRegisterBuffer, but for registering a block that's not in the
306 * shared buffer pool (i.e. when you don't have a Buffer for it).
320 elog(
ERROR,
"too many registered buffers");
326 regbuf->
block = blknum;
328 regbuf->
flags = flags;
333 * Check that this page hasn't already been registered with some other
336#ifdef USE_ASSERT_CHECKING
344 if (
i == block_id || !regbuf_old->
in_use)
354 regbuf->in_use =
true;
358 * Add data to the WAL record that's being constructed.
360 * The data is appended to the "main chunk", available at replay with
381 * we use the mainrdata_last pointer to track the end of the chain, so no
382 * need to clear 'next' here.
392 * Add buffer-specific data to the WAL record that's being constructed.
394 * Block_id must reference a block previously registered with
395 * XLogRegisterBuffer(). If this is called more than once for the same
396 * block_id, the data is appended.
398 * The maximum amount of data that can be registered per block is 65535
399 * bytes. That should be plenty; if you need more than BLCKSZ bytes to
400 * reconstruct the changes to the page, you might as well just log a full
401 * copy of it. (the "main data" that's not associated with a block is not
412 /* find the registered buffer struct */
415 elog(
ERROR,
"no block with id %d registered with WAL insertion",
419 * Check against max_rdatas and ensure we do not register more data per
420 * buffer than can be handled by the physical data format; i.e. that
421 * regbuf->rdata_len does not grow beyond what
422 * XLogRecordBlockHeader->data_length can hold.
432 errdetail_internal(
"Registering more than maximum %u bytes allowed to block %u: current %u bytes, adding %u bytes.",
446 * Set insert status flags for the upcoming WAL record.
448 * The flags that can be used here are:
449 * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
450 * included in the record.
451 * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
 452 * durability, which makes it possible to avoid triggering WAL archiving and other
453 * background activity.
463 * Insert an XLOG record having the specified RMID and info bytes, with the
464 * body of the record being the data and buffer references registered earlier
465 * with XLogRegister* calls.
467 * Returns XLOG pointer to end of record (beginning of next record).
468 * This can be used as LSN for data pages affected by the logged action.
469 * (LSN is the XLOG point up to which the XLOG must be flushed to disk
470 * before the data page can be written out. This implements the basic
471 * WAL rule "write the log before the data".)
478 /* XLogBeginInsert() must have been called. */
480 elog(
ERROR,
"XLogBeginInsert was not called");
483 * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
484 * XLR_CHECK_CONSISTENCY; the rest are reserved for use by me.
489 elog(
PANIC,
"invalid xlog info mask %02X", info);
491 TRACE_POSTGRESQL_WAL_INSERT(rmid, info);
494 * In bootstrap mode, we don't actually log anything but XLOG resources;
495 * return a phony record pointer.
508 bool topxid_included =
false;
514 * Get values needed to decide whether to do full-page writes. Since
515 * we don't yet have an insertion lock, these could change under us,
516 * but XLogInsertRecord will recheck them once it has a lock.
521 &fpw_lsn, &num_fpi, &topxid_included);
533 * Simple wrapper to XLogInsert to insert a WAL record with elementary
534 * contents (only an int64 is supported as value currently).
545 * Assemble a WAL record from the registered data and buffers into an
546 * XLogRecData chain, ready for insertion with XLogInsertRecord().
548 * The record header fields are filled in, except for the xl_prev field. The
549 * calculated CRC does not include the record header yet.
551 * If there are any registered buffers, and a full-page image was not taken
552 * of all of them, *fpw_lsn is set to the lowest LSN among such pages. This
553 * signals that the assembled record is only good for insertion on the
554 * assumption that the RedoRecPtr and doPageWrites values were up-to-date.
556 * *topxid_included is set if the topmost transaction ID is logged with the
557 * current subtransaction.
562 XLogRecPtr *fpw_lsn,
int *num_fpi,
bool *topxid_included)
574 * Note: this function can be called multiple times for the same record.
575 * All the modifications we do to the rdata chains below must handle that.
578 /* The record begins with the fixed-size header */
587 * Enforce consistency checks for this record if user is looking for it.
 588 * Do this at the beginning of this routine to give the possibility
589 * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
596 * Make an rdata chain containing all the data portions of all block
597 * references. This includes the data for full-page images. Also append
598 * the headers for the block references in the scratch buffer.
610 bool is_compressed =
false;
616 /* Determine if this block needs to be backed up */
620 needs_backup =
false;
622 needs_backup =
false;
626 * We assume page LSN is first data on *every* page that can be
627 * passed to XLogInsert, whether it has the standard page layout
 640 /* Determine if the buffer data needs to be included */
646 needs_data = !needs_backup;
656 * If needs_backup is true or WAL checking is enabled for current
657 * resource manager, log a full-page write for the current block.
664 uint16 compressed_len = 0;
667 * The page needs to be backed up, so calculate its hole length
672 /* Assume we can omit data between pd_lower and pd_upper */
685 /* No "hole" to remove */
692 /* Not a standard page header, don't try to eliminate "hole" */
698 * Try to compress a block image if wal_compression is enabled
710 * Fill in the remaining fields in the XLogRecordBlockHeader
715 /* Report a full page image constructed for the WAL record */
719 * Construct XLogRecData entries for the page content.
722 rdt_datas_last = rdt_datas_last->
next;
727 * If WAL consistency checking is enabled for the resource manager
728 * of this WAL record, a full-page image is included in the record
729 * for the block modified. During redo, the full-page is replayed
730 * only if BKPIMAGE_APPLY is set.
737 /* The current compression is stored in the WAL record */
738 bimg.
length = compressed_len;
740 /* Set the compression method used for this block */
751 elog(
ERROR,
"LZ4 is not supported by this build");
759 elog(
ERROR,
"zstd is not supported by this build");
764 Assert(
false);
/* cannot happen */
766 /* no default case, so that compiler will warn */
770 rdt_datas_last->
len = compressed_len;
778 rdt_datas_last->
data = page;
779 rdt_datas_last->
len = BLCKSZ;
783 /* must skip the hole */
784 rdt_datas_last->
data = page;
788 rdt_datas_last = rdt_datas_last->
next;
790 rdt_datas_last->
data =
792 rdt_datas_last->
len =
803 * When copying to XLogRecordBlockHeader, the length is narrowed
804 * to an uint16. Double-check that it is still correct.
809 * Link the caller-supplied rdata chain for this buffer to the
827 prev_regbuf = regbuf;
829 /* Ok, copy the header to the scratch buffer */
838 memcpy(scratch, &cbimg,
852 /* followed by the record's origin, if any */
861 /* followed by toplevel XID, if not already included in previous record */
866 /* Set the flag that the top xid is included in the WAL */
867 *topxid_included =
true;
874 /* followed by main data, if any */
884 errdetail_internal(
"Main data length is %" PRIu64
" bytes for a maximum of %u bytes.",
890 memcpy(scratch, &mainrdata_len_4b,
sizeof(
uint32));
891 scratch +=
sizeof(
uint32);
902 rdt_datas_last->
next = NULL;
908 * Calculate CRC of the data
910 * Note that the record header isn't added into the CRC initially since we
911 * don't know the prev-link yet. Thus, the CRC will represent the CRC of
912 * the whole record in the order: rdata, then backup blocks, then record
921 * Ensure that the XLogRecord is not too large.
923 * XLogReader machinery is only able to handle records up to a certain
924 * size (ignoring machine resource limitations), so make sure that we will
925 * not emit records larger than the sizes advertised to be supported.
930 errdetail_internal(
"WAL record would be %" PRIu64
" bytes (of maximum %u bytes); rmid %u flags %u.",
934 * Fill in the fields in the record header. Prev-link is filled in later,
935 * once we know where in the WAL the record will be inserted. The CRC does
936 * not include the record header yet.
943 rechdr->
xl_crc = rdata_crc;
949 * Create a compressed version of a backup block image.
951 * Returns false if compression fails (i.e., compressed result is actually
952 * bigger than original). Otherwise, returns true and sets 'dlen' to
953 * the length of compressed block image.
959 int32 orig_len = BLCKSZ - hole_length;
961 int32 extra_bytes = 0;
965 if (hole_length != 0)
967 /* must skip the hole */
968 memcpy(tmp.
data, page, hole_offset);
969 memcpy(tmp.
data + hole_offset,
970 page + (hole_offset + hole_length),
971 BLCKSZ - (hole_length + hole_offset));
975 * Extra data needs to be stored in WAL record for the compressed
976 * version of block image if the hole exists.
994 len = -1;
/* failure */
996 elog(
ERROR,
"LZ4 is not supported by this build");
1003 ZSTD_CLEVEL_DEFAULT);
1004 if (ZSTD_isError(
len))
1005 len = -1;
/* failure */
1007 elog(
ERROR,
"zstd is not supported by this build");
1012 Assert(
false);
/* cannot happen */
1014 /* no default case, so that compiler will warn */
1018 * We recheck the actual size even if compression reports success and see
1019 * if the number of bytes saved by compression is larger than the length
1020 * of extra data needed for the compressed version of block image.
1023 len + extra_bytes < orig_len)
1025 *dlen = (
uint16)
len;
/* successful compression */
1032 * Determine whether the buffer referenced has to be backed up.
1034 * Since we don't yet have the insert lock, fullPageWrites and runningBackups
1035 * (which forces full-page writes) could change later, so the result should
1036 * be used for optimization purposes only.
1050 return true;
/* buffer requires backup */
1052 return false;
/* buffer does not need to be backed up */
1056 * Write a backup block if needed when we are setting a hint. Note that
1057 * this may be called for a variety of page types, not just heaps.
1059 * Callable while holding just share lock on the buffer content.
1061 * We can't use the plain backup block mechanism since that relies on the
1062 * Buffer being exclusively locked. Since some modifications (setting LSN, hint
1063 * bits) are allowed in a sharelocked buffer that can lead to wal checksum
1064 * failures. So instead we copy the page and insert the copied data as normal
1067 * We only need to do something if page has not yet been full page written in
1068 * this checkpoint round. The LSN of the inserted wal record is returned if we
1069 * had to write, InvalidXLogRecPtr otherwise.
1071 * It is possible that multiple concurrent backends could attempt to write WAL
1072 * records. In that case, multiple copies of the same block would be recorded
1073 * in separate WAL records by different backends, though that is still OK from
1074 * a correctness perspective.
1084 * Ensure no checkpoint can change our view of RedoRecPtr.
1089 * Update RedoRecPtr so that we can make the right decision
1094 * We assume page LSN is first data on *every* page that can be passed to
1095 * XLogInsert, whether it has the standard page layout or not. Since we're
1096 * only holding a share-lock on the page, we must take the buffer header
1097 * lock when we look at the LSN.
1111 * Copy buffer so we don't have to worry about concurrent hint bit or
1112 * lsn updates. We assume pd_lower/upper cannot be changed without an
 1113 * exclusive lock, so the copied contents are not racy.
1117 /* Assume we can omit data between pd_lower and pd_upper */
1122 memcpy(copied_buffer.
data, origdata,
lower);
1126 memcpy(copied_buffer.
data, origdata, BLCKSZ);
1143 * Write a WAL record containing a full image of a page. Caller is responsible
1144 * for writing the page to disk after calling this routine.
1146 * Note: If you're using this function, you should be building pages in private
1147 * memory and writing them directly to smgr. If you're using buffers, call
1148 * log_newpage_buffer instead.
1150 * If the page follows the standard page layout, with a PageHeader and unused
1151 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1152 * the unused space to be left out from the WAL record, making it smaller.
1156 Page page,
bool page_std)
1170 * The page may be uninitialized. If so, we can't set the LSN because that
1171 * would corrupt the page.
1182 * Like log_newpage(), but allows logging multiple pages in one operation.
1183 * It is more efficient than calling log_newpage() for each page separately,
1184 * because we can write multiple pages in a single WAL record.
1200 * Iterate over all the pages. They are collected into batches of
1201 * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each
1207 while (
i < num_pages)
1227 * The page may be uninitialized. If so, we can't set the LSN
1228 * because that would corrupt the page.
1239 * Write a WAL record containing a full image of a page.
1241 * Caller should initialize the buffer and mark it dirty before calling this
1242 * function. This function will set the page LSN.
1244 * If the page follows the standard page layout, with a PageHeader and unused
1245 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1246 * the unused space to be left out from the WAL record, making it smaller.
1256 /* Shared buffers should be modified in a critical section. */
1261 return log_newpage(&rlocator, forknum, blkno, page, page_std);
1265 * WAL-log a range of blocks in a relation.
1267 * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
1268 * written to the WAL. If the range is large, this is done in multiple WAL
 1271 * If all pages follow the standard page layout, with a PageHeader and unused
1272 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1273 * the unused space to be left out from the WAL records, making them smaller.
1275 * NOTE: This function acquires exclusive-locks on the pages. Typically, this
1276 * is used on a newly-built relation, and the caller is holding a
1277 * AccessExclusiveLock on it, so no other backend can be accessing it at the
1278 * same time. If that's not the case, you must ensure that this does not
1279 * cause a deadlock through some other means.
1294 * Iterate over all the pages in the range. They are collected into
1295 * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
1301 while (blkno < endblk)
1310 /* Collect a batch of blocks. */
1320 * Completely empty pages are not WAL-logged. Writing a WAL record
1321 * would change the LSN, and we don't want that. We want the page
1325 bufpack[nbufs++] =
buf;
1331 /* Nothing more to do if all remaining blocks were empty. */
1335 /* Write WAL record for this batch. */
1339 for (
i = 0;
i < nbufs;
i++)
1347 for (
i = 0;
i < nbufs;
i++)
1357 * Allocate working buffers needed for WAL record construction.
1362#ifdef USE_ASSERT_CHECKING
1365 * Check that any records assembled can be decoded. This is capped based
1366 * on what XLogReader would require at its maximum bound. The XLOG_BLCKSZ
1367 * addend covers the larger allocate_recordbuf() demand. This code path
1368 * is called once per backend, more than enough for this check.
1370 size_t max_required =
1376 /* Initialize the working areas */
1380 "WAL record construction",
1399 * Allocate a buffer to hold the header information for a WAL record.
bool BufferIsExclusiveLocked(Buffer buffer)
void BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
bool BufferIsDirty(Buffer buffer)
XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
void UnlockReleaseBuffer(Buffer buffer)
void MarkBufferDirty(Buffer buffer)
void LockBuffer(Buffer buffer, int mode)
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
static Page BufferGetPage(Buffer buffer)
static Block BufferGetBlock(Buffer buffer)
#define BUFFER_LOCK_EXCLUSIVE
PageHeaderData * PageHeader
static bool PageIsNew(const PageData *page)
#define SizeOfPageHeaderData
static void PageSetLSN(Page page, XLogRecPtr lsn)
static XLogRecPtr PageGetLSN(const PageData *page)
#define MemSet(start, val, len)
int errmsg_internal(const char *fmt,...)
int errdetail_internal(const char *fmt,...)
#define ereport(elevel,...)
volatile uint32 CritSectionCount
Assert(PointerIsAligned(start, uint64))
void * MemoryContextAlloc(MemoryContext context, Size size)
void * MemoryContextAllocZero(MemoryContext context, Size size)
void * repalloc(void *pointer, Size size)
MemoryContext TopMemoryContext
#define AllocSetContextCreate
#define ALLOCSET_DEFAULT_SIZES
#define AllocSizeIsValid(size)
#define IsBootstrapProcessingMode()
#define START_CRIT_SECTION()
#define CHECK_FOR_INTERRUPTS()
#define END_CRIT_SECTION()
Datum lower(PG_FUNCTION_ARGS)
Datum upper(PG_FUNCTION_ARGS)
RepOriginId replorigin_session_origin
#define InvalidRepOriginId
#define XLOG_FPI_FOR_HINT
#define COMP_CRC32C(crc, data, len)
const PGLZ_Strategy *const PGLZ_strategy_default
int32 pglz_compress(const char *source, int32 slen, char *dest, const PGLZ_Strategy *strategy)
static rewind_source * source
#define DELAY_CHKPT_START
struct RelFileLocator RelFileLocator
#define RelFileLocatorEquals(locator1, locator2)
struct XLogRecData * next
XLogRecData bkp_rdatas[2]
char compressed_page[COMPRESS_BUFSIZE]
Datum batch_start(PG_FUNCTION_ARGS)
TransactionId GetTopTransactionIdIfAny(void)
TransactionId GetCurrentTransactionIdIfAny(void)
bool IsSubxactTopXidLogPending(void)
void GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p)
XLogRecPtr GetRedoRecPtr(void)
static XLogRecPtr RedoRecPtr
XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn, uint8 flags, int num_fpi, bool topxid_included)
bool XLogInsertAllowed(void)
bool * wal_consistency_checking
#define XLOG_INCLUDE_ORIGIN
#define SizeOfXLogLongPHD
#define InvalidXLogRecPtr
static XLogRecData * mainrdata_head
static bool XLogCompressBackupBlock(const PageData *page, uint16 hole_offset, uint16 hole_length, void *dest, uint16 *dlen)
XLogRecPtr XLogSimpleInsertInt64(RmgrId rmid, uint8 info, int64 value)
static int max_registered_buffers
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
static uint8 curinsert_flags
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
bool XLogCheckBufferNeedsBackup(Buffer buffer)
void XLogRegisterData(const void *data, uint32 len)
static uint64 mainrdata_len
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
static bool begininsert_called
static int max_registered_block_id
XLogRecPtr log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno, Page page, bool page_std)
void InitXLogInsert(void)
void XLogSetRecordFlags(uint8 flags)
void log_newpages(RelFileLocator *rlocator, ForkNumber forknum, int num_pages, BlockNumber *blknos, Page *pages, bool page_std)
void XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blknum, const PageData *page, uint8 flags)
static XLogRecData * mainrdata_last
static MemoryContext xloginsert_cxt
void log_newpage_range(Relation rel, ForkNumber forknum, BlockNumber startblk, BlockNumber endblk, bool page_std)
void XLogResetInsertion(void)
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
static XLogRecData hdr_rdt
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
static XLogRecData * XLogRecordAssemble(RmgrId rmid, uint8 info, XLogRecPtr RedoRecPtr, bool doPageWrites, XLogRecPtr *fpw_lsn, int *num_fpi, bool *topxid_included)
static char * hdr_scratch
static XLogRecData * rdatas
void XLogBeginInsert(void)
void XLogEnsureRecordSpace(int max_block_id, int ndatas)
static registered_buffer * registered_buffers
#define HEADER_SCRATCH_SIZE
#define XLR_NORMAL_MAX_BLOCK_ID
#define REGBUF_FORCE_IMAGE
#define XLR_NORMAL_RDATAS
size_t DecodeXLogRecordRequiredSpace(size_t xl_tot_len)
#define SizeOfXLogRecordBlockImageHeader
#define XLogRecordMaxSize
#define BKPIMAGE_COMPRESS_ZSTD
#define BKPBLOCK_HAS_DATA
#define BKPIMAGE_HAS_HOLE
#define XLR_BLOCK_ID_DATA_LONG
#define BKPBLOCK_WILL_INIT
#define XLR_RMGR_INFO_MASK
#define BKPIMAGE_COMPRESS_LZ4
#define XLR_BLOCK_ID_TOPLEVEL_XID
#define XLR_BLOCK_ID_DATA_SHORT
#define SizeOfXLogRecordBlockCompressHeader
#define BKPBLOCK_SAME_REL
#define XLR_SPECIAL_REL_UPDATE
#define SizeOfXLogRecordBlockHeader
#define BKPIMAGE_COMPRESS_PGLZ
#define XLR_BLOCK_ID_ORIGIN
#define BKPBLOCK_HAS_IMAGE
#define XLR_CHECK_CONSISTENCY