/*-------------------------------------------------------------------------
 *
 * xlogreader.c
 *		Generic XLog reading facility
 *
 * Portions Copyright (c) 2013-2025, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *		src/backend/access/transam/xlogreader.c
 *
 * NOTES
 *		See xlogreader.h for more notes on this facility.
 *
 *		This file is compiled as both front-end and backend code, so it
 *		may not use ereport, server-defined static variables, etc.
 *-------------------------------------------------------------------------
 */
static void WALOpenSegmentInit(WALOpenSegment *seg, WALSegmentContext *segcxt,
							   int segsize, const char *waldir);
/* size of the buffer allocated for error message. */
#define MAX_ERRORMSG_LEN 1000

/*
 * Default size; large enough that typical users of XLogReader won't often need
 * to use the 'oversized' memory allocation code path.
 */
#define DEFAULT_DECODE_BUFFER_SIZE	(64 * 1024)

/*
 * Construct a string in state->errormsg_buf explaining what's wrong with
 * the current record being read.
 */
	state->errormsg_deferred = true;
/*
 * Set the size of the decoding buffer.  A pointer to a caller supplied memory
 * region may also be passed in, in which case non-oversized records will be
 * decoded there.
 */
	state->decode_buffer = buffer;
	state->decode_buffer_size = size;
	state->decode_buffer_tail = buffer;
	state->decode_buffer_head = buffer;
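/*
 * Illustrative sketch, not part of this file: a caller that wants decoded
 * records placed in memory it controls can install its own decode buffer
 * right after allocating the reader.  The buffer size and the function name
 * are arbitrary; error handling is omitted.
 */
#ifdef NOT_USED
static void
example_set_decode_buffer(XLogReaderState *xlogreader)
{
	/* Caller-owned region; non-oversized records will be decoded into it. */
	size_t		size = 64 * 1024;
	char	   *buffer = palloc(size);

	XLogReaderSetDecodeBuffer(xlogreader, buffer, size);
}
#endif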
/*
 * Allocate and initialize a new XLogReader.
 *
 * Returns NULL if the xlogreader couldn't be allocated.
 */

	/* initialize caller-provided support functions */
	state->routine = *routine;

	/*
	 * Permanently allocate readBuf.  We do it this way, rather than just
	 * making a static array, for two reasons: (1) no need to waste the
	 * storage in most instantiations of the backend; (2) a static char array
	 * isn't guaranteed to have any particular alignment, whereas
	 * palloc_extended() will provide MAXALIGN'd storage.
	 */

	/* Initialize segment info. */

	/* system_identifier initialized to zeroes above */
	state->private_data = private_data;
	/* ReadRecPtr, EndRecPtr and readLen initialized to zeroes above */

	state->errormsg_buf = palloc_extended(MAX_ERRORMSG_LEN + 1,
										  MCXT_ALLOC_NO_OOM);
	if (!state->errormsg_buf)
	{
		pfree(state->readBuf);
		pfree(state);
		return NULL;
	}
	state->errormsg_buf[0] = '0円';

	/*
	 * Allocate an initial readRecordBuf of minimal size, which can later be
	 * enlarged if necessary.
	 */
void
XLogReaderFree(XLogReaderState *state)
{
	if (state->seg.ws_file != -1)
		state->routine.segment_close(state);

	if (state->decode_buffer && state->free_decode_buffer)
		pfree(state->decode_buffer);

	if (state->readRecordBuf)
		pfree(state->readRecordBuf);
	pfree(state->errormsg_buf);
	pfree(state);
}
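/*
 * Illustrative sketch, not part of this file: the typical lifecycle of an
 * XLogReader as seen from a caller.  The page_read/segment_open/segment_close
 * callbacks ("my_page_read" etc.) are assumed to be supplied by the caller,
 * and error handling is reduced to the bare minimum.
 */
#ifdef NOT_USED
static void
example_reader_lifecycle(int wal_segment_size, XLogRecPtr start_lsn)
{
	XLogReaderState *xlogreader;
	XLogRecord *record;
	char	   *errormsg = NULL;

	xlogreader = XLogReaderAllocate(wal_segment_size, NULL,
									XL_ROUTINE(.page_read = my_page_read,
											   .segment_open = my_segment_open,
											   .segment_close = my_segment_close),
									NULL /* private_data */ );
	if (xlogreader == NULL)
		return;					/* out of memory */

	/* Position the reader at a known record boundary. */
	XLogBeginRead(xlogreader, start_lsn);

	/* Read records until the callbacks report no more data. */
	while ((record = XLogReadRecord(xlogreader, &errormsg)) != NULL)
	{
		/* ... inspect the record via the XLogRecGetXXX() macros ... */
	}

	if (errormsg)
	{
		/* *errormsg explains why reading stopped */
	}

	XLogReaderFree(xlogreader);
}
#endif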
/*
 * Allocate readRecordBuf to fit a record of at least the given length.
 *
 * readRecordBufSize is set to the new buffer size.
 *
 * To avoid useless small increases, round its size to a multiple of
 * XLOG_BLCKSZ, and make sure it's at least 5*Max(BLCKSZ, XLOG_BLCKSZ) to start
 * with.  (That is enough for all "normal" records, but very large commit or
 * abort records might need more space.)
 *
 * Note: This routine should *never* be called for xl_tot_len until the header
 * of the record has been fully validated.
 */
static void
allocate_recordbuf(XLogReaderState *state, uint32 reclength)
{
	uint32		newSize = reclength;

	newSize += XLOG_BLCKSZ - (newSize % XLOG_BLCKSZ);
	newSize = Max(newSize, 5 * Max(BLCKSZ, XLOG_BLCKSZ));

	if (state->readRecordBuf)
		pfree(state->readRecordBuf);
	state->readRecordBuf = (char *) palloc(newSize);
	state->readRecordBufSize = newSize;
}

/*
 * Initialize the passed segment structs.
 */
static void
WALOpenSegmentInit(WALOpenSegment *seg, WALSegmentContext *segcxt,
				   int segsize, const char *waldir)

/*
 * Begin reading WAL at 'RecPtr'.
 *
 * 'RecPtr' should point to the beginning of a valid WAL record.  Pointing at
 * the beginning of a page is also OK, if there is a new record right after
 * the page header, i.e. not a continuation.
 *
 * This does not make any attempt to read the WAL yet, and hence cannot fail.
 * If the starting address is not correct, the first call to XLogReadRecord()
 * will error out.
 */
	/* Begin at the passed-in record pointer. */
	state->EndRecPtr = RecPtr;
	state->NextRecPtr = RecPtr;
/*
 * Release the last record that was returned by XLogNextRecord(), if any, to
 * free up space.  Returns the LSN past the end of the record.
 */

	/*
	 * Remove it from the decoded record queue.  It must be the oldest item
	 * decoded, decode_queue_head.
	 */
	record = state->record;
	state->record = NULL;

	/* It might also be the newest item decoded, decode_queue_tail. */
	if (state->decode_queue_tail == record)
		state->decode_queue_tail = NULL;

	/* Release the space. */
	if (unlikely(record->oversized))
	{
		/* It's not in the decode buffer, so free it to release space. */
		pfree(record);
	}
	else
	{
		/* It must be the head (oldest) record in the decode buffer. */
		Assert(state->decode_buffer_head == (char *) record);

		/*
		 * We need to update head to point to the next record that is in the
		 * decode buffer, if any, being careful to skip oversized ones
		 * (they're not in the decode buffer).
		 */
		record = record->next;
		while (unlikely(record && record->oversized))
			record = record->next;

		if (record)
		{
			/* Adjust head to release space up to the next record. */
			state->decode_buffer_head = (char *) record;
		}
		else
		{
			/*
			 * Otherwise we might as well just reset head and tail to the
			 * start of the buffer space, because we're empty.  This means
			 * we'll keep overwriting the same piece of memory if we're not
			 * doing any prefetching.
			 */
			state->decode_buffer_head = state->decode_buffer;
			state->decode_buffer_tail = state->decode_buffer;
		}
	}
/*
 * Attempt to read an XLOG record.
 *
 * XLogBeginRead() or XLogFindNextRecord() and then XLogReadAhead() must be
 * called before the first call to XLogNextRecord().  This function returns
 * records and errors that were put into an internal queue by XLogReadAhead().
 *
 * On success, a record is returned.
 *
 * The returned record (or *errormsg) points to an internal buffer that's
 * valid until the next call to XLogNextRecord.
 */

	/* Release the last record returned by XLogNextRecord(). */

	if (state->decode_queue_head == NULL)
	{
		*errormsg = NULL;
		if (state->errormsg_deferred)
		{
			if (state->errormsg_buf[0] != '0円')
				*errormsg = state->errormsg_buf;
			state->errormsg_deferred = false;
		}

		/*
		 * state->EndRecPtr is expected to have been set by the last call to
		 * XLogBeginRead() or XLogNextRecord(), and is the location of the
		 * error.
		 */
		return NULL;
	}

	/*
	 * Record this as the most recent record returned, so that we'll release
	 * it next time.  This also exposes it to the traditional
	 * XLogRecXXX(xlogreader) macros, which work with the decoder rather than
	 * the record for historical reasons.
	 */

	/*
	 * Update the pointers to the beginning and one-past-the-end of this
	 * record, again for the benefit of historical code that expected the
	 * decoder to track this rather than accessing these fields of the record
	 * itself.
	 */

	return state->record;
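/*
 * Illustrative sketch, not part of this file: how a caller can drive the
 * queue-based interface directly, by decoding ahead and then consuming the
 * head of the queue.  This mirrors what XLogReadRecord() does internally;
 * the function name is arbitrary.
 */
#ifdef NOT_USED
static DecodedXLogRecord *
example_read_one_decoded(XLogReaderState *xlogreader, char **errormsg)
{
	/* Decode at least one record (or queue an error), blocking if needed. */
	(void) XLogReadAhead(xlogreader, false /* nonblocking */ );

	/* Consume the head of the queue: a record, or NULL with *errormsg set. */
	return XLogNextRecord(xlogreader, errormsg);
}
#endif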
/*
 * Attempt to read an XLOG record.
 *
 * XLogBeginRead() or XLogFindNextRecord() must be called before the first call
 * to XLogReadRecord().
 *
 * If the page_read callback fails to read the requested data, NULL is
 * returned.  The callback is expected to have reported the error; errormsg
 * is set to NULL.
 *
 * If the reading fails for some other reason, NULL is also returned, and
 * *errormsg is set to a string with details of the failure.
 *
 * The returned pointer (or *errormsg) points to an internal buffer that's
 * valid until the next call to XLogReadRecord.
 */

	/*
	 * Release last returned record, if there is one.  We need to do this so
	 * that we can check for empty decode queue accurately.
	 */

	/*
	 * Call XLogReadAhead() in blocking mode to make sure there is something
	 * in the queue, though we don't use the result.
	 */

	/* Consume the head record or error. */

	/*
	 * This function returns a pointer to the record's header, not the
	 * actual decoded record.  The caller will access the decoded record
	 * through the XLogRecGetXXX() macros, which reach the decoded record
	 * via xlogreader->record.
	 */
/*
 * Allocate space for a decoded record.  The only member of the returned
 * object that is initialized is the 'oversized' flag, indicating that the
 * decoded record wouldn't fit in the decode buffer and must eventually be
 * freed explicitly.
 *
 * The caller is responsible for adjusting decode_buffer_tail with the real
 * size after successfully decoding a record into this space.  This way, if
 * decoding fails, then there is nothing to undo unless the 'oversized' flag
 * was set and pfree() must be called.
 *
 * Return NULL if there is no space in the decode buffer and allow_oversized
 * is false, or if memory allocation fails for an oversized buffer.
 */

	/* Allocate a circular decode buffer if we don't have one already. */
	if (unlikely(state->decode_buffer == NULL))
	{
		if (state->decode_buffer_size == 0)
			state->decode_buffer_size = DEFAULT_DECODE_BUFFER_SIZE;
		state->decode_buffer = palloc(state->decode_buffer_size);
		state->decode_buffer_head = state->decode_buffer;
		state->decode_buffer_tail = state->decode_buffer;
		state->free_decode_buffer = true;
	}

	/* Try to allocate space in the circular decode buffer. */
	if (state->decode_buffer_tail >= state->decode_buffer_head)
	{
		/* Empty, or tail is to the right of head. */
		if (required_space <=
			state->decode_buffer_size -
			(state->decode_buffer_tail - state->decode_buffer))
		{
			/*-
			 * There is space between tail and end.
			 *
			 * +-----+--------------------+-----+
			 * |     |////////////////////|here!|
			 * +-----+--------------------+-----+
			 */
			decoded = (DecodedXLogRecord *) state->decode_buffer_tail;
			decoded->oversized = false;
			return decoded;
		}
		else if (required_space <
				 state->decode_buffer_head - state->decode_buffer)
		{
			/*-
			 * There is space between start and head.
			 *
			 * +-----+--------------------+-----+
			 * |here!|////////////////////|     |
			 * +-----+--------------------+-----+
			 */
			decoded = (DecodedXLogRecord *) state->decode_buffer;
			decoded->oversized = false;
			return decoded;
		}
	}
	else
	{
		/* Tail is to the left of head. */
		if (required_space <
			state->decode_buffer_head - state->decode_buffer_tail)
		{
			/*-
			 * There is space between tail and head.
			 *
			 * +-----+--------------------+-----+
			 * |/////|here!               |/////|
			 * +-----+--------------------+-----+
			 */
			decoded = (DecodedXLogRecord *) state->decode_buffer_tail;
			decoded->oversized = false;
			return decoded;
		}
	}

	/* Not enough space in the decode buffer.  Are we allowed to allocate? */
	if (allow_oversized)
	{
		decoded = palloc(required_space);
		decoded->oversized = true;
		return decoded;
	}

	return NULL;
	char	   *errormsg;		/* not used */

	/*
	 * randAccess indicates whether to verify the previous-record pointer of
	 * the record we're reading.  We only do this if we're reading
	 * sequentially, which is what we initially assume.
	 */

	/* reset error state */
	state->errormsg_buf[0] = '0円';

		RecPtr = state->NextRecPtr;

		/* read the record after the one we just read */

		/*
		 * NextRecPtr is pointing to end+1 of the previous WAL record.  If
		 * we're at a page boundary, no more records can fit on the current
		 * page.  We must skip over the page header, but we can't do that
		 * until we've read in the page, since the header size is variable.
		 */

		/*
		 * Caller supplied a position to start at.
		 *
		 * In this case, NextRecPtr should already be pointing either to a
		 * valid record starting position or alternatively to the beginning
		 * of a page.  See the header comments for XLogBeginRead.
		 */

	state->nonblocking = nonblocking;
	state->currRecPtr = RecPtr;

	targetPagePtr = RecPtr - (RecPtr % XLOG_BLCKSZ);
	targetRecOff = RecPtr % XLOG_BLCKSZ;
	/*
	 * Read the page containing the record into state->readBuf.  Request
	 * enough bytes to cover the whole record header, or at least the part of
	 * it that fits on the same page.
	 */

	/*
	 * ReadPageInternal always returns at least the page header, so we can
	 * examine it now.
	 */
	if (targetRecOff == 0)
	{
		/*
		 * At page start, so skip over page header.
		 */
		RecPtr += pageHeaderSize;
		targetRecOff = pageHeaderSize;
	}
	else if (targetRecOff < pageHeaderSize)
	{
		report_invalid_record(state,
							  "invalid record offset at %X/%08X: expected at least %u, got %u",
							  LSN_FORMAT_ARGS(RecPtr),
							  pageHeaderSize, targetRecOff);
		goto err;
	}

	if ((((XLogPageHeader) state->readBuf)->xlp_info & XLP_FIRST_IS_CONTRECORD) &&
		targetRecOff == pageHeaderSize)
	{
		report_invalid_record(state, "contrecord is requested by %X/%08X",
							  LSN_FORMAT_ARGS(RecPtr));
		goto err;
	}

	/* ReadPageInternal has verified the page header */
	/*
	 * Read the record length.
	 *
	 * NB: Even though we use an XLogRecord pointer here, the whole record
	 * header might not fit on this page.  xl_tot_len is the first field of
	 * the struct, so it must be on this page (the records are MAXALIGNed),
	 * but we cannot access any other fields until we've verified that we got
	 * the whole header.
	 */

	/*
	 * If the whole record header is on this page, validate it immediately.
	 * Otherwise do just a basic sanity check on xl_tot_len, and validate the
	 * rest of the header after reading it from the next page.  The xl_tot_len
	 * check is necessary here to ensure that we enter the "Need to reassemble
	 * record" code path below; otherwise we might fail to apply
	 * ValidXLogRecordHeader at all.
	 */

			/* There may be no next page if it's too small. */

			report_invalid_record(state,
								  "invalid record length at %X/%08X: expected at least %u, got %u",

			/* We'll validate the header once we have the next page. */

	/*
	 * Try to find space to decode this record, if we can do so without
	 * calling palloc.  If we can't, we'll try again below after we've
	 * validated that total_len isn't garbage bytes from a recycled WAL page.
	 */
	decoded = XLogReadRecordAlloc(state,
								  total_len,
								  false /* allow_oversized */ );
	if (decoded == NULL && nonblocking)
		/*
		 * There is no space in the circular decode buffer, and the caller is
		 * only reading ahead.  The caller should consume existing records to
		 * make space.
		 */

		len = XLOG_BLCKSZ - RecPtr % XLOG_BLCKSZ;
	/* Need to reassemble record */

		/*
		 * We always have space for a couple of pages, enough to validate a
		 * boundary-spanning record header.
		 */
		Assert(state->readRecordBufSize >= XLOG_BLCKSZ * 2);

		/* Copy the first fragment of the record from the first page. */
		memcpy(state->readRecordBuf,
			   state->readBuf + RecPtr % XLOG_BLCKSZ, len);
		buffer = state->readRecordBuf + len;
			/* Calculate pointer to beginning of next page */
			targetPagePtr += XLOG_BLCKSZ;

			/*
			 * Read the page header before processing the record data, so we
			 * can handle the case where the previous record ended as being a
			 * partial one.
			 */
			/*
			 * If we were expecting a continuation record and got an
			 * "overwrite contrecord" flag, that means the continuation record
			 * was overwritten with a different record.  Restart the read by
			 * assuming the address to read is the location where we found
			 * this flag; but keep track of the LSN of the record we were
			 * reading, for later verification.
			 */
				state->overwrittenRecPtr = RecPtr;
				RecPtr = targetPagePtr;

			/* Check that the continuation on next page looks valid */
				"there is no contrecord flag at %X/%08X",

			/*
			 * Cross-check that xlp_rem_len agrees with how much of the record
			 * we expect there to be left.
			 */
				"invalid contrecord length %u (expected %lld) at %X/%08X",
				((long long) total_len) - gotlen,

			/* Wait for the next page to become available */

			/* Append the continuation from this page to the buffer */

			contdata = (char *) state->readBuf + pageHeaderSize;
			len = XLOG_BLCKSZ - pageHeaderSize;
										   pageHeaderSize + len);

			memcpy(buffer, contdata, len);
			/* If we just reassembled the record header, validate it. */

			/*
			 * We might need a bigger buffer.  We have validated the record
			 * header, in the case that it split over a page boundary.  We've
			 * also cross-checked total_len against xlp_rem_len on the second
			 * page, and verified xlp_pageaddr on both.
			 */
			if (total_len > state->readRecordBufSize)
			{
				char		save_copy[XLOG_BLCKSZ * 2];

				/*
				 * Save and restore the data we already had.  It can't be more
				 * than two pages' worth.
				 */
				Assert(gotlen <= state->readRecordBufSize);
				memcpy(save_copy, state->readRecordBuf, gotlen);
				allocate_recordbuf(state, total_len);
				memcpy(state->readRecordBuf, save_copy, gotlen);
				buffer = state->readRecordBuf + gotlen;
			}
		} while (gotlen < total_len);

		state->DecodeRecPtr = RecPtr;
		state->NextRecPtr = targetPagePtr + pageHeaderSize
			+ MAXALIGN(pageHeader->xlp_rem_len);
	}
	else
	{
		/* Wait for the record data to become available */
		readOff = ReadPageInternal(state, targetPagePtr,
								   Min(targetRecOff + total_len, XLOG_BLCKSZ));

		/* Record does not cross a page boundary */

		state->DecodeRecPtr = RecPtr;
	}

	/*
	 * Special processing if it's an XLOG SWITCH record
	 */
	if (record->xl_rmid == RM_XLOG_ID &&
		(record->xl_info & ~XLR_INFO_MASK) == XLOG_SWITCH)
	{
		/* Pretend it extends to end of segment */
		state->NextRecPtr += state->segcxt.ws_segsize - 1;
	}

	/*
	 * If we got here without a DecodedXLogRecord, it means we needed to
	 * validate total_len before trusting it, but by now we've done that.
	 */
		decoded = XLogReadRecordAlloc(state,
									  total_len,
									  true /* allow_oversized */ );
		/* allocation should always happen under allow_oversized */
	/* Record the location of the next record. */

	/*
	 * If it's in the decode buffer, mark the decode buffer space as
	 * occupied.
	 */
	if (!decoded->oversized)
	{
		/* The new decode buffer head must be MAXALIGNed. */
		Assert(decoded->size == MAXALIGN(decoded->size));
		if ((char *) decoded == state->decode_buffer)
			state->decode_buffer_tail = state->decode_buffer + decoded->size;
		else
			state->decode_buffer_tail += decoded->size;
	}

	/* Insert it into the queue of decoded records. */
	if (state->decode_queue_tail)
		state->decode_queue_tail->next = decoded;
	state->decode_queue_tail = decoded;
	if (!state->decode_queue_head)
		state->decode_queue_head = decoded;

err:

	/*
	 * We get here when a record that spans multiple pages needs to be
	 * assembled, but something went wrong -- perhaps a contrecord piece
	 * was lost.  If caller is WAL replay, it will know where the aborted
	 * record was and where to direct followup WAL to be written, marking
	 * the next piece with XLP_FIRST_IS_OVERWRITE_CONTRECORD, which will
	 * in turn signal downstream WAL consumers that the broken WAL record
	 * is to be ignored.
	 */
		state->abortedRecPtr = RecPtr;
		state->missingContrecPtr = targetPagePtr;

		/*
		 * If we got here without reporting an error, make sure an error is
		 * queued so that XLogPrefetcherReadRecord() doesn't bring us back a
		 * second time and clobber the above state.
		 */
		state->errormsg_deferred = true;

	/*
	 * Invalidate the read state.  We might read from a different source
	 * after failure.
	 */

	/*
	 * If an error was written to errormsg_buf, it'll be returned to the
	 * caller of XLogReadRecord() after all successfully decoded records from
	 * the read queue.
	 */

/*
 * Try to decode the next available record, and return it.  The record will
 * also be returned to XLogNextRecord(), which must be called to 'consume'
 * each record.
 *
 * If nonblocking is true, may return NULL due to lack of data or WAL decoding
 * space.
 */
	if (state->errormsg_deferred)
		return NULL;

		return state->decode_queue_tail;
/*
 * Read a single xlog page including at least [pageptr, reqLen] of valid data
 * via the page_read() callback.
 *
 * Returns XLREAD_FAIL if the required page cannot be read for some
 * reason; errormsg_buf is set in that case (unless the error occurs in the
 * page_read callback).
 *
 * Returns XLREAD_WOULDBLOCK if the requested data can't be read without
 * waiting.  This can be returned only if the installed page_read callback
 * respects the state->nonblocking flag, and cannot read the requested data
 * immediately.
 *
 * We fetch the page from a reader-local cache if we know we have the required
 * data and if there hasn't been any error since caching the data.
 */
	Assert((pageptr % XLOG_BLCKSZ) == 0);

	/* check whether we have all the requested data already */
	if (targetSegNo == state->seg.ws_segno &&
		targetPageOff == state->segoff && reqLen <= state->readLen)
		return state->readLen;
	/*
	 * Invalidate contents of internal buffer before read attempt.  Just set
	 * the length to 0, rather than a full XLogReaderInvalReadState(), so we
	 * don't forget the segment we last successfully read.
	 */

	/*
	 * Data is not in our buffer.
	 *
	 * Every time we actually read the segment, even if we looked at parts of
	 * it before, we need to do verification as the page_read callback might
	 * now be rereading data from a different source.
	 *
	 * Whenever switching to a new WAL segment, we read the first page of the
	 * file and validate its header, even if that's not where the target
	 * record is.  This is so that we can check the additional identification
	 * info that is present in the first page's "long" header.
	 */
	if (targetSegNo != state->seg.ws_segno && targetPageOff != 0)
	{
		XLogRecPtr	targetSegmentPtr = pageptr - targetPageOff;

		/* we can be sure to have enough WAL available, we scrolled back */
	}

	/*
	 * First, read the requested data length, but at least a short page header
	 * so that we can validate it.
	 */

	/* Do we have enough data to check the header length? */

	/* still not enough */

	/*
	 * Now that we know we have the full header, validate it.
	 */

	/* update read state information */
	state->seg.ws_segno = targetSegNo;
	state->segoff = targetPageOff;

/*
 * Invalidate the xlogreader's read state to force a re-read.
 */
	state->seg.ws_segno = 0;

/*
 * Validate an XLOG record header.
 *
 * This is just a convenience subroutine to avoid duplicated code in
 * XLogReadRecord.  It's not intended for use from anywhere else.
 */
		"invalid record length at %X/%08X: expected at least %u, got %u",
		"invalid resource manager ID %u at %X/%08X",

	/*
	 * We can't exactly verify the prev-link, but surely it should be less
	 * than the record's own address.
	 */
	if (!(record->xl_prev < RecPtr))
		"record with incorrect prev-link %X/%08X at %X/%08X",

	/*
	 * Record's prev-link should exactly match our previous location.  This
	 * check guards against torn WAL pages where a stale but valid-looking
	 * WAL record starts on a sector boundary.
	 */
	if (record->xl_prev != PrevRecPtr)
		"record with incorrect prev-link %X/%08X at %X/%08X",
/*
 * CRC-check an XLOG record.  We do not believe the contents of an XLOG
 * record (other than to the minimal extent of computing the amount of
 * data to read in) until we've checked the CRCs.
 *
 * We assume all of the record (that is, xl_tot_len bytes) has been read
 * into memory at *record.  Also, ValidXLogRecordHeader() has accepted the
 * record's header, which means in particular that xl_tot_len is at least
 * SizeOfXLogRecord.
 */
	/* Calculate the CRC */

	/* include the record header last */
		"incorrect resource manager data checksum in record at %X/%08X",
/*
 * Validate a page header.
 *
 * Check if 'phdr' is valid as the header of the XLog page at position
 * 'recptr'.
 */
	Assert((recptr % XLOG_BLCKSZ) == 0);

		"invalid magic number %04X in WAL segment %s, LSN %X/%08X, offset %u",
		"invalid info bits %04X in WAL segment %s, LSN %X/%08X, offset %u",

		if (state->system_identifier &&
			"WAL file is from different database system: WAL file database system identifier is %" PRIu64 ", pg_control database system identifier is %" PRIu64,
			state->system_identifier);
			"WAL file is from different database system: incorrect segment size in page header");
			"WAL file is from different database system: incorrect XLOG_BLCKSZ in page header");

	else if (offset == 0)

		/* hmm, first page of file doesn't have a long header? */
			"invalid info bits %04X in WAL segment %s, LSN %X/%08X, offset %u",

	/*
	 * Check that the address on the page agrees with what we expected.  This
	 * check typically fails when an old WAL segment is recycled, and hasn't
	 * yet been overwritten with new data.
	 */
		"unexpected pageaddr %X/%08X in WAL segment %s, LSN %X/%08X, offset %u",

	/*
	 * Since child timelines are always assigned a TLI greater than their
	 * immediate parent's TLI, we should never see TLI go backwards across
	 * successive pages of a consistent WAL sequence.
	 *
	 * Sometimes we re-read a segment that's already been (partially) read.
	 * So we only verify TLIs for pages that are later than the last
	 * remembered LSN.
	 */
	if (recptr > state->latestPagePtr)
		"out-of-sequence timeline ID %u (after %u) in WAL segment %s, LSN %X/%08X, offset %u",
		state->latestPageTLI,

	state->latestPagePtr = recptr;
/*
 * Forget about an error produced by XLogReaderValidatePageHeader().
 */
	state->errormsg_buf[0] = '0円';
	state->errormsg_deferred = false;
/*
 * Find the first record with an lsn >= RecPtr.
 *
 * This is different from XLogBeginRead() in that RecPtr doesn't need to point
 * to a valid record boundary.  Useful for checking whether RecPtr is a valid
 * xlog address for reading, and to find the first valid address after some
 * address when dumping records for debugging purposes.
 *
 * This positions the reader, like XLogBeginRead(), so that the next call to
 * XLogReadRecord() will read the next valid record.
 */

	/* Make sure ReadPageInternal() can't return XLREAD_WOULDBLOCK. */
	state->nonblocking = false;

	/*
	 * skip over potential continuation data, keeping in mind that it may span
	 * multiple pages
	 */

		/*
		 * Compute targetRecOff.  It should typically be equal or greater than
		 * short page-header since a valid record can't start anywhere before
		 * that, except when caller has explicitly specified the offset that
		 * falls somewhere there or when we are skipping multi-page
		 * continuation record.  It doesn't matter though because
		 * ReadPageInternal() is prepared to handle that and will read at
		 * least short page-header worth of data.
		 */
		targetRecOff = tmpRecPtr % XLOG_BLCKSZ;

		/* scroll back to page boundary */
		targetPagePtr = tmpRecPtr - targetRecOff;

		/* Read the page containing the record */

		/* make sure we have enough data for the page header */

		/* skip over potential continuation data */

			/*
			 * If the length of the remaining continuation data is more than
			 * what can fit in this page, the continuation record crosses over
			 * this page.  Read the next page and try again.  xlp_rem_len in
			 * the next page header will contain the remaining length of the
			 * continuation data.
			 *
			 * Note that record headers are MAXALIGN'ed
			 */
				tmpRecPtr = targetPagePtr + XLOG_BLCKSZ;

				/*
				 * The previous continuation record ends in this page.  Set
				 * tmpRecPtr to point to the first valid record.
				 */
				tmpRecPtr = targetPagePtr + pageHeaderSize
					+ MAXALIGN(header->xlp_rem_len);
			tmpRecPtr = targetPagePtr + pageHeaderSize;

	/*
	 * we know now that tmpRecPtr is an address pointing to a valid XLogRecord
	 * because either we're at the first record after the beginning of a page
	 * or we just jumped over the remaining data of a continuation.
	 */

		/* past the record we've found, break out */
		if (RecPtr <= state->ReadRecPtr)

			/* Rewind the reader to the beginning of the last record. */
			found = state->ReadRecPtr;
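/*
 * Illustrative sketch, not part of this file: position a reader at the first
 * valid record at or after an arbitrary LSN, then read that record.  The
 * function and variable names are arbitrary; error handling is minimal.
 */
#ifdef NOT_USED
static void
example_find_and_read(XLogReaderState *xlogreader, XLogRecPtr somewhere)
{
	XLogRecPtr	first;
	XLogRecord *record;
	char	   *errormsg;

	first = XLogFindNextRecord(xlogreader, somewhere);
	if (XLogRecPtrIsInvalid(first))
		return;					/* no valid record found */

	/* The reader is already positioned; this reads the record at 'first'. */
	record = XLogReadRecord(xlogreader, &errormsg);
	(void) record;
}
#endif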
/*
 * Helper function to ease writing of XLogReaderRoutine->page_read callbacks.
 * If this function is used, caller must supply a segment_open callback in
 * 'state', as that is used here.
 *
 * Read 'count' bytes into 'buf', starting at location 'startptr', from WAL
 * fetched from timeline 'tli'.
 *
 * Returns true if succeeded, false if an error occurs, in which case
 * 'errinfo' receives error details.
 */

		/*
		 * If the data we want is not in a segment we have open, close what we
		 * have (if anything) and open the next one, using the caller's
		 * provided segment_open callback.
		 */
		if (state->seg.ws_file < 0 ||
			!XLByteInSeg(recptr, state->seg.ws_segno, state->segcxt.ws_segsize) ||
			tli != state->seg.ws_tli)
		{
			if (state->seg.ws_file >= 0)
				state->routine.segment_close(state);

			state->routine.segment_open(state, nextSegNo, &tli);

			/* This shouldn't happen -- indicates a bug in segment_open */

			/* Update the current segment info. */
			state->seg.ws_tli = tli;
			state->seg.ws_segno = nextSegNo;
		}

		/* How many bytes are within this segment? */
		if (nbytes > (state->segcxt.ws_segsize - startoff))
			segbytes = state->segcxt.ws_segsize - startoff;

		/* Measure I/O timing when reading segment */

		/* Reset errno first; eases reporting non-errno-affecting errors */
		readbytes = pg_pread(state->seg.ws_file, p, segbytes, (off_t) startoff);

									io_start, 1, readbytes);

		/* Update state for read */
		recptr += readbytes;
		nbytes -= readbytes;
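/*
 * Illustrative sketch, not part of this file: a minimal page_read callback
 * built on top of WALRead().  It assumes a hypothetical private-data struct
 * carrying the timeline to read from, and that segment_open/segment_close
 * callbacks are installed; a real callback also has to decide how much WAL
 * is safe to read rather than always requesting a full page.
 */
#ifdef NOT_USED
typedef struct ExamplePrivate
{
	TimeLineID	tli;			/* timeline to read from */
} ExamplePrivate;

static int
example_page_read(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
				  int reqLen, XLogRecPtr targetRecPtr, char *readBuf)
{
	ExamplePrivate *priv = (ExamplePrivate *) xlogreader->private_data;
	WALReadError errinfo;

	/* Try to hand back a whole page; WALRead() crosses segment boundaries. */
	if (!WALRead(xlogreader, readBuf, targetPagePtr, XLOG_BLCKSZ,
				 priv->tli, &errinfo))
	{
		/* A real callback would report the details in 'errinfo' here. */
		return -1;
	}

	return XLOG_BLCKSZ;
}
#endif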
/* ----------------------------------------
 * Functions for decoding the data and block references in a record.
 * ----------------------------------------
 */

/*
 * Private function to reset the state, forgetting all decoded records, if we
 * are asked to move to a new read position.
 */
	/* Reset the decoded record queue, freeing any oversized records. */
	while ((r = state->decode_queue_head) != NULL)
	{
		state->decode_queue_head = r->next;
		if (r->oversized)
			pfree(r);
	}
	state->decode_queue_tail = NULL;
	state->decode_queue_head = NULL;
	state->record = NULL;

	/* Reset the decode buffer to empty. */
	state->decode_buffer_tail = state->decode_buffer;
	state->decode_buffer_head = state->decode_buffer;

	/* Clear error state. */
	state->errormsg_buf[0] = '0円';
	state->errormsg_deferred = false;
/*
 * Compute the maximum possible amount of padding that could be required to
 * decode a record, given xl_tot_len from the record's header.  This is the
 * amount of output buffer space that we need to decode a record, though we
 * might not finish up using it all.
 *
 * This computation is pessimistic and assumes the maximum possible number of
 * blocks, due to lack of better information.
 */
	/* Account for the fixed size part of the decoded record struct. */
	/* Account for the flexible blocks array of maximum possible size. */
	/* Account for all the raw main and block data. */
	/* We might insert padding before main_data. */
	size += (MAXIMUM_ALIGNOF - 1);
	/* We might insert padding before each block's data. */
	/* We might insert padding at the end. */
	size += (MAXIMUM_ALIGNOF - 1);
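/*
 * Sketch of the pessimistic computation described above (not the function
 * body itself): one fixed-size header, the full blocks array, all raw data,
 * plus worst-case MAXALIGN padding before the main data, before each block's
 * data, and at the end.  The function name is illustrative only.
 */
#ifdef NOT_USED
static size_t
example_decode_required_space(size_t xl_tot_len)
{
	size_t		size = 0;

	/* Fixed size part of the decoded record struct. */
	size += offsetof(DecodedXLogRecord, blocks);
	/* Flexible blocks array of maximum possible size. */
	size += sizeof(DecodedBkpBlock) * (XLR_MAX_BLOCK_ID + 1);
	/* All the raw main and block data. */
	size += xl_tot_len;
	/* Possible padding before main_data. */
	size += (MAXIMUM_ALIGNOF - 1);
	/* Possible padding before each block's data. */
	size += (MAXIMUM_ALIGNOF - 1) * (XLR_MAX_BLOCK_ID + 1);
	/* Possible padding at the end. */
	size += (MAXIMUM_ALIGNOF - 1);

	return size;
}
#endif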
/*
 * Decode a record.  "decoded" must point to a MAXALIGNed memory area that has
 * space for at least DecodeXLogRecordRequiredSpace(record) bytes.  On
 * success, decoded->size contains the actual space occupied by the decoded
 * record, which may turn out to be less.
 *
 * Only decoded->oversized member must be initialized already, and will not be
 * modified.  Other members will be initialized as required.
 *
 * On error, a human-readable error message is returned in *errormsg, and
 * the return value is false.
 */

/*
 * read next _size bytes from record buffer, but check for overrun first.
 */
#define COPY_HEADER_FIELD(_dst, _size)			\
	do {										\
		if (remaining < _size)					\
			goto shortdata_err;					\
		memcpy(_dst, ptr, _size);				\
		ptr += _size;							\
		remaining -= _size;						\
	} while (0)

	decoded->header = *record;
	decoded->next = NULL;

	ptr = (char *) record;

	/* Decode the headers */

			/* XLogRecordDataHeaderShort */
			uint8		main_data_len;

			datatotal += main_data_len;
			break;				/* by convention, the main data fragment is
								 * encoded at the end */

			/* XLogRecordDataHeaderLong */

			datatotal += main_data_len;
			break;				/* by convention, the main data fragment is
								 * encoded at the end */
			/* XLogRecordBlockHeader */

			/* mark any intervening block IDs as not in use */

			if (block_id <= decoded->max_block_id)
				"out-of-order block_id %u at %X/%08X",

			blk = &decoded->blocks[block_id];
			blk->flags = fork_flags;

			/* cross-check that the HAS_DATA flag is set iff data_length > 0 */
				"BKPBLOCK_HAS_DATA set, but no data included at %X/%08X",
				"BKPBLOCK_HAS_DATA not set, but data length is %u at %X/%08X",

				/*
				 * cross-check that hole_offset > 0, hole_length > 0 and
				 * bimg_len < BLCKSZ if the HAS_HOLE flag is set.
				 */
					"BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%08X",

				/*
				 * cross-check that hole_offset == 0 and hole_length == 0 if
				 * the HAS_HOLE flag is not set.
				 */
					"BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%08X",

				/*
				 * Cross-check that bimg_len < BLCKSZ if it is compressed.
				 */
					"BKPIMAGE_COMPRESSED set, but block image length %u at %X/%08X",

				/*
				 * cross-check that bimg_len = BLCKSZ if neither HAS_HOLE is
				 * set nor COMPRESSED().
				 */
					"neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_COMPRESSED set, but block image length is %u at %X/%08X",

				if (rlocator == NULL)
					"BKPBLOCK_SAME_REL set but no previous rel at %X/%08X",

			"invalid block_id %u at %X/%08X",
	/*
	 * Ok, we've parsed the fragment headers, and verified that the total
	 * length of the payload in the fragments is equal to the amount of data
	 * left.  Copy the data of each fragment to contiguous space after the
	 * blocks array, inserting alignment padding before the data fragments so
	 * they can be cast to struct pointers by REDO routines.
	 */
	out = ((char *) decoded) +
		offsetof(DecodedXLogRecord, blocks) +
		sizeof(decoded->blocks[0]) * (decoded->max_block_id + 1);

	/* block data first */
	for (block_id = 0; block_id <= decoded->max_block_id; block_id++)

			/* no need to align image */

	/* and finally, the main data */

	/* Report the actual size we used. */

			"record with invalid length at %X/%08X",
	*errormsg = state->errormsg_buf;

/*
 * Returns information about the block that a block reference refers to.
 *
 * This is like XLogRecGetBlockTagExtended, except that the block reference
 * must exist and there's no access to prefetch_buffer.
 */
#ifndef FRONTEND
		elog(ERROR, "could not locate backup block with ID %d in WAL record",
			 block_id);
#else
		pg_fatal("could not locate backup block with ID %d in WAL record",
				 block_id);
#endif
/*
 * Returns information about the block that a block reference refers to,
 * optionally including the buffer that the block may already be in.
 *
 * If the WAL record contains a block reference with the given ID, *rlocator,
 * *forknum, *blknum and *prefetch_buffer are filled in (if not NULL), and
 * returns true.  Otherwise returns false.
 */
	*blknum = bkpb->blkno;
	if (prefetch_buffer)

/*
 * Returns the data associated with a block reference, or NULL if there is
 * no data (e.g. because a full-page image was taken instead).  The returned
 * pointer points to a MAXALIGNed buffer.
 */
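/*
 * Illustrative sketch, not part of this file: how redo-style code typically
 * consumes a block reference -- fetch the tag, then use any per-block data
 * attached to it.  The function name and the choice of block ID 0 are
 * arbitrary.
 */
#ifdef NOT_USED
static void
example_inspect_block_zero(XLogReaderState *record)
{
	RelFileLocator rlocator;
	ForkNumber	forknum;
	BlockNumber blkno;
	char	   *data;
	Size		len;

	if (!XLogRecHasBlockRef(record, 0))
		return;

	/* Which relation/fork/block does block reference 0 point at? */
	XLogRecGetBlockTag(record, 0, &rlocator, &forknum, &blkno);

	/* Per-block data, or NULL if none (e.g. a full-page image was logged). */
	data = XLogRecGetBlockData(record, 0, &len);
	if (data != NULL)
	{
		/* ... interpret 'len' bytes at 'data' ... */
	}
}
#endif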
/*
 * Restore a full-page image from a backup block attached to an XLOG record.
 *
 * Returns true if a full-page image is restored, and false on failure with
 * an error to be consumed by the caller.
 */
		"could not restore image at %X/%08X with invalid block %d specified",

	/* If a backup block image is compressed, decompress it */
	bool		decomp_success = true;

			decomp_success = false;

		if (LZ4_decompress_safe(ptr, tmp.data,
			decomp_success = false;

		report_invalid_record(record,
							  "could not restore image at %X/%08X compressed with %s not supported by build, block %d",

		size_t		decomp_result = ZSTD_decompress(tmp.data,

		if (ZSTD_isError(decomp_result))
			decomp_success = false;

		report_invalid_record(record,
							  "could not restore image at %X/%08X compressed with %s not supported by build, block %d",

		report_invalid_record(record,
							  "could not restore image at %X/%08X compressed with unknown method, block %d",

	if (!decomp_success)

	/* generate page, taking into account hole if necessary */
		memcpy(page, ptr, BLCKSZ);

		/* must zero-fill the hole */
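/*
 * Illustrative sketch, not part of this file: restoring the full-page image
 * attached to block reference 0, if one exists.  'page' must point to at
 * least BLCKSZ bytes; error reporting is omitted.  The function name and the
 * block ID are arbitrary.
 */
#ifdef NOT_USED
static bool
example_restore_block_zero(XLogReaderState *record, char *page)
{
	if (!XLogRecHasBlockImage(record, 0))
		return false;

	return RestoreBlockImage(record, 0, page);
}
#endif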
/*
 * Extract the FullTransactionId from a WAL record.
 *
 * This function is only safe during replay, because it depends on the
 * replay state.  See AdvanceNextFullTransactionIdPastXid() for more.
 */
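/*
 * Sketch of the epoch reconstruction described above (not the function body
 * itself; the in-tree code may use a helper for this).  The record stores
 * only a 32-bit xid, so the epoch is taken from TransamVariables->nextXid and
 * adjusted if the xid has not wrapped along with it yet.
 */
#ifdef NOT_USED
static FullTransactionId
example_full_xid_from_record(XLogReaderState *record)
{
	TransactionId xid = XLogRecGetXid(record);
	FullTransactionId nextFullXid = TransamVariables->nextXid;
	TransactionId nextXid = XidFromFullTransactionId(nextFullXid);
	uint32		epoch = EpochFromFullTransactionId(nextFullXid);

	/* An already-allocated xid that is "ahead" of nextXid must belong to the
	 * previous epoch. */
	if (unlikely(xid > nextXid))
		--epoch;

	return FullTransactionIdFromEpochAndXid(epoch, xid);
}
#endif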