/*-------------------------------------------------------------------------
 *
 * freepage.c
 *	  Management of free memory pages.
 *
 * The intention of this code is to provide infrastructure for memory
 * allocators written specifically for PostgreSQL.  At least in the case
 * of dynamic shared memory, we can't simply use malloc() or even
 * relatively thin wrappers like palloc() which sit on top of it, because
 * no allocator built into the operating system will deal with relative
 * pointers.  In the future, we may find other cases in which greater
 * control over our own memory management seems desirable.
 *
 * A FreePageManager keeps track of which 4kB pages of memory are currently
 * unused from the point of view of some higher-level memory allocator.
 * Unlike a user-facing allocator such as palloc(), a FreePageManager can
 * only allocate and free in units of whole pages, and freeing an
 * allocation can only be done given knowledge of its length in pages.
 * (An illustrative usage sketch follows this comment.)
 *
 * Since a free page manager has only a fixed amount of dedicated memory,
 * and since there is no underlying allocator, it uses the free pages
 * it is given to manage to store its bookkeeping data.  It keeps multiple
 * freelists of runs of pages, sorted by the size of the run; the head of
 * each freelist is stored in the FreePageManager itself, and the first
 * page of each run contains a relative pointer to the next run.  See
 * FreePageManagerGetInternal for more details on how the freelists are
 * managed.
 *
 * To avoid memory fragmentation, it's important to consolidate adjacent
 * spans of pages whenever possible; otherwise, large allocation requests
 * might not be satisfied even when sufficient contiguous space is
 * available.  Therefore, in addition to the freelists, we maintain an
 * in-memory btree of free page ranges ordered by page number.  If a
 * range being freed precedes or follows a range that is already free,
 * the existing range is extended; if it exactly bridges the gap between
 * free ranges, then the two existing ranges are consolidated with the
 * newly-freed range to form one great big range of free pages.
 *
 * When there is only one range of free pages, the btree is trivial and
 * is stored within the FreePageManager proper; otherwise, pages are
 * allocated from the area under management as needed.  Even in cases
 * where memory fragmentation is very severe, only a tiny fraction of
 * the pages under management are consumed by this btree.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/freepage.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "lib/stringinfo.h"
#include "miscadmin.h"

#include "utils/freepage.h"
#include "utils/relptr.h"
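/*
 * Illustrative usage sketch, not part of the original file.  The guard
 * symbol FPM_USAGE_EXAMPLE is hypothetical, and the sketch assumes the
 * FreePageManager fits within the first page of the managed extent.  A
 * caller embeds the manager at the start of the memory to be managed,
 * donates the remaining whole pages, and must remember the length of each
 * allocation, since FreePageManagerPut() requires it.
 */
#ifdef FPM_USAGE_EXAMPLE
static void
fpm_usage_example(char *base, Size total_pages)
{
	FreePageManager *fpm = (FreePageManager *) base;
	Size		first_page;

	/* 'base' is the address to which all relative pointers are relative. */
	FreePageManagerInitialize(fpm, base);

	/* Page 0 holds the manager itself; donate the rest. */
	FreePageManagerPut(fpm, 1, total_pages - 1);

	/* Allocate a 4-page run; the manager reports only a page number. */
	if (FreePageManagerGet(fpm, 4, &first_page))
	{
		char	   *p = fpm_page_to_pointer(base, first_page);

		/* ... use the 16kB of memory at 'p' ... */
		(void) p;

		/* Freeing requires the caller-tracked length. */
		FreePageManagerPut(fpm, first_page, 4);
	}
}
#endif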
/* Magic numbers to identify various page types */
#define FREE_PAGE_SPAN_LEADER_MAGIC		0xea4020f0
#define FREE_PAGE_LEAF_MAGIC			0x98eae728
#define FREE_PAGE_INTERNAL_MAGIC		0x19aa32c9
/* Doubly linked list of spans of free pages; stored in first page of span. */
struct FreePageSpanLeader
{
	int			magic;			/* always FREE_PAGE_SPAN_LEADER_MAGIC */
	Size		npages;			/* number of pages in span */
	RelptrFreePageSpanLeader prev;
	RelptrFreePageSpanLeader next;
};
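/*
 * Illustrative sketch, not part of the original file (the guard symbol
 * FPM_USAGE_EXAMPLE is hypothetical): counting the spans on one freelist.
 * Because each span stores its own list links in its first page, walking
 * the list touches only pages the manager already owns; relptr_access()
 * turns each relative pointer back into an ordinary address.
 */
#ifdef FPM_USAGE_EXAMPLE
static Size
fpm_count_freelist_spans(char *base, FreePageSpanLeader *head)
{
	FreePageSpanLeader *span = head;
	Size		count = 0;

	while (span != NULL)
	{
		Assert(span->magic == FREE_PAGE_SPAN_LEADER_MAGIC);
		++count;
		span = relptr_access(base, span->next);
	}

	return count;
}
#endif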
/* Common header for btree leaf and internal pages. */
typedef struct FreePageBtreeHeader
{
	int			magic;			/* FREE_PAGE_LEAF_MAGIC or
								 * FREE_PAGE_INTERNAL_MAGIC */
	Size		nused;			/* number of items used */
	RelptrFreePageBtree parent; /* uplink */
} FreePageBtreeHeader;
/* Internal key; points to next level of btree. */
typedef struct FreePageBtreeInternalKey
{
	Size		first_page;		/* low bound for keyspace covered by child */
	RelptrFreePageBtree child;	/* downlink */
} FreePageBtreeInternalKey;
/* Leaf key; no payload data. */
typedef struct FreePageBtreeLeafKey
{
	Size		first_page;		/* first page in span */
	Size		npages;			/* number of pages in span */
} FreePageBtreeLeafKey;
/* Work out how many keys will fit on a page. */
#define FPM_ITEMS_PER_INTERNAL_PAGE \
	((FPM_PAGE_SIZE - sizeof(FreePageBtreeHeader)) / \
	 sizeof(FreePageBtreeInternalKey))
#define FPM_ITEMS_PER_LEAF_PAGE \
	((FPM_PAGE_SIZE - sizeof(FreePageBtreeHeader)) / \
	 sizeof(FreePageBtreeLeafKey))
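/*
 * Illustrative calculation, not from the original file: on a typical
 * 64-bit build, FPM_PAGE_SIZE is 4096 bytes and FreePageBtreeHeader
 * occupies 24 bytes (int plus padding, Size, relative pointer); both key
 * types are 16 bytes, so either kind of btree page holds
 * (4096 - 24) / 16 = 254 keys.
 */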
/* A btree page of either sort */
struct FreePageBtree
{
	FreePageBtreeHeader hdr;
	union
	{
		FreePageBtreeInternalKey internal_key[FPM_ITEMS_PER_INTERNAL_PAGE];
		FreePageBtreeLeafKey leaf_key[FPM_ITEMS_PER_LEAF_PAGE];
	}			u;
};
/* Results of a btree search */
typedef struct FreePageBtreeSearchResult
{
	FreePageBtree *page;
	Size		index;
	bool		found;
	unsigned	split_pages;
} FreePageBtreeSearchResult;
/* Helper functions */
static void FreePageBtreeAdjustAncestorKeys(FreePageManager *fpm,
											FreePageBtree *btp);
static Size FreePageBtreeCleanup(FreePageManager *fpm);
static void FreePageBtreeConsolidate(FreePageManager *fpm, FreePageBtree *btp);
static FreePageBtree *FreePageBtreeFindLeftSibling(char *base,
												   FreePageBtree *btp);
static FreePageBtree *FreePageBtreeFindRightSibling(char *base,
													FreePageBtree *btp);
static Size FreePageBtreeFirstKey(FreePageBtree *btp);
static FreePageBtree *FreePageBtreeGetRecycled(FreePageManager *fpm);
static void FreePageBtreeInsertInternal(char *base, FreePageBtree *btp,
										Size index, Size first_page,
										FreePageBtree *child);
static void FreePageBtreeInsertLeaf(FreePageBtree *btp, Size index,
									Size first_page, Size npages);
static void FreePageBtreeRecycle(FreePageManager *fpm, Size pageno);
static void FreePageBtreeRemove(FreePageManager *fpm, FreePageBtree *btp,
								Size index);
static void FreePageBtreeRemovePage(FreePageManager *fpm, FreePageBtree *btp);
static void FreePageBtreeSearch(FreePageManager *fpm, Size first_page,
								FreePageBtreeSearchResult *result);
static Size FreePageBtreeSearchInternal(FreePageBtree *btp, Size first_page);
static Size FreePageBtreeSearchLeaf(FreePageBtree *btp, Size first_page);
static FreePageBtree *FreePageBtreeSplitPage(FreePageManager *fpm,
											 FreePageBtree *btp);
static void FreePageBtreeUpdateParentPointers(char *base, FreePageBtree *btp);
static void FreePageManagerDumpBtree(FreePageManager *fpm, FreePageBtree *btp,
									 FreePageBtree *parent, int level,
									 StringInfo buf);
static void FreePageManagerDumpSpans(FreePageManager *fpm,
									 FreePageSpanLeader *span,
									 Size expected_pages, StringInfo buf);
static bool FreePageManagerGetInternal(FreePageManager *fpm, Size npages,
									   Size *first_page);
static Size FreePageManagerPutInternal(FreePageManager *fpm, Size first_page,
									   Size npages, bool soft);
static void FreePagePopSpanLeader(FreePageManager *fpm, Size pageno);
static void FreePagePushSpanLeader(FreePageManager *fpm, Size first_page,
								   Size npages);
static Size FreePageManagerLargestContiguous(FreePageManager *fpm);
static void FreePageManagerUpdateLargest(FreePageManager *fpm);
#ifdef FPM_EXTRA_ASSERTS
static size_t sum_free_pages(FreePageManager *fpm);
#endif
/*
 * Initialize a new, empty free page manager.
 *
 * 'fpm' should reference caller-provided memory large enough to contain a
 * FreePageManager.  We'll initialize it here.
 *
 * 'base' is the address to which all pointers are relative.  When managing
 * a dynamic shared memory segment, it should normally be the base of the
 * segment.  When managing backend-private memory, it can be either NULL or,
 * if managing a single contiguous extent of memory, the start of that extent.
 */
#ifdef FPM_EXTRA_ASSERTS
	fpm->free_pages = 0;
#endif
/*
 * Allocate a run of pages of the given length from the free page manager.
 * The return value indicates whether we were able to satisfy the request;
 * if true, the first page of the allocation is stored in *first_page.
 */
	Size		contiguous_pages;
	/*
	 * It's a bit counterintuitive, but allocating pages can actually create
	 * opportunities for cleanup that create larger ranges.  We might pull a
	 * key out of the btree that enables the item at the head of the btree
	 * recycle list to be inserted; and then if there are more items behind
	 * it, one of those might cause two currently-separated ranges to merge,
	 * creating a single range of contiguous pages larger than any that
	 * existed previously.  It might be worth trying to improve the cleanup
	 * algorithm to avoid such corner cases, but for now we just notice the
	 * condition and do the appropriate reporting.
	 */
	/*
	 * FreePageManagerGetInternal may have set contiguous_pages_dirty.
	 * Recompute contiguous_pages if so.
	 */
#ifdef FPM_EXTRA_ASSERTS
	if (result)
	{
		Assert(fpm->free_pages >= npages);
		fpm->free_pages -= npages;
	}
	Assert(fpm->free_pages == sum_free_pages(fpm));
#endif
#ifdef FPM_EXTRA_ASSERTS
			sum_free_pages_recurse(fpm, child, sum);

	/* Count the spans by scanning the freelists. */
		} while (candidate != NULL);

	/* Count btree internal pages. */
		sum_free_pages_recurse(fpm, root, &sum);

	/* Count the recycle list. */
/*
 * Compute the size of the largest run of pages that the user could
 * successfully get.
 */
			if (candidate->npages > largest)
				largest = candidate->npages;
		} while (candidate != NULL);
/*
 * Recompute the size of the largest run of pages that the user could
 * successfully get, if it has been marked dirty.
 */
/*
 * Transfer a run of pages to the free page manager.
 */
	Size		contiguous_pages;

	/* Record the new pages. */

	/*
	 * If the new range we inserted into the page manager was contiguous with
	 * an existing range, it may have opened up cleanup opportunities.
	 */
	if (contiguous_pages > npages)
	{
		Size		cleanup_contiguous_pages;

		if (cleanup_contiguous_pages > contiguous_pages)
			contiguous_pages = cleanup_contiguous_pages;
	}

	/* See if we now have a new largest chunk. */

	/*
	 * The earlier call to FreePageManagerPutInternal may have set
	 * contiguous_pages_dirty if it needed to allocate internal pages, so
	 * recompute contiguous_pages if necessary.
	 */

#ifdef FPM_EXTRA_ASSERTS
	fpm->free_pages += npages;
	Assert(fpm->free_pages == sum_free_pages(fpm));
#endif
/*
 * Produce a debugging dump of the state of a free page manager.
 */
	bool		dumped_any_freelist = false;

	/* Initialize output buffer. */

	/* Dump general stuff. */

	/* Dump btree recycle list. */

	/* Dump free lists. */
			if (!dumped_any_freelist)
			{
				dumped_any_freelist = true;
			}

	/* And return result to caller. */
/*
 * The first_page value stored at index zero in any non-root page must match
 * the first_page value stored in its parent at the index which points to that
 * page.  So when the value stored at index zero in a btree page changes, we've
 * got to walk up the tree adjusting ancestor keys until we reach an ancestor
 * where that key isn't index zero.  This function should be called after
 * updating the first key on the target page; it will propagate the change
 * upward as far as needed.
 *
 * We assume here that the first key on the page has not changed enough to
 * require changes in the ordering of keys on its ancestor pages.  Thus,
 * if we search the parent page for the first key greater than or equal to
 * the first key on the current page, the downlink to this page will be either
 * the exact index returned by the search (if the first key decreased)
 * or one less (if the first key increased).
 */
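/*
 * Worked example, not from the original file: suppose a leaf's keys begin
 * {100, 200, 300} and the parent's downlink to it carries first_page = 100.
 * If the leaf's first key drops to 90, searching the parent for the first
 * key >= 90 lands exactly on the downlink; if it rises to 110, the search
 * returns the position one past the downlink, so we look at index - 1.
 */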
	/* This might be either a leaf or an internal page. */

	/* Loop until we find an ancestor that does not require adjustment. */

		/* Key is either at index s or index s-1; figure out which. */

#ifdef USE_ASSERT_CHECKING
		/* Debugging double-check. */
		Assert(s < parent->hdr.nused);
#endif

		/* Update the parent key. */

		/*
		 * If this is the first key in the parent, go up another level; else
		 * we're done.
		 */
/*
 * Attempt to reclaim space from the free-page btree.  The return value is
 * the largest range of contiguous pages created by the cleanup operation.
 */
	Size		max_contiguous_pages = 0;

	/* Attempt to shrink the depth of the btree. */

		/* If the root contains only one key, reduce depth by one. */
		if (root->hdr.nused == 1)
		{
			/* Shrink depth of tree by one. */

			/* If root is a leaf, convert only entry to singleton range. */

			/* If root is an internal page, make only child the root. */
		}
		else if (root->hdr.nused == 2 &&
				 root->hdr.magic == FREE_PAGE_LEAF_MAGIC)
		{
			Size		end_of_first;
			Size		start_of_second;

			end_of_first = root->u.leaf_key[0].first_page +
				root->u.leaf_key[0].npages;
			start_of_second = root->u.leaf_key[1].first_page;

			if (end_of_first + 1 == start_of_second)
			{
				Size		root_page = fpm_pointer_to_page(base, root);

				if (end_of_first == root_page)
					fpm->singleton_npages = root->u.leaf_key[0].npages +
						root->u.leaf_key[1].npages + 1;
			}

			Assert(max_contiguous_pages == 0);

			/* Whether it worked or not, it's time to stop. */
		}
		else
		{
			/* Nothing more to do.  Stop. */
		}

	/*
	 * Attempt to free recycled btree pages.  We skip this if releasing the
	 * recycled page would require a btree page split, because the page we're
	 * trying to recycle would be consumed by the split, which would be
	 * counterproductive.
	 *
	 * We also currently only ever attempt to recycle the first page on the
	 * list; that could be made more aggressive, but it's not clear that the
	 * complexity would be worthwhile.
	 */
		Size		contiguous_pages;

		if (contiguous_pages == 0)
			break;

		if (contiguous_pages > max_contiguous_pages)
			max_contiguous_pages = contiguous_pages;

	return max_contiguous_pages;
/*
 * Consider consolidating the given page with its left or right sibling,
 * if it's fairly empty.
 */

	/*
	 * We only try to consolidate pages that are less than a third full.  We
	 * could be more aggressive about this, but that might risk performing
	 * consolidation only to end up splitting again shortly thereafter.  Since
	 * the btree should be very small compared to the space under management,
	 * our goal isn't so much to ensure that it always occupies the absolutely
	 * smallest possible number of pages as to reclaim pages before things get
	 * too egregiously out of hand.
	 */

	/*
	 * If we can fit our right sibling's keys onto this page, consolidate.
	 */

	/*
	 * If we can fit our keys onto our left sibling's page, consolidate.  In
	 * this case, we move our keys onto the other page rather than vice versa,
	 * to avoid having to adjust ancestor keys.
	 */
/*
 * Find the passed page's left sibling; that is, the page at the same level
 * of the tree whose keyspace immediately precedes ours.
 */

	/* Move up until we can move left. */

			return NULL;		/* we were passed the leftmost page */
/*
 * Find the passed page's right sibling; that is, the page at the same level
 * of the tree whose keyspace immediately follows ours.
 */

	/* Move up until we can move right. */

			return NULL;		/* we were passed the rightmost page */
/*
 * Get the first key on a btree page.
 */

/*
 * Get a page from the btree recycle list for use as a btree page.
 */

/*
 * Insert an item into an internal page.
 */

/*
 * Insert an item into a leaf page.
 */

/*
 * Put a page on the btree recycle list.
 */

/*
 * Remove an item from the btree at the given position on the given page.
 */
	/* When last item is removed, extirpate entire page from btree. */

	/* Physically remove the key from the page. */

	/* If we just removed the first key, adjust ancestor keys. */

	/* Consider whether to consolidate this page with a sibling. */
/*
 * Remove a page from the btree.  Caller is responsible for having relocated
 * any keys from this page that are still wanted.  The page is placed on the
 * recycle list.
 */
	/* Find parent page. */

		/* We are removing the root page. */

	/*
	 * If the parent contains only one item, we need to remove it as well.
	 */

	/* Find and remove the downlink. */

	/* Recycle the page. */

	/* Adjust ancestor keys if needed. */

	/* Consider whether to consolidate the parent with a sibling. */
/*
 * Search the btree for an entry for the given first page and initialize
 * *result with the results of the search.  result->page and result->index
 * indicate either the position of an exact match or the position at which
 * the new key should be inserted.  result->found is true for an exact match,
 * otherwise false.  result->split_pages will contain the number of additional
 * btree pages that will be needed when performing a split to insert a key.
 * Except as described above, the contents of fields in the result object are
 * undefined on return.
 */
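/*
 * Worked example, not from the original file: if the leaf level contains
 * spans beginning at pages {10, 20, 40} and we search for first_page = 30,
 * result->found is false and result->index identifies the slot holding 40,
 * which is exactly where a key for page 30 would be inserted.  Searching
 * for 20 instead sets result->found = true with result->index at that key.
 */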
	/* If the btree is empty, there's nothing to find. */
		result->page = NULL;
		result->found = false;

	/* Descend until we hit a leaf. */

		/*
		 * If we found an exact match we descend directly.  Otherwise, we
		 * descend into the child to the left if possible so that we can find
		 * the insertion point at that child's high end.
		 */
		if (!found_exact && index > 0)
			--index;

			/* Track required split depth for leaf insert. */

		/* Descend to appropriate child page. */

		/* Track required split depth for leaf insert. */

	/* Search leaf page. */

	/* Assemble results. */
/*
 * Search an internal page for the first key greater than or equal to a given
 * page number.  Returns the index of that key, or one greater than the number
 * of keys on the page if none.
 */
		Size		mid = (low + high) / 2;
		Size		val = btp->u.internal_key[mid].first_page;

		if (first_page == val)
			return mid;
		else if (first_page < val)
			high = mid;
		else
			low = mid + 1;
/*
 * Search a leaf page for the first key greater than or equal to a given
 * page number.  Returns the index of that key, or one greater than the number
 * of keys on the page if none.
 */
		Size		mid = (low + high) / 2;
		Size		val = btp->u.leaf_key[mid].first_page;

		if (first_page == val)
			return mid;
		else if (first_page < val)
			high = mid;
		else
			low = mid + 1;
/*
 * Allocate a new btree page and move half the keys from the provided page
 * to the new page.  Caller is responsible for making sure that there's a
 * page available from fpm->btree_recycle.  Returns a pointer to the new page,
 * to which caller must add a downlink.
 */
/*
 * When internal pages are split or merged, the parent pointers of their
 * children must be updated.
 */
/*
 * Debugging dump of btree data.
 */
		if (parent != check_parent)
/*
 * Debugging dump of free-span data.
 */
	while (span != NULL)
	{
		if (span->npages != expected_pages)
/*
 * This function allocates a run of pages of the given length from the free
 * page manager.
 */
	Size		victim_page = 0;	/* placate compiler */

	/*
	 * Search for a free span.
	 *
	 * Right now, we use a simple best-fit policy here, but it's possible for
	 * this to result in memory fragmentation if we're repeatedly asked to
	 * allocate chunks just a little smaller than what we have available.
	 * Hopefully, this is unlikely, because we expect most requests to be
	 * single pages or superblock-sized chunks -- but no policy can be optimal
	 * under all circumstances unless it has knowledge of future allocation
	 * patterns.
	 */
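	/*
	 * Illustrative example, not from the original file: with free spans of
	 * 5, 8, and 64 pages, a best-fit request for 6 pages takes the 8-page
	 * span (the smallest one that fits), leaving a 2-page remainder, rather
	 * than carving up the 64-page span.
	 */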
		/* Skip empty freelists. */

		/*
		 * All of the freelists except the last one contain only items of a
		 * single size, so we just take the first one.  But the final free
		 * list contains everything too big for any of the other lists, so we
		 * need to search the list.
		 */
				if (candidate->npages >= npages && (victim == NULL ||
													victim->npages > candidate->npages))
				{
					victim = candidate;
					if (victim->npages == npages)
						break;
				}
			} while (candidate != NULL);
	/* If we didn't find an allocatable span, return failure. */

	/* Remove span from free list. */
	/* Decide whether we might be invalidating contiguous_pages. */

		/*
		 * The victim span came from the oversized freelist, and had the same
		 * size as the longest span.  There may or may not be another one of
		 * the same size, so contiguous_pages must be recomputed just to be
		 * safe.
		 */

		/*
		 * The victim span came from a fixed-size freelist, and it was the
		 * list for spans of the same size as the current longest span, and
		 * the list is now empty after removing the victim.  So
		 * contiguous_pages must be recomputed without a doubt.
		 */
	/*
	 * If we haven't initialized the btree yet, the victim must be the single
	 * span stored within the FreePageManager itself.  Otherwise, we need to
	 * update the btree.
	 */

	/*
	 * If the span we found is exactly the right size, remove it from the
	 * btree completely.  Otherwise, adjust the btree entry to reflect the
	 * still-unallocated portion of the span, and put that portion on the
	 * appropriate free list.
	 */
	if (victim->npages == npages)
		FreePageBtreeRemove(fpm, result.page, result.index);
	else
	{
		FreePageBtreeLeafKey *key;

		/* Adjust btree to reflect remaining pages. */
		key = &result.page->u.leaf_key[result.index];
		key->first_page += npages;
		key->npages -= npages;
		if (result.index == 0)
			FreePageBtreeAdjustAncestorKeys(fpm, result.page);

		/* Put the unallocated pages back on the appropriate free list. */
		FreePagePushSpanLeader(fpm, *first_page + npages,
							   victim->npages - npages);
	}
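	/*
	 * Worked example, not from the original file: allocating 3 pages from a
	 * 10-page span starting at page 100 returns pages 100-102; the btree key
	 * is adjusted in place to {first_page = 103, npages = 7}, and that
	 * 7-page remainder goes back on the appropriate freelist.
	 */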
	/* Return results to caller. */
/*
 * Put a range of pages into the btree and freelists, consolidating it with
 * existing free spans just before and/or after it.  If 'soft' is true,
 * only perform the insertion if it can be done without allocating new btree
 * pages; if false, do it always.  Returns 0 if the soft flag caused the
 * insertion to be skipped, or otherwise the size of the contiguous span
 * created by the insertion.  This may be larger than npages if we're able
 * to consolidate with an adjacent range.
 */
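/*
 * Worked example, not from the original file: if pages 10-19 and 30-39 are
 * already free and the caller puts back pages 20-29, the three ranges
 * consolidate into a single 30-page span, so the return value is 30 even
 * though npages was only 10.
 */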
	/* We can store a single free span without initializing the btree. */

		/* Don't have a span yet; store this one. */

		/* New span immediately follows sole existing span. */

		/* New span immediately precedes sole existing span. */

		/* Not contiguous; we need to initialize the btree. */
			if (soft)
				return 0;		/* Should not allocate if soft. */

			/* We'd better be able to get a page from the existing range. */
			if (!FreePageManagerGetInternal(fpm, 1, &root_page))
				elog(FATAL, "free page manager btree is corrupt");

			/* Create the btree and move the preexisting range into it. */
			root->hdr.nused = 1;

			/*
			 * Corner case: it may be that the btree root took the very last
			 * free page.  In that case, the sole btree entry covers a zero
			 * page run, which is invalid.  Overwrite it with the entry we're
			 * trying to insert and get out.
			 */
			if (root->u.leaf_key[0].npages == 0)
			{
				root->u.leaf_key[0].first_page = first_page;
				root->u.leaf_key[0].npages = npages;
			}

		/* Fall through to insert the new key. */
	/* Search the btree. */
	if (result.index > 0)
		prevkey = &result.page->u.leaf_key[result.index - 1];

	nindex = result.index;

	/* Consolidate with the previous entry if possible. */
	if (prevkey != NULL && prevkey->first_page + prevkey->npages >= first_page)
	{
		bool		remove_next = false;
		Size		result;

		/* Check whether we can *also* consolidate with the following entry. */
		if (nextkey != NULL &&
			prevkey->first_page + prevkey->npages >= nextkey->first_page)
			remove_next = true;

		/* Put the span on the correct freelist and save size. */
		result = prevkey->npages;

		/*
		 * If we consolidated with both the preceding and following entries,
		 * we must remove the following entry.  We do this last, because
		 * removing an element from the btree may invalidate pointers we hold
		 * into the current data structure.
		 *
		 * NB: The btree is technically in an invalid state at this point
		 * because we've already updated prevkey to cover the same key space
		 * as nextkey.  FreePageBtreeRemove() shouldn't notice that, though.
		 */
	}

	/* Consolidate with the next entry if possible. */
	if (nextkey != NULL && first_page + npages >= nextkey->first_page)
	{
		Size		newpages;

		/* Compute new size for span. */

		/* Put span on correct free list. */

		/* Update key in place. */
		nextkey->first_page = first_page;
		nextkey->npages = newpages;

		/* If reducing first key on page, ancestors might need adjustment. */
	}
	/* Split leaf page and as many of its ancestors as necessary. */

		/*
		 * NB: We could consider various coping strategies here to avoid a
		 * split; most obviously, if np != result.page, we could target that
		 * page instead.  More complicated shuffling strategies could be
		 * possible as well; basically, unless every single leaf page is 100%
		 * full, we can jam this key in there if we try hard enough.  It's
		 * unlikely that trying that hard is worthwhile, but it's possible we
		 * might need to make more than no effort.  For now, we just do the
		 * easy thing, which is nothing.
		 */
		/* If this is a soft insert, it's time to give up. */

		/* Check whether we need to allocate more btree pages to split. */

			/*
			 * Allocate the required number of pages and split each one in
			 * turn.  This should never fail, because if we've got enough
			 * spans of free pages kicking around that we need additional
			 * storage space just to remember them all, then we should
			 * certainly have enough to expand the btree, which should only
			 * ever use a tiny number of pages compared to the number under
			 * management.  If it does, something's badly screwed up.
			 */
			for (i = 0; i < pages_needed; ++i)
			{
				Size		pageno;

				if (!FreePageManagerGetInternal(fpm, 1, &pageno))
					elog(FATAL, "free page manager btree is corrupt");
				FreePageBtreeRecycle(fpm, pageno);
			}
			/*
			 * The act of allocating pages to recycle may have invalidated the
			 * results of our previous btree research, so repeat it.  (We
			 * could recheck whether any of our split-avoidance strategies
			 * that were not viable before now are, but it hardly seems
			 * worthwhile, so we don't bother.  Consolidation can't be
			 * possible now if it wasn't previously.)
			 */

			/*
			 * The act of allocating pages for use in constructing our btree
			 * should never cause any page to become more full, so the new
			 * split depth should be no greater than the old one, and perhaps
			 * less if we fortuitously allocated a chunk that freed up a slot
			 * on the page we need to update.
			 */
	/* If we still need to perform a split, do it. */

			/* Identify parent page, which must receive downlink. */

			/* Split the page - downlink not added yet. */

			/*
			 * At this point in the loop, we're always carrying a pending
			 * insertion.  On the first pass, it's the actual key we're
			 * trying to insert; on subsequent passes, it's the downlink
			 * that needs to be added as a result of the split performed
			 * during the previous loop iteration.  Since we've just split
			 * the page, there's definitely room on one of the two resulting
			 * pages.
			 */
				insert_into = first_page < newsibling->u.leaf_key[0].first_page ?
					split_target : newsibling;
				if (index == 0 && insert_into == split_target)
					FreePageBtreeAdjustAncestorKeys(fpm, split_target);

				insert_into = first_page < newsibling->u.internal_key[0].first_page ?
					split_target : newsibling;
				if (index == 0 && insert_into == split_target)
					FreePageBtreeAdjustAncestorKeys(fpm, split_target);

			/* If the page we just split has no parent, split the root. */

			/* If the parent page isn't full, insert the downlink. */

			/* The parent also needs to be split, so loop around. */
			split_target = parent;

	/*
	 * The loop above did the insert, so we just need to update the free
	 * list, and we're done.
	 */
	/* Physically add the key to the page. */

	/* If new first key on page, ancestors might need adjustment. */
	if (result.index == 0)
		FreePageBtreeAdjustAncestorKeys(fpm, result.page);

	/* Put it on the free list. */
/*
 * Remove a FreePageSpanLeader from the linked-list that contains it, either
 * because we're changing the size of the span, or because we're allocating it.
 */

/*
 * Initialize a new FreePageSpanLeader and put it on the appropriate free list.
 */