PostgreSQL Source Code (git master)
src/include/access/nbtree.h
1/*-------------------------------------------------------------------------
2 *
3 * nbtree.h
4 * header file for postgres btree access method implementation.
5 *
6 *
7 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
9 *
10 * src/include/access/nbtree.h
11 *
12 *-------------------------------------------------------------------------
13 */
14#ifndef NBTREE_H
15#define NBTREE_H
16
17#include "access/amapi.h"
18#include "access/itup.h"
19#include "access/sdir.h"
20#include "catalog/pg_am_d.h"
21#include "catalog/pg_class.h"
22#include "catalog/pg_index.h"
23#include "lib/stringinfo.h"
24#include "storage/bufmgr.h"
25#include "storage/dsm.h"
26#include "storage/shm_toc.h"
27#include "utils/skipsupport.h"
28
29/* There's room for a 16-bit vacuum cycle ID in BTPageOpaqueData */
30 typedef uint16 BTCycleId;
31
32/*
33 * BTPageOpaqueData -- At the end of every page, we store a pointer
34 * to both siblings in the tree. This is used to do forward/backward
35 * index scans. The next-page link is also critical for recovery when
36 * a search has navigated to the wrong page due to concurrent page splits
37 * or deletions; see src/backend/access/nbtree/README for more info.
38 *
39 * In addition, we store the page's btree level (counting upwards from
40 * zero at a leaf page) as well as some flag bits indicating the page type
41 * and status. If the page is deleted, a BTDeletedPageData struct is stored
42 * in the page's tuple area, while a standard BTPageOpaqueData struct is
43 * stored in the page special area.
44 *
45 * We also store a "vacuum cycle ID". When a page is split while VACUUM is
46 * processing the index, a nonzero value associated with the VACUUM run is
47 * stored into both halves of the split page. (If VACUUM is not running,
48 * both pages receive zero cycleids.) This allows VACUUM to detect whether
49 * a page was split since it started, with a small probability of false match
50 * if the page was last split some exact multiple of MAX_BT_CYCLE_ID VACUUMs
51 * ago. Also, during a split, the BTP_SPLIT_END flag is cleared in the left
52 * (original) page, and set in the right page, but only if the next page
53 * to its right has a different cycleid.
54 *
55 * NOTE: the BTP_LEAF flag bit is redundant since level==0 could be tested
56 * instead.
57 *
58 * NOTE: the btpo_level field used to be a union type in order to allow
59 * deleted pages to store a 32-bit safexid in the same field. We now store
60 * 64-bit/full safexid values using BTDeletedPageData instead.
61 */
62
63 typedef struct BTPageOpaqueData
64{
65 BlockNumber btpo_prev; /* left sibling, or P_NONE if leftmost */
66 BlockNumber btpo_next; /* right sibling, or P_NONE if rightmost */
67 uint32 btpo_level; /* tree level --- zero for leaf pages */
68 uint16 btpo_flags; /* flag bits, see below */
69 BTCycleId btpo_cycleid; /* vacuum cycle ID of latest split */
70 } BTPageOpaqueData;
71
72 typedef BTPageOpaqueData *BTPageOpaque;
73
74 #define BTPageGetOpaque(page) ((BTPageOpaque) PageGetSpecialPointer(page))
75
76/* Bits defined in btpo_flags */
77 #define BTP_LEAF (1 << 0) /* leaf page, i.e. not internal page */
78 #define BTP_ROOT (1 << 1) /* root page (has no parent) */
79 #define BTP_DELETED (1 << 2) /* page has been deleted from tree */
80 #define BTP_META (1 << 3) /* meta-page */
81 #define BTP_HALF_DEAD (1 << 4) /* empty, but still in tree */
82 #define BTP_SPLIT_END (1 << 5) /* rightmost page of split group */
83 #define BTP_HAS_GARBAGE (1 << 6) /* page has LP_DEAD tuples (deprecated) */
84 #define BTP_INCOMPLETE_SPLIT (1 << 7) /* right sibling's downlink is missing */
85 #define BTP_HAS_FULLXID (1 << 8) /* contains BTDeletedPageData */
86
87/*
88 * The max allowed value of a cycle ID is a bit less than 64K. This is
89 * for convenience of pg_filedump and similar utilities: we want to use
90 * the last 2 bytes of special space as an index type indicator, and
91 * restricting cycle ID lets btree use that space for vacuum cycle IDs
92 * while still allowing index type to be identified.
93 */
94 #define MAX_BT_CYCLE_ID 0xFF7F
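A hedged illustration of the bounded counter described above (editor's sketch, not part of nbtree.h; the helper name is hypothetical): advancing a cycle ID has to skip zero, which stands for "no VACUUM in progress", and wrap once MAX_BT_CYCLE_ID is reached.

static inline BTCycleId
example_next_cycleid(BTCycleId current)
{
    /* Editor's illustrative sketch, not part of nbtree.h */
    /* wrap back to 1, never 0: a zero cycleid means "no VACUUM running" */
    if (current >= MAX_BT_CYCLE_ID)
        return (BTCycleId) 1;
    return (BTCycleId) (current + 1);
}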
95
96
97/*
98 * The Meta page is always the first page in the btree index.
99 * Its primary purpose is to point to the location of the btree root page.
100 * We also point to the "fast" root, which is the current effective root;
101 * see README for discussion.
102 */
103
104 typedef struct BTMetaPageData
105{
106 uint32 btm_magic; /* should contain BTREE_MAGIC */
107 uint32 btm_version; /* nbtree version (always <= BTREE_VERSION) */
108 BlockNumber btm_root; /* current root location */
109 uint32 btm_level; /* tree level of the root page */
110 BlockNumber btm_fastroot; /* current "fast" root location */
111 uint32 btm_fastlevel; /* tree level of the "fast" root page */
112 /* remaining fields only valid when btm_version >= BTREE_NOVAC_VERSION */
113
114 /* number of deleted, non-recyclable pages during last cleanup */
115 uint32 btm_last_cleanup_num_delpages;
116 /* number of heap tuples during last cleanup (deprecated) */
117 float8 btm_last_cleanup_num_heap_tuples;
118
119 bool btm_allequalimage; /* are all columns "equalimage"? */
120 } BTMetaPageData;
121
122 #define BTPageGetMeta(p) \
123 ((BTMetaPageData *) PageGetContents(p))
124
125/*
126 * The current Btree version is 4. That's what you'll get when you create
127 * a new index.
128 *
129 * Btree version 3 was used in PostgreSQL v11. It is mostly the same as
130 * version 4, but heap TIDs were not part of the keyspace. Index tuples
131 * with duplicate keys could be stored in any order. We continue to
132 * support reading and writing Btree versions 2 and 3, so that they don't
133 * need to be immediately re-indexed at pg_upgrade. In order to get the
134 * new heapkeyspace semantics, however, a REINDEX is needed.
135 *
136 * Deduplication is safe to use when the btm_allequalimage field is set to
137 * true. It's safe to read the btm_allequalimage field on version 3, but
138 * only version 4 indexes make use of deduplication. Even version 4
139 * indexes created on PostgreSQL v12 will need a REINDEX to make use of
140 * deduplication, though, since there is no other way to set
141 * btm_allequalimage to true (pg_upgrade hasn't been taught to set the
142 * metapage field).
143 *
144 * Btree version 2 is mostly the same as version 3. There are two new
145 * fields in the metapage that were introduced in version 3. A version 2
146 * metapage will be automatically upgraded to version 3 on the first
147 * insert to it. INCLUDE indexes cannot use version 2.
148 */
149 #define BTREE_METAPAGE 0 /* first page is meta */
150 #define BTREE_MAGIC 0x053162 /* magic number in metapage */
151 #define BTREE_VERSION 4 /* current version number */
152 #define BTREE_MIN_VERSION 2 /* minimum supported version */
153 #define BTREE_NOVAC_VERSION 3 /* version with all meta fields set */
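As a hedged illustration of how these constants relate (editor's sketch; the helper name is hypothetical, and the real validation in nbtpage.c is more thorough), a caller holding the metapage might sanity-check it like this:

static inline void
example_check_metapage(Page metapg)
{
    /* Editor's illustrative sketch, not part of nbtree.h */
    BTMetaPageData *metad = BTPageGetMeta(metapg);

    if (metad->btm_magic != BTREE_MAGIC)
        elog(ERROR, "not a btree metapage");
    if (metad->btm_version < BTREE_MIN_VERSION ||
        metad->btm_version > BTREE_VERSION)
        elog(ERROR, "unsupported btree version %u", metad->btm_version);
}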
154
155/*
156 * Maximum size of a btree index entry, including its tuple header.
157 *
158 * We actually need to be able to fit three items on every page,
159 * so restrict any one item to 1/3 the per-page available space.
160 *
161 * There are rare cases where _bt_truncate() will need to enlarge
162 * a heap index tuple to make space for a tiebreaker heap TID
163 * attribute, which we account for here.
164 */
165 #define BTMaxItemSize \
166 (MAXALIGN_DOWN((BLCKSZ - \
167 MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
168 MAXALIGN(sizeof(BTPageOpaqueData))) / 3) - \
169 MAXALIGN(sizeof(ItemPointerData)))
170 #define BTMaxItemSizeNoHeapTid \
171 MAXALIGN_DOWN((BLCKSZ - \
172 MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
173 MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
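A hedged sketch of how these limits are typically consulted (editor's illustration; the helper and its message wording are hypothetical, and the real enforcement, with a much more detailed error, happens in _bt_check_third_page()):

static inline void
example_reject_oversized_item(Size itemsz, bool needheaptidspace)
{
    /* Editor's illustrative sketch, not part of nbtree.h */
    /* version 4 indexes must leave room for a heap TID tiebreaker */
    Size        limit = needheaptidspace ? BTMaxItemSize : BTMaxItemSizeNoHeapTid;

    if (itemsz > limit)
        elog(ERROR, "index tuple of size %zu exceeds btree maximum %zu",
             itemsz, limit);
}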
174
175/*
176 * MaxTIDsPerBTreePage is an upper bound on the number of heap TIDs that
177 * may be stored on a btree leaf page. It is used to size the
178 * per-page temporary buffers.
179 *
180 * Note: we don't bother considering per-tuple overheads here to keep
181 * things simple (value is based on how many elements a single array of
182 * heap TIDs must have to fill the space between the page header and
183 * special area). The value is slightly higher (i.e. more conservative)
184 * than necessary as a result, which is considered acceptable.
185 */
186 #define MaxTIDsPerBTreePage \
187 (int) ((BLCKSZ - SizeOfPageHeaderData - sizeof(BTPageOpaqueData)) / \
188 sizeof(ItemPointerData))
189
190/*
191 * The leaf-page fillfactor defaults to 90% but is user-adjustable.
192 * For pages above the leaf level, we use a fixed 70% fillfactor.
193 * The fillfactor is applied during index build and when splitting
194 * a rightmost page; when splitting non-rightmost pages we try to
195 * divide the data equally. When splitting a page that's entirely
196 * filled with a single value (duplicates), the effective leaf-page
197 * fillfactor is 96%, regardless of whether the page is a rightmost
198 * page.
199 */
200 #define BTREE_MIN_FILLFACTOR 10
201 #define BTREE_DEFAULT_FILLFACTOR 90
202 #define BTREE_NONLEAF_FILLFACTOR 70
203 #define BTREE_SINGLEVAL_FILLFACTOR 96
204
205/*
206 * In general, the btree code tries to localize its knowledge about
207 * page layout to a couple of routines. However, we need a special
208 * value to indicate "no page number" in those places where we expect
209 * page numbers. We can use zero for this because we never need to
210 * make a pointer to the metadata page.
211 */
212
213 #define P_NONE 0
214
215/*
216 * Macros to test whether a page is leftmost or rightmost on its tree level,
217 * as well as other state info kept in the opaque data.
218 */
219 #define P_LEFTMOST(opaque) ((opaque)->btpo_prev == P_NONE)
220 #define P_RIGHTMOST(opaque) ((opaque)->btpo_next == P_NONE)
221 #define P_ISLEAF(opaque) (((opaque)->btpo_flags & BTP_LEAF) != 0)
222 #define P_ISROOT(opaque) (((opaque)->btpo_flags & BTP_ROOT) != 0)
223 #define P_ISDELETED(opaque) (((opaque)->btpo_flags & BTP_DELETED) != 0)
224 #define P_ISMETA(opaque) (((opaque)->btpo_flags & BTP_META) != 0)
225 #define P_ISHALFDEAD(opaque) (((opaque)->btpo_flags & BTP_HALF_DEAD) != 0)
226 #define P_IGNORE(opaque) (((opaque)->btpo_flags & (BTP_DELETED|BTP_HALF_DEAD)) != 0)
227 #define P_HAS_GARBAGE(opaque) (((opaque)->btpo_flags & BTP_HAS_GARBAGE) != 0)
228 #define P_INCOMPLETE_SPLIT(opaque) (((opaque)->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0)
229 #define P_HAS_FULLXID(opaque) (((opaque)->btpo_flags & BTP_HAS_FULLXID) != 0)
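A hedged sketch of the usual way these tests combine during a scan (editor's illustration; the helper name is hypothetical, and real callers in nbtsearch.c also deal with buffer pins and locks): half-dead and deleted pages are stepped over by following their right links.

static inline BlockNumber
example_step_right_if_ignorable(Page page)
{
    /* Editor's illustrative sketch, not part of nbtree.h */
    BTPageOpaque opaque = BTPageGetOpaque(page);

    /* scans skip half-dead and deleted pages via their right links */
    if (P_IGNORE(opaque) && !P_RIGHTMOST(opaque))
        return opaque->btpo_next;

    return InvalidBlockNumber;  /* stay on this page */
}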
230
231/*
232 * BTDeletedPageData is the page contents of a deleted page
233 */
234 typedef struct BTDeletedPageData
235{
236 FullTransactionId safexid; /* See BTPageIsRecyclable() */
237 } BTDeletedPageData;
238
239static inline void
240 BTPageSetDeleted(Page page, FullTransactionId safexid)
241{
242 BTPageOpaque opaque;
243 PageHeader header;
244 BTDeletedPageData *contents;
245
246 opaque = BTPageGetOpaque(page);
247 header = ((PageHeader) page);
248
249 opaque->btpo_flags &= ~BTP_HALF_DEAD;
250 opaque->btpo_flags |= BTP_DELETED | BTP_HAS_FULLXID;
251 header->pd_lower = MAXALIGN(SizeOfPageHeaderData) +
252 sizeof(BTDeletedPageData);
253 header->pd_upper = header->pd_special;
254
255 /* Set safexid in deleted page */
256 contents = ((BTDeletedPageData *) PageGetContents(page));
257 contents->safexid = safexid;
258}
259
260static inline FullTransactionId
261 BTPageGetDeleteXid(Page page)
262{
263 BTPageOpaque opaque;
264 BTDeletedPageData *contents;
265
266 /* We only expect to be called with a deleted page */
267 Assert(!PageIsNew(page));
268 opaque = BTPageGetOpaque(page);
269 Assert(P_ISDELETED(opaque));
270
271 /* pg_upgrade'd deleted page -- must be safe to recycle now */
272 if (!P_HAS_FULLXID(opaque))
273 return FirstNormalFullTransactionId;
274
275 /* Get safexid from deleted page */
276 contents = ((BTDeletedPageData *) PageGetContents(page));
277 return contents->safexid;
278}
279
280/*
281 * Is an existing page recyclable?
282 *
283 * This exists to centralize the policy on which deleted pages are now safe to
284 * re-use. However, _bt_pendingfsm_finalize() duplicates some of the same
285 * logic because it doesn't work directly with pages -- keep the two in sync.
286 *
287 * Note: PageIsNew() pages are always safe to recycle, but we can't deal with
288 * them here (caller is responsible for that case themselves). Caller might
289 * well need special handling for new pages anyway.
290 */
291static inline bool
292 BTPageIsRecyclable(Page page, Relation heaprel)
293{
294 BTPageOpaque opaque;
295
296 Assert(!PageIsNew(page));
297 Assert(heaprel != NULL);
298
299 /* Recycling okay iff page is deleted and safexid is old enough */
300 opaque = BTPageGetOpaque(page);
301 if (P_ISDELETED(opaque))
302 {
303 FullTransactionId safexid = BTPageGetDeleteXid(page);
304
305 /*
306 * The page was deleted, but when? If it was just deleted, a scan
307 * might have seen the downlink to it, and will read the page later.
308 * As long as that can happen, we must keep the deleted page around as
309 * a tombstone.
310 *
311 * To figure that out, check whether the deletion XID could still be visible to
312 * anyone. If not, then no scan that's still in progress could have
313 * seen its downlink, and we can recycle it.
314 */
315 return GlobalVisCheckRemovableFullXid(heaprel, safexid);
316 }
317
318 return false;
319}
320
321/*
322 * BTVacState and BTPendingFSM are private nbtree.c state used during VACUUM.
323 * They are exported for use by page deletion related code in nbtpage.c.
324 */
325 typedef struct BTPendingFSM
326{
327 BlockNumber target; /* Page deleted by current VACUUM */
328 FullTransactionId safexid; /* Page's BTDeletedPageData.safexid */
329 } BTPendingFSM;
330
331 typedef struct BTVacState
332{
333 IndexVacuumInfo *info;
334 IndexBulkDeleteResult *stats;
335 IndexBulkDeleteCallback callback;
336 void *callback_state;
337 BTCycleId cycleid;
338 MemoryContext pagedelcontext;
339
340 /*
341 * _bt_pendingfsm_finalize() state
342 */
343 int bufsize; /* pendingpages space (in # elements) */
344 int maxbufsize; /* max bufsize that respects work_mem */
345 BTPendingFSM *pendingpages; /* One entry per newly deleted page */
346 int npendingpages; /* current # valid pendingpages */
347 } BTVacState;
348
349/*
350 * Lehman and Yao's algorithm requires a ``high key'' on every non-rightmost
351 * page. The high key is not a tuple that is used to visit the heap. It is
352 * a pivot tuple (see "Notes on B-Tree tuple format" below for definition).
353 * The high key on a page is required to be greater than or equal to any
354 * other key that appears on the page. If we find ourselves trying to
355 * insert a key that is strictly > high key, we know we need to move right
356 * (this should only happen if the page was split since we examined the
357 * parent page).
358 *
359 * Our insertion algorithm guarantees that we can use the initial least key
360 * on our right sibling as the high key. Once a page is created, its high
361 * key changes only if the page is split.
362 *
363 * On a non-rightmost page, the high key lives in item 1 and data items
364 * start in item 2. Rightmost pages have no high key, so we store data
365 * items beginning in item 1.
366 */
367
368 #define P_HIKEY ((OffsetNumber) 1)
369 #define P_FIRSTKEY ((OffsetNumber) 2)
370 #define P_FIRSTDATAKEY(opaque) (P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY)
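These macros set up the standard loop over a page's data items. A hedged sketch (editor's illustration; hypothetical helper name):

static inline int
example_count_data_items(Page page)
{
    /* Editor's illustrative sketch, not part of nbtree.h */
    BTPageOpaque opaque = BTPageGetOpaque(page);
    OffsetNumber minoff = P_FIRSTDATAKEY(opaque);
    OffsetNumber maxoff = PageGetMaxOffsetNumber(page);

    /* rightmost pages have no high key, so their data starts at item 1 */
    return (maxoff >= minoff) ? (int) (maxoff - minoff + 1) : 0;
}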
371
372/*
373 * Notes on B-Tree tuple format, and key and non-key attributes:
374 *
375 * INCLUDE B-Tree indexes have non-key attributes. These are extra
376 * attributes that may be returned by index-only scans, but do not influence
377 * the order of items in the index (formally, non-key attributes are not
378 * considered to be part of the key space). Non-key attributes are only
379 * present in leaf index tuples whose item pointers actually point to heap
380 * tuples (non-pivot tuples). _bt_check_natts() enforces the rules
381 * described here.
382 *
383 * Non-pivot tuple format (plain/non-posting variant):
384 *
385 * t_tid | t_info | key values | INCLUDE columns, if any
386 *
387 * t_tid points to the heap TID, which is a tiebreaker key column as of
388 * BTREE_VERSION 4.
389 *
390 * Non-pivot tuples complement pivot tuples, which only have key columns.
391 * The sole purpose of pivot tuples is to represent how the key space is
392 * separated. In general, any B-Tree index that has more than one level
393 * (i.e. any index that does not just consist of a metapage and a single
394 * leaf root page) must have some number of pivot tuples, since pivot
395 * tuples are used for traversing the tree. Suffix truncation can omit
396 * trailing key columns when a new pivot is formed, which makes minus
397 * infinity their logical value. Since BTREE_VERSION 4 indexes treat heap
398 * TID as a trailing key column that ensures that all index tuples are
399 * physically unique, it is necessary to represent heap TID as a trailing
400 * key column in pivot tuples, though very often this can be truncated
401 * away, just like any other key column. (Actually, the heap TID is
402 * omitted rather than truncated, since its representation is different to
403 * the non-pivot representation.)
404 *
405 * Pivot tuple format:
406 *
407 * t_tid | t_info | key values | [heap TID]
408 *
409 * We store the number of columns present inside pivot tuples by abusing
410 * their t_tid offset field, since pivot tuples never need to store a real
411 * offset (pivot tuples generally store a downlink in t_tid, though). The
412 * offset field only stores the number of columns/attributes when the
413 * INDEX_ALT_TID_MASK bit is set, which doesn't count the trailing heap
414 * TID column sometimes stored in pivot tuples -- that's represented by
415 * the presence of BT_PIVOT_HEAP_TID_ATTR. The INDEX_ALT_TID_MASK bit in
416 * t_info is always set on BTREE_VERSION 4 pivot tuples, since
417 * BTreeTupleIsPivot() must work reliably on heapkeyspace versions.
418 *
419 * In version 2 or version 3 (!heapkeyspace) indexes, INDEX_ALT_TID_MASK
420 * might not be set in pivot tuples. BTreeTupleIsPivot() won't work
421 * reliably as a result. The number of columns stored is implicitly the
422 * same as the number of columns in the index, just like any non-pivot
423 * tuple. (The number of columns stored should not vary, since suffix
424 * truncation of key columns is unsafe within any !heapkeyspace index.)
425 *
426 * The 12 least significant bits from t_tid's offset number are used to
427 * represent the number of key columns within a pivot tuple. This leaves 4
428 * status bits (BT_STATUS_OFFSET_MASK bits), which are shared by all tuples
429 * that have the INDEX_ALT_TID_MASK bit set (set in t_info) to store basic
430 * tuple metadata. BTreeTupleIsPivot() and BTreeTupleIsPosting() use the
431 * BT_STATUS_OFFSET_MASK bits.
432 *
433 * Sometimes non-pivot tuples also use a representation that repurposes
434 * t_tid to store metadata rather than a TID. PostgreSQL v13 introduced a
435 * new non-pivot tuple format to support deduplication: posting list
436 * tuples. Deduplication merges together multiple equal non-pivot tuples
437 * into a logically equivalent, space efficient representation. A posting
438 * list is an array of ItemPointerData elements. Non-pivot tuples are
439 * merged together to form posting list tuples lazily, at the point where
440 * we'd otherwise have to split a leaf page.
441 *
442 * Posting tuple format (alternative non-pivot tuple representation):
443 *
444 * t_tid | t_info | key values | posting list (TID array)
445 *
446 * Posting list tuples are recognized as such by having the
447 * INDEX_ALT_TID_MASK status bit set in t_info and the BT_IS_POSTING status
448 * bit set in t_tid's offset number. These flags redefine the content of
449 * the posting tuple's t_tid to store the location of the posting list
450 * (instead of a block number), as well as the total number of heap TIDs
451 * present in the tuple (instead of a real offset number).
452 *
453 * The 12 least significant bits from t_tid's offset number are used to
454 * represent the number of heap TIDs present in the tuple, leaving 4 status
455 * bits (the BT_STATUS_OFFSET_MASK bits). Like any non-pivot tuple, the
456 * number of columns stored is always implicitly the total number in the
457 * index (in practice there can never be non-key columns stored, since
458 * deduplication is not supported with INCLUDE indexes).
459 */
460 #define INDEX_ALT_TID_MASK INDEX_AM_RESERVED_BIT
461
462/* Item pointer offset bit masks */
463 #define BT_OFFSET_MASK 0x0FFF
464 #define BT_STATUS_OFFSET_MASK 0xF000
465/* BT_STATUS_OFFSET_MASK status bits */
466 #define BT_PIVOT_HEAP_TID_ATTR 0x1000
467 #define BT_IS_POSTING 0x2000
468
469/*
470 * Mask allocated for number of keys in index tuple must be able to fit
471 * maximum possible number of index attributes
472 */
473 StaticAssertDecl(BT_OFFSET_MASK >= INDEX_MAX_KEYS,
474 "BT_OFFSET_MASK can't fit INDEX_MAX_KEYS");
475
476/*
477 * Note: BTreeTupleIsPivot() can have false negatives (but not false
478 * positives) when used with !heapkeyspace indexes
479 */
480static inline bool
481 BTreeTupleIsPivot(IndexTuple itup)
482{
483 if ((itup->t_info & INDEX_ALT_TID_MASK) == 0)
484 return false;
485 /* absence of BT_IS_POSTING in offset number indicates pivot tuple */
486 if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) & BT_IS_POSTING) != 0)
487 return false;
488
489 return true;
490}
491
492static inline bool
493 BTreeTupleIsPosting(IndexTuple itup)
494{
495 if ((itup->t_info & INDEX_ALT_TID_MASK) == 0)
496 return false;
497 /* presence of BT_IS_POSTING in offset number indicates posting tuple */
498 if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) & BT_IS_POSTING) == 0)
499 return false;
500
501 return true;
502}
503
504static inline void
505 BTreeTupleSetPosting(IndexTuple itup, uint16 nhtids, int postingoffset)
506{
507 Assert(nhtids > 1);
508 Assert((nhtids & BT_STATUS_OFFSET_MASK) == 0);
509 Assert((size_t) postingoffset == MAXALIGN(postingoffset));
510 Assert(postingoffset < INDEX_SIZE_MASK);
511 Assert(!BTreeTupleIsPivot(itup));
512
513 itup->t_info |= INDEX_ALT_TID_MASK;
514 ItemPointerSetOffsetNumber(&itup->t_tid, (nhtids | BT_IS_POSTING));
515 ItemPointerSetBlockNumber(&itup->t_tid, postingoffset);
516}
517
518static inline uint16
519 BTreeTupleGetNPosting(IndexTuple posting)
520{
521 OffsetNumber existing;
522
523 Assert(BTreeTupleIsPosting(posting));
524
525 existing = ItemPointerGetOffsetNumberNoCheck(&posting->t_tid);
526 return (existing & BT_OFFSET_MASK);
527}
528
529static inline uint32
530 BTreeTupleGetPostingOffset(IndexTuple posting)
531{
532 Assert(BTreeTupleIsPosting(posting));
533
534 return ItemPointerGetBlockNumberNoCheck(&posting->t_tid);
535}
536
537static inline ItemPointer
538 BTreeTupleGetPosting(IndexTuple posting)
539{
540 return (ItemPointer) ((char *) posting +
541 BTreeTupleGetPostingOffset(posting));
542}
543
544static inline ItemPointer
545 BTreeTupleGetPostingN(IndexTuple posting, int n)
546{
547 return BTreeTupleGetPosting(posting) + n;
548}
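A hedged sketch showing how the posting list accessors compose (editor's illustration; the helper name is hypothetical, though nbtdedup.c contains assertions in the same spirit): heap TIDs within a posting list are kept in ascending order.

static inline bool
example_posting_tids_are_ascending(IndexTuple posting)
{
    /* Editor's illustrative sketch, not part of nbtree.h */
    uint16      nhtids = BTreeTupleGetNPosting(posting);

    for (int i = 1; i < nhtids; i++)
    {
        if (ItemPointerCompare(BTreeTupleGetPostingN(posting, i - 1),
                               BTreeTupleGetPostingN(posting, i)) >= 0)
            return false;
    }

    return true;
}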
549
550/*
551 * Get/set downlink block number in pivot tuple.
552 *
553 * Note: Cannot assert that tuple is a pivot tuple. If we did so then
554 * !heapkeyspace indexes would exhibit false positive assertion failures.
555 */
556static inline BlockNumber
557 BTreeTupleGetDownLink(IndexTuple pivot)
558{
559 return ItemPointerGetBlockNumberNoCheck(&pivot->t_tid);
560}
561
562static inline void
563 BTreeTupleSetDownLink(IndexTuple pivot, BlockNumber blkno)
564{
565 ItemPointerSetBlockNumber(&pivot->t_tid, blkno);
566}
567
568/*
569 * Get number of attributes within tuple.
570 *
571 * Note that this does not include an implicit tiebreaker heap TID
572 * attribute, if any. Note also that the number of key attributes must be
573 * explicitly represented in all heapkeyspace pivot tuples.
574 *
575 * Note: This is defined as a macro rather than an inline function to
576 * avoid including rel.h.
577 */
578 #define BTreeTupleGetNAtts(itup, rel) \
579 ( \
580 (BTreeTupleIsPivot(itup)) ? \
581 ( \
582 ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_OFFSET_MASK \
583 ) \
584 : \
585 IndexRelationGetNumberOfAttributes(rel) \
586 )
587
588/*
589 * Set number of key attributes in tuple.
590 *
591 * The heap TID tiebreaker attribute bit may also be set here, indicating that
592 * a heap TID value will be stored at the end of the tuple (i.e. using the
593 * special pivot tuple representation).
594 */
595static inline void
596 BTreeTupleSetNAtts(IndexTuple itup, uint16 nkeyatts, bool heaptid)
597{
598 Assert(nkeyatts <= INDEX_MAX_KEYS);
599 Assert((nkeyatts & BT_STATUS_OFFSET_MASK) == 0);
600 Assert(!heaptid || nkeyatts > 0);
601 Assert(!BTreeTupleIsPivot(itup) || nkeyatts == 0);
602
603 itup->t_info |= INDEX_ALT_TID_MASK;
604
605 if (heaptid)
606 nkeyatts |= BT_PIVOT_HEAP_TID_ATTR;
607
608 /* BT_IS_POSTING bit is deliberately unset here */
609 ItemPointerSetOffsetNumber(&itup->t_tid, nkeyatts);
610 Assert(BTreeTupleIsPivot(itup));
611}
612
613/*
614 * Get/set leaf page's "top parent" link from its high key. Used during page
615 * deletion.
616 *
617 * Note: Cannot assert that tuple is a pivot tuple. If we did so then
618 * !heapkeyspace indexes would exhibit false positive assertion failures.
619 */
620static inline BlockNumber
621 BTreeTupleGetTopParent(IndexTuple leafhikey)
622{
623 return ItemPointerGetBlockNumberNoCheck(&leafhikey->t_tid);
624}
625
626static inline void
627 BTreeTupleSetTopParent(IndexTuple leafhikey, BlockNumber blkno)
628{
629 ItemPointerSetBlockNumber(&leafhikey->t_tid, blkno);
630 BTreeTupleSetNAtts(leafhikey, 0, false);
631}
632
633/*
634 * Get tiebreaker heap TID attribute, if any.
635 *
636 * This returns the first/lowest heap TID in the case of a posting list tuple.
637 */
638static inline ItemPointer
639 BTreeTupleGetHeapTID(IndexTuple itup)
640{
641 if (BTreeTupleIsPivot(itup))
642 {
643 /* Pivot tuple heap TID representation? */
644 if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
645 BT_PIVOT_HEAP_TID_ATTR) != 0)
646 return (ItemPointer) ((char *) itup + IndexTupleSize(itup) -
647 sizeof(ItemPointerData));
648
649 /* Heap TID attribute was truncated */
650 return NULL;
651 }
652 else if (BTreeTupleIsPosting(itup))
653 return BTreeTupleGetPosting(itup);
654
655 return &itup->t_tid;
656}
657
658/*
659 * Get maximum heap TID attribute, which could be the only TID in the case of
660 * a non-pivot tuple that does not have a posting list.
661 *
662 * Works with non-pivot tuples only.
663 */
664static inline ItemPointer
665 BTreeTupleGetMaxHeapTID(IndexTuple itup)
666{
667 Assert(!BTreeTupleIsPivot(itup));
668
669 if (BTreeTupleIsPosting(itup))
670 {
671 uint16 nposting = BTreeTupleGetNPosting(itup);
672
673 return BTreeTupleGetPostingN(itup, nposting - 1);
674 }
675
676 return &itup->t_tid;
677}
678
679/*
680 * Operator strategy numbers for B-tree have been moved to access/stratnum.h,
681 * because many places need to use them in ScanKeyInit() calls.
682 *
683 * The strategy numbers are chosen so that we can commute them by
684 * subtraction, thus:
685 */
686 #define BTCommuteStrategyNumber(strat) (BTMaxStrategyNumber + 1 - (strat))
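For example, with the strategy numbers from access/stratnum.h (BTLessStrategyNumber = 1 through BTGreaterStrategyNumber = 5):

/*
 * BTCommuteStrategyNumber(BTLessStrategyNumber)      == BTGreaterStrategyNumber
 * BTCommuteStrategyNumber(BTLessEqualStrategyNumber) == BTGreaterEqualStrategyNumber
 * BTCommuteStrategyNumber(BTEqualStrategyNumber)     == BTEqualStrategyNumber
 */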
687
688/*
689 * When a new operator class is declared, we require that the user
690 * supply us with an amproc procedure (BTORDER_PROC) for determining
691 * whether, for two keys a and b, a < b, a = b, or a > b. This routine
692 * must return < 0, 0, > 0, respectively, in these three cases.
693 *
694 * To facilitate accelerated sorting, an operator class may choose to
695 * offer a second procedure (BTSORTSUPPORT_PROC). For full details, see
696 * src/include/utils/sortsupport.h.
697 *
698 * To support window frames defined by "RANGE offset PRECEDING/FOLLOWING",
699 * an operator class may choose to offer a third amproc procedure
700 * (BTINRANGE_PROC), independently of whether it offers sortsupport.
701 * For full details, see doc/src/sgml/btree.sgml.
702 *
703 * To facilitate B-Tree deduplication, an operator class may choose to
704 * offer a fourth amproc procedure (BTEQUALIMAGE_PROC). For full details,
705 * see doc/src/sgml/btree.sgml.
706 *
707 * An operator class may choose to offer a fifth amproc procedure
708 * (BTOPTIONS_PROC). These procedures define a set of user-visible
709 * parameters that can be used to control operator class behavior. None of
710 * the built-in B-Tree operator classes currently register an "options" proc.
711 *
712 * To facilitate more efficient B-Tree skip scans, an operator class may
713 * choose to offer a sixth amproc procedure (BTSKIPSUPPORT_PROC). For full
714 * details, see src/include/utils/skipsupport.h.
715 */
716
717 #define BTORDER_PROC 1
718 #define BTSORTSUPPORT_PROC 2
719 #define BTINRANGE_PROC 3
720 #define BTEQUALIMAGE_PROC 4
721 #define BTOPTIONS_PROC 5
722 #define BTSKIPSUPPORT_PROC 6
723 #define BTNProcs 6
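A hedged sketch of invoking an opclass's BTORDER_PROC through the standard support-function machinery (editor's illustration; the helper name is hypothetical, and _bt_compare() reaches the same function through cached scan key state rather than a fresh lookup):

static inline int32
example_call_btorder_proc(Relation rel, AttrNumber attno, Oid collation,
                          Datum arg1, Datum arg2)
{
    /* Editor's illustrative sketch, not part of nbtree.h */
    FmgrInfo   *procinfo = index_getprocinfo(rel, attno, BTORDER_PROC);

    /* returns <0, 0, or >0, exactly as described above */
    return DatumGetInt32(FunctionCall2Coll(procinfo, collation, arg1, arg2));
}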
724
725/*
726 * We need to be able to tell the difference between read and write
727 * requests for pages, in order to do locking correctly.
728 */
729
730 #define BT_READ BUFFER_LOCK_SHARE
731 #define BT_WRITE BUFFER_LOCK_EXCLUSIVE
732
733/*
734 * BTStackData -- As we descend a tree, we push the location of pivot
735 * tuples whose downlink we are about to follow onto a private stack. If
736 * we split a leaf, we use this stack to walk back up the tree and insert
737 * data into its parent page at the correct location. We also have to
738 * recursively insert into the grandparent page if and when the parent page
739 * splits. Our private stack can become stale due to concurrent page
740 * splits and page deletions, but it should never give us an irredeemably
741 * bad picture.
742 */
743 typedef struct BTStackData
744{
745 BlockNumber bts_blkno;
746 OffsetNumber bts_offset;
747 struct BTStackData *bts_parent;
748 } BTStackData;
749
750 typedef BTStackData *BTStack;
751
752/*
753 * BTScanInsertData is the btree-private state needed to find an initial
754 * position for an indexscan, or to insert new tuples -- an "insertion
755 * scankey" (not to be confused with a search scankey). It's used to descend
756 * a B-Tree using _bt_search.
757 *
758 * heapkeyspace indicates if we expect all keys in the index to be physically
759 * unique because heap TID is used as a tiebreaker attribute, and if index may
760 * have truncated key attributes in pivot tuples. This is actually a property
761 * of the index relation itself (not an indexscan). heapkeyspace indexes are
762 * indexes whose version is >= version 4. It's convenient to keep this close
763 * by, rather than accessing the metapage repeatedly.
764 *
765 * allequalimage is set to indicate that deduplication is safe for the index.
766 * This is also a property of the index relation rather than an indexscan.
767 *
768 * anynullkeys indicates if any of the keys had NULL value when scankey was
769 * built from index tuple (note that already-truncated tuple key attributes
770 * set NULL as a placeholder key value, which also affects value of
771 * anynullkeys). This is a convenience for unique index non-pivot tuple
772 * insertion, which usually temporarily unsets scantid, but shouldn't iff
773 * anynullkeys is true. Value generally matches non-pivot tuple's HasNulls
774 * bit, but may not when inserting into an INCLUDE index (tuple header value
775 * is affected by the NULL-ness of both key and non-key attributes).
776 *
777 * See comments in _bt_first for an explanation of the nextkey and backward
778 * fields.
779 *
780 * scantid is the heap TID that is used as a final tiebreaker attribute. It
781 * is set to NULL when index scan doesn't need to find a position for a
782 * specific physical tuple. Must be set when inserting new tuples into
783 * heapkeyspace indexes, since every tuple in the tree unambiguously belongs
784 * in one exact position (it's never set with !heapkeyspace indexes, though).
785 * Despite the representational difference, nbtree search code considers
786 * scantid to be just another insertion scankey attribute.
787 *
788 * scankeys is an array of scan key entries for attributes that are compared
789 * before scantid (user-visible attributes). keysz is the size of the array.
790 * During insertion, there must be a scan key for every attribute, but when
791 * starting a regular index scan some can be omitted. The array is used as a
792 * flexible array member, though it's sized in a way that makes it possible to
793 * use stack allocations. See nbtree/README for full details.
794 */
795 typedef struct BTScanInsertData
796{
797 bool heapkeyspace;
798 bool allequalimage;
799 bool anynullkeys;
800 bool nextkey;
801 bool backward; /* backward index scan? */
802 ItemPointer scantid; /* tiebreaker for scankeys */
803 int keysz; /* Size of scankeys array */
804 ScanKeyData scankeys[INDEX_MAX_KEYS]; /* Must appear last */
805 } BTScanInsertData;
806
807 typedef BTScanInsertData *BTScanInsert;
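A hedged sketch of building an insertion scankey for a new tuple (editor's illustration; the helper name is hypothetical, it relies on the _bt_mkscankey() prototype that appears further down in this file, and it follows the shape of what _bt_doinsert() does):

static inline BTScanInsert
example_insertion_scankey(Relation rel, IndexTuple itup)
{
    /* Editor's illustrative sketch, not part of nbtree.h */
    BTScanInsert itup_key = _bt_mkscankey(rel, itup);

    /*
     * In heapkeyspace (version 4+) indexes the heap TID is the final
     * tiebreaker attribute, so insertions set scantid to the new tuple's
     * TID.  (Unique-index insertions delay this until after the uniqueness
     * check.)
     */
    if (itup_key->heapkeyspace)
        itup_key->scantid = &itup->t_tid;

    return itup_key;
}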
808
809/*
810 * BTInsertStateData is a working area used during insertion.
811 *
812 * This is filled in after descending the tree to the first leaf page the new
813 * tuple might belong on. Tracks the current position while performing
814 * uniqueness check, before we have determined which exact page to insert
815 * to.
816 *
817 * (This should be private to nbtinsert.c, but it's also used by
818 * _bt_binsrch_insert)
819 */
820 typedef struct BTInsertStateData
821{
822 IndexTuple itup; /* Item we're inserting */
823 Size itemsz; /* Size of itup -- should be MAXALIGN()'d */
824 BTScanInsert itup_key; /* Insertion scankey */
825
826 /* Buffer containing leaf page we're likely to insert itup on */
827 Buffer buf;
828
829 /*
830 * Cache of bounds within the current buffer. Only used for insertions
831 * where _bt_check_unique is called. See _bt_binsrch_insert and
832 * _bt_findinsertloc for details.
833 */
834 bool bounds_valid;
835 OffsetNumber low;
836 OffsetNumber stricthigh;
837
838 /*
839 * if _bt_binsrch_insert found the location inside existing posting list,
840 * save the position inside the list. -1 sentinel value indicates overlap
841 * with an existing posting list tuple that has its LP_DEAD bit set.
842 */
843 int postingoff;
844 } BTInsertStateData;
845
846 typedef BTInsertStateData *BTInsertState;
847
848/*
849 * State used to represent an individual pending tuple during
850 * deduplication.
851 */
852 typedef struct BTDedupInterval
853{
854 OffsetNumber baseoff;
855 uint16 nitems;
856 } BTDedupInterval;
857
858/*
859 * BTDedupStateData is a working area used during deduplication.
860 *
861 * The status info fields track the state of a whole-page deduplication pass.
862 * State about the current pending posting list is also tracked.
863 *
864 * A pending posting list is comprised of a contiguous group of equal items
865 * from the page, starting from page offset number 'baseoff'. This is the
866 * offset number of the "base" tuple for new posting list. 'nitems' is the
867 * current total number of existing items from the page that will be merged to
868 * make a new posting list tuple, including the base tuple item. (Existing
869 * items may themselves be posting list tuples, or regular non-pivot tuples.)
870 *
871 * The total size of the existing tuples to be freed when pending posting list
872 * is processed gets tracked by 'phystupsize'. This information allows
873 * deduplication to calculate the space saving for each new posting list
874 * tuple, and for the entire pass over the page as a whole.
875 */
876 typedef struct BTDedupStateData
877{
878 /* Deduplication status info for entire pass over page */
879 bool deduplicate; /* Still deduplicating page? */
880 int nmaxitems; /* Number of max-sized tuples so far */
881 Size maxpostingsize; /* Limit on size of final tuple */
882
883 /* Metadata about base tuple of current pending posting list */
884 IndexTuple base; /* Use to form new posting list */
885 OffsetNumber baseoff; /* page offset of base */
886 Size basetupsize; /* base size without original posting list */
887
888 /* Other metadata about pending posting list */
889 ItemPointer htids; /* Heap TIDs in pending posting list */
890 int nhtids; /* Number of heap TIDs in htids array */
891 int nitems; /* Number of existing tuples/line pointers */
892 Size phystupsize; /* Includes line pointer overhead */
893
894 /*
895 * Array of tuples to go on new version of the page. Contains one entry
896 * for each group of consecutive items. Note that existing tuples that
897 * will not become posting list tuples do not appear in the array (they
898 * are implicitly unchanged by deduplication pass).
899 */
900 int nintervals; /* current number of intervals in array */
901 BTDedupInterval intervals[MaxIndexTuplesPerPage];
902 } BTDedupStateData;
903
904 typedef BTDedupStateData *BTDedupState;
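A hedged sketch of the pending-posting-list cycle described above, built from the nbtdedup.c entry points prototyped later in this file (editor's illustration; the helper name is hypothetical, and it assumes the caller passes only a run of tuples with equal keys, which _bt_dedup_pass() verifies with _bt_keep_natts_fast()):

static inline void
example_merge_equal_tuples(Page newpage, BTDedupState state,
                           IndexTuple *itups, OffsetNumber *offnums,
                           int nitems)
{
    /* Editor's illustrative sketch, not part of nbtree.h */
    /* the first tuple becomes the "base" of a new pending posting list */
    _bt_dedup_start_pending(state, itups[0], offnums[0]);

    for (int i = 1; i < nitems; i++)
    {
        /* merge each duplicate's TIDs in, unless maxpostingsize is exceeded */
        if (!_bt_dedup_save_htid(state, itups[i]))
        {
            /* flush the pending posting list, then start a new one */
            (void) _bt_dedup_finish_pending(newpage, state);
            _bt_dedup_start_pending(state, itups[i], offnums[i]);
        }
    }

    (void) _bt_dedup_finish_pending(newpage, state);
}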
905
906/*
907 * BTVacuumPostingData is state that represents how to VACUUM (or delete) a
908 * posting list tuple when some (though not all) of its TIDs are to be
909 * deleted.
910 *
911 * By convention, the itup field is the original posting list tuple on input,
912 * and the palloc()'d final tuple used to overwrite the existing tuple on output.
913 */
914 typedef struct BTVacuumPostingData
915{
916 /* Tuple that will be/was updated */
917 IndexTuple itup;
918 OffsetNumber updatedoffset;
919
920 /* State needed to describe final itup in WAL */
921 uint16 ndeletedtids;
922 uint16 deletetids[FLEXIBLE_ARRAY_MEMBER];
923 } BTVacuumPostingData;
924
925 typedef BTVacuumPostingData *BTVacuumPosting;
926
927/*
928 * BTScanOpaqueData is the btree-private state needed for an indexscan.
929 * This consists of preprocessed scan keys (see _bt_preprocess_keys() for
930 * details of the preprocessing), information about the current location
931 * of the scan, and information about the marked location, if any. (We use
932 * BTScanPosData to represent the data needed for each of current and marked
933 * locations.) In addition we can remember some known-killed index entries
934 * that must be marked before we can move off the current page.
935 *
936 * Index scans work a page at a time: we pin and read-lock the page, identify
937 * all the matching items on the page and save them in BTScanPosData, then
938 * release the read-lock while returning the items to the caller for
939 * processing. This approach minimizes lock/unlock traffic. We must always
940 * drop the lock to make it okay for caller to process the returned items.
941 * Whether or not we can also release the pin during this window will vary.
942 * We drop the pin (when so->dropPin) to avoid blocking progress by VACUUM
943 * (see nbtree/README section about making concurrent TID recycling safe).
944 * We'll always release both the lock and the pin on the current page before
945 * moving on to its sibling page.
946 *
947 * If we are doing an index-only scan, we save the entire IndexTuple for each
948 * matched item, otherwise only its heap TID and offset. The IndexTuples go
949 * into a separate workspace array; each BTScanPosItem stores its tuple's
950 * offset within that array. Posting list tuples store a "base" tuple once,
951 * allowing the same key to be returned for each TID in the posting list
952 * tuple.
953 */
954
955 typedef struct BTScanPosItem /* what we remember about each match */
956{
957 ItemPointerData heapTid; /* TID of referenced heap item */
958 OffsetNumber indexOffset; /* index item's location within page */
959 LocationIndex tupleOffset; /* IndexTuple's offset in workspace, if any */
960 } BTScanPosItem;
961
962 typedef struct BTScanPosData
963{
964 Buffer buf; /* currPage buf (invalid means unpinned) */
965
966 /* page details as of the saved position's call to _bt_readpage */
967 BlockNumber currPage; /* page referenced by items array */
968 BlockNumber prevPage; /* currPage's left link */
969 BlockNumber nextPage; /* currPage's right link */
970 XLogRecPtr lsn; /* currPage's LSN (when so->dropPin) */
971
972 /* scan direction for the saved position's call to _bt_readpage */
973 ScanDirection dir;
974
975 /*
976 * If we are doing an index-only scan, nextTupleOffset is the first free
977 * location in the associated tuple storage workspace.
978 */
979 int nextTupleOffset;
980
981 /*
982 * moreLeft and moreRight track whether we think there may be matching
983 * index entries to the left and right of the current page, respectively.
984 */
985 bool moreLeft;
986 bool moreRight;
987
988 /*
989 * The items array is always ordered in index order (ie, increasing
990 * indexoffset). When scanning backwards it is convenient to fill the
991 * array back-to-front, so we start at the last slot and fill downwards.
992 * Hence we need both a first-valid-entry and a last-valid-entry counter.
993 * itemIndex is a cursor showing which entry was last returned to caller.
994 */
995 int firstItem; /* first valid index in items[] */
996 int lastItem; /* last valid index in items[] */
997 int itemIndex; /* current index in items[] */
998
999 BTScanPosItem items[MaxTIDsPerBTreePage]; /* MUST BE LAST */
1000 } BTScanPosData;
1001
1002 typedef BTScanPosData *BTScanPos;
1003
1004 #define BTScanPosIsPinned(scanpos) \
1005( \
1006 AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
1007 !BufferIsValid((scanpos).buf)), \
1008 BufferIsValid((scanpos).buf) \
1009)
1010 #define BTScanPosUnpin(scanpos) \
1011 do { \
1012 ReleaseBuffer((scanpos).buf); \
1013 (scanpos).buf = InvalidBuffer; \
1014 } while (0)
1015 #define BTScanPosUnpinIfPinned(scanpos) \
1016 do { \
1017 if (BTScanPosIsPinned(scanpos)) \
1018 BTScanPosUnpin(scanpos); \
1019 } while (0)
1020
1021 #define BTScanPosIsValid(scanpos) \
1022( \
1023 AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
1024 !BufferIsValid((scanpos).buf)), \
1025 BlockNumberIsValid((scanpos).currPage) \
1026)
1027 #define BTScanPosInvalidate(scanpos) \
1028 do { \
1029 (scanpos).buf = InvalidBuffer; \
1030 (scanpos).currPage = InvalidBlockNumber; \
1031 } while (0)
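A hedged sketch of how these macros combine when a scan abandons its current position, similar in spirit to the cleanup done by btrescan() and btendscan() (editor's illustration; hypothetical helper name):

static inline void
example_forget_scan_position(BTScanPos pos)
{
    /* Editor's illustrative sketch, not part of nbtree.h */
    /* release the buffer pin if one is still held, then clear the position */
    BTScanPosUnpinIfPinned(*pos);
    BTScanPosInvalidate(*pos);

    Assert(!BTScanPosIsValid(*pos));
    Assert(!BTScanPosIsPinned(*pos));
}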
1032
1033/* We need one of these for each equality-type SK_SEARCHARRAY scan key */
1034 typedef struct BTArrayKeyInfo
1035{
1036 /* fields set for both kinds of array (SAOP arrays and skip arrays) */
1037 int scan_key; /* index of associated key in keyData */
1038 int num_elems; /* number of elems (-1 means skip array) */
1039
1040 /* fields set for ScalarArrayOpExpr arrays only */
1041 Datum *elem_values; /* array of num_elems Datums */
1042 int cur_elem; /* index of current element in elem_values */
1043
1044 /* fields set for skip arrays only */
1045 int16 attlen; /* attr's length, in bytes */
1046 bool attbyval; /* attr's FormData_pg_attribute.attbyval */
1047 bool null_elem; /* NULL is lowest/highest element? */
1048 SkipSupport sksup; /* skip support (NULL if opclass lacks it) */
1049 ScanKey low_compare; /* array's > or >= lower bound */
1050 ScanKey high_compare; /* array's < or <= upper bound */
1051 } BTArrayKeyInfo;
1052
1053 typedef struct BTScanOpaqueData
1054{
1055 /* these fields are set by _bt_preprocess_keys(): */
1056 bool qual_ok; /* false if qual can never be satisfied */
1057 int numberOfKeys; /* number of preprocessed scan keys */
1058 ScanKey keyData; /* array of preprocessed scan keys */
1059
1060 /* workspace for SK_SEARCHARRAY support */
1061 int numArrayKeys; /* number of equality-type array keys */
1062 bool skipScan; /* At least one skip array in arrayKeys[]? */
1063 bool needPrimScan; /* New prim scan to continue in current dir? */
1064 bool scanBehind; /* Check scan not still behind on next page? */
1065 bool oppositeDirCheck; /* scanBehind opposite-scan-dir check? */
1066 BTArrayKeyInfo *arrayKeys; /* info about each equality-type array key */
1067 FmgrInfo *orderProcs; /* ORDER procs for required equality keys */
1068 MemoryContext arrayContext; /* scan-lifespan context for array data */
1069
1070 /* info about killed items if any (killedItems is NULL if never used) */
1071 int *killedItems; /* currPos.items indexes of killed items */
1072 int numKilled; /* number of currently stored items */
1073 bool dropPin; /* drop leaf pin before btgettuple returns? */
1074
1075 /*
1076 * If we are doing an index-only scan, these are the tuple storage
1077 * workspaces for the currPos and markPos respectively. Each is of size
1078 * BLCKSZ, so it can hold as much as a full page's worth of tuples.
1079 */
1080 char *currTuples; /* tuple storage for currPos */
1081 char *markTuples; /* tuple storage for markPos */
1082
1083 /*
1084 * If the marked position is on the same page as current position, we
1085 * don't use markPos, but just keep the marked itemIndex in markItemIndex
1086 * (all the rest of currPos is valid for the mark position). Hence, to
1087 * determine if there is a mark, first look at markItemIndex, then at
1088 * markPos.
1089 */
1090 int markItemIndex; /* itemIndex, or -1 if not valid */
1091
1092 /* keep these last in struct for efficiency */
1093 BTScanPosData currPos; /* current position data */
1094 BTScanPosData markPos; /* marked position, if any */
1095 } BTScanOpaqueData;
1096
1097 typedef BTScanOpaqueData *BTScanOpaque;
1098
1099/*
1100 * _bt_readpage state used across _bt_checkkeys calls for a page
1101 */
1102 typedef struct BTReadPageState
1103{
1104 /* Input parameters, set by _bt_readpage for _bt_checkkeys */
1105 OffsetNumber minoff; /* Lowest non-pivot tuple's offset */
1106 OffsetNumber maxoff; /* Highest non-pivot tuple's offset */
1107 IndexTuple finaltup; /* Needed by scans with array keys */
1108 Page page; /* Page being read */
1109 bool firstpage; /* page is first for primitive scan? */
1110 bool forcenonrequired; /* treat all keys as nonrequired? */
1111 int startikey; /* start comparisons from this scan key */
1112
1113 /* Per-tuple input parameters, set by _bt_readpage for _bt_checkkeys */
1114 OffsetNumber offnum; /* current tuple's page offset number */
1115
1116 /* Output parameters, set by _bt_checkkeys for _bt_readpage */
1117 OffsetNumber skip; /* Array keys "look ahead" skip offnum */
1118 bool continuescan; /* Terminate ongoing (primitive) index scan? */
1119
1120 /*
1121 * Private _bt_checkkeys state used to manage "look ahead" optimization
1122 * and primscan scheduling (only used during scans with array keys)
1123 */
1124 int16 rechecks;
1125 int16 targetdistance;
1126 int16 nskipadvances;
1127
1128 } BTReadPageState;
1129
1130/*
1131 * We use some private sk_flags bits in preprocessed scan keys. We're allowed
1132 * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
1133 * index's indoption[] array entry for the index attribute.
1134 */
1135 #define SK_BT_REQFWD 0x00010000 /* required to continue forward scan */
1136 #define SK_BT_REQBKWD 0x00020000 /* required to continue backward scan */
1137 #define SK_BT_SKIP 0x00040000 /* skip array on column without input = */
1138
1139/* SK_BT_SKIP-only flags (set and unset by array advancement) */
1140 #define SK_BT_MINVAL 0x00080000 /* invalid sk_argument, use low_compare */
1141 #define SK_BT_MAXVAL 0x00100000 /* invalid sk_argument, use high_compare */
1142 #define SK_BT_NEXT 0x00200000 /* positions the scan > sk_argument */
1143 #define SK_BT_PRIOR 0x00400000 /* positions the scan < sk_argument */
1144
1145/* Remaps pg_index flag bits to uppermost SK_BT_* byte */
1146 #define SK_BT_INDOPTION_SHIFT 24 /* must clear the above bits */
1147 #define SK_BT_DESC (INDOPTION_DESC << SK_BT_INDOPTION_SHIFT)
1148 #define SK_BT_NULLS_FIRST (INDOPTION_NULLS_FIRST << SK_BT_INDOPTION_SHIFT)
1149
1150 typedef struct BTOptions
1151{
1152 int32 varlena_header_; /* varlena header (do not touch directly!) */
1153 int fillfactor; /* page fill factor in percent (0..100) */
1154 float8 vacuum_cleanup_index_scale_factor; /* deprecated */
1155 bool deduplicate_items; /* Try to deduplicate items? */
1156 } BTOptions;
1157
1158 #define BTGetFillFactor(relation) \
1159 (AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
1160 relation->rd_rel->relam == BTREE_AM_OID), \
1161 (relation)->rd_options ? \
1162 ((BTOptions *) (relation)->rd_options)->fillfactor : \
1163 BTREE_DEFAULT_FILLFACTOR)
1164 #define BTGetTargetPageFreeSpace(relation) \
1165 (BLCKSZ * (100 - BTGetFillFactor(relation)) / 100)
1166 #define BTGetDeduplicateItems(relation) \
1167 (AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
1168 relation->rd_rel->relam == BTREE_AM_OID), \
1169 ((relation)->rd_options ? \
1170 ((BTOptions *) (relation)->rd_options)->deduplicate_items : true))
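A quick worked example, assuming the default 8192-byte BLCKSZ (editor's note, not part of nbtree.h):

/*
 * With the default leaf fillfactor of 90, BTGetTargetPageFreeSpace() comes
 * out to 8192 * (100 - 90) / 100 = 819 bytes, i.e. roughly that much space
 * is left unused on each leaf page during CREATE INDEX and on rightmost
 * page splits.
 */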
1171
1172/*
1173 * Constant definition for progress reporting. Phase numbers must match
1174 * btbuildphasename.
1175 */
1176/* PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE is 1 (see progress.h) */
1177 #define PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN 2
1178 #define PROGRESS_BTREE_PHASE_PERFORMSORT_1 3
1179 #define PROGRESS_BTREE_PHASE_PERFORMSORT_2 4
1180 #define PROGRESS_BTREE_PHASE_LEAF_LOAD 5
1181
1182/*
1183 * external entry points for btree, in nbtree.c
1184 */
1185extern void btbuildempty(Relation index);
1186extern bool btinsert(Relation rel, Datum *values, bool *isnull,
1187 ItemPointer ht_ctid, Relation heapRel,
1188 IndexUniqueCheck checkUnique,
1189 bool indexUnchanged,
1190 struct IndexInfo *indexInfo);
1191extern IndexScanDesc btbeginscan(Relation rel, int nkeys, int norderbys);
1192extern Size btestimateparallelscan(Relation rel, int nkeys, int norderbys);
1193extern void btinitparallelscan(void *target);
1194extern bool btgettuple(IndexScanDesc scan, ScanDirection dir);
1195extern int64 btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm);
1196extern void btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
1197 ScanKey orderbys, int norderbys);
1198extern void btparallelrescan(IndexScanDesc scan);
1199extern void btendscan(IndexScanDesc scan);
1200extern void btmarkpos(IndexScanDesc scan);
1201extern void btrestrpos(IndexScanDesc scan);
1202extern IndexBulkDeleteResult *btbulkdelete(IndexVacuumInfo *info,
1203 IndexBulkDeleteResult *stats,
1204 IndexBulkDeleteCallback callback,
1205 void *callback_state);
1206extern IndexBulkDeleteResult *btvacuumcleanup(IndexVacuumInfo *info,
1207 IndexBulkDeleteResult *stats);
1208extern bool btcanreturn(Relation index, int attno);
1209extern int btgettreeheight(Relation rel);
1210
1211extern CompareType bttranslatestrategy(StrategyNumber strategy, Oid opfamily);
1212extern StrategyNumber bttranslatecmptype(CompareType cmptype, Oid opfamily);
1213
1214/*
1215 * prototypes for internal functions in nbtree.c
1216 */
1217extern bool _bt_parallel_seize(IndexScanDesc scan, BlockNumber *next_scan_page,
1218 BlockNumber *last_curr_page, bool first);
1219extern void _bt_parallel_release(IndexScanDesc scan,
1220 BlockNumber next_scan_page,
1221 BlockNumber curr_page);
1222extern void _bt_parallel_done(IndexScanDesc scan);
1223extern void _bt_parallel_primscan_schedule(IndexScanDesc scan,
1224 BlockNumber curr_page);
1225
1226/*
1227 * prototypes for functions in nbtdedup.c
1228 */
1229extern void _bt_dedup_pass(Relation rel, Buffer buf, IndexTuple newitem,
1230 Size newitemsz, bool bottomupdedup);
1231extern bool _bt_bottomupdel_pass(Relation rel, Buffer buf, Relation heapRel,
1232 Size newitemsz);
1233extern void _bt_dedup_start_pending(BTDedupState state, IndexTuple base,
1234 OffsetNumber baseoff);
1235extern bool _bt_dedup_save_htid(BTDedupState state, IndexTuple itup);
1236extern Size _bt_dedup_finish_pending(Page newpage, BTDedupState state);
1237extern IndexTuple _bt_form_posting(IndexTuple base, ItemPointer htids,
1238 int nhtids);
1239extern void _bt_update_posting(BTVacuumPosting vacposting);
1240extern IndexTuple _bt_swap_posting(IndexTuple newitem, IndexTuple oposting,
1241 int postingoff);
1242
1243/*
1244 * prototypes for functions in nbtinsert.c
1245 */
1246extern bool _bt_doinsert(Relation rel, IndexTuple itup,
1247 IndexUniqueCheck checkUnique, bool indexUnchanged,
1248 Relation heapRel);
1249extern void _bt_finish_split(Relation rel, Relation heaprel, Buffer lbuf,
1250 BTStack stack);
1251extern Buffer _bt_getstackbuf(Relation rel, Relation heaprel, BTStack stack,
1252 BlockNumber child);
1253
1254/*
1255 * prototypes for functions in nbtsplitloc.c
1256 */
1257extern OffsetNumber _bt_findsplitloc(Relation rel, Page origpage,
1258 OffsetNumber newitemoff, Size newitemsz, IndexTuple newitem,
1259 bool *newitemonleft);
1260
1261/*
1262 * prototypes for functions in nbtpage.c
1263 */
1264extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
1265 bool allequalimage);
1266extern bool _bt_vacuum_needs_cleanup(Relation rel);
1267extern void _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages);
1268extern void _bt_upgrademetapage(Page page);
1269extern Buffer _bt_getroot(Relation rel, Relation heaprel, int access);
1270extern Buffer _bt_gettrueroot(Relation rel);
1271extern int _bt_getrootheight(Relation rel);
1272extern void _bt_metaversion(Relation rel, bool *heapkeyspace,
1273 bool *allequalimage);
1274extern void _bt_checkpage(Relation rel, Buffer buf);
1275extern Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access);
1276extern Buffer _bt_allocbuf(Relation rel, Relation heaprel);
1277extern Buffer _bt_relandgetbuf(Relation rel, Buffer obuf,
1278 BlockNumber blkno, int access);
1279extern void _bt_relbuf(Relation rel, Buffer buf);
1280extern void _bt_lockbuf(Relation rel, Buffer buf, int access);
1281extern void _bt_unlockbuf(Relation rel, Buffer buf);
1282extern bool _bt_conditionallockbuf(Relation rel, Buffer buf);
1283extern void _bt_upgradelockbufcleanup(Relation rel, Buffer buf);
1284extern void _bt_pageinit(Page page, Size size);
1285extern void _bt_delitems_vacuum(Relation rel, Buffer buf,
1286 OffsetNumber *deletable, int ndeletable,
1287 BTVacuumPosting *updatable, int nupdatable);
1288struct TM_IndexDeleteOp; /* avoid including tableam.h here */
1289extern void _bt_delitems_delete_check(Relation rel, Buffer buf,
1290 Relation heapRel,
1291 struct TM_IndexDeleteOp *delstate);
1292extern void _bt_pagedel(Relation rel, Buffer leafbuf, BTVacState *vstate);
1293extern void _bt_pendingfsm_init(Relation rel, BTVacState *vstate,
1294 bool cleanuponly);
1295extern void _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate);
1296
1297/*
1298 * prototypes for functions in nbtpreprocesskeys.c
1299 */
1300extern void _bt_preprocess_keys(IndexScanDesc scan);
1301
1302/*
1303 * prototypes for functions in nbtsearch.c
1304 */
1305extern BTStack _bt_search(Relation rel, Relation heaprel, BTScanInsert key,
1306 Buffer *bufP, int access);
1307extern OffsetNumber _bt_binsrch_insert(Relation rel, BTInsertState insertstate);
1308extern int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum);
1309extern bool _bt_first(IndexScanDesc scan, ScanDirection dir);
1310extern bool _bt_next(IndexScanDesc scan, ScanDirection dir);
1311extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost);
1312
1313/*
1314 * prototypes for functions in nbtutils.c
1315 */
1316extern BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup);
1317extern void _bt_freestack(BTStack stack);
1318extern bool _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir);
1319extern int _bt_binsrch_array_skey(FmgrInfo *orderproc,
1320 bool cur_elem_trig, ScanDirection dir,
1321 Datum tupdatum, bool tupnull,
1322 BTArrayKeyInfo *array, ScanKey cur,
1323 int32 *set_elem_result);
1324extern void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir);
1325extern bool _bt_checkkeys(IndexScanDesc scan, BTReadPageState *pstate, bool arrayKeys,
1326 IndexTuple tuple, int tupnatts);
1327extern bool _bt_scanbehind_checkkeys(IndexScanDesc scan, ScanDirection dir,
1328 IndexTuple finaltup);
1329extern void _bt_set_startikey(IndexScanDesc scan, BTReadPageState *pstate);
1330extern void _bt_killitems(IndexScanDesc scan);
1331extern BTCycleId _bt_vacuum_cycleid(Relation rel);
1332extern BTCycleId _bt_start_vacuum(Relation rel);
1333extern void _bt_end_vacuum(Relation rel);
1334extern void _bt_end_vacuum_callback(int code, Datum arg);
1335extern Size BTreeShmemSize(void);
1336extern void BTreeShmemInit(void);
1337extern bytea *btoptions(Datum reloptions, bool validate);
1338extern bool btproperty(Oid index_oid, int attno,
1339 IndexAMProperty prop, const char *propname,
1340 bool *res, bool *isnull);
1341extern char *btbuildphasename(int64 phasenum);
1342extern IndexTuple _bt_truncate(Relation rel, IndexTuple lastleft,
1343 IndexTuple firstright, BTScanInsert itup_key);
1344extern int _bt_keep_natts_fast(Relation rel, IndexTuple lastleft,
1345 IndexTuple firstright);
1346extern bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page,
1347 OffsetNumber offnum);
1348extern void _bt_check_third_page(Relation rel, Relation heap,
1349 bool needheaptidspace, Page page, IndexTuple newtup);
1350extern bool _bt_allequalimage(Relation rel, bool debugmessage);
1351
1352/*
1353 * prototypes for functions in nbtvalidate.c
1354 */
1355extern bool btvalidate(Oid opclassoid);
1356extern void btadjustmembers(Oid opfamilyoid,
1357 Oid opclassoid,
1358 List *operators,
1359 List *functions);
1360
1361/*
1362 * prototypes for functions in nbtsort.c
1363 */
1364extern IndexBuildResult *btbuild(Relation heap, Relation index,
1365 struct IndexInfo *indexInfo);
1366extern void _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc);
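
/*
 * Editor's note: an illustrative sketch (not part of nbtree.h) of how these
 * nbtsort.c entry points are reached.  btbuild() is installed as the access
 * method's ambuild callback (see bthandler() in nbtree.c), while parallel
 * workers start in _bt_parallel_build_main(), which the leader registers by
 * name when it sets up its parallel context in nbtsort.c.  The handler shown
 * here is abridged and hypothetical; makeNode() is from nodes/nodes.h.
 */
static IndexAmRoutine *
sketch_btree_handler(void)
{
    IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

    amroutine->ambuild = btbuild;           /* serial build / parallel leader */
    amroutine->ambuildempty = btbuildempty; /* init fork for unlogged indexes */
    /* ... the remaining callbacks are elided; see bthandler() in nbtree.c ... */

    return amroutine;
}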
1367
1368#endif /* NBTREE_H */