/*-------------------------------------------------------------------------
 *
 * spgutils.c
 *	  various support functions for SP-GiST
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/spgist/spgutils.c
 *
 *-------------------------------------------------------------------------
 */
#include "utils/fmgrprotos.h"
/*
 * SP-GiST handler function: return IndexAmRoutine with access method
 * parameters and callbacks.
 */
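/*
 * [Illustrative sketch, not verbatim from this file] spghandler() builds and
 * returns an IndexAmRoutine node wired up to the SP-GiST entry points.  Only
 * a few representative parameter fields are shown here; the exact field set
 * varies across PostgreSQL versions.
 */
Datum
spghandler(PG_FUNCTION_ARGS)
{
	IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

	amroutine->amstrategies = 0;
	amroutine->amsupport = SPGISTNProc;
	amroutine->amoptsprocnum = SPGIST_OPTIONS_PROC;
	amroutine->amcanorderbyop = true;

	amroutine->ambuild = spgbuild;
	amroutine->ambuildempty = spgbuildempty;
	amroutine->aminsert = spginsert;
	amroutine->ambulkdelete = spgbulkdelete;
	amroutine->amvacuumcleanup = spgvacuumcleanup;
	amroutine->amcanreturn = spgcanreturn;
	amroutine->amcostestimate = spgcostestimate;
	amroutine->amoptions = spgoptions;
	amroutine->amproperty = spgproperty;
	amroutine->amvalidate = spgvalidate;
	amroutine->amadjustmembers = spgadjustmembers;
	amroutine->ambeginscan = spgbeginscan;
	amroutine->amrescan = spgrescan;
	amroutine->amgettuple = spggettuple;
	amroutine->amgetbitmap = spggetbitmap;
	amroutine->amendscan = spgendscan;

	PG_RETURN_POINTER(amroutine);
}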
/*
 * Determine the nominal input data type for an index column
 *
 * We define the "nominal" input type as the associated opclass's opcintype,
 * or if that is a polymorphic type, the base type of the heap column or
 * expression that is the index's input.  The reason for preferring the
 * opcintype is that non-polymorphic opclasses probably don't want to hear
 * about binary-compatible input types.  For instance, if a text opclass
 * is being used with a varchar heap column, we want to report "text" not
 * "varchar".  Likewise, opclasses don't want to hear about domain types,
 * so if we do consult the actual input type, we make sure to flatten domains.
 *
 * At some point maybe this should go somewhere else, but it's not clear
 * if any other index AMs have a use for it.
 */
static Oid
GetIndexInputType(Relation index, AttrNumber indexcol)
{
	Oid			opcintype;
	AttrNumber	heapcol;
	List	   *indexprs;
	ListCell   *indexpr_item;

	Assert(indexcol > 0 && indexcol <= index->rd_index->indnkeyatts);

	opcintype = index->rd_opcintype[indexcol - 1];
	if (!IsPolymorphicType(opcintype))
		return opcintype;

	heapcol = index->rd_index->indkey.values[indexcol - 1];
	if (heapcol != 0)			/* Simple index column? */
		return getBaseType(get_atttype(index->rd_index->indrelid, heapcol));
	/*
	 * If the index expressions are already cached, skip calling
	 * RelationGetIndexExpressions, as it will make a copy which is overkill.
	 * We're not going to modify the trees, and we're not going to do anything
	 * that would invalidate the relcache entry before we're done.
	 */
	if (index->rd_indexprs)
		indexprs = index->rd_indexprs;
	else
		indexprs = RelationGetIndexExpressions(index);
	indexpr_item = list_head(indexprs);
	for (int i = 1; i <= index->rd_index->indnkeyatts; i++)
	{
		if (index->rd_index->indkey.values[i - 1] == 0)
		{
			/* expression column */
			if (indexpr_item == NULL)
				elog(ERROR, "wrong number of index expressions");
			if (i == indexcol)
				return getBaseType(exprType((Node *) lfirst(indexpr_item)));
			indexpr_item = lnext(indexprs, indexpr_item);
		}
	}

	elog(ERROR, "wrong number of index expressions");
	return InvalidOid;			/* keep compiler quiet */
}
/* Fill in a SpGistTypeDesc struct with info about the specified data type */
	desc->attlen = typtup->typlen;
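/*
 * [Illustrative sketch, not verbatim] The assignment above sits inside
 * fillTypeDesc(); for context, the whole function presumably looks roughly
 * like this, using the usual syscache lookup idiom.  The SpGistTypeDesc
 * field names are the ones used elsewhere in this file and are assumed here.
 */
static void
fillTypeDesc(SpGistTypeDesc *desc, Oid type)
{
	HeapTuple	tp;
	Form_pg_type typtup;

	desc->type = type;
	tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type));
	if (!HeapTupleIsValid(tp))
		elog(ERROR, "cache lookup failed for type %u", type);
	typtup = (Form_pg_type) GETSTRUCT(tp);
	desc->attlen = typtup->typlen;
	desc->attbyval = typtup->typbyval;
	desc->attalign = typtup->typalign;
	desc->attstorage = typtup->typstorage;
	ReleaseSysCache(tp);
}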
/*
 * Fetch local cache of AM-specific info about the index, initializing it
 * if necessary
 */
SpGistCache *
spgGetCache(Relation index)
{
	SpGistCache *cache;

	if (index->rd_amcache == NULL)
	{
		/* SPGiST must have one key column and can also have INCLUDE columns */

		/*
		 * Get the actual (well, nominal) data type of the key column.  We
		 * pass this to the opclass config function so that polymorphic
		 * opclasses are possible.
		 */
		/* Call the config function to get config info for the opclass */

		/*
		 * If leafType isn't specified, use the declared index column type,
		 * which index.c will have derived from the opclass's opcintype.
		 * (Although we now make spgvalidate.c warn if these aren't the same,
		 * old user-defined opclasses may not set the STORAGE parameter
		 * correctly, so believe leafType if it's given.)
		 */
			/*
			 * If index column type is binary-coercible to atttype (for
			 * example, it's a domain over atttype), treat it as plain atttype
			 * to avoid thinking we need to compress.
			 */
		/* Get the information we need about each relevant datatype */

			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("compress method must be defined when leaf type is different from input type")));

		/* Save lookups in this common case */
		/*
		 * Finally, if it's a real index (not a partitioned one), get the
		 * lastUsedPages data from the metapage
		 */
		if (index->rd_rel->relkind != RELKIND_PARTITIONED_INDEX)
		{
			if (metadata->magicNumber != SPGIST_MAGIC_NUMBER)
				elog(ERROR, "index \"%s\" is not an SP-GiST index",
					 RelationGetRelationName(index));
		}

		index->rd_amcache = cache;
	}
	else
	{
		/* assume it's up to date */
		cache = (SpGistCache *) index->rd_amcache;
	}

	return cache;
}
/*
 * Compute a tuple descriptor for leaf tuples or index-only-scan result tuples.
 *
 * We can use the relcache's tupdesc as-is in many cases, and it's always
 * OK so far as any INCLUDE columns are concerned.  However, the entry for
 * the key column has to match leafType in the first case or attType in the
 * second case.  While the relcache's tupdesc *should* show leafType, this
 * might not hold for legacy user-defined opclasses, since before v14 they
 * were not allowed to declare their true storage type in CREATE OPCLASS.
 * Also, attType can be different from what is in the relcache.
 *
 * This function gives back either a pointer to the relcache's tupdesc
 * if that is suitable, or a palloc'd copy that's been adjusted to match
 * the specified key column type.  We can avoid doing any catalog lookups
 * here by insisting that the caller pass an SpGistTypeDesc not just an OID.
 */
	/* It's sufficient to update the type-dependent fields of the column */
	att->atttypid = keyType->type;
	att->atttypmod = -1;
	att->attlen = keyType->attlen;
	att->attbyval = keyType->attbyval;
	att->attalign = keyType->attalign;
	att->attstorage = keyType->attstorage;

	/* We shouldn't need to bother with making these valid: */
	att->attcompression = InvalidCompressionMethod;
	att->attcollation = InvalidOid;

	/* In case we changed typlen, we'd better reset following offsets */
/* Initialize SpGistState for working with the given index */
void
initSpGistState(SpGistState *state, Relation index)
{
	SpGistCache *cache;

	/* Get cached static information about index */
	cache = spgGetCache(index);

	/* Ensure we have a valid descriptor for leaf tuples */
	state->leafTupDesc = getSpGistTupleDesc(index, &state->attLeafType);

	/* Make workspace for constructing dead tuples */
	state->deadTupleStorage = palloc0(SGDTSIZE);
	/*
	 * Set horizon XID to use in redirection tuples.  Use our own XID if we
	 * have one, else use InvalidTransactionId.  The latter case can happen in
	 * VACUUM or REINDEX CONCURRENTLY, and in neither case would it be okay to
	 * force an XID to be assigned.  VACUUM won't create any redirection
	 * tuples anyway, but REINDEX CONCURRENTLY can.  Fortunately, REINDEX
	 * CONCURRENTLY doesn't mark the index valid until the end, so there could
	 * never be any concurrent scans "in flight" to a redirection tuple it has
	 * inserted.  And it locks out VACUUM until the end, too.  So it's okay
	 * for VACUUM to immediately expire a redirection tuple that contains an
	 * InvalidTransactionId.
	 */
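	/*
	 * [Illustrative sketch, not verbatim] The assignment the comment above
	 * describes is presumably a single call like the following; the
	 * SpGistState field name is an assumption and differs across versions.
	 */
	state->myXid = GetTopTransactionIdIfAny();	/* field name assumed */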
	/* Assume we're not in an index build (spgbuild will override) */
	state->isBuild = false;
}
/*
 * Allocate a new page (either by recycling, or by extending the index file).
 *
 * The returned buffer is already pinned and exclusive-locked.
 * Caller is responsible for initializing the page by calling SpGistInitBuffer.
 */
Buffer
SpGistNewBuffer(Relation index)
{
	/* First, try to get a page from FSM */
	for (;;)
	{
		blkno = GetFreeIndexPage(index);
		if (blkno == InvalidBlockNumber)
			break;				/* nothing known to FSM */

		/*
		 * The fixed pages shouldn't ever be listed in FSM, but just in case
		 * one is, ignore it.
		 */
		if (SpGistBlockIsFixed(blkno))
			continue;

		/*
		 * We have to guard against the possibility that someone else already
		 * recycled this page; the buffer may be locked if so.
		 */
		if (ConditionalLockBuffer(buffer))
		{
			if (PageIsNew(page))
				return buffer;	/* OK to use, if never initialized */

			if (SpGistPageIsDeleted(page) || PageIsEmpty(page))
				return buffer;	/* OK to use */
		}

		/* Can't use it, so release buffer and try again */
		ReleaseBuffer(buffer);
	}
/*
 * Update index metapage's lastUsedPages info from local cache, if possible
 *
 * Updating the metapage isn't critical to correct index operation, so we
 * 1. use ConditionalLockBuffer to improve concurrency
 * 2. don't WAL-log metabuffer changes to decrease WAL traffic
 */
void
SpGistUpdateMetaPage(Relation index)
{
			/*
			 * Set pd_lower just past the end of the metadata.  This is
			 * essential, because without doing so, metadata will be lost if
			 * xlog.c compresses the page.  (We must do this here because
			 * pre-v11 versions of PG did not set the metapage's pd_lower
			 * correctly, so a pg_upgraded index might contain the wrong
			 * value.)
			 */
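			/*
			 * [Illustrative sketch, not verbatim] Setting pd_lower and
			 * flushing the buffer would look roughly like this; metapage,
			 * metadata, and metabuffer are assumed local variable names for
			 * the metapage, its SpGistMetaPageData, and its buffer.
			 */
			((PageHeader) metapage)->pd_lower =
				((char *) metadata + sizeof(SpGistMetaPageData)) - (char *) metapage;

			MarkBufferDirty(metabuffer);
			UnlockReleaseBuffer(metabuffer);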
/* Macro to select proper element of lastUsedPages cache depending on flags */
/* Masking flags with SPGIST_CACHED_PAGES is just for paranoia's sake */
#define GET_LUP(c, f)  (&(c)->lastUsedPages.cachedPage[((unsigned int) (f)) % SPGIST_CACHED_PAGES])
/*
 * Allocate and initialize a new buffer of the type and parity specified by
 * flags.  The returned buffer is already pinned and exclusive-locked.
 *
 * When requesting an inner page, if we get one with the wrong parity,
 * we just release the buffer and try again.  We will get a different page
 * because GetFreeIndexPage will have marked the page used in FSM.  The page
 * is entered in our local lastUsedPages cache, so there's some hope of
 * making use of it later in this session, but otherwise we rely on VACUUM
 * to eventually re-enter the page in FSM, making it available for recycling.
 * Note that such a page does not get marked dirty here, so unless it's used
 * fairly soon, the buffer will just get discarded and the page will remain
 * as it was on disk.
 *
 * When we return a buffer to the caller, the page is *not* entered into
 * the lastUsedPages cache; we expect the caller will do so after it's taken
 * whatever space it will use.  This is because after the caller has used up
 * some space, the page might have less space than whatever was cached already
 * so we'd rather not trash the old cache entry.
 */
static Buffer
allocNewBuffer(Relation index, int flags)
{
		/* Leaf pages have no parity concerns, so just use it */

		/* Page has right parity, use it */

		/* Page has wrong parity, record it in cache and try again */
/*
 * Get a buffer of the type and parity specified by flags, having at least
 * as much free space as indicated by needSpace.  We use the lastUsedPages
 * cache to assign the same buffer previously requested when possible.
 * The returned buffer is already pinned and exclusive-locked.
 *
 * *isNew is set true if the page was initialized here, false if it was
 * already valid.
 */
Buffer
SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)
{
	/* Bail out if even an empty page wouldn't meet the demand */
	if (needSpace > SPGIST_PAGE_CAPACITY)
		elog(ERROR, "desired SPGiST tuple size is too big");
	/*
	 * If possible, increase the space request to include relation's
	 * fillfactor.  This ensures that when we add unrelated tuples to a page,
	 * we try to keep 100-fillfactor% available for adding tuples that are
	 * related to the ones already on it.  But fillfactor mustn't cause an
	 * error for requests that would otherwise be legal.
	 */
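	/*
	 * [Illustrative sketch, not verbatim] The adjustment described above is
	 * presumably along these lines, clamped to the page capacity so a large
	 * fillfactor cannot make a legal request fail.
	 */
	needSpace += SpGistGetTargetPageFreeSpace(index);
	needSpace = Min(needSpace, SPGIST_PAGE_CAPACITY);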
	/* Get the cache entry for this flags setting */
	lup = GET_LUP(cache, flags);

	/* If we have nothing cached, just turn it over to allocNewBuffer */

	/* fixed pages should never be in cache */
	Assert(!SpGistBlockIsFixed(lup->blkno));

	/* If cached freeSpace isn't enough, don't bother looking at the page */

		/*
		 * buffer is locked by another process, so return a new buffer
		 */

		/* OK to initialize the page */

		/*
		 * Check that page is of right type and has enough space.  We must
		 * recheck this since our cache isn't necessarily up to date.
		 */
		if (freeSpace >= needSpace)
		{
			/* Success, update freespace info and return the buffer */
			lup->freeSpace = freeSpace - needSpace;
			*isNew = false;
			return buffer;
		}

		/*
		 * fallback to allocation of new buffer
		 */
		UnlockReleaseBuffer(buffer);

	/* No success with cache, so return a new buffer */
	*isNew = true;
	return allocNewBuffer(index, flags);
}
/*
 * Update lastUsedPages cache when done modifying a page.
 *
 * We update the appropriate cache entry if it already contained this page
 * (its freeSpace is likely obsolete), or if this page has more space than
 * whatever we had cached.
 */
void
SpGistSetLastUsedPage(Relation index, Buffer buffer)
{
	/* Never enter fixed pages (root pages) in cache, though */
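	/*
	 * [Illustrative sketch, not verbatim] The body would look roughly like
	 * this, assuming the GBUF_LEAF/GBUF_NULLS flag bits from
	 * spgist_private.h and local variables cache, lup, page, blkno, flags,
	 * and freeSpace.
	 */
	if (SpGistBlockIsFixed(blkno))
		return;

	if (SpGistPageIsLeaf(page))
		flags = GBUF_LEAF;
	else
		flags = GBUF_INNER_PARITY(blkno);
	if (SpGistPageStoresNulls(page))
		flags |= GBUF_NULLS;

	lup = GET_LUP(cache, flags);

	freeSpace = PageGetExactFreeSpace(page);
	if (lup->blkno == InvalidBlockNumber || lup->blkno == blkno ||
		lup->freeSpace < freeSpace)
	{
		lup->blkno = blkno;
		lup->freeSpace = freeSpace;
	}
}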
/*
 * Initialize an SPGiST page to empty, with specified flags
 */
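/*
 * [Illustrative sketch, not verbatim] SpGistInitPage() presumably just does
 * a PageInit with the SP-GiST special space and stamps the opaque area;
 * SpGistPageOpaqueData and SPGIST_PAGE_ID are assumed from spgist_private.h.
 */
void
SpGistInitPage(Page page, uint16 f)
{
	SpGistPageOpaque opaque;

	PageInit(page, BLCKSZ, MAXALIGN(sizeof(SpGistPageOpaqueData)));
	opaque = SpGistPageGetOpaque(page);
	opaque->flags = f;
	opaque->spgist_page_id = SPGIST_PAGE_ID;
}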
/*
 * Initialize a buffer's page to empty, with specified flags
 */
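/*
 * [Illustrative sketch, not verbatim] This is presumably a thin wrapper over
 * SpGistInitPage for a pinned buffer.
 */
void
SpGistInitBuffer(Buffer b, uint16 f)
{
	Assert(BufferGetPageSize(b) == BLCKSZ);
	SpGistInitPage(BufferGetPage(b), f);
}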
/*
 * Initialize metadata page
 */
void
SpGistInitMetapage(Page page)
{
	/* initialize last-used-page cache to empty */

	/*
	 * Set pd_lower just past the end of the metadata.  This is essential,
	 * because without doing so, metadata will be lost if xlog.c compresses
	 * the page.
	 */
/*
 * reloptions processing for SPGiST
 */
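/*
 * [Illustrative sketch, not verbatim] SP-GiST reloptions are parsed with the
 * generic build_reloptions() machinery; fillfactor is the only option.  The
 * SpGistOptions struct and RELOPT_KIND_SPGIST are assumed from
 * spgist_private.h and reloptions.h.
 */
bytea *
spgoptions(Datum reloptions, bool validate)
{
	static const relopt_parse_elt tab[] = {
		{"fillfactor", RELOPT_TYPE_INT, offsetof(SpGistOptions, fillfactor)},
	};

	return (bytea *) build_reloptions(reloptions, validate,
									  RELOPT_KIND_SPGIST,
									  sizeof(SpGistOptions),
									  tab, lengthof(tab));
}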
/*
 * Get the space needed to store a non-null datum of the indicated type
 * in an inner tuple (that is, as a prefix or node label).
 * Note the result is already rounded up to a MAXALIGN boundary.
 * Here we follow the convention that pass-by-val types are just stored
 * in their Datum representation (compare memcpyInnerDatum).
 */
unsigned int
SpGistGetInnerTypeSize(SpGistTypeDesc *att, Datum datum)
{
	unsigned int size;
	if (att->attbyval)
		size = sizeof(Datum);
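	/*
	 * [Illustrative sketch, not verbatim] The remaining branches presumably
	 * handle fixed-length and varlena pass-by-reference values, roughly:
	 */
	else if (att->attlen > 0)
		size = att->attlen;
	else
		size = VARSIZE_ANY(datum);

	return MAXALIGN(size);
}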
/*
 * Copy the given non-null datum to *target, in the inner-tuple case
 */
static void
memcpyInnerDatum(void *target, SpGistTypeDesc *att, Datum datum)
{
	if (att->attbyval)
	{
		memcpy(target, &datum, sizeof(Datum));
	}
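	/*
	 * [Illustrative sketch, not verbatim] For pass-by-reference types the
	 * original presumably copies the referenced bytes instead, roughly:
	 */
	else
	{
		unsigned int size;

		size = (att->attlen > 0) ? att->attlen : VARSIZE_ANY(datum);
		memcpy(target, DatumGetPointer(datum), size);
	}
}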
/*
 * Compute space required for a leaf tuple holding the given data.
 *
 * This must match the size-calculation portion of spgFormLeafTuple.
 */
Size
SpGistGetLeafTupleSize(TupleDesc tupleDescriptor,
					   const Datum *datums, const bool *isnulls)
{
	Size		size;
	Size		data_size;
	bool		needs_null_mask = false;
	int			natts = tupleDescriptor->natts;
	/*
	 * Decide whether we need a nulls bitmask.
	 *
	 * If there is only a key attribute (natts == 1), never use a bitmask, for
	 * compatibility with the pre-v14 layout of leaf tuples.  Otherwise, we
	 * need one if any attribute is null.
	 */
	if (natts > 1)
	{
		for (int i = 0; i < natts; i++)
		{
			if (isnulls[i])
			{
				needs_null_mask = true;
				break;
			}
		}
	}
	/*
	 * Calculate size of the data part; same as for heap tuples.
	 */

	/*
	 * Compute total size.
	 */

	/*
	 * Ensure that we can replace the tuple with a dead tuple later.  This
	 * test is unnecessary when there are any non-null attributes, but be
	 * safe.
	 */
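	/*
	 * [Illustrative sketch, not verbatim] The computations described by the
	 * three comments above are roughly as follows; the SGLTHDRSZ and SGDTSIZE
	 * macros come from spgist_private.h.
	 */
	data_size = heap_compute_data_size(tupleDescriptor, datums, isnulls);

	size = SGLTHDRSZ(needs_null_mask) + data_size;
	size = MAXALIGN(size);

	if (size < SGDTSIZE)
		size = SGDTSIZE;

	return size;
}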
/*
 * Construct a leaf tuple containing the given heap TID and datum values
 */
SpGistLeafTuple
spgFormLeafTuple(SpGistState *state, ItemPointer heapPtr,
				 const Datum *datums, const bool *isnulls)
{
	SpGistLeafTuple tup;
	TupleDesc	tupleDescriptor = state->leafTupDesc;
	Size		size;
	Size		hoff;
	Size		data_size;
	bool		needs_null_mask = false;
	int			natts = tupleDescriptor->natts;
	char	   *tp;				/* ptr to tuple data */
	uint16		tupmask = 0;	/* unused heap_fill_tuple output */
	/*
	 * Decide whether we need a nulls bitmask.
	 *
	 * If there is only a key attribute (natts == 1), never use a bitmask, for
	 * compatibility with the pre-v14 layout of leaf tuples.  Otherwise, we
	 * need one if any attribute is null.
	 */
	if (natts > 1)
	{
		for (int i = 0; i < natts; i++)
		{
			if (isnulls[i])
			{
				needs_null_mask = true;
				break;
			}
		}
	}
	/*
	 * Calculate size of the data part; same as for heap tuples.
	 */
	data_size = heap_compute_data_size(tupleDescriptor, datums, isnulls);

	/*
	 * Compute total size.
	 */
	hoff = SGLTHDRSZ(needs_null_mask);
	size = hoff + data_size;
	size = MAXALIGN(size);

	/*
	 * Ensure that we can replace the tuple with a dead tuple later.  This
	 * test is unnecessary when there are any non-null attributes, but be
	 * safe.
	 */
	if (size < SGDTSIZE)
		size = SGDTSIZE;
	/* OK, form the tuple */
	tup = (SpGistLeafTuple) palloc0(size);

	tup->size = size;
	SGLT_SET_NEXTOFFSET(tup, InvalidOffsetNumber);
	tup->heapPtr = *heapPtr;

	tp = (char *) tup + hoff;

	if (needs_null_mask)
	{
		bits8	   *bp;			/* ptr to null bitmap in tuple */

		/* Set nullmask presence bit in SpGistLeafTuple header */
		SGLT_SET_HASNULLMASK(tup, true);

		/* Fill the data area and null mask */
		bp = (bits8 *) ((char *) tup + sizeof(SpGistLeafTupleData));
		heap_fill_tuple(tupleDescriptor, datums, isnulls, tp, data_size,
						&tupmask, bp);
	}
	else if (natts > 1 || !isnulls[spgKeyColumn])
	{
		/* Fill data area only */
		heap_fill_tuple(tupleDescriptor, datums, isnulls, tp, data_size,
						&tupmask, (bits8 *) NULL);
	}
	/* otherwise we have no data, nor a bitmap, to fill */

	return tup;
}
/*
 * Construct a node (to go into an inner tuple) containing the given label
 *
 * Note that the node's downlink is just set invalid here.  Caller will fill
 * it in later.
 */
SpGistNodeTuple
spgFormNodeTuple(SpGistState *state, Datum label, bool isnull)
{
	SpGistNodeTuple tup;
	unsigned int size;
	unsigned short infomask = 0;

	/* compute space needed (note result is already maxaligned) */
	size = SGNTHDRSZ;
	if (!isnull)
		size += SpGistGetInnerTypeSize(&state->attLabelType, label);

	/*
	 * Here we make sure that the size will fit in the field reserved for it
	 * in t_info.
	 */
	if ((size & INDEX_SIZE_MASK) != size)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("index row requires %zu bytes, maximum size is %zu",
						(Size) size, (Size) INDEX_SIZE_MASK)));

	tup = (SpGistNodeTuple) palloc0(size);

	if (isnull)
		infomask |= INDEX_NULL_MASK;
	/* we don't bother setting the INDEX_VAR_MASK bit */
	infomask |= size;
	tup->t_info = infomask;

	/* The TID field will be filled in later */
	ItemPointerSetInvalid(&tup->t_tid);

	return tup;
}
/*
 * Construct an inner tuple containing the given prefix and node array
 */
SpGistInnerTuple
spgFormInnerTuple(SpGistState *state, bool hasPrefix, Datum prefix,
				  int nNodes, SpGistNodeTuple *nodes)
{
	SpGistInnerTuple tup;
	unsigned int size;
	unsigned int prefixSize;
	int			i;
	char	   *ptr;

	/* Compute size needed */
	if (hasPrefix)
		prefixSize = SpGistGetInnerTypeSize(&state->attPrefixType, prefix);
	else
		prefixSize = 0;

	size = SGITHDRSZ + prefixSize;

	/* Note: we rely on node tuple sizes to be maxaligned already */
	for (i = 0; i < nNodes; i++)
		size += IndexTupleSize(nodes[i]);

	/*
	 * Ensure that we can replace the tuple with a dead tuple later.  This
	 * test is unnecessary given current tuple layouts, but let's be safe.
	 */
	if (size < SGDTSIZE)
		size = SGDTSIZE;
	/*
	 * Inner tuple should be small enough to fit on a page
	 */
	if (size > SPGIST_PAGE_CAPACITY - sizeof(ItemIdData))
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("SP-GiST inner tuple size %zu exceeds maximum %zu",
						(Size) size,
						SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)),
				 errhint("Values larger than a buffer page cannot be indexed.")));

	/*
	 * Check for overflow of header fields --- probably can't fail if the
	 * above succeeded, but let's be paranoid
	 */
	if (size > SGITMAXSIZE ||
		prefixSize > SGITMAXPREFIXSIZE ||
		nNodes > SGITMAXNNODES)
		elog(ERROR, "SPGiST inner tuple header field is too small");
	/* OK, form the tuple */
	tup = (SpGistInnerTuple) palloc0(size);

	ptr = (char *) SGITNODEPTR(tup);

	for (i = 0; i < nNodes; i++)
	{
		SpGistNodeTuple node = nodes[i];

		memcpy(ptr, node, IndexTupleSize(node));
		ptr += IndexTupleSize(node);
	}

	return tup;
}
/*
 * Construct a "dead" tuple to replace a tuple being deleted.
 *
 * The state can be SPGIST_REDIRECT, SPGIST_DEAD, or SPGIST_PLACEHOLDER.
 * For a REDIRECT tuple, a pointer (blkno+offset) must be supplied, and
 * the xid field is filled in automatically.
 *
 * This is called in critical sections, so we don't use palloc; the tuple
 * is built in preallocated storage.  It should be copied before another
 * call with different parameters can occur.
 */
SpGistDeadTuple
spgFormDeadTuple(SpGistState *state, int tupstate,
				 BlockNumber blkno, OffsetNumber offnum)
{
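	/*
	 * [Illustrative sketch, not verbatim] The body presumably reuses the
	 * preallocated state->deadTupleStorage and fills the dead-tuple fields;
	 * the name of the XID field in SpGistState is an assumption here.
	 */
	SpGistDeadTuple tuple = (SpGistDeadTuple) state->deadTupleStorage;

	tuple->tupstate = tupstate;
	tuple->size = SGDTSIZE;
	SGLT_SET_NEXTOFFSET(tuple, InvalidOffsetNumber);

	if (tupstate == SPGIST_REDIRECT)
	{
		ItemPointerSet(&tuple->pointer, blkno, offnum);
		tuple->xid = state->myXid;	/* field name assumed */
	}
	else
	{
		ItemPointerSetInvalid(&tuple->pointer);
		tuple->xid = InvalidTransactionId;
	}

	return tuple;
}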
/*
 * Convert an SPGiST leaf tuple into Datum/isnull arrays.
 *
 * The caller must allocate sufficient storage for the output arrays.
 * (INDEX_MAX_KEYS entries should be enough.)
 */
void
spgDeformLeafTuple(SpGistLeafTuple tup, TupleDesc tupleDescriptor,
				   Datum *datums, bool *isnulls, bool keyColumnIsNull)
{
	bool		hasNullsMask = SGLT_GET_HASNULLMASK(tup);
	char	   *tp;				/* ptr to tuple data */
	bits8	   *bp;				/* ptr to null bitmap in tuple */

	if (keyColumnIsNull && tupleDescriptor->natts == 1)
	{
		/*
		 * Trivial case: there is only the key attribute and we're in a nulls
		 * tree.  The hasNullsMask bit in the tuple header should not be set
		 * (and thus we can't use index_deform_tuple_internal), but
		 * nonetheless the result is NULL.
		 *
		 * Note: currently this is dead code, because noplace calls this when
		 * there is only the key attribute.  But we should cover the case.
		 */
		Assert(!hasNullsMask);

		datums[spgKeyColumn] = (Datum) 0;
		isnulls[spgKeyColumn] = true;
		return;
	}

	tp = (char *) tup + SGLTHDRSZ(hasNullsMask);
	bp = (bits8 *) ((char *) tup + sizeof(SpGistLeafTupleData));

	index_deform_tuple_internal(tupleDescriptor, datums, isnulls,
								tp, bp, hasNullsMask);

	/*
	 * Key column isnull value from the tuple should be consistent with
	 * keyColumnIsNull flag from the caller.
	 */
	Assert(keyColumnIsNull == isnulls[spgKeyColumn]);
}
/*
 * Extract the label datums of the nodes within innerTuple
 *
 * Returns NULL if label datums are NULLs
 */
Datum *
spgExtractNodeLabels(SpGistState *state, SpGistInnerTuple innerTuple)
{
	/* Either all the labels must be NULL, or none. */
		if (!IndexTupleHasNulls(node))
			elog(ERROR, "some but not all node labels are null in SPGiST inner tuple");

		/* They're all null, so just return NULL */
		return NULL;

		if (IndexTupleHasNulls(node))
			elog(ERROR, "some but not all node labels are null in SPGiST inner tuple");
/*
 * Add a new item to the page, replacing a PLACEHOLDER item if possible.
 * Return the location it's inserted at, or InvalidOffsetNumber on failure.
 *
 * If startOffset isn't NULL, we start searching for placeholders at
 * *startOffset, and update that to the next place to search.  This is just
 * an optimization for repeated insertions.
 *
 * If errorOK is false, we throw error when there's not enough room,
 * rather than returning InvalidOffsetNumber.
 */
OffsetNumber
SpGistPageAddNewItem(SpGistState *state, Page page, Item item, Size size,
					 OffsetNumber *startOffset, bool errorOK)
{
		/* Try to replace a placeholder */
		for (; i <= maxoff; i++)
		{
			SpGistDeadTuple it = (SpGistDeadTuple) PageGetItem(page,
															   PageGetItemId(page, i));

			if (it->tupstate == SPGIST_PLACEHOLDER)
			{
				offnum = i;
				break;
			}
		}

		/* Done if we found a placeholder */

		/* Hint was no good, re-search from beginning */
		i = FirstOffsetNumber;

		/* Hmm, no placeholder found? */
		if (offnum != InvalidOffsetNumber)
		{
			/* Replace the placeholder tuple */
			PageIndexTupleDelete(page, offnum);

			offnum = PageAddItem(page, item, size, offnum, false, false);

			/*
			 * We should not have failed given the size check at the top of
			 * the function, but test anyway.  If we did fail, we must PANIC
			 * because we've already deleted the placeholder tuple, and
			 * there's no other way to keep the damage from getting to disk.
			 */
			if (offnum != InvalidOffsetNumber)
			{
				if (startOffset)
					*startOffset = offnum + 1;
			}
			else
				elog(PANIC, "failed to add item of size %zu to SPGiST index page",
					 size);

			return offnum;
		}
	/* No luck in replacing a placeholder, so just add it to the page */
	offnum = PageAddItem(page, item, size,
						 InvalidOffsetNumber, false, false);

	if (offnum == InvalidOffsetNumber && !errorOK)
		elog(ERROR, "failed to add item of size %zu to SPGiST index page",
			 size);

	return offnum;
}
/*
 * spgproperty() -- Check boolean properties of indexes.
 *
 * This is optional for most AMs, but is required for SP-GiST because the core
 * property code doesn't support AMPROP_DISTANCE_ORDERABLE.
 */
bool
spgproperty(Oid index_oid, int attno,
			IndexAMProperty prop, const char *propname,
			bool *res, bool *isnull)
{
	/* Only answer column-level inquiries */
	if (attno == 0)
		return false;

	/*
	 * Currently, SP-GiST distance-ordered scans require that there be a
	 * distance operator in the opclass with the default types.  So we assume
	 * that if such an operator exists, then there's a reason for it.
	 */

	/* First we need to know the column's opclass. */
	opclass = get_index_column_opclass(index_oid, attno);

	/* Now look up the opclass family and input datatype. */
	if (!get_opclass_opfamily_and_input_type(opclass, &opfamily, &opcintype))
	{
		*isnull = true;
		return true;
	}

	/* And now we can check whether the operator is provided. */
	catlist = SearchSysCacheList1(AMOPSTRATEGY, ObjectIdGetDatum(opfamily));
		if (amopform->amoppurpose == AMOP_ORDER &&
			(amopform->amoplefttype == opcintype ||
			 amopform->amoprighttype == opcintype) &&
			opfamily_can_sort_type(amopform->amopsortfamily,
								   get_op_rettype(amopform->amopopr)))
		{
			*res = true;
			break;
		}
	}

	ReleaseSysCacheList(catlist);

	*isnull = false;

	return true;
}