1/*-------------------------------------------------------------------------
4 * This file contains index tuple accessor and mutator routines,
5 * as well as various tuple utilities.
7 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
12 * src/backend/access/common/indextuple.c
14 *-------------------------------------------------------------------------
26 * This enables de-toasting of index entries. Needed until VACUUM is
27 * smart enough to rebuild indexes from scratch.
29 #define TOAST_INDEX_HACK
31/* ----------------------------------------------------------------
32 * index_ tuple interface routines
33 * ----------------------------------------------------------------
/*
 * NOTE(review): fragmentary extraction of index_form_tuple() /
 * index_form_tuple_context().  Most statement lines are missing and the
 * original file's line numbers are fused into the remaining text, so this
 * region is not compilable as-is.  Code lines are preserved byte-for-byte;
 * only comments have been added.  Restore from the canonical upstream file
 * before building.
 */
39 * As index_form_tuple_context, but allocates the returned tuple in the
40 * CurrentMemoryContext.
53 * index_form_tuple_context
55 * This shouldn't leak any memory; otherwise, callers such as
56 * tuplesort_putindextuplevalues() will be very unhappy.
58 * This shouldn't perform external table access provided caller
59 * does not pass values that are stored EXTERNAL.
61 * Allocates returned tuple in provided 'context'.
70 char *tp;
/* tuple pointer */
76 unsigned short infomask = 0;
79 int numberOfAttributes = tupleDescriptor->
natts;
/*
 * NOTE(review): the visible ereport fragment below rejects tuples whose
 * column count exceeds a limit (ERRCODE_TOO_MANY_COLUMNS); the surrounding
 * if-condition is missing from this extraction.
 */
81#ifdef TOAST_INDEX_HACK
88 (
errcode(ERRCODE_TOO_MANY_COLUMNS),
89 errmsg(
"number of index columns (%d) exceeds limit (%d)",
/*
 * NOTE(review): under TOAST_INDEX_HACK this loop appears to detoast any
 * externally-stored varlena values (untoasted_values/untoasted_free track
 * per-attribute replacements) and then, for compressible storage classes
 * (EXTENDED/MAIN), attempts in-line compression.  The detoast/compress
 * call sites themselves are missing lines -- confirm against upstream.
 */
92#ifdef TOAST_INDEX_HACK
93 for (
i = 0;
i < numberOfAttributes;
i++)
98 untoasted_free[
i] =
false;
100 /* Do nothing if value is NULL or not of varlena type */
101 if (isnull[
i] || att->attlen != -1)
105 * If value is stored EXTERNAL, must fetch it so we are not depending
106 * on outside storage. This should be improved someday.
110 untoasted_values[
i] =
113 untoasted_free[
i] =
true;
117 * If value is above size target, and is of a compressible datatype,
118 * try to compress it in-line.
122 (att->attstorage == TYPSTORAGE_EXTENDED ||
123 att->attstorage == TYPSTORAGE_MAIN))
128 att->attcompression);
132 /* successful compression */
133 if (untoasted_free[
i])
135 untoasted_values[
i] = cvalue;
136 untoasted_free[
i] =
true;
142 for (
i = 0;
i < numberOfAttributes;
i++)
155#ifdef TOAST_INDEX_HACK
157 untoasted_values, isnull);
/* NOTE(review): total size = aligned header offset + data size. */
162 size = hoff + data_size;
163 size =
MAXALIGN(size);
/* be conservative */
/* NOTE(review): cleanup loop -- frees any detoasted copies made above. */
180#ifdef TOAST_INDEX_HACK
181 for (
i = 0;
i < numberOfAttributes;
i++)
183 if (untoasted_free[
i])
189 * We do this because heap_fill_tuple wants to initialize a "tupmask"
190 * which is used for HeapTuples, but we want an indextuple infomask. The
191 * only relevant info is the "has variable attributes" field. We have
192 * already set the hasnull bit above.
197 /* Also assert we got rid of external attributes */
198#ifdef TOAST_INDEX_HACK
203 * Here we make sure that the size will fit in the field reserved for it
208 (
errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
209 errmsg(
"index row requires %zu bytes, maximum size is %zu",
215 * initialize metadata
/*
 * NOTE(review): fragmentary extraction of nocache_index_getattr().  Large
 * runs of statements are missing and original line numbers are fused into
 * the text; not compilable as-is.  Code lines preserved byte-for-byte,
 * comments only added.
 */
222 * nocache_index_getattr
224 * This gets called from index_getattr() macro, and only in cases
225 * where we can't use cacheoffset and the value is not null.
227 * This caches attribute offsets in the attribute descriptor.
229 * An alternative way to speed things up would be to cache offsets
230 * with the tuple, but that seems more difficult unless you take
231 * the storage hit of actually putting those offsets into the
232 * tuple you send to disk. Yuck.
234 * This scheme will be slightly slower than that, but should
235 * perform well for queries which hit large #'s of tuples. After
236 * you cache the offsets once, examining all the other tuples using
237 * the same attribute descriptor will go much quicker. -cim 5/4/91
245 char *tp;
/* ptr to data part of tuple */
246 bits8 *bp = NULL;
/* ptr to null bitmap in tuple */
247 bool slow =
false;
/* do we have to walk attrs? */
248 int data_off;
/* tuple data offset */
249 int off;
/* current offset within data */
254 * 1: No nulls and no variable-width attributes.
255 * 2: Has a null or a var-width AFTER att.
256 * 3: Has nulls or var-widths BEFORE att.
267 * there's a null somewhere in the tuple
269 * check to see if desired att is null
272 /* XXX "knows" t_bits are just after fixed tuple header! */
276 * Now check to see if any preceding bits are null...
/*
 * NOTE(review): null-bitmap scan -- 'finalbit' is attnum's bit position
 * within its bitmap byte; a set bit in ~bp[] means a NULL attribute.
 */
280 int finalbit =
attnum & 0x07;
282 /* check for nulls "before" final bit of last byte */
283 if ((~bp[
byte]) & ((1 << finalbit) - 1))
287 /* check for nulls in any "earlier" bytes */
290 for (
i = 0;
i < byte;
i++)
302 tp = (
char *) tup + data_off;
309 * If we get here, there are no nulls up to and including the target
310 * attribute. If we have a cached offset, we can use it.
317 * Otherwise, check for non-fixed-length attrs up to and including
318 * target. If there aren't any, it's safe to cheaply initialize the
319 * cached offsets for these attrs.
338 int natts = tupleDesc->
natts;
342 * If we get here, we have a tuple with no nulls or var-widths up to
343 * and including the target attribute, so we can use the cached offset
344 * ... only we don't have it yet, or we'd not have got here. Since
345 * it's cheap to compute offsets for fixed-width columns, we take the
346 * opportunity to initialize the cached offsets for *all* the leading
347 * fixed-width columns, in hope of avoiding future visits to this
352 /* we might have set some offsets in the slow path previously */
359 for (;
j < natts;
j++)
379 bool usecache =
true;
383 * Now we know that we have to walk the tuple CAREFULLY. But we still
384 * might be able to cache some offsets for next time.
386 * Note - This loop is a little tricky. For each non-null attribute,
387 * we have to first account for alignment padding before the attr,
388 * then advance over the attr based on its length. Nulls have no
389 * storage and no alignment padding either. We can use/set
390 * attcacheoff until we reach either a null or a var-width attribute.
393 for (
i = 0;;
i++)
/* loop exit is at "break" */
400 continue;
/* this cannot be the target att */
403 /* If we know the next offset, we can skip the rest */
406 else if (att->
attlen == -1)
409 * We can only cache the offset for a varlena attribute if the
410 * offset is already suitably aligned, so that there would be
411 * no pad bytes in any case: then the offset will be valid for
412 * either an aligned or unaligned value.
426 /* not varlena, so safe to use att_nominal_alignby */
/* NOTE(review): attlen <= 0 (varlena or cstring) ends offset caching. */
438 if (usecache && att->
attlen <= 0)
/*
 * NOTE(review): fragmentary extraction of index_deform_tuple(); the
 * signature and the call into index_deform_tuple_internal() are missing
 * lines.  Code preserved byte-for-byte, comments only added.
 */
447 * Convert an index tuple into Datum/isnull arrays.
449 * The caller must allocate sufficient storage for the output arrays.
450 * (INDEX_MAX_KEYS entries should be enough.)
452 * This is nearly the same as heap_deform_tuple(), but for IndexTuples.
453 * One difference is that the tuple should never have any missing columns.
459 char *tp;
/* ptr to tuple data */
460 bits8 *bp;
/* ptr to null bitmap in tuple */
462 /* XXX "knows" t_bits are just after fixed tuple header! */
/*
 * NOTE(review): fragmentary extraction of index_deform_tuple_internal().
 * The attribute-extraction loop body is mostly missing; visible fragments
 * show the attcacheoff use/set logic mirroring nocache_index_getattr().
 * Code preserved byte-for-byte, comments only added.
 */
472 * Convert an index tuple into Datum/isnull arrays,
473 * without assuming any specific layout of the index tuple header.
475 * Caller must supply pointer to data area, pointer to nulls bitmap
476 * (which can be NULL if !hasnulls), and hasnulls flag.
481 char *tp,
bits8 *bp,
int hasnulls)
483 int natts = tupleDescriptor->
natts;
/* number of atts to extract */
485 int off = 0;
/* offset in tuple data */
486 bool slow =
false;
/* can we use/set attcacheoff? */
488 /* Assert to protect callers who allocate fixed-size arrays */
/* NOTE(review): a NULL attribute disables further offset caching. */
499 slow =
true;
/* can't use attcacheoff anymore */
507 else if (thisatt->
attlen == -1)
510 * We can only cache the offset for a varlena attribute if the
511 * offset is already suitably aligned, so that there would be no
512 * pad bytes in any case: then the offset will be valid for either
513 * an aligned or unaligned value.
527 /* not varlena, so safe to use att_nominal_alignby */
531 thisatt->attcacheoff = off;
538 if (thisatt->attlen <= 0)
539 slow =
true;
/* can't use attcacheoff anymore */
/*
 * NOTE(review): fragmentary extraction of CopyIndexTuple(); only the
 * memcpy of the source tuple into the new allocation survives.  The
 * signature, allocation, and return are missing lines.  Code preserved
 * byte-for-byte, comments only added.
 */
544 * Create a palloc'd copy of an index tuple.
554 memcpy(result,
source, size);
/*
 * NOTE(review): fragmentary extraction of index_truncate_tuple().  The
 * deform/re-form steps and the temporary-descriptor cleanup are missing
 * lines.  Code preserved byte-for-byte, comments only added.
 */
559 * Create a palloc'd copy of an index tuple, leaving only the first
560 * leavenatts attributes remaining.
562 * Truncation is guaranteed to result in an index tuple that is no
563 * larger than the original. It is safe to use the IndexTuple with
564 * the original tuple descriptor, but caller must avoid actually
565 * accessing truncated attributes from returned tuple! In practice
566 * this means that index_getattr() must be called with special care,
567 * and that the truncated tuple should only ever be accessed by code
568 * under caller's direct control.
570 * It's safe to call this function with a buffer lock held, since it
571 * never performs external table access. If it ever became possible
572 * for index tuples to contain EXTERNAL TOAST values, then this would
573 * have to be revisited.
584 Assert(leavenatts <= sourceDescriptor->natts);
586 /* Easy case: no truncation actually required */
587 if (leavenatts == sourceDescriptor->
natts)
590 /* Create temporary truncated tuple descriptor */
593 /* Deform, form copy of tuple with fewer attributes */
600 * Cannot leak memory here, TupleDescCopy() doesn't allocate any inner
601 * structure, so, plain pfree() should clean all allocated memory
static Datum values[MAXATTR]
struct varlena * detoast_external_attr(struct varlena *attr)
int errcode(int sqlerrcode)
int errmsg(const char *fmt,...)
#define ereport(elevel,...)
Assert(PointerIsAligned(start, uint64))
#define TOAST_INDEX_TARGET
Size heap_compute_data_size(TupleDesc tupleDesc, const Datum *values, const bool *isnull)
void heap_fill_tuple(TupleDesc tupleDesc, const Datum *values, const bool *isnull, char *data, Size data_size, uint16 *infomask, bits8 *bit)
void index_deform_tuple(IndexTuple tup, TupleDesc tupleDescriptor, Datum *values, bool *isnull)
IndexTuple index_truncate_tuple(TupleDesc sourceDescriptor, IndexTuple source, int leavenatts)
IndexTuple CopyIndexTuple(IndexTuple source)
void index_deform_tuple_internal(TupleDesc tupleDescriptor, Datum *values, bool *isnull, char *tp, bits8 *bp, int hasnulls)
IndexTuple index_form_tuple_context(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull, MemoryContext context)
Datum nocache_index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc)
IndexTuple index_form_tuple(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull)
static bool IndexTupleHasVarwidths(const IndexTupleData *itup)
IndexTupleData * IndexTuple
static bool IndexTupleHasNulls(const IndexTupleData *itup)
struct IndexTupleData IndexTupleData
static Size IndexTupleSize(const IndexTupleData *itup)
static Size IndexInfoFindDataOffset(unsigned short t_info)
void * MemoryContextAllocZero(MemoryContext context, Size size)
void pfree(void *pointer)
MemoryContext CurrentMemoryContext
FormData_pg_attribute * Form_pg_attribute
static rewind_source * source
static Datum PointerGetDatum(const void *X)
static Pointer DatumGetPointer(Datum X)
Datum toast_compress_datum(Datum value, char cmethod)
TupleDesc CreateTupleDescTruncatedCopy(TupleDesc tupdesc, int natts)
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
static CompactAttribute * TupleDescCompactAttr(TupleDesc tupdesc, int i)
#define att_nominal_alignby(cur_offset, attalignby)
static bool att_isnull(int ATT, const bits8 *BITS)
#define att_addlength_pointer(cur_offset, attlen, attptr)
#define att_pointer_alignby(cur_offset, attalignby, attlen, attptr)
static bool VARATT_IS_EXTENDED(const void *PTR)
static bool VARATT_IS_EXTERNAL(const void *PTR)
static Size VARSIZE(const void *PTR)