/*-------------------------------------------------------------------------
 *
 * heaptuple.c
 *	  This file contains heap tuple accessor and mutator routines, as well
 *	  as various tuple utilities.
 *
 * Some notes about varlenas and this code:
 *
 * Before Postgres 8.3 varlenas always had a 4-byte length header, and
 * therefore always needed 4-byte alignment (at least).  This wasted space
 * for short varlenas, for example CHAR(1) took 5 bytes and could need up to
 * 3 additional padding bytes for alignment.
 *
 * Now, a short varlena (up to 126 data bytes) is reduced to a 1-byte header
 * and we don't align it.  To hide this from datatype-specific functions that
 * don't want to deal with it, such a datum is considered "toasted" and will
 * be expanded back to the normal 4-byte-header format by pg_detoast_datum.
 * (In performance-critical code paths we can use pg_detoast_datum_packed
 * and the appropriate access macros to avoid that overhead.)  Note that this
 * conversion is performed directly in heap_form_tuple, without invoking
 * heaptoast.c.
 *
 * This change will break any code that assumes it needn't detoast values
 * that have been put into a tuple but never sent to disk.  Hopefully there
 * are few such places.
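 *
 * For example (an illustrative sketch, not code taken from this file), a
 * datatype function that wants to skip that expansion overhead can accept a
 * possibly-packed argument and use the "_ANY" access macros:
 *
 *		text   *t = PG_GETARG_TEXT_PP(0);
 *		char   *str = VARDATA_ANY(t);
 *		int		len = VARSIZE_ANY_EXHDR(t);
 *
 * whereas PG_GETARG_TEXT_P() always expands the value back to the 4-byte
 * header format first.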
 *
 * Varlenas still have alignment INT (or DOUBLE) in pg_type/pg_attribute, since
 * that's the normal requirement for the untoasted format.  But we ignore that
 * for the 1-byte-header format.  This means that the actual start position
 * of a varlena datum may vary depending on which format it has.  To determine
 * what is stored, we have to require that alignment padding bytes be zero.
 * (Postgres actually has always zeroed them, but now it's required!)  Since
 * the first byte of a 1-byte-header varlena can never be zero, we can examine
 * the first byte after the previous datum to tell if it's a pad byte or the
 * start of a 1-byte-header varlena.
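 *
 * For example (an illustrative sketch of that test, not code from this file),
 * given a pointer just past the previous datum's storage:
 *
 *		if (att->attlen == -1 && *(uint8 *) ptr != 0)
 *			off = ptr - tupledata;
 *		else
 *			off = att_nominal_alignby(ptr - tupledata, att->attalignby);
 *
 * In the first branch a 1-byte-header varlena starts right at ptr, unaligned;
 * in the second there are only zero pad bytes, so we align up before reading.
 * The att_pointer_alignby() macro used by this file wraps up this check.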
 *
 * Note that while formerly we could rely on the first varlena column of a
 * system catalog to be at the offset suggested by the C struct for the
 * catalog, this is now risky: it's only safe if the preceding field is
 * word-aligned, so that there will never be any padding.
 *
 * We don't pack varlenas whose attstorage is PLAIN, since the data type
 * isn't expecting to have to detoast values.  This is used in particular
 * by oidvector and int2vector, which are used in the system catalogs
 * and we'd like to still refer to them via C struct offsets.
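 *
 * For example (an illustrative sketch), catalog code can keep doing
 *
 *		Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup);
 *		Oid			firstargtype = procform->proargtypes.values[0];
 *
 * which relies on proargtypes (an oidvector) never being packed nor stored
 * at a surprise offset.
 *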
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/common/heaptuple.c
 *-------------------------------------------------------------------------
 */
/*
 * Does att's datatype allow packing into the 1-byte-header varlena format?
 * While functions that use TupleDescAttr() and assign attstorage =
 * TYPSTORAGE_PLAIN cannot use packed varlena headers, functions that call
 * TupleDescInitEntry() use typeForm->typstorage (TYPSTORAGE_EXTENDED) and
 * can use packed varlena headers, e.g.:
 *		CREATE TABLE test(a VARCHAR(10000) STORAGE PLAIN);
 *		INSERT INTO test VALUES (repeat('A',10));
 * This can be verified with pageinspect.
 */
#define ATT_IS_PACKABLE(att) \
	((att)->attlen == -1 && (att)->attstorage != TYPSTORAGE_PLAIN)
/* Use this if it's already known varlena */
#define VARLENA_ATT_IS_PACKABLE(att) \
	((att)->attstorage != TYPSTORAGE_PLAIN)

/* FormData_pg_attribute.attstorage != TYPSTORAGE_PLAIN and an attlen of -1 */
#define COMPACT_ATTR_IS_PACKABLE(att) \
	((att)->attlen == -1 && (att)->attispackable)
/*
 * Setup for caching pass-by-ref missing attributes in a way that survives
 * tupleDesc destruction.
 */

	if (entry1->len != entry2->len)
		return entry1->len > entry2->len ? 1 : -1;
/* ----------------------------------------------------------------
 *						misc support routines
 * ----------------------------------------------------------------
 */

/* Return the missing value of an attribute, or NULL if there isn't one. */

	Assert(attnum <= tupleDesc->natts);

	/* no need to cache by-value attributes */

	/* set up cache if required */

	/* check if there's a cache entry */

	/* cache miss, so we need a non-transient copy of the datum */
/*
 * heap_compute_data_size
 *		Determine size of the data area of a tuple to be constructed
 */

	Size		data_length = 0;
	int			numberOfAttributes = tupleDesc->natts;

	for (i = 0; i < numberOfAttributes; i++)
		/*
		 * we're anticipating converting to a short varlena header, so
		 * adjust length and don't count any alignment
		 */

		else if (atti->attlen == -1 &&
				 VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))

			/*
			 * we want to flatten the expanded value so that the constructed
			 * tuple doesn't depend on it
			 */
/*
 * Per-attribute helper for heap_fill_tuple and other routines building tuples.
 *
 * Fill in either a data value or a bit in the null bitmask
 */

	/*
	 * If we're building a null bitmap, set the appropriate bit for the
	 * current column value here.
	 */

	/*
	 * XXX we use the att_nominal_alignby macro on the pointer value itself,
	 * not on an offset.  This is a bit of a hack.
	 */

		data_length = att->attlen;

	else if (att->attlen == -1)

			/*
			 * we want to flatten the expanded value so that the
			 * constructed tuple doesn't depend on it
			 */
			/* no alignment, since it's short by definition */
			memcpy(data, val, data_length);

			/* no alignment for short varlenas */
			memcpy(data, val, data_length);

			/* convert to short varlena -- no alignment */

			/* full 4-byte header varlena */
			memcpy(data, val, data_length);

	else if (att->attlen == -2)

		/* cstring ... never needs alignment */

		/* fixed-length pass-by-reference */
		data_length = att->attlen;
/*
 * heap_fill_tuple
 *		Load data portion of a tuple from values/isnull arrays
 * We also fill the null bitmap (if any) and set the infomask bits
 * that reflect the tuple's data contents.
 * NOTE: it is now REQUIRED that the caller have pre-zeroed the data area.
 */
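/*
 * For example (an illustrative sketch, not code from this file), callers
 * typically size the data area with heap_compute_data_size() and pre-zero it
 * with palloc0() before calling heap_fill_tuple():
 *
 *		data_len = heap_compute_data_size(tupleDesc, values, isnull);
 *		td = (HeapTupleHeader) palloc0(hoff + data_len);
 *		heap_fill_tuple(tupleDesc, values, isnull,
 *						(char *) td + hoff, data_len,
 *						&td->t_infomask, (hasnull ? td->t_bits : NULL));
 *
 * Here hoff stands for the already-MAXALIGNed header length; heap_form_tuple
 * below does this same bookkeeping for real.
 */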
	int			numberOfAttributes = tupleDesc->natts;

#ifdef USE_ASSERT_CHECKING

	/* just to keep compiler quiet */

	for (i = 0; i < numberOfAttributes; i++)

				 isnull ? isnull[i] : true);
/* ----------------------------------------------------------------
 *						heap tuple interface
 * ----------------------------------------------------------------
 */

/* heap_attisnull - returns true iff the tuple attribute is null */

	/*
	 * We allow a NULL tupledesc for relations not expected to have missing
	 * values, such as catalog relations and indexes.
	 */
	Assert(!tupleDesc || attnum <= tupleDesc->natts);

	/* these are never null */

/*
 * nocachegetattr
 *
 * This only gets called from fastgetattr(), in cases where we
 * can't use a cacheoffset and the value is not null.
 *
 * This caches attribute offsets in the attribute descriptor.
 *
 * An alternative way to speed things up would be to cache offsets
 * with the tuple, but that seems more difficult unless you take
 * the storage hit of actually putting those offsets into the
 * tuple you send to disk.  Yuck.
 *
 * This scheme will be slightly slower than that, but should
 * perform well for queries which hit large #'s of tuples.  After
 * you cache the offsets once, examining all the other tuples using
 * the same attribute descriptor will go much quicker. -cim 5/4/91
 *
 * NOTE: if you need to change this code, see also heap_deform_tuple.
 * Also see nocache_index_getattr, which is the same code for index
 * tuples.
 */
	char	   *tp;				/* ptr to data part of tuple */
	bits8	   *bp = td->t_bits;	/* ptr to null bitmap in tuple */
	bool		slow = false;	/* do we have to walk attrs? */
	int			off;			/* current offset within data */
	/*
	 *	 1: No nulls and no variable-width attributes.
	 *	 2: Has a null or a var-width AFTER att.
	 *	 3: Has nulls or var-widths BEFORE att.
	 */

		/*
		 * there's a null somewhere in the tuple
		 *
		 * check to see if any preceding bits are null...
		 */

			int			finalbit = attnum & 0x07;

			/* check for nulls "before" final bit of last byte */
			if ((~bp[byte]) & ((1 << finalbit) - 1))

			/* check for nulls in any "earlier" bytes */
			for (i = 0; i < byte; i++)

	tp = (char *) td + td->t_hoff;
		/*
		 * If we get here, there are no nulls up to and including the target
		 * attribute.  If we have a cached offset, we can use it.
		 */

		/*
		 * Otherwise, check for non-fixed-length attrs up to and including
		 * target.  If there aren't any, it's safe to cheaply initialize the
		 * cached offsets for these attrs.
		 */

			int			natts = tupleDesc->natts;

			/*
			 * If we get here, we have a tuple with no nulls or var-widths up
			 * to and including the target attribute, so we can use the cached
			 * offset ... only we don't have it yet, or we'd not have got
			 * here.  Since it's cheap to compute offsets for fixed-width
			 * columns, we take the opportunity to initialize the cached
			 * offsets for *all* the leading fixed-width columns, in hope of
			 * avoiding future visits to this routine.
			 */

			/* we might have set some offsets in the slow path previously */

			for (; j < natts; j++)
		bool		usecache = true;

		/*
		 * Now we know that we have to walk the tuple CAREFULLY.  But we still
		 * might be able to cache some offsets for next time.
		 *
		 * Note - This loop is a little tricky.  For each non-null attribute,
		 * we have to first account for alignment padding before the attr,
		 * then advance over the attr based on its length.  Nulls have no
		 * storage and no alignment padding either.  We can use/set
		 * attcacheoff until we reach either a null or a var-width attribute.
		 */
		for (i = 0;; i++)		/* loop exit is at "break" */

				continue;		/* this cannot be the target att */

			/* If we know the next offset, we can skip the rest */

			else if (att->attlen == -1)

				/*
				 * We can only cache the offset for a varlena attribute if the
				 * offset is already suitably aligned, so that there would be
				 * no pad bytes in any case: then the offset will be valid for
				 * either an aligned or unaligned value.
				 */

				/* not varlena, so safe to use att_nominal_alignby */

			if (usecache && att->attlen <= 0)
/*
 * heap_getsysattr
 *		Fetch the value of a system attribute for a tuple.
 * This is a support routine for heap_getattr().  The function has already
 * determined that the attnum refers to a system attribute.
 */
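/*
 * For example (an illustrative sketch, not code from this file), callers
 * normally reach this through heap_getattr() with a negative attnum:
 *
 *		bool		isnull;
 *		Datum		d = heap_getattr(tup, MinTransactionIdAttributeNumber,
 *									 tupleDesc, &isnull);
 *		TransactionId xmin = DatumGetTransactionId(d);
 *
 * The system attribute numbers live in sysattr.h.
 */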
	/* Currently, no sys attribute ever reads as NULL. */

			/* pass-by-reference datatype */

			/*
			 * cmin and cmax are now both aliases for the same field, which
			 * can in fact also be a combo command id.  XXX perhaps we should
			 * return the "real" cmin or cmax if possible, that is if we are
			 * inside the originating transaction?
			 */

			result = 0;			/* keep compiler quiet */
/*
 * heap_copytuple
 *		returns a copy of an entire tuple
 * The HeapTuple struct, tuple header, and tuple data are all allocated
 * as a single palloc() block.
 */
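/*
 * For example (an illustrative sketch, not code from this file), the usual
 * pattern when a tuple must outlive the buffer or slot it came from:
 *
 *		HeapTuple	copy = heap_copytuple(tup);
 *
 *		... use copy after the original is gone ...
 *
 *		heap_freetuple(copy);
 *
 * Because the copy is one palloc chunk, heap_freetuple() (or resetting the
 * containing memory context) releases all of it at once.
 */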
/*
 * heap_copytuple_with_tuple
 *		copy a tuple into a caller-supplied HeapTuple management struct
 * Note that after calling this function, the "dest" HeapTuple will not be
 * allocated as a single palloc() block (unlike with heap_copytuple()).
 */
/*
 * Expand a tuple which has fewer attributes than required.  For each
 * attribute not present in the sourceTuple, if there is a missing value,
 * that will be used.  Otherwise the attribute will be set to NULL.
 *
 * The source tuple must have fewer attributes than the required number.
 *
 * Only one of targetHeapTuple and targetMinimalTuple may be supplied.  The
 * other argument must be NULL.
 */
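/*
 * For example (an illustrative sketch, not code from this file), callers do
 * not normally invoke this routine directly; they use the wrappers below and
 * pick the output flavor they need:
 *
 *		HeapTuple	htup = heap_expand_tuple(oldtup, tupleDesc);
 *		MinimalTuple mtup = minimal_expand_tuple(oldtup, tupleDesc);
 *
 * Either call pads oldtup out to tupleDesc->natts attributes, filling the new
 * columns from the descriptor's missing values or with NULLs.
 */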
	int			natts = tupleDesc->natts;
	bits8	   *nullBits = NULL;

	Assert((targetHeapTuple && !targetMinimalTuple)
		   || (!targetHeapTuple && targetMinimalTuple));

	Assert(sourceNatts < natts);

	sourceNullLen = (hasNulls ? BITMAPLEN(sourceNatts) : 0);
	targetDataLen = sourceDataLen;

		/*
		 * If there are missing values we want to put them into the tuple.
		 * Before that we have to compute the extra length for the values
		 * array and the variable length data.
		 */

		/*
		 * Find the first item in attrmiss for which we don't have a value in
		 * the source.  We can ignore all the missing entries before that.
		 */
		for (firstmissingnum = sourceNatts;
			 firstmissingnum < natts;
			 firstmissingnum++)
			if (attrmiss[firstmissingnum].am_present)

		/*
		 * Now walk the missing attributes.  If there is a missing value make
		 * space for it.  Otherwise, it's going to be NULL.
		 */
		for (attnum = firstmissingnum;
			 attnum < natts;
			 attnum++)
			if (attrmiss[attnum].am_present)

			/* no missing value, so it must be null */

	}							/* end if have missing values */

	/*
	 * If there are no missing values at all then NULLS must be allowed,
	 * since some of the attributes are known to be absent.
	 */
	len += targetNullLen;

		/*
		 * Allocate and zero the space needed.  Note that the tuple body and
		 * HeapTupleData management structure are allocated in one chunk.
		 */

		len += targetDataLen;

		(*targetHeapTuple)->t_data
		(*targetHeapTuple)->t_len = len;
		(*targetHeapTuple)->t_tableOid = sourceTuple->t_tableOid;
		(*targetHeapTuple)->t_self = sourceTuple->t_self;

		targetTHeader->t_hoff = hoff;
		/* We also make sure that t_ctid is invalid unless explicitly set */

		if (targetNullLen > 0)
			nullBits = (bits8 *) ((char *) (*targetHeapTuple)->t_data
								  + offsetof(HeapTupleHeaderData, t_bits));
		targetData = (char *) (*targetHeapTuple)->t_data + hoff;

		len += targetDataLen;

		(*targetMinimalTuple)->t_len = len;
		(*targetMinimalTuple)->t_infomask = sourceTHeader->t_infomask;
		/* Same macro works for MinimalTuples */

		if (targetNullLen > 0)
			nullBits = (bits8 *) ((char *) *targetMinimalTuple
								  + offsetof(MinimalTupleData, t_bits));
		targetData = (char *) *targetMinimalTuple + hoff;
		infoMask = &((*targetMinimalTuple)->t_infomask);
	if (targetNullLen > 0)
	{
		if (sourceNullLen > 0)
		{
			/* if bitmap pre-existed copy in - all is set */
			memcpy(nullBits,
				   ((char *) sourceTHeader)
				   + offsetof(HeapTupleHeaderData, t_bits),
				   sourceNullLen);
			nullBits += sourceNullLen - 1;
		}
		else
		{
			sourceNullLen = BITMAPLEN(sourceNatts);
			/* Set NOT NULL for all existing attributes */
			memset(nullBits, 0xff, sourceNullLen);

			nullBits += sourceNullLen - 1;

			if (sourceNatts & 0x07)
			{
				/* build the mask (inverted!) */
				bitMask = 0xff << (sourceNatts & 0x07);

				*nullBits = ~bitMask;
			}
		}

		bitMask = (1 << ((sourceNatts - 1) & 0x07));
	}							/* End if have null bitmap */

	memcpy(targetData,
		   ((char *) sourceTuple->t_data) + sourceTHeader->t_hoff,
		   sourceDataLen);
	targetData += sourceDataLen;

	/* Now fill in the missing values */

		if (attrmiss && attrmiss[attnum].am_present)

			fill_val(attr,
					 nullBits ? &nullBits : NULL,
					 &bitMask,
					 &targetData,
					 infoMask,
					 attrmiss[attnum].am_value,
					 false);

	}							/* end loop over missing attributes */
/*
 * minimal_expand_tuple - Fill in the missing values for a minimal HeapTuple
 */
	expand_tuple(NULL, &minimalTuple, sourceTuple, tupleDesc);
	return minimalTuple;

/*
 * heap_expand_tuple - Fill in the missing values for an ordinary HeapTuple
 */
	expand_tuple(&heapTuple, NULL, sourceTuple, tupleDesc);
/*
 * heap_copy_tuple_as_datum
 *		copy a tuple as a composite-type Datum
 */

	/*
	 * If the tuple contains any external TOAST pointers, we have to inline
	 * those fields to meet the conventions for composite-type Datums.
	 */

	/*
	 * Fast path for easy case: just make a palloc'd copy and insert the
	 * correct composite-Datum header fields (since those may not be set if
	 * the given tuple came from disk, rather than from heap_form_tuple).
	 */
/*
 * heap_form_tuple
 *		construct a tuple from the given values[] and isnull[] arrays,
 *		which are of the length indicated by tupleDescriptor->natts
 * The result is allocated in the current memory context.
 */
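/*
 * For example (an illustrative sketch, not code from this file), building a
 * two-column (int4, text) tuple for a descriptor "tupdesc" of that shape:
 *
 *		Datum		values[2];
 *		bool		isnull[2] = {false, false};
 *		HeapTuple	tuple;
 *
 *		values[0] = Int32GetDatum(42);
 *		values[1] = CStringGetTextDatum("hello");
 *		tuple = heap_form_tuple(tupdesc, values, isnull);
 *
 * CStringGetTextDatum() is from builtins.h; the resulting tuple contains its
 * own copy of the datum contents.
 */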
	bool		hasnull = false;
	int			numberOfAttributes = tupleDescriptor->natts;

	if (numberOfAttributes > MaxTupleAttributeNumber)
		ereport(ERROR,
				(errcode(ERRCODE_TOO_MANY_COLUMNS),
				 errmsg("number of columns (%d) exceeds limit (%d)",
						numberOfAttributes, MaxTupleAttributeNumber)));

	for (i = 0; i < numberOfAttributes; i++)

	/*
	 * Determine total space needed
	 */

	/*
	 * Allocate and zero the space needed.  Note that the tuple body and
	 * HeapTupleData management structure are allocated in one chunk.
	 */

	/*
	 * And fill in the information.  Note we fill the Datum fields even though
	 * this tuple may never become a Datum.  This lets HeapTupleHeaderGetDatum
	 * identify the tuple type if needed.
	 */

	/* We also make sure that t_ctid is invalid unless explicitly set */

					(hasnull ? td->t_bits : NULL));
/*
 * heap_modify_tuple
 *		form a new tuple from an old tuple and a set of replacement values.
 *
 * The replValues, replIsnull, and doReplace arrays must be of the length
 * indicated by tupleDesc->natts.  The new tuple is constructed using the data
 * from replValues/replIsnull at columns where doReplace is true, and using
 * the data from the old tuple at columns where doReplace is false.
 *
 * The result is allocated in the current memory context.
 */
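/*
 * For example (an illustrative sketch, not code from this file), replacing
 * only column 2 of an existing tuple:
 *
 *		Datum		values[Natts_mycatalog] = {0};
 *		bool		nulls[Natts_mycatalog] = {0};
 *		bool		replaces[Natts_mycatalog] = {0};
 *		HeapTuple	newtup;
 *
 *		values[1] = Int32GetDatum(7);
 *		replaces[1] = true;
 *		newtup = heap_modify_tuple(oldtup, tupleDesc,
 *								   values, nulls, replaces);
 *
 * Natts_mycatalog is a stand-in for the descriptor's attribute count; only
 * positions with replaces[] set are taken from values[]/nulls[].
 */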
HeapTuple
heap_modify_tuple(HeapTuple tuple,
				  TupleDesc tupleDesc,
				  const Datum *replValues,
				  const bool *replIsnull,
				  const bool *doReplace)
{
	int			numberOfAttributes = tupleDesc->natts;
	/*
	 * allocate and fill values and isnull arrays from either the tuple or the
	 * repl information, as appropriate.
	 *
	 * NOTE: it's debatable whether to use heap_deform_tuple() here or just
	 * heap_getattr() only the non-replaced columns.  The latter could win if
	 * there are many replaced columns and few non-replaced ones.  However,
	 * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
	 * O(N^2) if there are many non-replaced columns, so it seems better to
	 * err on the side of linear cost.
	 */

	isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));

	for (attoff = 0; attoff < numberOfAttributes; attoff++)
		if (doReplace[attoff])
		{
			values[attoff] = replValues[attoff];
			isnull[attoff] = replIsnull[attoff];
		}

	/*
	 * create a new tuple from the values and isnull arrays
	 */

	/*
	 * copy the identification info of the old tuple: t_ctid, t_self
	 */
/*
 * heap_modify_tuple_by_cols
 *		form a new tuple from an old tuple and a set of replacement values.
 *
 * This is like heap_modify_tuple, except that instead of specifying which
 * column(s) to replace by a boolean map, an array of target column numbers
 * is used.  This is often more convenient when a fixed number of columns
 * are to be replaced.  The replCols, replValues, and replIsnull arrays must
 * be of length nCols.  Target column numbers are indexed from 1.
 *
 * The result is allocated in the current memory context.
 */
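/*
 * For example (an illustrative sketch, not code from this file), replacing
 * columns 2 and 5 without building full-width replacement arrays:
 *
 *		int			cols[] = {2, 5};
 *		Datum		vals[] = {Int32GetDatum(7), BoolGetDatum(true)};
 *		bool		nulls[] = {false, false};
 *		HeapTuple	newtup;
 *
 *		newtup = heap_modify_tuple_by_cols(oldtup, tupleDesc,
 *										   2, cols, vals, nulls);
 *
 * (This assumes columns 2 and 5 really are int4 and bool; the arrays are
 * indexed by position in cols[], not by attribute number.)
 */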
HeapTuple
heap_modify_tuple_by_cols(HeapTuple tuple,
						  TupleDesc tupleDesc,
						  int nCols,
						  const int *replCols,
						  const Datum *replValues,
						  const bool *replIsnull)
{
	int			numberOfAttributes = tupleDesc->natts;

	/*
	 * allocate and fill values and isnull arrays from the tuple, then replace
	 * selected columns from the input arrays.
	 */

	isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
	for (i = 0; i < nCols; i++)

		if (attnum <= 0 || attnum > numberOfAttributes)

		isnull[attnum - 1] = replIsnull[i];

	/*
	 * create a new tuple from the values and isnull arrays
	 */

	/*
	 * copy the identification info of the old tuple: t_ctid, t_self
	 */
/*
 * heap_deform_tuple
 *		Given a tuple, extract data into values/isnull arrays; this is
 *		the inverse of heap_form_tuple.
 *
 * Storage for the values/isnull arrays is provided by the caller;
 * it should be sized according to tupleDesc->natts not
 * HeapTupleHeaderGetNatts(tuple->t_data).
 *
 * Note that for pass-by-reference datatypes, the pointer placed
 * in the Datum will point into the given tuple.
 *
 * When all or most of a tuple's fields need to be extracted,
 * this routine will be significantly quicker than a loop around
 * heap_getattr; the loop will become O(N^2) as soon as any
 * noncacheable attribute offsets are involved.
 */
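/*
 * For example (an illustrative sketch, not code from this file):
 *
 *		Datum	   *values = (Datum *) palloc(tupleDesc->natts * sizeof(Datum));
 *		bool	   *isnull = (bool *) palloc(tupleDesc->natts * sizeof(bool));
 *
 *		heap_deform_tuple(tuple, tupleDesc, values, isnull);
 *
 * Remember that pass-by-reference values[] entries point into "tuple", so
 * they are only valid as long as that tuple (and its buffer, if any) is.
 */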
	int			tdesc_natts = tupleDesc->natts;
	int			natts;			/* number of atts to extract */
	char	   *tp;				/* ptr to tuple data */
	uint32		off;			/* offset in tuple data */
	bits8	   *bp = tup->t_bits;	/* ptr to null bitmap in tuple */
	bool		slow = false;	/* can we use/set attcacheoff? */

	/*
	 * In inheritance situations, it is possible that the given tuple actually
	 * has more fields than the caller is expecting.  Don't run off the end of
	 * the caller's arrays.
	 */
	natts = Min(natts, tdesc_natts);

	tp = (char *) tup + tup->t_hoff;

			slow = true;		/* can't use attcacheoff anymore */
		else if (thisatt->attlen == -1)

			/*
			 * We can only cache the offset for a varlena attribute if the
			 * offset is already suitably aligned, so that there would be no
			 * pad bytes in any case: then the offset will be valid for either
			 * an aligned or unaligned value.
			 */

			/* not varlena, so safe to use att_nominal_alignby */

			thisatt->attcacheoff = off;

		if (thisatt->attlen <= 0)
			slow = true;		/* can't use attcacheoff anymore */

	/*
	 * If tuple doesn't have all the atts indicated by tupleDesc, read the
	 * rest as nulls or missing values as appropriate.
	 */
/*
 * heap_form_minimal_tuple
 *		construct a MinimalTuple from the given values[] and isnull[] arrays,
 *		which are of the length indicated by tupleDescriptor->natts
 *
 * This is exactly like heap_form_tuple() except that the result is a
 * "minimal" tuple lacking a HeapTupleData header as well as room for system
 * columns.
 *
 * The result is allocated in the current memory context.
 */
	bool		hasnull = false;
	int			numberOfAttributes = tupleDescriptor->natts;

	if (numberOfAttributes > MaxTupleAttributeNumber)
		ereport(ERROR,
				(errcode(ERRCODE_TOO_MANY_COLUMNS),
				 errmsg("number of columns (%d) exceeds limit (%d)",
						numberOfAttributes, MaxTupleAttributeNumber)));

	for (i = 0; i < numberOfAttributes; i++)

	/*
	 * Determine total space needed
	 */

	/*
	 * Allocate and zero the space needed.
	 */
	memset(mem, 0, extra);

	/*
	 * And fill in the information.
	 */

					(char *) tuple + hoff,
					(hasnull ? tuple->t_bits : NULL));
/*
 * heap_free_minimal_tuple
 */

/*
 * heap_copy_minimal_tuple
 *		copy a MinimalTuple
 *
 * The result is allocated in the current memory context.
 */

	memset(mem, 0, extra);
	memcpy(result, mtup, mtup->t_len);

/*
 * heap_tuple_from_minimal_tuple
 *		create a HeapTuple by copying from a MinimalTuple;
 *		system columns are filled with zeroes
 *
 * The result is allocated in the current memory context.
 * The HeapTuple struct, tuple header, and tuple data are all allocated
 * as a single palloc() block.
 */

/*
 * minimal_tuple_from_heap_tuple
 *		create a MinimalTuple by copying from a HeapTuple
 *
 * The result is allocated in the current memory context.
 */

	memset(mem, 0, extra);
/*
 * This mainly exists so JIT can inline the definition, but it's also
 * sometimes useful in debugging sessions.
 */
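/*
 * (Presumably the wrapper this comment describes simply forwards to
 * VARSIZE_ANY(); a minimal sketch of such a function is shown here for
 * illustration.)
 */
size_t
varsize_any(void *p)
{
	return VARSIZE_ANY(p);
}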