git.postgresql.org Git - postgresql.git/commitdiff

git projects / postgresql.git / commitdiff
? search:
summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: ef03ea0)
Change dynahash.c and hsearch.h to use int64 instead of long
2025年8月22日 02:59:02 +0000 (11:59 +0900)
2025年8月22日 02:59:02 +0000 (11:59 +0900)
This code was relying on "long", which is a signed 8-byte integer everywhere
except on Windows, where it is 4 bytes; this could potentially expose the
code to overflows, even if the current uses in the code are fine as far as I
know. The code is now able to rely on the same sizeof() everywhere, with
int64. long was used for sizes, partition counts and entry counts.

Some callers of the dynahash.c routines used long declarations, which can
be cleaned up to use int64 instead. There was one shortcut based on
SIZEOF_LONG, which can be removed. long is entirely removed from
dynahash.c and hsearch.h.

Similar work was done in b1e5c9fa9ac4.

Reviewed-by: Peter Eisentraut <peter@eisentraut.org>
Reviewed-by: Chao Li <li.evan.chao@gmail.com>
Discussion: https://postgr.es/m/aKQYp-bKTRtRauZ6@paquier.xyz


diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 9fc9635d3300d0dc41114e83058d9fb3eb7dd22a..1cb368c8590ba6fdfd34c37a8771e051f555961c 100644 (file)
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -2713,8 +2713,8 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
FILE *qfile;
- long num_entries;
- long num_remove = 0;
+ int64 num_entries;
+ int64 num_remove = 0;
pgssHashKey key;
TimestampTz stats_reset;
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 227df90f89c97fb20e263cbe9a1e28c28c1f480b..fb784acf4af243b0d7e5d9d9d69c8f84481441ee 100644 (file)
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -586,7 +586,7 @@ RelFileLocatorSkippingWAL(RelFileLocator rlocator)
Size
EstimatePendingSyncsSpace(void)
{
- long entries;
+ int64 entries;
entries = pendingSyncHash ? hash_get_num_entries(pendingSyncHash) : 0;
return mul_size(1 + entries, sizeof(RelFileLocator));
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index d12a3ca06842fde14af8540fa566275885e81454..a0770e867968a1120a903fd93362700fb1eac41a 100644 (file)
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -330,8 +330,8 @@ InitShmemIndex(void)
*/
HTAB *
ShmemInitHash(const char *name, /* table string name for shmem index */
- long init_size, /* initial table size */
- long max_size, /* max size of the table */
+ int64 init_size, /* initial table size */
+ int64 max_size, /* max size of the table */
HASHCTL *infoP, /* info about key and bucket size */
int hash_flags) /* info about infoP */
{
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index f8c88147160e9ac2da86eff2674dde911e698dac..233b85b623d5dfd6eee1f01ab0526cf34507331c 100644 (file)
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -443,7 +443,7 @@ void
LockManagerShmemInit(void)
{
HASHCTL info;
- long init_table_size,
+ int64 init_table_size,
max_table_size;
bool found;
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index c07fb58835557d12e354a783e67b30bf0525d5a7..c1d8511ad17a918bbbb2a0d23b915536492c628d 100644 (file)
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -1145,7 +1145,7 @@ void
PredicateLockShmemInit(void)
{
HASHCTL info;
- long max_table_size;
+ int64 max_table_size;
Size requestSize;
bool found;
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index a7094917c20ca91b4e18d04c8fdda6d89f261ec3..1aeee5be42acd9b1380ffaf1a385bb9f8af98522 100644 (file)
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -154,7 +154,7 @@ typedef HASHBUCKET *HASHSEGMENT;
typedef struct
{
slock_t mutex; /* spinlock for this freelist */
- long nentries; /* number of entries in associated buckets */
+ int64 nentries; /* number of entries in associated buckets */
HASHELEMENT *freeList; /* chain of free elements */
} FreeListData;
@@ -182,8 +182,8 @@ struct HASHHDR
/* These fields can change, but not in a partitioned table */
/* Also, dsize can't change in a shared table, even if unpartitioned */
- long dsize; /* directory size */
- long nsegs; /* number of allocated segments (<= dsize) */
+ int64 dsize; /* directory size */
+ int64 nsegs; /* number of allocated segments (<= dsize) */
uint32 max_bucket; /* ID of maximum bucket in use */
uint32 high_mask; /* mask to modulo into entire table */
uint32 low_mask; /* mask to modulo into lower half of table */
@@ -191,9 +191,9 @@ struct HASHHDR
/* These fields are fixed at hashtable creation */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
- long num_partitions; /* # partitions (must be power of 2), or 0 */
- long max_dsize; /* 'dsize' limit if directory is fixed size */
- long ssize; /* segment size --- must be power of 2 */
+ int64 num_partitions; /* # partitions (must be power of 2), or 0 */
+ int64 max_dsize; /* 'dsize' limit if directory is fixed size */
+ int64 ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
int nelem_alloc; /* number of entries to allocate at once */
bool isfixed; /* if true, don't enlarge */
@@ -236,7 +236,7 @@ struct HTAB
/* We keep local copies of these fixed values to reduce contention */
Size keysize; /* hash key length in bytes */
- long ssize; /* segment size --- must be power of 2 */
+ int64 ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
/*
@@ -277,12 +277,12 @@ static bool expand_table(HTAB *hashp);
static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx);
static void hdefault(HTAB *hashp);
static int choose_nelem_alloc(Size entrysize);
-static bool init_htab(HTAB *hashp, long nelem);
+static bool init_htab(HTAB *hashp, int64 nelem);
pg_noreturn static void hash_corrupted(HTAB *hashp);
static uint32 hash_initial_lookup(HTAB *hashp, uint32 hashvalue,
HASHBUCKET **bucketptr);
-static long next_pow2_long(long num);
-static int next_pow2_int(long num);
+static int64 next_pow2_int64(int64 num);
+static int next_pow2_int(int64 num);
static void register_seq_scan(HTAB *hashp);
static void deregister_seq_scan(HTAB *hashp);
static bool has_seq_scans(HTAB *hashp);
@@ -355,7 +355,7 @@ string_compare(const char *key1, const char *key2, Size keysize)
* large nelem will penalize hash_seq_search speed without buying much.
*/
HTAB *
-hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
+hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
{
HTAB *hashp;
HASHHDR *hctl;
@@ -697,7 +697,7 @@ choose_nelem_alloc(Size entrysize)
* arrays
*/
static bool
-init_htab(HTAB *hashp, long nelem)
+init_htab(HTAB *hashp, int64 nelem)
{
HASHHDR *hctl = hashp->hctl;
HASHSEGMENT *segp;
@@ -780,10 +780,10 @@ init_htab(HTAB *hashp, long nelem)
* NB: assumes that all hash structure parameters have default values!
*/
Size
-hash_estimate_size(long num_entries, Size entrysize)
+hash_estimate_size(int64 num_entries, Size entrysize)
{
Size size;
- long nBuckets,
+ int64 nBuckets,
nSegments,
nDirEntries,
nElementAllocs,
@@ -791,9 +791,9 @@ hash_estimate_size(long num_entries, Size entrysize)
elementAllocCnt;
/* estimate number of buckets wanted */
- nBuckets = next_pow2_long(num_entries);
+ nBuckets = next_pow2_int64(num_entries);
/* # of segments needed for nBuckets */
- nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
+ nSegments = next_pow2_int64((nBuckets - 1) / DEF_SEGSIZE + 1);
/* directory entries */
nDirEntries = DEF_DIRSIZE;
while (nDirEntries < nSegments)
@@ -826,17 +826,17 @@ hash_estimate_size(long num_entries, Size entrysize)
*
* XXX this had better agree with the behavior of init_htab()...
*/
-long
-hash_select_dirsize(long num_entries)
+int64
+hash_select_dirsize(int64 num_entries)
{
- long nBuckets,
+ int64 nBuckets,
nSegments,
nDirEntries;
/* estimate number of buckets wanted */
- nBuckets = next_pow2_long(num_entries);
+ nBuckets = next_pow2_int64(num_entries);
/* # of segments needed for nBuckets */
- nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
+ nSegments = next_pow2_int64((nBuckets - 1) / DEF_SEGSIZE + 1);
/* directory entries */
nDirEntries = DEF_DIRSIZE;
while (nDirEntries < nSegments)
@@ -887,7 +887,7 @@ hash_stats(const char *caller, HTAB *hashp)
HASHHDR *hctl = hashp->hctl;
elog(DEBUG4,
- "hash_stats: Caller: %s Table Name: \"%s\" Accesses: " UINT64_FORMAT " Collisions: " UINT64_FORMAT " Expansions: " UINT64_FORMAT " Entries: %ld Key Size: %zu Max Bucket: %u Segment Count: %ld",
+ "hash_stats: Caller: %s Table Name: \"%s\" Accesses: " UINT64_FORMAT " Collisions: " UINT64_FORMAT " Expansions: " UINT64_FORMAT " Entries: " INT64_FORMAT " Key Size: %zu Max Bucket: %u Segment Count: " INT64_FORMAT,
caller != NULL ? caller : "(unknown)", hashp->tabname, hctl->accesses,
hctl->collisions, hctl->expansions, hash_get_num_entries(hashp),
hctl->keysize, hctl->max_bucket, hctl->nsegs);
@@ -993,7 +993,7 @@ hash_search_with_hash_value(HTAB *hashp,
* Can't split if running in partitioned mode, nor if frozen, nor if
* table is the subject of any active hash_seq_search scans.
*/
- if (hctl->freeList[0].nentries > (long) hctl->max_bucket &&
+ if (hctl->freeList[0].nentries > (int64) hctl->max_bucket &&
!IS_PARTITIONED(hctl) && !hashp->frozen &&
!has_seq_scans(hashp))
(void) expand_table(hashp);
@@ -1332,11 +1332,11 @@ get_hash_entry(HTAB *hashp, int freelist_idx)
/*
* hash_get_num_entries -- get the number of entries in a hashtable
*/
-long
+int64
hash_get_num_entries(HTAB *hashp)
{
int i;
- long sum = hashp->hctl->freeList[0].nentries;
+ int64 sum = hashp->hctl->freeList[0].nentries;
/*
* We currently don't bother with acquiring the mutexes; it's only
@@ -1417,9 +1417,9 @@ hash_seq_search(HASH_SEQ_STATUS *status)
HTAB *hashp;
HASHHDR *hctl;
uint32 max_bucket;
- long ssize;
- long segment_num;
- long segment_ndx;
+ int64 ssize;
+ int64 segment_num;
+ int64 segment_ndx;
HASHSEGMENT segp;
uint32 curBucket;
HASHELEMENT *curElem;
@@ -1548,11 +1548,11 @@ expand_table(HTAB *hashp)
HASHHDR *hctl = hashp->hctl;
HASHSEGMENT old_seg,
new_seg;
- long old_bucket,
+ int64 old_bucket,
new_bucket;
- long new_segnum,
+ int64 new_segnum,
new_segndx;
- long old_segnum,
+ int64 old_segnum,
old_segndx;
HASHBUCKET *oldlink,
*newlink;
@@ -1620,7 +1620,7 @@ expand_table(HTAB *hashp)
currElement = nextElement)
{
nextElement = currElement->link;
- if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
+ if ((int64) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
{
*oldlink = currElement;
oldlink = &currElement->link;
@@ -1644,9 +1644,9 @@ dir_realloc(HTAB *hashp)
{
HASHSEGMENT *p;
HASHSEGMENT *old_p;
- long new_dsize;
- long old_dirsize;
- long new_dirsize;
+ int64 new_dsize;
+ int64 old_dirsize;
+ int64 new_dirsize;
if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
return false;
@@ -1780,8 +1780,8 @@ hash_initial_lookup(HTAB *hashp, uint32 hashvalue, HASHBUCKET **bucketptr)
{
HASHHDR *hctl = hashp->hctl;
HASHSEGMENT segp;
- long segment_num;
- long segment_ndx;
+ int64 segment_num;
+ int64 segment_ndx;
uint32 bucket;
bucket = calc_bucket(hctl, hashvalue);
@@ -1814,25 +1814,21 @@ hash_corrupted(HTAB *hashp)
/* calculate ceil(log base 2) of num */
int
-my_log2(long num)
+my_log2(int64 num)
{
/*
* guard against too-large input, which would be invalid for
* pg_ceil_log2_*()
*/
- if (num > LONG_MAX / 2)
- num = LONG_MAX / 2;
+ if (num > PG_INT64_MAX / 2)
+ num = PG_INT64_MAX / 2;
-#if SIZEOF_LONG < 8
- return pg_ceil_log2_32(num);
-#else
return pg_ceil_log2_64(num);
-#endif
}
-/* calculate first power of 2 >= num, bounded to what will fit in a long */
-static long
-next_pow2_long(long num)
+/* calculate first power of 2 >= num, bounded to what will fit in a int64 */
+static int64
+next_pow2_int64(int64 num)
{
/* my_log2's internal range check is sufficient */
return 1L << my_log2(num);
@@ -1840,7 +1836,7 @@ next_pow2_long(long num)
/* calculate first power of 2 >= num, bounded to what will fit in an int */
static int
-next_pow2_int(long num)
+next_pow2_int(int64 num)
{
if (num > INT_MAX / 2)
num = INT_MAX / 2;
diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h
index c1f668ded95235580e748e05e76e2069b276ae23..8604feca93ba0048cc0c0e9ee7e587fc0fa95cb8 100644 (file)
--- a/src/include/storage/shmem.h
+++ b/src/include/storage/shmem.h
@@ -35,7 +35,7 @@ extern void *ShmemAllocNoError(Size size);
extern void *ShmemAllocUnlocked(Size size);
extern bool ShmemAddrIsValid(const void *addr);
extern void InitShmemIndex(void);
-extern HTAB *ShmemInitHash(const char *name, long init_size, long max_size,
+extern HTAB *ShmemInitHash(const char *name, int64 init_size, int64 max_size,
HASHCTL *infoP, int hash_flags);
extern void *ShmemInitStruct(const char *name, Size size, bool *foundPtr);
extern Size add_size(Size s1, Size s2);
diff --git a/src/include/utils/dynahash.h b/src/include/utils/dynahash.h
index 8a31d9524e2a4b8f8b9d6410636931709f76ef69..a4362d3f65e59632213ad17584325a1df4f570fd 100644 (file)
--- a/src/include/utils/dynahash.h
+++ b/src/include/utils/dynahash.h
@@ -15,6 +15,6 @@
#ifndef DYNAHASH_H
#define DYNAHASH_H
-extern int my_log2(long num);
+extern int my_log2(int64 num);
#endif /* DYNAHASH_H */
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index 80deb8e543e6f89cbcde3ed4186b00c3f3cf9352..cb09a4cbe8cbd3586f875587c0ceef7904887436 100644 (file)
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -65,12 +65,12 @@ typedef struct HTAB HTAB;
typedef struct HASHCTL
{
/* Used if HASH_PARTITION flag is set: */
- long num_partitions; /* # partitions (must be power of 2) */
+ int64 num_partitions; /* # partitions (must be power of 2) */
/* Used if HASH_SEGMENT flag is set: */
- long ssize; /* segment size */
+ int64 ssize; /* segment size */
/* Used if HASH_DIRSIZE flag is set: */
- long dsize; /* (initial) directory size */
- long max_dsize; /* limit to dsize if dir size is limited */
+ int64 dsize; /* (initial) directory size */
+ int64 max_dsize; /* limit to dsize if dir size is limited */
/* Used if HASH_ELEM flag is set (which is now required): */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
@@ -129,7 +129,7 @@ typedef struct
/*
* prototypes for functions in dynahash.c
*/
-extern HTAB *hash_create(const char *tabname, long nelem,
+extern HTAB *hash_create(const char *tabname, int64 nelem,
const HASHCTL *info, int flags);
extern void hash_destroy(HTAB *hashp);
extern void hash_stats(const char *caller, HTAB *hashp);
@@ -141,7 +141,7 @@ extern void *hash_search_with_hash_value(HTAB *hashp, const void *keyPtr,
bool *foundPtr);
extern bool hash_update_hash_key(HTAB *hashp, void *existingEntry,
const void *newKeyPtr);
-extern long hash_get_num_entries(HTAB *hashp);
+extern int64 hash_get_num_entries(HTAB *hashp);
extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp);
extern void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status,
HTAB *hashp,
@@ -149,8 +149,8 @@ extern void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status,
extern void *hash_seq_search(HASH_SEQ_STATUS *status);
extern void hash_seq_term(HASH_SEQ_STATUS *status);
extern void hash_freeze(HTAB *hashp);
-extern Size hash_estimate_size(long num_entries, Size entrysize);
-extern long hash_select_dirsize(long num_entries);
+extern Size hash_estimate_size(int64 num_entries, Size entrysize);
+extern int64 hash_select_dirsize(int64 num_entries);
extern Size hash_get_shared_size(HASHCTL *info, int flags);
extern void AtEOXact_HashTables(bool isCommit);
extern void AtEOSubXact_HashTables(bool isCommit, int nestDepth);
This is the main PostgreSQL git repository.
RSS Atom

AltStyle によって変換されたページ (->オリジナル) /