PostgreSQL Source Code git master
lwlock.c File Reference
#include "postgres.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "port/pg_bitutils.h"
#include "storage/proc.h"
#include "storage/proclist.h"
#include "storage/procnumber.h"
#include "storage/spin.h"
#include "utils/memutils.h"
#include "storage/lwlocklist.h"

Data Structures

struct   LWLockHandle
 
struct   NamedLWLockTrancheRequest
 

Macros

#define  LW_FLAG_HAS_WAITERS   ((uint32) 1 << 31)
 
#define  LW_FLAG_RELEASE_OK   ((uint32) 1 << 30)
 
#define  LW_FLAG_LOCKED   ((uint32) 1 << 29)
 
#define  LW_FLAG_BITS   3
 
#define  LW_FLAG_MASK   (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))
 
#define  LW_VAL_EXCLUSIVE   (MAX_BACKENDS + 1)
 
#define  LW_VAL_SHARED   1
 
#define  LW_SHARED_MASK   MAX_BACKENDS
 
#define  LW_LOCK_MASK   (MAX_BACKENDS | LW_VAL_EXCLUSIVE)
 
#define  PG_LWLOCK(id, lockname)   [id] = CppAsString(lockname),
 
#define  PG_LWLOCKTRANCHE(id, lockname)   [LWTRANCHE_##id] = CppAsString(lockname),
 
#define  MAX_SIMUL_LWLOCKS   200
 
#define  MAX_NAMED_TRANCHES   256
 
#define  T_NAME(lock)    GetLWTrancheName((lock)->tranche)
 
#define  PRINT_LWDEBUG(a, b, c)   ((void)0)
 
#define  LOG_LWDEBUG(a, b, c)   ((void)0)
 

Typedefs

typedef struct LWLockHandle  LWLockHandle
 
typedef struct NamedLWLockTrancheRequest  NamedLWLockTrancheRequest
 

Functions

  StaticAssertDecl (((MAX_BACKENDS + 1) & MAX_BACKENDS)==0, "MAX_BACKENDS + 1 needs to be a power of 2")
 
  StaticAssertDecl ((MAX_BACKENDS & LW_FLAG_MASK)==0, "MAX_BACKENDS and LW_FLAG_MASK overlap")
 
  StaticAssertDecl ((LW_VAL_EXCLUSIVE & LW_FLAG_MASK)==0, "LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap")
 
 
static void  InitializeLWLocks (void)
 
static void  LWLockReportWaitStart (LWLock *lock)
 
static void  LWLockReportWaitEnd (void)
 
static const char *  GetLWTrancheName (uint16 trancheId)
 
static int  NumLWLocksForNamedTranches (void)
 
Size  LWLockShmemSize (void)
 
void  CreateLWLocks (void)
 
void  InitLWLockAccess (void)
 
LWLockPadded *  GetNamedLWLockTranche (const char *tranche_name)
 
int  LWLockNewTrancheId (const char *name)
 
void  RequestNamedLWLockTranche (const char *tranche_name, int num_lwlocks)
 
void  LWLockInitialize (LWLock *lock, int tranche_id)
 
const char *  GetLWLockIdentifier (uint32 classId, uint16 eventId)
 
static bool  LWLockAttemptLock (LWLock *lock, LWLockMode mode)
 
static void  LWLockWaitListLock (LWLock *lock)
 
static void  LWLockWaitListUnlock (LWLock *lock)
 
static void  LWLockWakeup (LWLock *lock)
 
static void  LWLockQueueSelf (LWLock *lock, LWLockMode mode)
 
static void  LWLockDequeueSelf (LWLock *lock)
 
bool  LWLockAcquire (LWLock *lock, LWLockMode mode)
 
bool  LWLockConditionalAcquire (LWLock *lock, LWLockMode mode)
 
bool  LWLockAcquireOrWait (LWLock *lock, LWLockMode mode)
 
static bool  LWLockConflictsWithVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval, bool *result)
 
bool  LWLockWaitForVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval)
 
void  LWLockUpdateVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 
static LWLockMode  LWLockDisownInternal (LWLock *lock)
 
static void  LWLockReleaseInternal (LWLock *lock, LWLockMode mode)
 
void  LWLockDisown (LWLock *lock)
 
void  LWLockRelease (LWLock *lock)
 
void  LWLockReleaseClearVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 
void  LWLockReleaseAll (void)
 
void  ForEachLWLockHeldByMe (void(*callback)(LWLock *, LWLockMode, void *), void *context)
 
bool  LWLockHeldByMe (LWLock *lock)
 
bool  LWLockAnyHeldByMe (LWLock *lock, int nlocks, size_t stride)
 
bool  LWLockHeldByMeInMode (LWLock *lock, LWLockMode mode)
 

Variables

static const char *const  BuiltinTrancheNames []
 
char **  LWLockTrancheNames = NULL
 
LWLockPadded *  MainLWLockArray = NULL
 
static int  num_held_lwlocks = 0
 
static LWLockHandle  held_lwlocks [MAX_SIMUL_LWLOCKS]
 
int  NamedLWLockTrancheRequests = 0
 
NamedLWLockTrancheRequest *  NamedLWLockTrancheRequestArray = NULL
 
static NamedLWLockTrancheRequest *  LocalNamedLWLockTrancheRequestArray = NULL
 
int *  LWLockCounter = NULL
 
static int  LocalLWLockCounter
 

Macro Definition Documentation

LOG_LWDEBUG

#define LOG_LWDEBUG(a, b, c)    ((void)0)

Definition at line 276 of file lwlock.c.

LW_FLAG_BITS

#define LW_FLAG_BITS   3

Definition at line 97 of file lwlock.c.

LW_FLAG_HAS_WAITERS

#define LW_FLAG_HAS_WAITERS   ((uint32) 1 << 31)

Definition at line 94 of file lwlock.c.

LW_FLAG_LOCKED

#define LW_FLAG_LOCKED   ((uint32) 1 << 29)

Definition at line 96 of file lwlock.c.

LW_FLAG_MASK

#define LW_FLAG_MASK   (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))

Definition at line 98 of file lwlock.c.

LW_FLAG_RELEASE_OK

#define LW_FLAG_RELEASE_OK   ((uint32) 1 << 30)

Definition at line 95 of file lwlock.c.

LW_LOCK_MASK

#define LW_LOCK_MASK   (MAX_BACKENDS | LW_VAL_EXCLUSIVE)

Definition at line 106 of file lwlock.c.

LW_SHARED_MASK

#define LW_SHARED_MASK   MAX_BACKENDS

Definition at line 105 of file lwlock.c.

LW_VAL_EXCLUSIVE

#define LW_VAL_EXCLUSIVE   (MAX_BACKENDS + 1)

Definition at line 101 of file lwlock.c.

LW_VAL_SHARED

#define LW_VAL_SHARED   1

Definition at line 102 of file lwlock.c.
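 
Together these values partition the 32-bit lock state word: the low bits (LW_SHARED_MASK) count shared holders, LW_VAL_EXCLUSIVE marks an exclusive holder, and the top LW_FLAG_BITS bits carry the wait-list flags. A minimal standalone sketch of decoding a state word follows; the MAX_BACKENDS value (0x3FFFF, per procnumber.h) is an assumption restated here so the snippet compiles on its own.

#include <stdint.h>
#include <stdio.h>

/* Assumed: MAX_BACKENDS as defined in procnumber.h (0x3FFFF); the static
 * asserts above require MAX_BACKENDS + 1 to be a power of 2. */
#define MAX_BACKENDS        ((uint32_t) 0x3FFFF)
#define LW_VAL_EXCLUSIVE    (MAX_BACKENDS + 1)
#define LW_SHARED_MASK      MAX_BACKENDS
#define LW_FLAG_HAS_WAITERS ((uint32_t) 1 << 31)
#define LW_FLAG_RELEASE_OK  ((uint32_t) 1 << 30)
#define LW_FLAG_LOCKED      ((uint32_t) 1 << 29)

static void
decode_lwlock_state(uint32_t state)
{
	printf("shared holders:  %u\n", state & LW_SHARED_MASK);
	printf("exclusive held:  %s\n", (state & LW_VAL_EXCLUSIVE) ? "yes" : "no");
	printf("has waiters:     %s\n", (state & LW_FLAG_HAS_WAITERS) ? "yes" : "no");
	printf("release ok:      %s\n", (state & LW_FLAG_RELEASE_OK) ? "yes" : "no");
	printf("waitlist locked: %s\n", (state & LW_FLAG_LOCKED) ? "yes" : "no");
}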

MAX_NAMED_TRANCHES

#define MAX_NAMED_TRANCHES   256

Definition at line 204 of file lwlock.c.

MAX_SIMUL_LWLOCKS

#define MAX_SIMUL_LWLOCKS   200

Definition at line 168 of file lwlock.c.

PG_LWLOCK

#define PG_LWLOCK(id, lockname)    [id] = CppAsString(lockname),

PG_LWLOCKTRANCHE

#define PG_LWLOCKTRANCHE(id, lockname)    [LWTRANCHE_##id] = CppAsString(lockname),

PRINT_LWDEBUG

#define PRINT_LWDEBUG(a, b, c)    ((void)0)

Definition at line 275 of file lwlock.c.

T_NAME

#define T_NAME(lock)    GetLWTrancheName((lock)->tranche)

Definition at line 211 of file lwlock.c.

Typedef Documentation

LWLockHandle

typedef struct LWLockHandle LWLockHandle

NamedLWLockTrancheRequest

typedef struct NamedLWLockTrancheRequest NamedLWLockTrancheRequest

Function Documentation

CreateLWLocks()

void CreateLWLocks ( void  )

Definition at line 441 of file lwlock.c.

{
	if (!IsUnderPostmaster)
	{
		Size		spaceLocks = LWLockShmemSize();
		char	   *ptr;

		/* Allocate space */
		ptr = (char *) ShmemAlloc(spaceLocks);

		/* Initialize the dynamic-allocation counter for tranches */
		LWLockCounter = (int *) ptr;
		*LWLockCounter = LWTRANCHE_FIRST_USER_DEFINED;
		ptr += MAXALIGN(sizeof(int));

		/* Initialize tranche names */
		LWLockTrancheNames = (char **) ptr;
		ptr += MAX_NAMED_TRANCHES * sizeof(char *);
		for (int i = 0; i < MAX_NAMED_TRANCHES; i++)
		{
			LWLockTrancheNames[i] = ptr;
			ptr += NAMEDATALEN;
		}

		/*
		 * Move named tranche requests to shared memory.  This is done for the
		 * benefit of EXEC_BACKEND builds, which otherwise wouldn't be able to
		 * call GetNamedLWLockTranche() outside postmaster.
		 */
		if (NamedLWLockTrancheRequests > 0)
		{
			/*
			 * Save the pointer to the request array in postmaster's local
			 * memory.  We'll need it if we ever need to re-initialize shared
			 * memory after a crash.
			 */
			LocalNamedLWLockTrancheRequestArray = NamedLWLockTrancheRequestArray;

			NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *) ptr;
			memcpy(NamedLWLockTrancheRequestArray,
				   LocalNamedLWLockTrancheRequestArray,
				   NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest));
			ptr += NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest);
		}

		/* Ensure desired alignment of LWLock array */
		ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
		MainLWLockArray = (LWLockPadded *) ptr;

		/* Initialize all LWLocks */
		InitializeLWLocks();
	}
}

References i, InitializeLWLocks(), IsUnderPostmaster, LocalNamedLWLockTrancheRequestArray, LWLOCK_PADDED_SIZE, LWLockCounter, LWLockShmemSize(), LWLockTrancheNames, LWTRANCHE_FIRST_USER_DEFINED, MainLWLockArray, MAX_NAMED_TRANCHES, MAXALIGN, NAMEDATALEN, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, and ShmemAlloc().

Referenced by CreateOrAttachShmemStructs().

ForEachLWLockHeldByMe()

void ForEachLWLockHeldByMe ( void (*callback)(LWLock *, LWLockMode, void *), void * context )

Definition at line 1962 of file lwlock.c.

{
	int			i;

	for (i = 0; i < num_held_lwlocks; i++)
		callback(held_lwlocks[i].lock, held_lwlocks[i].mode, context);
}

References callback(), held_lwlocks, i, mode, and num_held_lwlocks.
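 
As an illustration, a hypothetical callback that counts the LWLocks held by the current backend (count_held and count_my_lwlocks are made-up names, not part of lwlock.c):

static void
count_held(LWLock *lock, LWLockMode mode, void *context)
{
	/* context points at the caller's counter */
	(*(int *) context)++;
}

static int
count_my_lwlocks(void)
{
	int			nheld = 0;

	ForEachLWLockHeldByMe(count_held, &nheld);
	return nheld;
}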

GetLWLockIdentifier()

const char * GetLWLockIdentifier ( uint32 classId, uint16 eventId )

Definition at line 773 of file lwlock.c.

{
	Assert(classId == PG_WAIT_LWLOCK);

	/* The event IDs are just tranche numbers. */
	return GetLWTrancheName(eventId);
}

References Assert(), GetLWTrancheName(), and PG_WAIT_LWLOCK.

Referenced by pgstat_get_wait_event().

GetLWTrancheName()

static const char * GetLWTrancheName ( uint16  trancheId )
static

Definition at line 737 of file lwlock.c.

{
	/* Built-in tranche or individual LWLock? */
	if (trancheId < LWTRANCHE_FIRST_USER_DEFINED)
		return BuiltinTrancheNames[trancheId];

	/*
	 * We only ever add new entries to LWLockTrancheNames, so most lookups can
	 * avoid taking the spinlock as long as the backend-local counter
	 * (LocalLWLockCounter) is greater than the requested tranche ID.  Else,
	 * we need to first update the backend-local counter with ShmemLock held
	 * before attempting the lookup again.  In practice, the latter case is
	 * probably rare.
	 */
	if (trancheId >= LocalLWLockCounter)
	{
		SpinLockAcquire(ShmemLock);
		LocalLWLockCounter = *LWLockCounter;
		SpinLockRelease(ShmemLock);

		if (trancheId >= LocalLWLockCounter)
			elog(ERROR, "tranche %d is not registered", trancheId);
	}

	/*
	 * It's an extension tranche, so look in LWLockTrancheNames.
	 */
	trancheId -= LWTRANCHE_FIRST_USER_DEFINED;

	return LWLockTrancheNames[trancheId];
}

References BuiltinTrancheNames, elog, ERROR, LocalLWLockCounter, LWLockCounter, LWLockTrancheNames, LWTRANCHE_FIRST_USER_DEFINED, ShmemLock, SpinLockAcquire, and SpinLockRelease.

Referenced by GetLWLockIdentifier(), and LWLockInitialize().

GetNamedLWLockTranche()

LWLockPadded * GetNamedLWLockTranche ( const char *  tranche_name )

Definition at line 566 of file lwlock.c.

{
	int			lock_pos;
	int			i;

	/*
	 * Obtain the position of base address of LWLock belonging to requested
	 * tranche_name in MainLWLockArray.  LWLocks for named tranches are placed
	 * in MainLWLockArray after fixed locks.
	 */
	lock_pos = NUM_FIXED_LWLOCKS;
	for (i = 0; i < NamedLWLockTrancheRequests; i++)
	{
		if (strcmp(NamedLWLockTrancheRequestArray[i].tranche_name,
				   tranche_name) == 0)
			return &MainLWLockArray[lock_pos];

		lock_pos += NamedLWLockTrancheRequestArray[i].num_lwlocks;
	}

	elog(ERROR, "requested tranche is not registered");

	/* just to keep compiler quiet */
	return NULL;
}

References elog, ERROR, i, MainLWLockArray, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NUM_FIXED_LWLOCKS, and NamedLWLockTrancheRequest::num_lwlocks.

Referenced by pgss_shmem_startup(), and test_lwlock_tranche_lookup().
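 
The usual consumer is an extension loaded via shared_preload_libraries, following the pg_stat_statements pattern: request the locks at shmem-request time, then look them up in the shmem-startup hook. A sketch under those assumptions ("my_extension", my_lock, and the prev_* hook variables are placeholders):

static LWLock *my_lock;		/* hypothetical extension-global pointer */

static void
my_shmem_request(void)
{
	if (prev_shmem_request_hook)
		prev_shmem_request_hook();
	RequestNamedLWLockTranche("my_extension", 1);
}

static void
my_shmem_startup(void)
{
	if (prev_shmem_startup_hook)
		prev_shmem_startup_hook();
	my_lock = &(GetNamedLWLockTranche("my_extension")[0].lock);
}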

InitializeLWLocks()

static void InitializeLWLocks ( void  )
static

Definition at line 498 of file lwlock.c.

{
	int			id;
	int			i;
	int			j;
	LWLockPadded *lock;

	/* Initialize all individual LWLocks in main array */
	for (id = 0, lock = MainLWLockArray; id < NUM_INDIVIDUAL_LWLOCKS; id++, lock++)
		LWLockInitialize(&lock->lock, id);

	/* Initialize buffer mapping LWLocks in main array */
	lock = MainLWLockArray + BUFFER_MAPPING_LWLOCK_OFFSET;
	for (id = 0; id < NUM_BUFFER_PARTITIONS; id++, lock++)
		LWLockInitialize(&lock->lock, LWTRANCHE_BUFFER_MAPPING);

	/* Initialize lmgrs' LWLocks in main array */
	lock = MainLWLockArray + LOCK_MANAGER_LWLOCK_OFFSET;
	for (id = 0; id < NUM_LOCK_PARTITIONS; id++, lock++)
		LWLockInitialize(&lock->lock, LWTRANCHE_LOCK_MANAGER);

	/* Initialize predicate lmgrs' LWLocks in main array */
	lock = MainLWLockArray + PREDICATELOCK_MANAGER_LWLOCK_OFFSET;
	for (id = 0; id < NUM_PREDICATELOCK_PARTITIONS; id++, lock++)
		LWLockInitialize(&lock->lock, LWTRANCHE_PREDICATE_LOCK_MANAGER);

	/*
	 * Copy the info about any named tranches into shared memory (so that
	 * other processes can see it), and initialize the requested LWLocks.
	 */
	if (NamedLWLockTrancheRequests > 0)
	{
		lock = MainLWLockArray + NUM_FIXED_LWLOCKS;

		for (i = 0; i < NamedLWLockTrancheRequests; i++)
		{
			NamedLWLockTrancheRequest *request = &NamedLWLockTrancheRequestArray[i];
			int			tranche;

			tranche = LWLockNewTrancheId(request->tranche_name);

			for (j = 0; j < request->num_lwlocks; j++, lock++)
				LWLockInitialize(&lock->lock, tranche);
		}
	}
}

References BUFFER_MAPPING_LWLOCK_OFFSET, i, j, LWLockPadded::lock, LOCK_MANAGER_LWLOCK_OFFSET, LWLockInitialize(), LWLockNewTrancheId(), MainLWLockArray, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NUM_BUFFER_PARTITIONS, NUM_FIXED_LWLOCKS, NUM_LOCK_PARTITIONS, NamedLWLockTrancheRequest::num_lwlocks, NUM_PREDICATELOCK_PARTITIONS, PREDICATELOCK_MANAGER_LWLOCK_OFFSET, and NamedLWLockTrancheRequest::tranche_name.

Referenced by CreateLWLocks().

InitLWLockAccess()

void InitLWLockAccess ( void  )

Definition at line 550 of file lwlock.c.

{
#ifdef LWLOCK_STATS
	init_lwlock_stats();
#endif
}

Referenced by InitAuxiliaryProcess(), and InitProcess().

LWLockAcquire()

bool LWLockAcquire ( LWLock * lock, LWLockMode mode )

Definition at line 1174 of file lwlock.c.

{
	PGPROC	   *proc = MyProc;
	bool		result = true;
	int			extraWaits = 0;
#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);
#endif

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	PRINT_LWDEBUG("LWLockAcquire", lock, mode);

#ifdef LWLOCK_STATS
	/* Count lock acquisition attempts */
	if (mode == LW_EXCLUSIVE)
		lwstats->ex_acquire_count++;
	else
		lwstats->sh_acquire_count++;
#endif							/* LWLOCK_STATS */

	/*
	 * We can't wait if we haven't got a PGPROC.  This should only occur
	 * during bootstrap or shared memory initialization.  Put an Assert here
	 * to catch unsafe coding practices.
	 */
	Assert(!(proc == NULL && IsUnderPostmaster));

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/*
	 * Loop here to try to acquire lock after each time we are signaled by
	 * LWLockRelease.
	 *
	 * NOTE: it might seem better to have LWLockRelease actually grant us the
	 * lock, rather than retrying and possibly having to go back to sleep. But
	 * in practice that is no good because it means a process swap for every
	 * lock acquisition when two or more processes are contending for the same
	 * lock.  Since LWLocks are normally used to protect not-very-long
	 * sections of computation, a process needs to be able to acquire and
	 * release the same lock many times during a single CPU time slice, even
	 * in the presence of contention.  The efficiency of being able to do that
	 * outweighs the inefficiency of sometimes wasting a process dispatch
	 * cycle because the lock is not free when a released waiter finally gets
	 * to run.  See pgsql-hackers archives for 29-Dec-01.
	 */
	for (;;)
	{
		bool		mustwait;

		/*
		 * Try to grab the lock the first time, we're not in the waitqueue
		 * yet/anymore.
		 */
		mustwait = LWLockAttemptLock(lock, mode);

		if (!mustwait)
		{
			LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
			break;				/* got the lock */
		}

		/*
		 * Ok, at this point we couldn't grab the lock on the first try.  We
		 * cannot simply queue ourselves to the end of the list and wait to be
		 * woken up because by now the lock could long have been released.
		 * Instead add us to the queue and try to grab the lock again.  If we
		 * succeed we need to revert the queuing and be happy, otherwise we
		 * recheck the lock.  If we still couldn't grab it, we know that the
		 * other locker will see our queue entries when releasing since they
		 * existed before we checked for the lock.
		 */

		/* add to the queue */
		LWLockQueueSelf(lock, mode);

		/* we're now guaranteed to be woken up if necessary */
		mustwait = LWLockAttemptLock(lock, mode);

		/* ok, grabbed the lock the second time round, need to undo queueing */
		if (!mustwait)
		{
			LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");

			LWLockDequeueSelf(lock);
			break;
		}

		/*
		 * Wait until awakened.
		 *
		 * It is possible that we get awakened for a reason other than being
		 * signaled by LWLockRelease.  If so, loop back and wait again.  Once
		 * we've gotten the LWLock, re-increment the sema by the number of
		 * additional signals received.
		 */
		LOG_LWDEBUG("LWLockAcquire", lock, "waiting");

#ifdef LWLOCK_STATS
		lwstats->block_count++;
#endif

		LWLockReportWaitStart(lock);
		if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);

		for (;;)
		{
			PGSemaphoreLock(proc->sem);
			if (proc->lwWaiting == LW_WS_NOT_WAITING)
				break;
			extraWaits++;
		}

		/* Retrying, allow LWLockRelease to release waiters again. */
		pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);

#ifdef LOCK_DEBUG
		{
			/* not waiting anymore */
			uint32		nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);

			Assert(nwaiters < MAX_BACKENDS);
		}
#endif

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
		LWLockReportWaitEnd();

		LOG_LWDEBUG("LWLockAcquire", lock, "awakened");

		/* Now loop back and try to acquire lock again. */
		result = false;
	}

	if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_ENABLED())
		TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);

	/* Add lock to list of locks held by this backend */
	held_lwlocks[num_held_lwlocks].lock = lock;
	held_lwlocks[num_held_lwlocks++].mode = mode;

	/*
	 * Fix the process wait semaphore's count for any absorbed wakeups.
	 */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);

	return result;
}

References Assert(), elog, ERROR, held_lwlocks, HOLD_INTERRUPTS, IsUnderPostmaster, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_RELEASE_OK, LW_SHARED, LW_WS_NOT_WAITING, LWLockAttemptLock(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, MyProc, num_held_lwlocks, pg_atomic_fetch_or_u32(), pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, PGPROC::sem, LWLock::state, and T_NAME.

Referenced by _bt_end_vacuum(), _bt_parallel_done(), _bt_parallel_primscan_schedule(), _bt_parallel_release(), _bt_parallel_seize(), _bt_start_vacuum(), _bt_vacuum_cycleid(), AbsorbSyncRequests(), ActivateCommitTs(), AdvanceNextFullTransactionIdPastXid(), AdvanceOldestClogXid(), AdvanceOldestCommitTsXid(), AdvanceXLInsertBuffer(), alloc_object(), AlterSystemSetConfigFile(), ApplyLauncherMain(), apw_detach_shmem(), apw_dump_now(), apw_load_buffers(), asyncQueueAddEntries(), asyncQueueAdvanceTail(), asyncQueueReadAllNotifications(), asyncQueueUnregister(), AtAbort_Twophase(), AtEOXact_LogicalRepWorkers(), AtPrepare_PredicateLocks(), attach_internal(), autoprewarm_main(), autoprewarm_start_worker(), AutoVacLauncherMain(), AutoVacuumRequestWork(), AutoVacWorkerMain(), BackendPidGetProc(), BackendXidGetPid(), BecomeLockGroupLeader(), BecomeLockGroupMember(), btparallelrescan(), BufferAlloc(), CancelDBBackends(), check_for_freed_segments(), CheckDeadLock(), CheckForSerializableConflictOut(), CheckPointPredicate(), CheckPointRelationMap(), CheckPointReplicationOrigin(), CheckPointReplicationSlots(), CheckPointTwoPhase(), CheckTableForSerializableConflictIn(), CheckTargetForConflictsIn(), choose_next_subplan_for_leader(), choose_next_subplan_for_worker(), CleanupInvalidationState(), ClearOldPredicateLocks(), ComputeXidHorizons(), consume_xids_shortcut(), copy_replication_slot(), CountDBBackends(), CountDBConnections(), CountOtherDBBackends(), CountUserBackends(), CreateCheckPoint(), CreateEndOfRecoveryRecord(), CreateInitDecodingContext(), CreatePredicateLock(), CreateRestartPoint(), DeactivateCommitTs(), DeleteChildTargetLocks(), DeleteLockTarget(), destroy_superblock(), do_autovacuum(), do_pg_backup_start(), do_pg_backup_stop(), do_start_worker(), DropAllPredicateLocksFromTable(), DropTableSpace(), dsa_allocate_extended(), dsa_dump(), dsa_free(), dsa_get_total_size(), dsa_pin(), dsa_release_in_place(), dsa_set_size_limit(), dsa_trim(), dsa_unpin(), dshash_delete_key(), dshash_dump(), dshash_find(), dshash_find_or_insert(), dshash_seq_next(), dsm_attach(), dsm_create(), dsm_detach(), dsm_pin_segment(), dsm_unpin_segment(), ensure_active_superblock(), entry_reset(), EvictUnpinnedBufferInternal(), Exec_ListenPreCommit(), ExecParallelHashMergeCounters(), ExecParallelHashPopChunkQueue(), ExecParallelHashTupleAlloc(), ExecParallelHashTuplePrealloc(), ExpireAllKnownAssignedTransactionIds(), ExpireOldKnownAssignedTransactionIds(), ExpireTreeKnownAssignedTransactionIds(), ExtendBufferedRelShared(), ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), ExtendMultiXactOffset(), ExtendSUBTRANS(), FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), FindAndDropRelationBuffers(), FindDeletedTupleInLocalRel(), FinishPreparedTransaction(), FlushDatabaseBuffers(), FlushRelationBuffers(), FlushRelationsAllBuffers(), ForceTransactionIdLimitUpdate(), ForwardSyncRequest(), FreeWorkerInfo(), get_local_synced_slots(), get_val_in_shmem(), get_xid_status(), GetBackgroundWorkerPid(), GetBackgroundWorkerTypeByPid(), GetBlockerStatusData(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLastImportantRecPtr(), GetLastSegSwitchData(), GetLatestCommitTsData(), GetLeaderApplyWorkerPid(), GetLockConflicts(), GetLockStatusData(), GetMultiXactIdMembers(), GetMultiXactInfo(), GetNewMultiXactId(), GetNewObjectId(), GetNewTransactionId(), GetOldestActiveTransactionId(), GetOldestMultiXactId(), GetOldestRestartPoint(), GetOldestSafeDecodingTransactionId(), GetOldestUnsummarizedLSN(), 
GetPredicateLockStatusData(), GetPreparedTransactionList(), GetRunningTransactionData(), GetRunningTransactionLocks(), GetSafeSnapshot(), GetSafeSnapshotBlockingPids(), GetSerializableTransactionSnapshotInt(), GetSnapshotData(), GetStrictOldestNonRemovableTransactionId(), GetVirtualXIDsDelayingChkpt(), GetWaitEventCustomIdentifier(), GetWaitEventCustomNames(), GetWalSummarizerState(), HaveVirtualXIDsDelayingChkpt(), init_conflict_slot_xmin(), init_dsm_registry(), InitWalSender(), injection_shmem_startup(), injection_stats_fixed_reset_all_cb(), injection_stats_fixed_snapshot_cb(), InjectionPointAttach(), InjectionPointDetach(), InjectionPointList(), InstallXLogFileSegment(), InvalidateBuffer(), InvalidateObsoleteReplicationSlots(), InvalidatePossiblyObsoleteSlot(), InvalidateVictimBuffer(), IoWorkerMain(), IsInstallXLogFileSegmentActive(), KnownAssignedXidsCompress(), KnownAssignedXidsReset(), lock_twophase_recover(), LockAcquireExtended(), LockBuffer(), LockErrorCleanup(), LockGXact(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), LockWaiterCount(), logicalrep_launcher_attach_dshmem(), logicalrep_pa_worker_stop(), logicalrep_worker_attach(), logicalrep_worker_detach(), logicalrep_worker_launch(), logicalrep_worker_stop(), logicalrep_worker_stop_internal(), logicalrep_worker_wakeup(), logicalrep_workers_find(), LookupGXact(), LookupGXactBySubid(), MarkAsPrepared(), MarkAsPreparing(), MaybeExtendOffsetSlru(), multixact_redo(), MultiXactAdvanceNextMXact(), MultiXactGetCheckptMulti(), MultiXactIdSetOldestMember(), MultiXactIdSetOldestVisible(), MultiXactSetNextMXact(), PageIsPredicateLocked(), perform_relmap_update(), pg_control_checkpoint(), pg_control_init(), pg_control_recovery(), pg_control_system(), pg_get_replication_slots(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_notification_queue_usage(), pg_show_replication_origin_status(), pg_stat_get_subscription(), pg_stat_statements_internal(), pg_xact_status(), pgaio_worker_die(), pgaio_worker_register(), pgaio_worker_submit_internal(), pgss_shmem_startup(), pgss_store(), pgstat_archiver_reset_all_cb(), pgstat_archiver_snapshot_cb(), pgstat_bgwriter_reset_all_cb(), pgstat_bgwriter_snapshot_cb(), pgstat_build_snapshot(), pgstat_checkpointer_reset_all_cb(), pgstat_checkpointer_snapshot_cb(), pgstat_fetch_replslot(), pgstat_io_flush_cb(), pgstat_io_reset_all_cb(), pgstat_io_snapshot_cb(), pgstat_lock_entry(), pgstat_lock_entry_shared(), pgstat_report_inj_fixed(), pgstat_reset_matching_entries(), pgstat_reset_replslot(), pgstat_reset_slru_counter_internal(), pgstat_slru_flush_cb(), pgstat_slru_snapshot_cb(), pgstat_wal_flush_cb(), pgstat_wal_reset_all_cb(), pgstat_wal_snapshot_cb(), PostPrepare_Locks(), PostPrepare_MultiXact(), PostPrepare_Twophase(), PreCommit_CheckForSerializationFailure(), PreCommit_Notify(), predicatelock_twophase_recover(), PredicateLockPageSplit(), PredicateLockTwoPhaseFinish(), PrefetchSharedBuffer(), PrescanPreparedTransactions(), ProcArrayAdd(), ProcArrayApplyRecoveryInfo(), ProcArrayApplyXidAssignment(), ProcArrayClearTransaction(), ProcArrayEndTransaction(), ProcArrayGetReplicationSlotXmin(), ProcArrayGroupClearXid(), ProcArrayInstallImportedXmin(), ProcArrayInstallRestoredXmin(), ProcArrayRemove(), ProcArraySetReplicationSlotXmin(), process_syncing_tables_for_apply(), ProcKill(), ProcNumberGetTransactionIds(), ProcSleep(), ReachedEndOfBackup(), read_relmap_file(), ReadMultiXactIdRange(), ReadNextFullTransactionId(), ReadNextMultiXactId(), ReadReplicationSlot(), 
RecordNewMultiXact(), RecoverPreparedTransactions(), RegisterDynamicBackgroundWorker(), RegisterPredicateLockingXid(), RelationCacheInitFilePreInvalidate(), RelationMapCopy(), RelationMapFinishBootstrap(), ReleaseOneSerializableXact(), ReleasePredicateLocks(), relmap_redo(), RemoveScratchTarget(), ReplicationOriginExitCleanup(), ReplicationSlotAcquire(), ReplicationSlotCleanup(), ReplicationSlotCreate(), ReplicationSlotDropPtr(), ReplicationSlotName(), ReplicationSlotRelease(), ReplicationSlotsComputeLogicalRestartLSN(), ReplicationSlotsComputeRequiredLSN(), ReplicationSlotsComputeRequiredXmin(), ReplicationSlotsCountDBSlots(), ReplicationSlotsDropDBSlots(), replorigin_advance(), replorigin_get_progress(), replorigin_session_advance(), replorigin_session_get_progress(), replorigin_session_reset(), replorigin_session_setup(), replorigin_state_clear(), resize(), RestoreScratchTarget(), restoreTwoPhaseData(), SaveSlotToPath(), SearchNamedReplicationSlot(), SerialAdd(), SerialGetMinConflictCommitSeqNo(), SerialInit(), SerialSetActiveSerXmin(), set_indexsafe_procflags(), set_val_in_shmem(), SetCommitTsLimit(), SetInstallXLogFileSegmentActive(), SetMultiXactIdLimit(), SetNextObjectId(), SetOffsetVacuumLimit(), SetTransactionIdLimit(), SetXidCommitTsInPage(), SharedInvalBackendInit(), ShmemInitStruct(), SICleanupQueue(), SIGetDataEntries(), SignalBackends(), SignalVirtualTransaction(), SIInsertDataEntries(), SimpleLruReadPage(), SimpleLruReadPage_ReadOnly(), SimpleLruTruncate(), SimpleLruWaitIO(), SimpleLruWriteAll(), SimpleLruZeroAndWritePage(), SlruDeleteSegment(), SlruInternalWritePage(), SnapBuildInitialSnapshot(), ss_get_location(), StandbyRecoverPreparedTransactions(), StandbySlotsHaveCaughtup(), StartupDecodingContext(), StartupSUBTRANS(), StartupXLOG(), sts_parallel_scan_next(), SubTransSetParent(), SummarizeOldestCommittedSxact(), SummarizeWAL(), SwitchIntoArchiveRecovery(), synchronize_one_slot(), SyncOneBuffer(), SyncRepCancelWait(), SyncRepCleanupAtProcExit(), SyncRepReleaseWaiters(), SyncRepUpdateSyncStandbysDefined(), SyncRepWaitForLSN(), TablespaceCreateDbspace(), tbm_shared_iterate(), TerminateBackgroundWorker(), TerminateOtherDBBackends(), test_aio_shmem_startup(), test_slru_page_exists(), test_slru_page_read(), test_slru_page_write(), TransactionGroupUpdateXidStatus(), TransactionIdGetCommitTsData(), TransactionIdIsInProgress(), TransactionIdSetPageStatus(), TransactionTreeSetCommitTsData(), TransferPredicateLocksToNewTarget(), TrimCLOG(), TrimMultiXact(), TruncateMultiXact(), TwoPhaseGetGXact(), TwoPhaseGetOldestXidInCommit(), TwoPhaseGetXidByVirtualXID(), update_cached_xid_range(), update_synced_slots_inactive_since(), UpdateMinRecoveryPoint(), vac_truncate_clog(), vacuum_rel(), VacuumUpdateCosts(), validate_sync_standby_slots(), VirtualXactLock(), VirtualXactLockTableCleanup(), VirtualXactLockTableInsert(), wait_for_relation_state_change(), wait_for_worker_state_change(), WaitEventCustomNew(), WaitForReplicationWorkerAttach(), WaitForWalSummarization(), WakeupWalSummarizer(), WALInsertLockAcquire(), WALInsertLockAcquireExclusive(), WalSummarizerMain(), WalSummarizerShutdown(), write_relcache_init_file(), xact_redo(), XidCacheRemoveRunningXids(), xlog_redo(), XLogBackgroundFlush(), XLogReportParameters(), XLogShutdownWalRcv(), and ZeroAndLockBuffer().
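 
The canonical usage is a short critical section bracketed by acquire and release, with LW_SHARED for readers and LW_EXCLUSIVE for writers. A sketch with a hypothetical structure in shared memory (MySharedState, bump_counter, and read_counter are not part of lwlock.c):

typedef struct MySharedState
{
	LWLock	   *lock;			/* protects counter */
	uint64		counter;
} MySharedState;

static uint64
bump_counter(MySharedState *state)
{
	uint64		result;

	LWLockAcquire(state->lock, LW_EXCLUSIVE);
	result = ++state->counter;
	LWLockRelease(state->lock);

	return result;
}

static uint64
read_counter(MySharedState *state)
{
	uint64		result;

	/* shared mode: concurrent readers do not block each other */
	LWLockAcquire(state->lock, LW_SHARED);
	result = state->counter;
	LWLockRelease(state->lock);

	return result;
}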

LWLockAcquireOrWait()

bool LWLockAcquireOrWait ( LWLock * lock, LWLockMode mode )

Definition at line 1402 of file lwlock.c.

{
	PGPROC	   *proc = MyProc;
	bool		mustwait;
	int			extraWaits = 0;
#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);
#endif

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	PRINT_LWDEBUG("LWLockAcquireOrWait", lock, mode);

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/*
	 * NB: We're using nearly the same twice-in-a-row lock acquisition
	 * protocol as LWLockAcquire().  Check its comments for details.
	 */
	mustwait = LWLockAttemptLock(lock, mode);

	if (mustwait)
	{
		LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);

		mustwait = LWLockAttemptLock(lock, mode);

		if (mustwait)
		{
			/*
			 * Wait until awakened.  Like in LWLockAcquire, be prepared for
			 * bogus wakeups.
			 */
			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");

#ifdef LWLOCK_STATS
			lwstats->block_count++;
#endif

			LWLockReportWaitStart(lock);
			if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
				TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);

			for (;;)
			{
				PGSemaphoreLock(proc->sem);
				if (proc->lwWaiting == LW_WS_NOT_WAITING)
					break;
				extraWaits++;
			}

#ifdef LOCK_DEBUG
			{
				/* not waiting anymore */
				uint32		nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);

				Assert(nwaiters < MAX_BACKENDS);
			}
#endif
			if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
				TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
			LWLockReportWaitEnd();

			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
		}
		else
		{
			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");

			/*
			 * Got lock in the second attempt, undo queueing.  We need to treat
			 * this as having successfully acquired the lock, otherwise we'd
			 * not necessarily wake up people we've prevented from acquiring
			 * the lock.
			 */
			LWLockDequeueSelf(lock);
		}
	}

	/*
	 * Fix the process wait semaphore's count for any absorbed wakeups.
	 */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);

	if (mustwait)
	{
		/* Failed to get lock, so release interrupt holdoff */
		RESUME_INTERRUPTS();
		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
		if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode);
	}
	else
	{
		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
		/* Add lock to list of locks held by this backend */
		held_lwlocks[num_held_lwlocks].lock = lock;
		held_lwlocks[num_held_lwlocks++].mode = mode;
		if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode);
	}

	return !mustwait;
}

References Assert(), elog, ERROR, held_lwlocks, HOLD_INTERRUPTS, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_SHARED, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LWLockAttemptLock(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, MyProc, num_held_lwlocks, pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, RESUME_INTERRUPTS, PGPROC::sem, and T_NAME.

Referenced by XLogFlush().
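 
XLogFlush() relies on the special semantics: a false return means we slept until the lock was released without acquiring it, so another holder may already have done our work, and the caller rechecks rather than flushing unconditionally. A sketch of that shape (work_already_done and do_work are hypothetical helpers; WALWriteLock is the real lock XLogFlush uses):

for (;;)
{
	if (work_already_done())	/* hypothetical recheck */
		break;

	if (LWLockAcquireOrWait(WALWriteLock, LW_EXCLUSIVE))
	{
		/* Got the lock: do the work ourselves, then stop. */
		do_work();				/* hypothetical */
		LWLockRelease(WALWriteLock);
		break;
	}

	/*
	 * Lock was busy and has now been released by its holder; loop back
	 * and recheck whether that holder already did our work.
	 */
}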

LWLockAnyHeldByMe()

bool LWLockAnyHeldByMe ( LWLock * lock, int nlocks, size_t stride )

Definition at line 1995 of file lwlock.c.

{
	char	   *held_lock_addr;
	char	   *begin;
	char	   *end;
	int			i;

	begin = (char *) lock;
	end = begin + nlocks * stride;
	for (i = 0; i < num_held_lwlocks; i++)
	{
		held_lock_addr = (char *) held_lwlocks[i].lock;
		if (held_lock_addr >= begin &&
			held_lock_addr < end &&
			(held_lock_addr - begin) % stride == 0)
			return true;
	}
	return false;
}

References held_lwlocks, i, and num_held_lwlocks.
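 
The stride parameter lets one call test a whole array of embedded locks, as the SLRU code does for its per-buffer locks. A sketch with a hypothetical bank of locks (my_lock_bank and MY_BANK_SIZE are made up):

#define MY_BANK_SIZE 16

static LWLockPadded *my_lock_bank;	/* hypothetical LWLockPadded[MY_BANK_SIZE] */

static void
assert_bank_not_held(void)
{
	Assert(!LWLockAnyHeldByMe(&my_lock_bank[0].lock,
							  MY_BANK_SIZE, sizeof(LWLockPadded)));
}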

LWLockAttemptLock()

static bool LWLockAttemptLock ( LWLock * lock, LWLockMode mode )
static

Definition at line 790 of file lwlock.c.

{
	uint32		old_state;

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	/*
	 * Read once outside the loop, later iterations will get the newer value
	 * via compare & exchange.
	 */
	old_state = pg_atomic_read_u32(&lock->state);

	/* loop until we've determined whether we could acquire the lock or not */
	while (true)
	{
		uint32		desired_state;
		bool		lock_free;

		desired_state = old_state;

		if (mode == LW_EXCLUSIVE)
		{
			lock_free = (old_state & LW_LOCK_MASK) == 0;
			if (lock_free)
				desired_state += LW_VAL_EXCLUSIVE;
		}
		else
		{
			lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0;
			if (lock_free)
				desired_state += LW_VAL_SHARED;
		}

		/*
		 * Attempt to swap in the state we are expecting.  If we didn't see
		 * lock to be free, that's just the old value.  If we saw it as free,
		 * we'll attempt to mark it acquired.  The reason that we always swap
		 * in the value is that this doubles as a memory barrier.  We could try
		 * to be smarter and only swap in values if we saw the lock as free,
		 * but benchmarks haven't shown it as beneficial so far.
		 *
		 * Retry if the value changed since we last looked at it.
		 */
		if (pg_atomic_compare_exchange_u32(&lock->state,
										   &old_state, desired_state))
		{
			if (lock_free)
			{
				/* Great!  Got the lock. */
#ifdef LOCK_DEBUG
				if (mode == LW_EXCLUSIVE)
					lock->owner = MyProc;
#endif
				return false;
			}
			else
				return true;	/* somebody else has the lock */
		}
	}
	pg_unreachable();
}

References Assert(), LW_EXCLUSIVE, LW_LOCK_MASK, LW_SHARED, LW_VAL_EXCLUSIVE, LW_VAL_SHARED, mode, MyProc, pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pg_unreachable, and LWLock::state.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockConditionalAcquire().

LWLockConditionalAcquire()

bool LWLockConditionalAcquire ( LWLock * lock, LWLockMode mode )

Definition at line 1345 of file lwlock.c.

{
	bool		mustwait;

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	PRINT_LWDEBUG("LWLockConditionalAcquire", lock, mode);

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/* Check for the lock */
	mustwait = LWLockAttemptLock(lock, mode);

	if (mustwait)
	{
		/* Failed to get lock, so release interrupt holdoff */
		RESUME_INTERRUPTS();

		LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
		if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode);
	}
	else
	{
		/* Add lock to list of locks held by this backend */
		held_lwlocks[num_held_lwlocks].lock = lock;
		held_lwlocks[num_held_lwlocks++].mode = mode;
		if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode);
	}
	return !mustwait;
}

References Assert(), elog, ERROR, held_lwlocks, HOLD_INTERRUPTS, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_SHARED, LWLockAttemptLock(), MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, num_held_lwlocks, PRINT_LWDEBUG, RESUME_INTERRUPTS, and T_NAME.

Referenced by ConditionalLockBuffer(), GetVictimBuffer(), pgstat_io_flush_cb(), pgstat_lock_entry(), pgstat_lock_entry_shared(), pgstat_slru_flush_cb(), pgstat_wal_flush_cb(), ProcArrayEndTransaction(), SimpleLruWaitIO(), ss_report_location(), TransactionIdSetPageStatus(), and XLogNeedsFlush().
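 
Typical callers, such as the pgstat flush callbacks, treat a busy lock as "skip the work for now" rather than blocking. A sketch with a hypothetical entry type (MyEntry and write_out are made-up names):

static bool
flush_entry_if_possible(MyEntry *entry)
{
	if (!LWLockConditionalAcquire(&entry->lock, LW_EXCLUSIVE))
		return false;			/* busy: caller can retry later */

	write_out(entry);			/* hypothetical flush work */
	LWLockRelease(&entry->lock);

	return true;
}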

LWLockConflictsWithVar()

static bool LWLockConflictsWithVar ( LWLock * lock, pg_atomic_uint64 * valptr, uint64 oldval, uint64 * newval, bool * result )
static

Definition at line 1529 of file lwlock.c.

{
	bool		mustwait;
	uint64		value;

	/*
	 * Test first to see if the slot is free right now.
	 *
	 * XXX: the unique caller of this routine, WaitXLogInsertionsToFinish()
	 * via LWLockWaitForVar(), uses an implied barrier with a spinlock before
	 * this, so we don't need a memory barrier here as far as the current
	 * usage is concerned.  But that might not be safe in general.
	 */
	mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0;

	if (!mustwait)
	{
		*result = true;
		return false;
	}

	*result = false;

	/*
	 * Reading this value atomically is safe even on platforms where uint64
	 * cannot be read without observing a torn value.
	 */
	value = pg_atomic_read_u64(valptr);

	if (value != oldval)
	{
		mustwait = false;
		*newval = value;
	}
	else
	{
		mustwait = true;
	}

	return mustwait;
}

References LW_VAL_EXCLUSIVE, newval, pg_atomic_read_u32(), pg_atomic_read_u64(), LWLock::state, and value.

Referenced by LWLockWaitForVar().

LWLockDequeueSelf()

static void LWLockDequeueSelf ( LWLock * lock )
static

Definition at line 1085 of file lwlock.c.

{
	bool		on_waitlist;

#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);

	lwstats->dequeue_self_count++;
#endif

	LWLockWaitListLock(lock);

	/*
	 * Remove ourselves from the waitlist, unless we've already been removed.
	 * The removal happens with the wait list lock held, so there's no race in
	 * this check.
	 */
	on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
	if (on_waitlist)
		proclist_delete(&lock->waiters, MyProcNumber, lwWaitLink);

	if (proclist_is_empty(&lock->waiters) &&
		(pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS) != 0)
	{
		pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_HAS_WAITERS);
	}

	/* XXX: combine with fetch_and above? */
	LWLockWaitListUnlock(lock);

	/* clear waiting state again, nice for debugging */
	if (on_waitlist)
		MyProc->lwWaiting = LW_WS_NOT_WAITING;
	else
	{
		int			extraWaits = 0;

		/*
		 * Somebody else dequeued us and has or will wake us up.  Deal with the
		 * superfluous absorption of a wakeup.
		 */

		/*
		 * Reset RELEASE_OK flag if somebody woke us before we removed
		 * ourselves - they'll have set it to false.
		 */
		pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);

		/*
		 * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
		 * get reset at some inconvenient point later.  Most of the time this
		 * will immediately return.
		 */
		for (;;)
		{
			PGSemaphoreLock(MyProc->sem);
			if (MyProc->lwWaiting == LW_WS_NOT_WAITING)
				break;
			extraWaits++;
		}

		/*
		 * Fix the process wait semaphore's count for any absorbed wakeups.
		 */
		while (extraWaits-- > 0)
			PGSemaphoreUnlock(MyProc->sem);
	}

#ifdef LOCK_DEBUG
	{
		/* not waiting anymore */
		uint32		nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);

		Assert(nwaiters < MAX_BACKENDS);
	}
#endif
}

References Assert(), LW_FLAG_HAS_WAITERS, LW_FLAG_RELEASE_OK, LW_WS_NOT_WAITING, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, MAX_BACKENDS, MyProc, MyProcNumber, pg_atomic_fetch_and_u32(), pg_atomic_fetch_or_u32(), pg_atomic_fetch_sub_u32(), pg_atomic_read_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), proclist_delete, proclist_is_empty(), PGPROC::sem, LWLock::state, and LWLock::waiters.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

LWLockDisown()

void LWLockDisown ( LWLock * lock )

Definition at line 1883 of file lwlock.c.

{
	LWLockDisownInternal(lock);

	RESUME_INTERRUPTS();
}

References LWLockDisownInternal(), and RESUME_INTERRUPTS.

Referenced by buffer_stage_common().

LWLockDisownInternal()

static LWLockMode LWLockDisownInternal ( LWLock * lock )
inline static

Definition at line 1800 of file lwlock.c.

{
	LWLockMode	mode;
	int			i;

	/*
	 * Remove lock from list of locks held.  Usually, but not always, it will
	 * be the latest-acquired lock; so search array backwards.
	 */
	for (i = num_held_lwlocks; --i >= 0;)
		if (lock == held_lwlocks[i].lock)
			break;

	if (i < 0)
		elog(ERROR, "lock %s is not held", T_NAME(lock));

	mode = held_lwlocks[i].mode;

	num_held_lwlocks--;
	for (; i < num_held_lwlocks; i++)
		held_lwlocks[i] = held_lwlocks[i + 1];

	return mode;
}

References elog, ERROR, held_lwlocks, i, LWLockHandle::mode, mode, num_held_lwlocks, and T_NAME.

Referenced by LWLockDisown(), and LWLockRelease().

LWLockHeldByMe()

bool LWLockHeldByMe ( LWLock * lock )

Definition at line 1977 of file lwlock.c.

{
	int			i;

	for (i = 0; i < num_held_lwlocks; i++)
	{
		if (held_lwlocks[i].lock == lock)
			return true;
	}
	return false;
}

References held_lwlocks, i, and num_held_lwlocks.

Referenced by alloc_object(), autovac_recalculate_workers_for_balance(), buffer_stage_common(), check_for_freed_segments_locked(), CompactCheckpointerRequestQueue(), delete_item(), DeleteLockTarget(), dshash_dump(), ensure_active_superblock(), FlushOneBuffer(), get_best_segment(), GetLockHoldersAndWaiters(), GetOldestSafeDecodingTransactionId(), GetSnapshotDataReuse(), init_span(), InvalidatePossiblyObsoleteSlot(), logicalrep_pa_worker_count(), logicalrep_sync_worker_count(), logicalrep_worker_find(), logicalrep_worker_wakeup_ptr(), logicalrep_workers_find(), MaintainLatestCompletedXid(), MaintainLatestCompletedXidRecovery(), make_new_segment(), MarkBufferDirtyHint(), OnConflict_CheckForSerializationFailure(), ProcArrayEndTransaction(), ProcArraySetReplicationSlotXmin(), ProcSleep(), ReleaseOneSerializableXact(), RemoveScratchTarget(), RemoveTargetIfNoLongerUsed(), resize(), RestoreScratchTarget(), SetNewSxactGlobalXmin(), SlruSelectLRUPage(), test_slru_page_readonly(), TransactionIdInRecentPast(), TwoPhaseGetGXact(), UnpinBufferNoOwner(), and VacuumUpdateCosts().
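 
Most call sites are assertions that document a function's locking contract; a sketch (set_slot_xmin and MySlot are hypothetical, ProcArrayLock is a real individual LWLock):

static void
set_slot_xmin(MySlot *slot, TransactionId xmin)
{
	/* Caller must already hold ProcArrayLock in either mode. */
	Assert(LWLockHeldByMe(ProcArrayLock));

	slot->effective_xmin = xmin;
}

When a specific mode is required, LWLockHeldByMeInMode() (below) is the stricter check.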

LWLockHeldByMeInMode()

bool LWLockHeldByMeInMode ( LWLock * lock, LWLockMode mode )

Definition at line 2021 of file lwlock.c.

{
	int			i;

	for (i = 0; i < num_held_lwlocks; i++)
	{
		if (held_lwlocks[i].lock == lock && held_lwlocks[i].mode == mode)
			return true;
	}
	return false;
}

References held_lwlocks, i, mode, and num_held_lwlocks.

Referenced by BufferIsDirty(), BufferIsExclusiveLocked(), DeleteLockTarget(), dshash_delete_current(), dshash_delete_entry(), dshash_seq_next(), InvalidatePossiblyObsoleteSlot(), IsBufferCleanupOK(), JoinWaitQueue(), logicalrep_worker_cleanup(), logicalrep_worker_stop_internal(), MarkAsPreparingGuts(), MarkBufferDirty(), pgstat_create_replslot(), pgstat_drop_replslot(), PrepareRedoAdd(), PrepareRedoRemoveFull(), ProcArrayEndTransactionInternal(), ProcessTwoPhaseBuffer(), RemoveGXact(), SimpleLruReadPage(), SimpleLruZeroPage(), SlruInternalWritePage(), SyncRepWakeQueue(), TransactionIdSetPageStatusInternal(), TransactionIdSetStatusBit(), TransferPredicateLocksToNewTarget(), and write_relmap_file().

LWLockInitialize()

void LWLockInitialize ( LWLock * lock, int tranche_id )

Definition at line 698 of file lwlock.c.

{
	/* verify the tranche_id is valid */
	(void) GetLWTrancheName(tranche_id);

	pg_atomic_init_u32(&lock->state, LW_FLAG_RELEASE_OK);
#ifdef LOCK_DEBUG
	pg_atomic_init_u32(&lock->nwaiters, 0);
#endif
	lock->tranche = tranche_id;
	proclist_init(&lock->waiters);
}

References GetLWTrancheName(), LW_FLAG_RELEASE_OK, pg_atomic_init_u32(), proclist_init(), LWLock::state, LWLock::tranche, and LWLock::waiters.

Referenced by apw_init_state(), btinitparallelscan(), BufferManagerShmemInit(), create_internal(), dshash_create(), ExecAppendInitializeDSM(), ExecHashJoinInitializeDSM(), init_tdr_dsm(), InitializeLWLocks(), InitProcGlobal(), injection_stats_fixed_init_shmem_cb(), pgstat_archiver_init_shmem_cb(), pgstat_bgwriter_init_shmem_cb(), pgstat_checkpointer_init_shmem_cb(), pgstat_init_entry(), pgstat_io_init_shmem_cb(), pgstat_slru_init_shmem_cb(), pgstat_wal_init_shmem_cb(), PredicateLockShmemInit(), ReplicationOriginShmemInit(), ReplicationSlotsShmemInit(), SimpleLruInit(), sts_initialize(), tbm_prepare_shared_iterate(), test_lwlock_initialize(), and XLOGShmemInit().

LWLockNewTrancheId()

int LWLockNewTrancheId ( const char *  name )

Definition at line 596 of file lwlock.c.

{
	int			result;

	if (!name)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_NAME),
				 errmsg("tranche name cannot be NULL")));

	if (strlen(name) >= NAMEDATALEN)
		ereport(ERROR,
				(errcode(ERRCODE_NAME_TOO_LONG),
				 errmsg("tranche name too long"),
				 errdetail("LWLock tranche names must be no longer than %d bytes.",
						   NAMEDATALEN - 1)));

	/*
	 * We use the ShmemLock spinlock to protect LWLockCounter and
	 * LWLockTrancheNames.
	 */
	SpinLockAcquire(ShmemLock);

	if (*LWLockCounter - LWTRANCHE_FIRST_USER_DEFINED >= MAX_NAMED_TRANCHES)
	{
		SpinLockRelease(ShmemLock);
		ereport(ERROR,
				(errmsg("maximum number of tranches already registered"),
				 errdetail("No more than %d tranches may be registered.",
						   MAX_NAMED_TRANCHES)));
	}

	result = (*LWLockCounter)++;
	LocalLWLockCounter = *LWLockCounter;
	strlcpy(LWLockTrancheNames[result - LWTRANCHE_FIRST_USER_DEFINED], name, NAMEDATALEN);

	SpinLockRelease(ShmemLock);

	return result;
}

References ereport, errcode(), errdetail(), errmsg(), ERROR, LocalLWLockCounter, LWLockCounter, LWLockTrancheNames, LWTRANCHE_FIRST_USER_DEFINED, MAX_NAMED_TRANCHES, name, NAMEDATALEN, ShmemLock, SpinLockAcquire, SpinLockRelease, and strlcpy().

Referenced by apw_init_state(), GetNamedDSA(), GetNamedDSHash(), init_tdr_dsm(), InitializeLWLocks(), test_basic(), test_create(), test_dsa_basic(), test_dsa_resowners(), test_empty(), test_lwlock_tranche_creation(), test_lwlock_tranches(), test_random(), and test_slru_shmem_startup().
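 
For locks placed in extension-managed shared memory (a DSM segment, say), the pattern is to register a tranche name once and stamp each lock with the returned id via LWLockInitialize(). A sketch (MySharedArea and "my_dsm_area" are hypothetical; on this git-master API the name is passed directly, where older releases used a separate LWLockRegisterTranche() step):

typedef struct MySharedArea
{
	LWLock		lock;			/* hypothetical lock inside a DSM segment */
} MySharedArea;

static void
init_my_shared_area(MySharedArea *area)
{
	int			tranche_id = LWLockNewTrancheId("my_dsm_area");

	LWLockInitialize(&area->lock, tranche_id);
}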

LWLockQueueSelf()

static void LWLockQueueSelf ( LWLock * lock, LWLockMode mode )
static

Definition at line 1042 of file lwlock.c.

{
	/*
	 * If we don't have a PGPROC structure, there's no way to wait.  This
	 * should never occur, since MyProc should only be null during shared
	 * memory initialization.
	 */
	if (MyProc == NULL)
		elog(PANIC, "cannot wait without a PGPROC structure");

	if (MyProc->lwWaiting != LW_WS_NOT_WAITING)
		elog(PANIC, "queueing for lock while waiting on another one");

	LWLockWaitListLock(lock);

	/* setting the flag is protected by the spinlock */
	pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_HAS_WAITERS);

	MyProc->lwWaiting = LW_WS_WAITING;
	MyProc->lwWaitMode = mode;

	/* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */
	if (mode == LW_WAIT_UNTIL_FREE)
		proclist_push_head(&lock->waiters, MyProcNumber, lwWaitLink);
	else
		proclist_push_tail(&lock->waiters, MyProcNumber, lwWaitLink);

	/* Can release the mutex now */
	LWLockWaitListUnlock(lock);

#ifdef LOCK_DEBUG
	pg_atomic_fetch_add_u32(&lock->nwaiters, 1);
#endif
}

References elog, LW_FLAG_HAS_WAITERS, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, mode, MyProc, MyProcNumber, PANIC, pg_atomic_fetch_add_u32(), pg_atomic_fetch_or_u32(), proclist_push_head, proclist_push_tail, LWLock::state, and LWLock::waiters.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

LWLockRelease()

void LWLockRelease ( LWLock * lock )

Definition at line 1894 of file lwlock.c.

{
	LWLockMode	mode;

	mode = LWLockDisownInternal(lock);

	PRINT_LWDEBUG("LWLockRelease", lock, mode);

	LWLockReleaseInternal(lock, mode);

	/*
	 * Now okay to allow cancel/die interrupts.
	 */
	RESUME_INTERRUPTS();
}

References LWLockDisownInternal(), LWLockReleaseInternal(), mode, PRINT_LWDEBUG, and RESUME_INTERRUPTS.

Referenced by _bt_end_vacuum(), _bt_parallel_done(), _bt_parallel_primscan_schedule(), _bt_parallel_release(), _bt_parallel_seize(), _bt_start_vacuum(), _bt_vacuum_cycleid(), AbsorbSyncRequests(), ActivateCommitTs(), AdvanceNextFullTransactionIdPastXid(), AdvanceOldestClogXid(), AdvanceOldestCommitTsXid(), AdvanceXLInsertBuffer(), alloc_object(), AlterSystemSetConfigFile(), ApplyLauncherMain(), apw_detach_shmem(), apw_dump_now(), apw_load_buffers(), asyncQueueAddEntries(), asyncQueueAdvanceTail(), asyncQueueReadAllNotifications(), asyncQueueUnregister(), AtAbort_Twophase(), AtEOXact_LogicalRepWorkers(), AtPrepare_PredicateLocks(), attach_internal(), autoprewarm_main(), autoprewarm_start_worker(), AutoVacLauncherMain(), AutoVacuumRequestWork(), AutoVacWorkerMain(), BackendPidGetProc(), BackendXidGetPid(), BecomeLockGroupLeader(), BecomeLockGroupMember(), btparallelrescan(), BufferAlloc(), CancelDBBackends(), check_for_freed_segments(), CheckDeadLock(), CheckForSerializableConflictOut(), CheckPointPredicate(), CheckPointRelationMap(), CheckPointReplicationOrigin(), CheckPointReplicationSlots(), CheckPointTwoPhase(), CheckTableForSerializableConflictIn(), CheckTargetForConflictsIn(), choose_next_subplan_for_leader(), choose_next_subplan_for_worker(), CleanupInvalidationState(), ClearOldPredicateLocks(), ComputeXidHorizons(), consume_xids_shortcut(), copy_replication_slot(), CountDBBackends(), CountDBConnections(), CountOtherDBBackends(), CountUserBackends(), CreateCheckPoint(), CreateEndOfRecoveryRecord(), CreateInitDecodingContext(), CreatePredicateLock(), CreateRestartPoint(), DeactivateCommitTs(), DeleteChildTargetLocks(), DeleteLockTarget(), destroy_superblock(), do_autovacuum(), do_pg_backup_start(), do_pg_backup_stop(), do_start_worker(), DropAllPredicateLocksFromTable(), DropTableSpace(), dsa_allocate_extended(), dsa_dump(), dsa_free(), dsa_get_total_size(), dsa_pin(), dsa_release_in_place(), dsa_set_size_limit(), dsa_trim(), dsa_unpin(), dshash_delete_entry(), dshash_delete_key(), dshash_dump(), dshash_find(), dshash_find_or_insert(), dshash_release_lock(), dshash_seq_next(), dshash_seq_term(), dsm_attach(), dsm_create(), dsm_detach(), dsm_pin_segment(), dsm_unpin_segment(), ensure_active_superblock(), entry_reset(), EvictUnpinnedBufferInternal(), Exec_ListenPreCommit(), ExecParallelHashMergeCounters(), ExecParallelHashPopChunkQueue(), ExecParallelHashTupleAlloc(), ExecParallelHashTuplePrealloc(), ExpireAllKnownAssignedTransactionIds(), ExpireOldKnownAssignedTransactionIds(), ExpireTreeKnownAssignedTransactionIds(), ExtendBufferedRelShared(), ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), ExtendMultiXactOffset(), ExtendSUBTRANS(), FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), find_multixact_start(), FindAndDropRelationBuffers(), FindDeletedTupleInLocalRel(), FinishPreparedTransaction(), FlushDatabaseBuffers(), FlushRelationBuffers(), FlushRelationsAllBuffers(), ForceTransactionIdLimitUpdate(), ForwardSyncRequest(), FreeWorkerInfo(), get_local_synced_slots(), get_val_in_shmem(), get_xid_status(), GetBackgroundWorkerPid(), GetBackgroundWorkerTypeByPid(), GetBlockerStatusData(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLastImportantRecPtr(), GetLastSegSwitchData(), GetLatestCommitTsData(), GetLeaderApplyWorkerPid(), GetLockConflicts(), GetLockStatusData(), GetMultiXactIdMembers(), GetMultiXactInfo(), GetNewMultiXactId(), GetNewObjectId(), GetNewTransactionId(), GetOldestActiveTransactionId(), GetOldestMultiXactId(), GetOldestRestartPoint(), 
GetOldestSafeDecodingTransactionId(), GetOldestUnsummarizedLSN(), GetPredicateLockStatusData(), GetPreparedTransactionList(), GetRunningTransactionLocks(), GetSafeSnapshot(), GetSafeSnapshotBlockingPids(), GetSerializableTransactionSnapshotInt(), GetSnapshotData(), GetStrictOldestNonRemovableTransactionId(), GetVictimBuffer(), GetVirtualXIDsDelayingChkpt(), GetWaitEventCustomIdentifier(), GetWaitEventCustomNames(), GetWalSummarizerState(), HaveVirtualXIDsDelayingChkpt(), init_conflict_slot_xmin(), init_dsm_registry(), InitWalSender(), injection_shmem_startup(), injection_stats_fixed_reset_all_cb(), injection_stats_fixed_snapshot_cb(), InjectionPointAttach(), InjectionPointDetach(), InjectionPointList(), InstallXLogFileSegment(), InvalidateBuffer(), InvalidateObsoleteReplicationSlots(), InvalidatePossiblyObsoleteSlot(), InvalidateVictimBuffer(), IoWorkerMain(), IsInstallXLogFileSegmentActive(), KnownAssignedXidsCompress(), KnownAssignedXidsReset(), lock_twophase_recover(), LockAcquireExtended(), LockBuffer(), LockErrorCleanup(), LockGXact(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), LockWaiterCount(), logicalrep_launcher_attach_dshmem(), logicalrep_pa_worker_stop(), logicalrep_worker_attach(), logicalrep_worker_detach(), logicalrep_worker_launch(), logicalrep_worker_stop(), logicalrep_worker_stop_internal(), logicalrep_worker_wakeup(), logicalrep_workers_find(), LogStandbySnapshot(), LookupGXact(), LookupGXactBySubid(), LWLockReleaseAll(), LWLockReleaseClearVar(), MarkAsPrepared(), MarkAsPreparing(), MaybeExtendOffsetSlru(), multixact_redo(), MultiXactAdvanceNextMXact(), MultiXactGetCheckptMulti(), MultiXactIdSetOldestMember(), MultiXactIdSetOldestVisible(), MultiXactSetNextMXact(), OnConflict_CheckForSerializationFailure(), PageIsPredicateLocked(), perform_relmap_update(), pg_control_checkpoint(), pg_control_init(), pg_control_recovery(), pg_control_system(), pg_get_replication_slots(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_notification_queue_usage(), pg_show_replication_origin_status(), pg_stat_get_subscription(), pg_stat_statements_internal(), pg_xact_status(), pgaio_worker_die(), pgaio_worker_register(), pgaio_worker_submit_internal(), pgss_shmem_startup(), pgss_store(), pgstat_archiver_reset_all_cb(), pgstat_archiver_snapshot_cb(), pgstat_bgwriter_reset_all_cb(), pgstat_bgwriter_snapshot_cb(), pgstat_build_snapshot(), pgstat_checkpointer_reset_all_cb(), pgstat_checkpointer_snapshot_cb(), pgstat_fetch_replslot(), pgstat_io_flush_cb(), pgstat_io_reset_all_cb(), pgstat_io_snapshot_cb(), pgstat_report_inj_fixed(), pgstat_reset_matching_entries(), pgstat_reset_replslot(), pgstat_reset_slru_counter_internal(), pgstat_slru_flush_cb(), pgstat_slru_snapshot_cb(), pgstat_unlock_entry(), pgstat_wal_flush_cb(), pgstat_wal_reset_all_cb(), pgstat_wal_snapshot_cb(), PostPrepare_Locks(), PostPrepare_MultiXact(), PostPrepare_Twophase(), PreCommit_CheckForSerializationFailure(), PreCommit_Notify(), predicatelock_twophase_recover(), PredicateLockPageSplit(), PredicateLockTwoPhaseFinish(), PrefetchSharedBuffer(), PrescanPreparedTransactions(), ProcArrayAdd(), ProcArrayApplyRecoveryInfo(), ProcArrayApplyXidAssignment(), ProcArrayClearTransaction(), ProcArrayEndTransaction(), ProcArrayGetReplicationSlotXmin(), ProcArrayGroupClearXid(), ProcArrayInstallImportedXmin(), ProcArrayInstallRestoredXmin(), ProcArrayRemove(), ProcArraySetReplicationSlotXmin(), process_syncing_tables_for_apply(), ProcKill(), ProcNumberGetTransactionIds(), ProcSleep(), 
ReachedEndOfBackup(), read_relmap_file(), ReadMultiXactIdRange(), ReadNextFullTransactionId(), ReadNextMultiXactId(), ReadReplicationSlot(), RecordNewMultiXact(), RecoverPreparedTransactions(), RegisterDynamicBackgroundWorker(), RegisterPredicateLockingXid(), RelationCacheInitFilePostInvalidate(), RelationMapCopy(), RelationMapFinishBootstrap(), ReleaseOneSerializableXact(), ReleasePredicateLocks(), relmap_redo(), RemoveScratchTarget(), ReplicationOriginExitCleanup(), ReplicationSlotAcquire(), ReplicationSlotCleanup(), ReplicationSlotCreate(), ReplicationSlotDropPtr(), ReplicationSlotName(), ReplicationSlotRelease(), ReplicationSlotsComputeLogicalRestartLSN(), ReplicationSlotsComputeRequiredLSN(), ReplicationSlotsComputeRequiredXmin(), ReplicationSlotsCountDBSlots(), ReplicationSlotsDropDBSlots(), replorigin_advance(), replorigin_get_progress(), replorigin_session_advance(), replorigin_session_get_progress(), replorigin_session_reset(), replorigin_session_setup(), replorigin_state_clear(), resize(), RestoreScratchTarget(), restoreTwoPhaseData(), SaveSlotToPath(), SearchNamedReplicationSlot(), SerialAdd(), SerialGetMinConflictCommitSeqNo(), SerialInit(), SerialSetActiveSerXmin(), set_indexsafe_procflags(), set_val_in_shmem(), SetCommitTsLimit(), SetInstallXLogFileSegmentActive(), SetMultiXactIdLimit(), SetNextObjectId(), SetOffsetVacuumLimit(), SetTransactionIdLimit(), SetXidCommitTsInPage(), SharedInvalBackendInit(), ShmemInitStruct(), SICleanupQueue(), SIGetDataEntries(), SignalBackends(), SignalVirtualTransaction(), SIInsertDataEntries(), SimpleLruReadPage(), SimpleLruReadPage_ReadOnly(), SimpleLruTruncate(), SimpleLruWaitIO(), SimpleLruWriteAll(), SimpleLruZeroAndWritePage(), SlruDeleteSegment(), SlruInternalWritePage(), SnapBuildInitialSnapshot(), ss_get_location(), ss_report_location(), StandbyRecoverPreparedTransactions(), StandbySlotsHaveCaughtup(), StartupDecodingContext(), StartupSUBTRANS(), StartupXLOG(), sts_parallel_scan_next(), SubTransGetParent(), SubTransSetParent(), SummarizeOldestCommittedSxact(), SummarizeWAL(), SwitchIntoArchiveRecovery(), synchronize_one_slot(), SyncOneBuffer(), SyncRepCancelWait(), SyncRepCleanupAtProcExit(), SyncRepReleaseWaiters(), SyncRepUpdateSyncStandbysDefined(), SyncRepWaitForLSN(), TablespaceCreateDbspace(), tbm_shared_iterate(), TerminateBackgroundWorker(), TerminateOtherDBBackends(), test_aio_shmem_startup(), test_slru_page_exists(), test_slru_page_read(), test_slru_page_readonly(), test_slru_page_write(), TransactionGroupUpdateXidStatus(), TransactionIdGetCommitTsData(), TransactionIdGetStatus(), TransactionIdIsInProgress(), TransactionIdSetPageStatus(), TransactionTreeSetCommitTsData(), TransferPredicateLocksToNewTarget(), TrimCLOG(), TrimMultiXact(), TruncateMultiXact(), TwoPhaseGetGXact(), TwoPhaseGetOldestXidInCommit(), TwoPhaseGetXidByVirtualXID(), update_cached_xid_range(), update_synced_slots_inactive_since(), UpdateMinRecoveryPoint(), vac_truncate_clog(), vacuum_rel(), VacuumUpdateCosts(), validate_sync_standby_slots(), VirtualXactLock(), VirtualXactLockTableCleanup(), VirtualXactLockTableInsert(), wait_for_relation_state_change(), wait_for_worker_state_change(), WaitEventCustomNew(), WaitForReplicationWorkerAttach(), WaitForWalSummarization(), WakeupWalSummarizer(), WalSummarizerMain(), WalSummarizerShutdown(), write_relcache_init_file(), xact_redo(), XidCacheRemoveRunningXids(), xlog_redo(), XLogBackgroundFlush(), XLogFlush(), XLogNeedsFlush(), XLogReportParameters(), and XLogShutdownWalRcv().

LWLockReleaseAll()

void LWLockReleaseAll ( void  )

Definition at line 1945 of file lwlock.c.

1946{
1947 while (num_held_lwlocks > 0)
1948 {
1949 HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
1950
1951 LWLockRelease(held_lwlocks[num_held_lwlocks - 1].lock);
1952 }
1953}
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1894

References held_lwlocks, HOLD_INTERRUPTS, LWLockRelease(), and num_held_lwlocks.

Referenced by AbortSubTransaction(), AbortTransaction(), AutoVacLauncherMain(), AuxiliaryProcKill(), BackgroundWriterMain(), CheckpointerMain(), IoWorkerMain(), pgarch_archiveXlog(), ProcKill(), ShutdownAuxiliaryProcess(), WalSndErrorCleanup(), WalSummarizerMain(), and WalWriterMain().
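All of the callers above are error-recovery or process-shutdown paths: once elog(ERROR) or FATAL has transferred control out of arbitrary code, the backend can no longer know which individual LWLockAcquire() calls were left unmatched. A minimal sketch of the sigsetjmp() recovery idiom used by the main loops listed above (WalWriterMain() and friends); the elided steps and the local variable name are illustrative, not a complete error handler:

sigjmp_buf local_sigjmp_buf;

if (sigsetjmp(local_sigjmp_buf, 1) != 0)
{
 /* We longjmp'ed here from elog(ERROR) somewhere in the main loop. */
 error_context_stack = NULL;

 /* Prevent interrupts while cleaning up. */
 HOLD_INTERRUPTS();

 /* Drop every lwlock this process still holds. */
 LWLockReleaseAll();

 /* ... report the error, release other resources, etc. ... */

 RESUME_INTERRUPTS();
}

/* Arm the handler before entering the main loop proper. */
PG_exception_stack = &local_sigjmp_buf;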

LWLockReleaseClearVar()

void LWLockReleaseClearVar ( LWLock * lock,
pg_atomic_uint64 * valptr,
uint64  val 
)

Definition at line 1923 of file lwlock.c.

1924{
1925 /*
1926 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1927 * that the variable is updated before releasing the lock.
1928 */
1929 pg_atomic_exchange_u64(valptr, val);
1930
1931 LWLockRelease(lock);
1932}
static uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
Definition: atomics.h:501
long val
Definition: informix.c:689

References LWLockRelease(), pg_atomic_exchange_u64(), and val.

Referenced by WALInsertLockRelease().
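The single caller above, WALInsertLockRelease(), uses this to reset a WAL insertion lock's insertingAt position as the lock is released. A condensed sketch of the protocol under illustrative names (mylock and myvar stand in for a WALInsertLocks[] entry):

LWLockAcquire(mylock, LW_EXCLUSIVE);

/* Publish progress through the protected variable while working. */
LWLockUpdateVar(mylock, myvar, current_position);

/* ... do the work the variable describes ... */

/*
 * Reset the variable and release the lock; the full barrier in
 * pg_atomic_exchange_u64() guarantees waiters see the reset before
 * they can observe the lock as free.
 */
LWLockReleaseClearVar(mylock, myvar, 0);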

LWLockReleaseDisowned()

void LWLockReleaseDisowned ( LWLock * lock,
LWLockMode  mode 
)

Definition at line 1914 of file lwlock.c.

1915{
1916 LWLockReleaseInternal(lock, mode);
1917}

References LWLockReleaseInternal(), and mode.
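Paired with LWLockDisown(), this allows a lock to be released without going through the acquiring backend's held_lwlocks[] bookkeeping, e.g. from an asynchronous-I/O completion path. A minimal sketch; the surrounding hand-off is hypothetical:

LWLockAcquire(lock, LW_EXCLUSIVE);

/* Detach the lock from this backend's held-locks tracking. */
LWLockDisown(lock);

/* ... hand the protected work off to be completed elsewhere ... */

/* Whoever completes the work releases it, naming the original mode. */
LWLockReleaseDisowned(lock, LW_EXCLUSIVE);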

LWLockReleaseInternal()

static void LWLockReleaseInternal ( LWLock * lock,
LWLockMode  mode 
)
static

Definition at line 1830 of file lwlock.c.

1831{
1832 uint32 oldstate;
1833 bool check_waiters;
1834
1835 /*
1836 * Release my hold on lock, after that it can immediately be acquired by
1837 * others, even if we still have to wakeup other waiters.
1838 */
1839 if (mode == LW_EXCLUSIVE)
1840 oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_EXCLUSIVE);
1841 else
1842 oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_SHARED);
1843
1844 /* nobody else can have that kind of lock */
1845 Assert(!(oldstate & LW_VAL_EXCLUSIVE));
1846
1847 if (TRACE_POSTGRESQL_LWLOCK_RELEASE_ENABLED())
1848 TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock));
1849
1850 /*
1851 * We're still waiting for backends to get scheduled, don't wake them up
1852 * again.
1853 */
1854 if ((oldstate & (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK)) ==
1855 (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK) &&
1856 (oldstate & LW_LOCK_MASK) == 0)
1857 check_waiters = true;
1858 else
1859 check_waiters = false;
1860
1861 /*
1862 * As waking up waiters requires the spinlock to be acquired, only do so
1863 * if necessary.
1864 */
1865 if (check_waiters)
1866 {
1867 /* XXX: remove before commit? */
1868 LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
1869 LWLockWakeup(lock);
1870 }
1871}
static uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition: atomics.h:437
static void LWLockWakeup(LWLock *lock)
Definition: lwlock.c:926

References Assert(), LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_HAS_WAITERS, LW_FLAG_RELEASE_OK, LW_LOCK_MASK, LW_VAL_EXCLUSIVE, LW_VAL_SHARED, LWLockWakeup(), mode, pg_atomic_sub_fetch_u32(), LWLock::state, and T_NAME.

Referenced by LWLockRelease(), and LWLockReleaseDisowned().

LWLockReportWaitEnd()

static void LWLockReportWaitEnd ( void  )
inline static

Definition at line 728 of file lwlock.c.

729{
730 pgstat_report_wait_end();
731}
static void pgstat_report_wait_end(void)
Definition: wait_event.h:85

References pgstat_report_wait_end().

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

LWLockReportWaitStart()

static void LWLockReportWaitStart ( LWLock * lock )
inline static

Definition at line 719 of file lwlock.c.

720{
721 pgstat_report_wait_start(PG_WAIT_LWLOCK | lock->tranche);
722}
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition: wait_event.h:69

References PG_WAIT_LWLOCK, pgstat_report_wait_start(), and LWLock::tranche.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().
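These two wrappers simply bracket the sleep with the cumulative-statistics wait-event protocol, so the wait shows up in pg_stat_activity.wait_event. The same primitives can be used directly around any blocking operation; a hedged sketch using the generic PG_WAIT_EXTENSION class instead of PG_WAIT_LWLOCK:

pgstat_report_wait_start(PG_WAIT_EXTENSION);
/* ... some blocking operation outside the lwlock machinery ... */
pgstat_report_wait_end();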

LWLockShmemSize()

Size LWLockShmemSize ( void  )

Definition at line 397 of file lwlock.c.

398{
399 Size size;
400 int numLocks = NUM_FIXED_LWLOCKS;
401
402 /*
403 * If re-initializing shared memory, the request array will no longer be
404 * accessible, so switch to the copy in postmaster's local memory. We'll
405 * copy it back into shared memory later when CreateLWLocks() is called
406 * again.
407 */
408 if (LocalNamedLWLockTrancheRequestArray != NULL)
409 NamedLWLockTrancheRequestArray = LocalNamedLWLockTrancheRequestArray;
410
411 /* Calculate total number of locks needed in the main array. */
412 numLocks += NumLWLocksForNamedTranches();
413
414 /* Space for dynamic allocation counter. */
415 size = MAXALIGN(sizeof(int));
416
417 /* Space for named tranches. */
418 size = add_size(size, mul_size(MAX_NAMED_TRANCHES, sizeof(char *)));
419 size = add_size(size, mul_size(MAX_NAMED_TRANCHES, NAMEDATALEN));
420
421 /*
422 * Make space for named tranche requests. This is done for the benefit of
423 * EXEC_BACKEND builds, which otherwise wouldn't be able to call
424 * GetNamedLWLockTranche() outside postmaster.
425 */
426 size = add_size(size, mul_size(NamedLWLockTrancheRequests,
427 sizeof(NamedLWLockTrancheRequest)));
428
429 /* Space for the LWLock array, plus room for cache line alignment. */
430 size = add_size(size, LWLOCK_PADDED_SIZE);
431 size = add_size(size, mul_size(numLocks, sizeof(LWLockPadded)));
432
433 return size;
434}
static int NumLWLocksForNamedTranches(void)
Definition: lwlock.c:382
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510

References add_size(), LocalNamedLWLockTrancheRequestArray, LWLOCK_PADDED_SIZE, MAX_NAMED_TRANCHES, MAXALIGN, mul_size(), NAMEDATALEN, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NUM_FIXED_LWLOCKS, and NumLWLocksForNamedTranches().

Referenced by CalculateShmemSize(), and CreateLWLocks().

LWLockUpdateVar()

void LWLockUpdateVar ( LWLock * lock,
pg_atomic_uint64 * valptr,
uint64  val 
)

Definition at line 1726 of file lwlock.c.

1727{
1728 proclist_head wakeup;
1729 proclist_mutable_iter iter;
1730
1731 PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
1732
1733 /*
1734 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1735 * that the variable is updated before waking up waiters.
1736 */
1737 pg_atomic_exchange_u64(valptr, val);
1738
1739 proclist_init(&wakeup);
1740
1741 LWLockWaitListLock(lock);
1742
1743 Assert(pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE);
1744
1745 /*
1746 * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
1747 * up. They are always in the front of the queue.
1748 */
1749 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
1750 {
1751 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1752
1753 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
1754 break;
1755
1756 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
1757 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
1758
1759 /* see LWLockWakeup() */
1760 Assert(waiter->lwWaiting == LW_WS_WAITING);
1761 waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
1762 }
1763
1764 /* We are done updating shared state of the lock itself. */
1765 LWLockWaitListUnlock(lock);
1766
1767 /*
1768 * Awaken any waiters I removed from the queue.
1769 */
1770 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1771 {
1772 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1773
1774 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1775 /* check comment in LWLockWakeup() about this barrier */
1776 pg_write_barrier();
1777 waiter->lwWaiting = LW_WS_NOT_WAITING;
1778 PGSemaphoreUnlock(waiter->sem);
1779 }
1780}
#define pg_write_barrier()
Definition: atomics.h:155
@ LW_WS_PENDING_WAKEUP
Definition: lwlock.h:32
#define GetPGProcByNumber(n)
Definition: proc.h:440
#define proclist_foreach_modify(iter, lhead, link_member)
Definition: proclist.h:206
static TimestampTz wakeup[NUM_WALRCV_WAKEUPS]
Definition: walreceiver.c:130

References Assert(), proclist_mutable_iter::cur, GetPGProcByNumber, LW_EXCLUSIVE, LW_VAL_EXCLUSIVE, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_PENDING_WAKEUP, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, pg_atomic_exchange_u64(), pg_atomic_read_u32(), pg_write_barrier, PGSemaphoreUnlock(), PRINT_LWDEBUG, proclist_delete, proclist_foreach_modify, proclist_init(), proclist_push_tail, PGPROC::sem, LWLock::state, val, LWLock::waiters, and wakeup.

Referenced by WALInsertLockAcquireExclusive(), and WALInsertLockUpdateInsertingAt().
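Because only LW_WAIT_UNTIL_FREE waiters (LWLockWaitForVar() callers) are woken, the exclusive holder can publish intermediate progress repeatedly without releasing the lock. A sketch of that producer side with illustrative names (the real caller is WALInsertLockUpdateInsertingAt(), advancing insertingAt):

/* Caller must hold mylock in LW_EXCLUSIVE mode. */
LWLockUpdateVar(mylock, myvar, first_chunk_end);
/* ... write first chunk ... */

LWLockUpdateVar(mylock, myvar, second_chunk_end);
/* ... write second chunk ... */

LWLockReleaseClearVar(mylock, myvar, 0);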

LWLockWaitForVar()

bool LWLockWaitForVar ( LWLock * lock,
pg_atomic_uint64 * valptr,
uint64  oldval,
uint64 * newval 
)

Definition at line 1590 of file lwlock.c.

1592{
1593 PGPROC *proc = MyProc;
1594 int extraWaits = 0;
1595 bool result = false;
1596#ifdef LWLOCK_STATS
1597 lwlock_stats *lwstats;
1598
1599 lwstats = get_lwlock_stats_entry(lock);
1600#endif
1601
1602 PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);
1603
1604 /*
1605 * Lock out cancel/die interrupts while we sleep on the lock. There is no
1606 * cleanup mechanism to remove us from the wait queue if we got
1607 * interrupted.
1608 */
1609 HOLD_INTERRUPTS();
1610
1611 /*
1612 * Loop here to check the lock's status after each time we are signaled.
1613 */
1614 for (;;)
1615 {
1616 bool mustwait;
1617
1618 mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1619 &result);
1620
1621 if (!mustwait)
1622 break; /* the lock was free or value didn't match */
1623
1624 /*
1625 * Add myself to wait queue. Note that this is racy, somebody else
1626 * could wakeup before we're finished queuing. NB: We're using nearly
1627 * the same twice-in-a-row lock acquisition protocol as
1628 * LWLockAcquire(). Check its comments for details. The only
1629 * difference is that we also have to check the variable's values when
1630 * checking the state of the lock.
1631 */
1632 LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
1633
1634 /*
1635 * Set RELEASE_OK flag, to make sure we get woken up as soon as the
1636 * lock is released.
1637 */
1638 pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);
1639
1640 /*
1641 * We're now guaranteed to be woken up if necessary. Recheck the lock
1642 * and variables state.
1643 */
1644 mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1645 &result);
1646
1647 /* Ok, no conflict after we queued ourselves. Undo queueing. */
1648 if (!mustwait)
1649 {
1650 LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");
1651
1652 LWLockDequeueSelf(lock);
1653 break;
1654 }
1655
1656 /*
1657 * Wait until awakened.
1658 *
1659 * It is possible that we get awakened for a reason other than being
1660 * signaled by LWLockRelease. If so, loop back and wait again. Once
1661 * we've gotten the LWLock, re-increment the sema by the number of
1662 * additional signals received.
1663 */
1664 LOG_LWDEBUG("LWLockWaitForVar", lock, "waiting");
1665
1666#ifdef LWLOCK_STATS
1667 lwstats->block_count++;
1668#endif
1669
1670 LWLockReportWaitStart(lock);
1671 if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1672 TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), LW_EXCLUSIVE);
1673
1674 for (;;)
1675 {
1676 PGSemaphoreLock(proc->sem);
1677 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1678 break;
1679 extraWaits++;
1680 }
1681
1682#ifdef LOCK_DEBUG
1683 {
1684 /* not waiting anymore */
1685 uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1686
1687 Assert(nwaiters < MAX_BACKENDS);
1688 }
1689#endif
1690
1691 if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1692 TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), LW_EXCLUSIVE);
1693 LWLockReportWaitEnd();
1694
1695 LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
1696
1697 /* Now loop back and check the status of the lock again. */
1698 }
1699
1700 /*
1701 * Fix the process wait semaphore's count for any absorbed wakeups.
1702 */
1703 while (extraWaits-- > 0)
1704 PGSemaphoreUnlock(proc->sem);
1705
1706 /*
1707 * Now okay to allow cancel/die interrupts.
1708 */
1709 RESUME_INTERRUPTS();
1710
1711 return result;
1712}
static bool LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval, bool *result)
Definition: lwlock.c:1529

References Assert(), HOLD_INTERRUPTS, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_RELEASE_OK, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LWLockConflictsWithVar(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MyProc, newval, pg_atomic_fetch_or_u32(), pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, RESUME_INTERRUPTS, PGPROC::sem, LWLock::state, and T_NAME.

Referenced by WaitXLogInsertionsToFinish().
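The return convention — true when the lock was observed free, false when the lock is still held but *valptr no longer equals oldval (the new value is returned in *newval) — supports a progress-polling loop like the one in WaitXLogInsertionsToFinish(). A condensed sketch with illustrative names (mylock, myvar, target):

uint64 seen = 0; /* last value observed in *myvar */

for (;;)
{
 uint64 newval;

 if (LWLockWaitForVar(mylock, myvar, seen, &newval))
 break; /* lock was free: the guarded operation has finished */

 /* Lock still held, but the variable advanced past 'seen'. */
 seen = newval;
 if (seen >= target)
 break; /* progressed far enough for our purposes */
}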

LWLockWaitListLock()

static void LWLockWaitListLock ( LWLock * lock )
static

Definition at line 861 of file lwlock.c.

862{
863 uint32 old_state;
864#ifdef LWLOCK_STATS
865 lwlock_stats *lwstats;
866 uint32 delays = 0;
867
868 lwstats = get_lwlock_stats_entry(lock);
869#endif
870
871 while (true)
872 {
873 /* always try once to acquire lock directly */
874 old_state = pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_LOCKED);
875 if (!(old_state & LW_FLAG_LOCKED))
876 break; /* got lock */
877
878 /* and then spin without atomic operations until lock is released */
879 {
880 SpinDelayStatus delayStatus;
881
882 init_local_spin_delay(&delayStatus);
883
884 while (old_state & LW_FLAG_LOCKED)
885 {
886 perform_spin_delay(&delayStatus);
887 old_state = pg_atomic_read_u32(&lock->state);
888 }
889#ifdef LWLOCK_STATS
890 delays += delayStatus.delays;
891#endif
892 finish_spin_delay(&delayStatus);
893 }
894
895 /*
896 * Retry. The lock might obviously already be re-acquired by the time
897 * we're attempting to get it again.
898 */
899 }
900
901#ifdef LWLOCK_STATS
902 lwstats->spin_delay_count += delays;
903#endif
904}
#define LW_FLAG_LOCKED
Definition: lwlock.c:96
void perform_spin_delay(SpinDelayStatus *status)
Definition: s_lock.c:126
void finish_spin_delay(SpinDelayStatus *status)
Definition: s_lock.c:186
#define init_local_spin_delay(status)
Definition: s_lock.h:733
int delays
Definition: s_lock.h:714

References SpinDelayStatus::delays, finish_spin_delay(), init_local_spin_delay, LW_FLAG_LOCKED, perform_spin_delay(), pg_atomic_fetch_or_u32(), pg_atomic_read_u32(), and LWLock::state.

Referenced by LWLockDequeueSelf(), LWLockQueueSelf(), LWLockUpdateVar(), and LWLockWakeup().

LWLockWaitListUnlock()

static void LWLockWaitListUnlock ( LWLock * lock )
static

Definition at line 913 of file lwlock.c.

914{
915 uint32 old_state PG_USED_FOR_ASSERTS_ONLY;
916
917 old_state = pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_LOCKED);
918
919 Assert(old_state & LW_FLAG_LOCKED);
920}

References Assert(), LW_FLAG_LOCKED, pg_atomic_fetch_and_u32(), PG_USED_FOR_ASSERTS_ONLY, and LWLock::state.

Referenced by LWLockDequeueSelf(), LWLockQueueSelf(), and LWLockUpdateVar().

LWLockWakeup()

static void LWLockWakeup ( LWLock * lock )
static

Definition at line 926 of file lwlock.c.

927{
928 bool new_release_ok;
929 bool wokeup_somebody = false;
930 proclist_head wakeup;
931 proclist_mutable_iter iter;
932
933 proclist_init(&wakeup);
934
935 new_release_ok = true;
936
937 /* lock wait list while collecting backends to wake up */
938 LWLockWaitListLock(lock);
939
940 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
941 {
942 PGPROC *waiter = GetPGProcByNumber(iter.cur);
943
944 if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
945 continue;
946
947 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
948 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
949
950 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
951 {
952 /*
953 * Prevent additional wakeups until retryer gets to run. Backends
954 * that are just waiting for the lock to become free don't retry
955 * automatically.
956 */
957 new_release_ok = false;
958
959 /*
960 * Don't wakeup (further) exclusive locks.
961 */
962 wokeup_somebody = true;
963 }
964
965 /*
966 * Signal that the process isn't on the wait list anymore. This allows
967 * LWLockDequeueSelf() to remove itself of the waitlist with a
968 * proclist_delete(), rather than having to check if it has been
969 * removed from the list.
970 */
971 Assert(waiter->lwWaiting == LW_WS_WAITING);
972 waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
973
974 /*
975 * Once we've woken up an exclusive lock, there's no point in waking
976 * up anybody else.
977 */
978 if (waiter->lwWaitMode == LW_EXCLUSIVE)
979 break;
980 }
981
982 Assert(proclist_is_empty(&wakeup) || pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS);
983
984 /* unset required flags, and release lock, in one fell swoop */
985 {
986 uint32 old_state;
987 uint32 desired_state;
988
989 old_state = pg_atomic_read_u32(&lock->state);
990 while (true)
991 {
992 desired_state = old_state;
993
994 /* compute desired flags */
995
996 if (new_release_ok)
997 desired_state |= LW_FLAG_RELEASE_OK;
998 else
999 desired_state &= ~LW_FLAG_RELEASE_OK;
1000
1001 if (proclist_is_empty(&lock->waiters))
1002 desired_state &= ~LW_FLAG_HAS_WAITERS;
1003
1004 desired_state &= ~LW_FLAG_LOCKED; /* release lock */
1005
1006 if (pg_atomic_compare_exchange_u32(&lock->state, &old_state,
1007 desired_state))
1008 break;
1009 }
1010 }
1011
1012 /* Awaken any waiters I removed from the queue. */
1013 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1014 {
1015 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1016
1017 LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
1018 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1019
1020 /*
1021 * Guarantee that lwWaiting being unset only becomes visible once the
1022 * unlink from the link has completed. Otherwise the target backend
1023 * could be woken up for other reason and enqueue for a new lock - if
1024 * that happens before the list unlink happens, the list would end up
1025 * being corrupted.
1026 *
1027 * The barrier pairs with the LWLockWaitListLock() when enqueuing for
1028 * another lock.
1029 */
1030 pg_write_barrier();
1031 waiter->lwWaiting = LW_WS_NOT_WAITING;
1032 PGSemaphoreUnlock(waiter->sem);
1033 }
1034}

References Assert(), proclist_mutable_iter::cur, GetPGProcByNumber, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_HAS_WAITERS, LW_FLAG_RELEASE_OK, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_PENDING_WAKEUP, LW_WS_WAITING, LWLockWaitListLock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pg_write_barrier, PGSemaphoreUnlock(), proclist_delete, proclist_foreach_modify, proclist_init(), proclist_is_empty(), proclist_push_tail, PGPROC::sem, LWLock::state, LWLock::waiters, and wakeup.

Referenced by LWLockReleaseInternal().

NumLWLocksForNamedTranches()

static int NumLWLocksForNamedTranches ( void  )
static

Definition at line 382 of file lwlock.c.

383{
384 int numLocks = 0;
385 int i;
386
387 for (i = 0; i < NamedLWLockTrancheRequests; i++)
388 numLocks += NamedLWLockTrancheRequestArray[i].num_lwlocks;
389
390 return numLocks;
391}

References i, NamedLWLockTrancheRequestArray, and NamedLWLockTrancheRequests.

Referenced by LWLockShmemSize().

RequestNamedLWLockTranche()

void RequestNamedLWLockTranche ( const char *  tranche_name,
int  num_lwlocks 
)

Definition at line 649 of file lwlock.c.

650{
651 NamedLWLockTrancheRequest *request;
652 static int NamedLWLockTrancheRequestsAllocated;
653
654 if (!process_shmem_requests_in_progress)
655 elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");
656
657 if (!tranche_name)
658 ereport(ERROR,
659 (errcode(ERRCODE_INVALID_NAME),
660 errmsg("tranche name cannot be NULL")));
661
662 if (strlen(tranche_name) >= NAMEDATALEN)
663 ereport(ERROR,
664 (errcode(ERRCODE_NAME_TOO_LONG),
665 errmsg("tranche name too long"),
666 errdetail("LWLock tranche names must be no longer than %d bytes.",
667 NAMEDATALEN - 1)));
668
669 if (NamedLWLockTrancheRequestArray == NULL)
670 {
671 NamedLWLockTrancheRequestsAllocated = 16;
672 NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
673 MemoryContextAlloc(TopMemoryContext,
674 NamedLWLockTrancheRequestsAllocated
675 * sizeof(NamedLWLockTrancheRequest));
676 }
677
678 if (NamedLWLockTrancheRequests >= NamedLWLockTrancheRequestsAllocated)
679 {
680 int i = pg_nextpower2_32(NamedLWLockTrancheRequests + 1);
681
682 NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
683 repalloc(NamedLWLockTrancheRequestArray,
684 i * sizeof(NamedLWLockTrancheRequest));
685 NamedLWLockTrancheRequestsAllocated = i;
686 }
687
688 request = &NamedLWLockTrancheRequestArray[NamedLWLockTrancheRequests];
689 strlcpy(request->tranche_name, tranche_name, NAMEDATALEN);
690 request->num_lwlocks = num_lwlocks;
691 NamedLWLockTrancheRequests++;
692}
#define FATAL
Definition: elog.h:41
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1229
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1610
MemoryContext TopMemoryContext
Definition: mcxt.c:166
bool process_shmem_requests_in_progress
Definition: miscinit.c:1790
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:189

References elog, ereport, errcode(), errdetail(), errmsg(), ERROR, FATAL, i, MemoryContextAlloc(), NAMEDATALEN, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NamedLWLockTrancheRequest::num_lwlocks, pg_nextpower2_32(), process_shmem_requests_in_progress, repalloc(), strlcpy(), TopMemoryContext, and NamedLWLockTrancheRequest::tranche_name.

Referenced by pgss_shmem_request(), and test_lwlock_tranches_shmem_request().
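The pgss_shmem_request() caller illustrates the canonical extension pattern: request the tranche from shmem_request_hook (so the library must be listed in shared_preload_libraries), then fetch the locks in shmem_startup_hook with GetNamedLWLockTranche(). A minimal sketch; the tranche name "my_ext" and the hook function names are illustrative:

#include "postgres.h"
#include "fmgr.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

PG_MODULE_MAGIC;

static shmem_request_hook_type prev_shmem_request_hook = NULL;
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
static LWLock *my_lock = NULL;

static void
my_ext_shmem_request(void)
{
 if (prev_shmem_request_hook)
 prev_shmem_request_hook();

 /* One lock in a tranche named after the extension. */
 RequestNamedLWLockTranche("my_ext", 1);
}

static void
my_ext_shmem_startup(void)
{
 if (prev_shmem_startup_hook)
 prev_shmem_startup_hook();

 /* Safe here: the tranche was allocated by CreateLWLocks(). */
 my_lock = &(GetNamedLWLockTranche("my_ext")[0].lock);
}

void
_PG_init(void)
{
 if (!process_shared_preload_libraries_in_progress)
 return;

 prev_shmem_request_hook = shmem_request_hook;
 shmem_request_hook = my_ext_shmem_request;
 prev_shmem_startup_hook = shmem_startup_hook;
 shmem_startup_hook = my_ext_shmem_startup;
}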

StaticAssertDecl() [1/4]

StaticAssertDecl ( ((MAX_BACKENDS + 1) & MAX_BACKENDS) == 0,
"MAX_BACKENDS + 1 needs to be a power of 2" 
)

StaticAssertDecl() [2/4]

StaticAssertDecl ( (LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0,
"LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap" 
)

StaticAssertDecl() [3/4]

StaticAssertDecl ( (MAX_BACKENDS & LW_FLAG_MASK) == 0,
"MAX_BACKENDS and LW_FLAG_MASK overlap" 
)

StaticAssertDecl() [4/4]

"missing entries in BuiltinTrancheNames"  [] 
)

Variable Documentation

BuiltinTrancheNames

const char* const BuiltinTrancheNames[]
static
Initial value:
= {
#define PG_LWLOCK(id, lockname) [id] = CppAsString(lockname),
#define PG_LWLOCKTRANCHE(id, lockname) [LWTRANCHE_##id] = CppAsString(lockname),
#include "storage/lwlocklist.h"
#undef PG_LWLOCK
#undef PG_LWLOCKTRANCHE
}

Definition at line 135 of file lwlock.c.

Referenced by GetLWTrancheName().

held_lwlocks

LWLockHandle held_lwlocks[MAX_SIMUL_LWLOCKS]
static

Definition at line 178 of file lwlock.c.

Referenced by ForEachLWLockHeldByMe(), LWLockAcquire(), LWLockAcquireOrWait(), LWLockAnyHeldByMe(), LWLockConditionalAcquire(), LWLockDisownInternal(), LWLockHeldByMe(), LWLockHeldByMeInMode(), and LWLockReleaseAll().

LocalLWLockCounter

int LocalLWLockCounter
static

Definition at line 202 of file lwlock.c.

Referenced by GetLWTrancheName(), and LWLockNewTrancheId().

LocalNamedLWLockTrancheRequestArray

NamedLWLockTrancheRequest* LocalNamedLWLockTrancheRequestArray = NULL
static

Definition at line 196 of file lwlock.c.

Referenced by CreateLWLocks(), and LWLockShmemSize().

LWLockCounter

int* LWLockCounter = NULL

Definition at line 199 of file lwlock.c.

Referenced by CreateLWLocks(), GetLWTrancheName(), and LWLockNewTrancheId().

LWLockTrancheNames

char** LWLockTrancheNames = NULL

Definition at line 154 of file lwlock.c.

Referenced by CreateLWLocks(), GetLWTrancheName(), and LWLockNewTrancheId().

MainLWLockArray

LWLockPadded* MainLWLockArray = NULL

Definition at line 161 of file lwlock.c.

Referenced by BufMappingPartitionLock(), BufMappingPartitionLockByIndex(), CreateLWLocks(), GetNamedLWLockTranche(), and InitializeLWLocks().

NamedLWLockTrancheRequestArray

NamedLWLockTrancheRequest* NamedLWLockTrancheRequestArray = NULL

Definition at line 193 of file lwlock.c.

Referenced by CreateLWLocks(), GetNamedLWLockTranche(), InitializeLWLocks(), LWLockShmemSize(), NumLWLocksForNamedTranches(), and RequestNamedLWLockTranche().

NamedLWLockTrancheRequests

int NamedLWLockTrancheRequests = 0

Definition at line 192 of file lwlock.c.

Referenced by CreateLWLocks(), GetNamedLWLockTranche(), InitializeLWLocks(), LWLockShmemSize(), NumLWLocksForNamedTranches(), and RequestNamedLWLockTranche().

num_held_lwlocks

int num_held_lwlocks = 0
static

Definition at line 177 of file lwlock.c.

Referenced by ForEachLWLockHeldByMe(), LWLockAcquire(), LWLockAcquireOrWait(), LWLockAnyHeldByMe(), LWLockConditionalAcquire(), LWLockDisownInternal(), LWLockHeldByMe(), LWLockHeldByMeInMode(), and LWLockReleaseAll().
