/*--------------------------------------------------------------------------
 *
 * setup.c
 *		Code to set up a dynamic shared memory segment and a specified
 *		number of background workers for shared memory message queue
 *		testing.
 *
 * Copyright (c) 2013-2025, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *		src/test/modules/test_shm_mq/setup.c
 *
 * -------------------------------------------------------------------------
 */
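/*
 * Reconstructed preamble (a sketch): the includes are a plausible set for
 * the calls used below, and the worker_state layout and static prototypes
 * follow the declarations this file references.
 */
#include "postgres.h"

#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/bgworker.h"
#include "storage/proc.h"
#include "storage/shm_mq.h"
#include "storage/shm_toc.h"
#include "test_shm_mq.h"
#include "utils/memutils.h"
#include "utils/wait_event.h"

/* Bookkeeping for the workers we register: a count plus one handle each. */
typedef struct
{
    int         nworkers;
    BackgroundWorkerHandle *handle[FLEXIBLE_ARRAY_MEMBER];
} worker_state;

static void setup_dynamic_shared_memory(int64 queue_size, int nworkers,
                                        dsm_segment **segp,
                                        test_shm_mq_header **hdrp,
                                        shm_mq **outp, shm_mq **inp);
static worker_state *setup_background_workers(int nworkers, dsm_segment *seg);
static void cleanup_background_workers(dsm_segment *seg, Datum arg);
static void wait_for_workers_to_become_ready(worker_state *wstate,
                                             volatile test_shm_mq_header *hdr);
static bool check_worker_status(worker_state *wstate);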
/* value cached, fetched from shared memory */
static uint32 we_bgworker_startup = 0;
/*
 * Set up a dynamic shared memory segment and zero or more background workers
 * for shared memory message queue testing.
 */
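/* (Signature as declared for this module; the first two locals are assumed.) */
void
test_shm_mq_setup(int64 queue_size, int32 nworkers, dsm_segment **segp,
                  shm_mq_handle **output, shm_mq_handle **input)
{
    dsm_segment *seg;
    test_shm_mq_header *hdr;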
    shm_mq     *outq = NULL;    /* placate compiler */
    shm_mq     *inq = NULL;     /* placate compiler */
    worker_state *wstate;
    /* Set up a dynamic shared memory segment. */
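    /* (Sketch: the helper fills in the segment, header, and both queues.) */
    setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq);
    *segp = seg;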
    /* Register background workers. */
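    wstate = setup_background_workers(nworkers, seg);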
    /* Attach the queues. */
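    /*
     * (Sketch: we send into the first queue and read back from the last, so
     * pair the output queue with the first worker's handle and the input
     * queue with the last worker's handle.)
     */
    *output = shm_mq_attach(outq, seg, wstate->handle[0]);
    *input = shm_mq_attach(inq, seg, wstate->handle[nworkers - 1]);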
    /* Wait for workers to become ready. */
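    wait_for_workers_to_become_ready(wstate, hdr);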
    /*
     * Once we reach this point, all workers are ready.  We no longer need to
     * kill them if we die; they'll die on their own as the message queues
     * shut down.
     */
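    /* (Undo the cleanup hook registered in setup_background_workers.) */
    cancel_on_dsm_detach(seg, cleanup_background_workers,
                         PointerGetDatum(wstate));
    pfree(wstate);
}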
/*
 * Set up a dynamic shared memory segment.
 *
 * We set up a small control region that contains only a test_shm_mq_header,
 * plus one region per message queue.  There are as many message queues as
 * the number of workers, plus one.
 */
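/* (Signature matches the declaration above; the locals below are assumed.) */
static void
setup_dynamic_shared_memory(int64 queue_size, int nworkers,
                            dsm_segment **segp, test_shm_mq_header **hdrp,
                            shm_mq **outp, shm_mq **inp)
{
    shm_toc_estimator e;
    int         i;
    Size        segsize;
    dsm_segment *seg;
    shm_toc    *toc;
    test_shm_mq_header *hdr;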
    /* Ensure a valid queue size. */
    if (queue_size < 0 || ((uint64) queue_size) < shm_mq_minimum_size)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("queue size must be at least %zu bytes",
                        shm_mq_minimum_size)));
    if (queue_size != ((Size) queue_size))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("queue size overflows size_t")));
    /*
     * Estimate how much shared memory we need.
     *
     * Because the TOC machinery may choose to insert padding of oddly-sized
     * requests, we must estimate each chunk separately.
     *
     * We need one key to register the location of the header, and we need
     * nworkers + 1 keys to track the locations of the message queues.
     */
    shm_toc_initialize_estimator(&e);
    shm_toc_estimate_chunk(&e, sizeof(test_shm_mq_header));
    for (i = 0; i <= nworkers; ++i)
        shm_toc_estimate_chunk(&e, (Size) queue_size);
    shm_toc_estimate_keys(&e, 2 + nworkers);
    segsize = shm_toc_estimate(&e);
    /* Create the shared memory segment and establish a table of contents. */
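    seg = dsm_create(segsize, 0);
    toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg),
                         segsize);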
    /* Set up the header region. */
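    /*
     * (Sketch: the mutex and worker-count fields are assumed from how the
     * rest of this module reads test_shm_mq_header.)
     */
    hdr = shm_toc_allocate(toc, sizeof(test_shm_mq_header));
    SpinLockInit(&hdr->mutex);
    hdr->workers_total = nworkers;
    hdr->workers_attached = 0;
    hdr->workers_ready = 0;
    shm_toc_insert(toc, 0, hdr);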
    /* Set up one message queue per worker, plus one. */
    for (i = 0; i <= nworkers; ++i)
    {
        shm_mq     *mq;

        mq = shm_mq_create(shm_toc_allocate(toc, (Size) queue_size),
                           (Size) queue_size);
        shm_toc_insert(toc, i + 1, mq);
        /* We send messages to the first queue. */
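        if (i == 0)
        {
            shm_mq_set_sender(mq, MyProc);
            *outp = mq;
        }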
        /* We receive messages from the last queue. */
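        if (i == nworkers)
        {
            shm_mq_set_receiver(mq, MyProc);
            *inp = mq;
        }
    }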
    /* Return results to caller. */
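    *segp = seg;
    *hdrp = hdr;
}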
/*
 * Register background workers.
 */
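static worker_state *
setup_background_workers(int nworkers, dsm_segment *seg)
{
    MemoryContext oldcontext;
    BackgroundWorker worker;
    worker_state *wstate;
    int         i;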
    /*
     * We need the worker_state object and the background worker handles to
     * which it points to be allocated in CurTransactionContext rather than
     * ExprContext; otherwise, they'll be destroyed before the on_dsm_detach
     * hooks run.
     */
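    oldcontext = MemoryContextSwitchTo(CurTransactionContext);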
    /* Create worker state object. */
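    /*
     * (Sketch: sized for one handle slot per worker via the flexible array
     * member; TopTransactionContext is an assumption consistent with the
     * symbols this file uses.)
     */
    wstate = MemoryContextAlloc(TopTransactionContext,
                                offsetof(worker_state, handle) +
                                sizeof(BackgroundWorkerHandle *) * nworkers);
    wstate->nworkers = 0;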
    /*
     * Arrange to kill all the workers if we abort before all workers are
     * finished hooking themselves up to the dynamic shared memory segment.
     *
     * If we die after all the workers have finished hooking themselves up to
     * the dynamic shared memory segment, we'll mark the two queues to which
     * we're directly connected as detached, and the worker(s) connected to
     * those queues will exit, marking any other queues to which they are
     * connected as detached.  This will cause any as-yet-unaware workers
     * connected to those queues to exit in their turn, and so on, until
     * everybody exits.
     *
     * But suppose the workers which are supposed to connect to the queues to
     * which we're directly attached exit due to some error before they
     * actually attach the queues.  The remaining workers will have no way of
     * knowing this.  From their perspective, they're still waiting for those
     * workers to start, when in fact they've already died.
     */
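    on_dsm_detach(seg, cleanup_background_workers,
                  PointerGetDatum(wstate));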
    /* Configure a worker. */
    memset(&worker, 0, sizeof(worker));
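    /*
     * (Sketch: flags, start time, and restart policy follow the bgworker
     * symbols this file uses; the library and entry-point name strings are
     * assumptions.)
     */
    worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
    worker.bgw_start_time = BgWorkerStart_ConsistentState;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    sprintf(worker.bgw_library_name, "test_shm_mq");
    sprintf(worker.bgw_function_name, "test_shm_mq_main");
    snprintf(worker.bgw_type, BGW_MAXLEN, "test_shm_mq");
    worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));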
    /* set bgw_notify_pid, so we can detect if the worker stops */
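    worker.bgw_notify_pid = MyProcPid;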
    /* Register the workers. */
    for (i = 0; i < nworkers; ++i)
    {
        if (!RegisterDynamicBackgroundWorker(&worker, &wstate->handle[i]))
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
                     errmsg("could not register background process"),
                     errhint("You may need to increase \"max_worker_processes\".")));
        ++wstate->nworkers;
    }

    /* All done. */
    MemoryContextSwitchTo(oldcontext);
    return wstate;
}
        /* If all the workers are ready, we have succeeded. */
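        /* (hdr->workers_ready is assumed from the header sketch above) */
        SpinLockAcquire(&hdr->mutex);
        workers_ready = hdr->workers_ready;
        SpinLockRelease(&hdr->mutex);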
        if (workers_ready >= wstate->nworkers)
        {
            result = true;
            break;
        }
        /* If any workers (or the postmaster) have died, we have failed. */
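        if (!check_worker_status(wstate))
        {
            result = false;
            break;
        }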
        /* first time, allocate or get the custom wait event */
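        /* (the wait event name string is an assumption) */
        if (we_bgworker_startup == 0)
            we_bgworker_startup =
                WaitEventExtensionNew("TestShmMqBgWorkerStartup");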
        /* Wait to be signaled. */
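        (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
                         we_bgworker_startup);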
        /* Reset the latch so we don't spin. */
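        ResetLatch(MyLatch);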
        /* An interrupt may have occurred while we were waiting. */
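        CHECK_FOR_INTERRUPTS();
    }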
    if (!result)
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
                 errmsg("one or more background workers failed to start")));
}
    /* If any workers (or the postmaster) have died, we have failed. */
    for (n = 0; n < wstate->nworkers; ++n)
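    {
        BgwHandleStatus status;
        pid_t       pid;

        status = GetBackgroundWorkerPid(wstate->handle[n], &pid);
        if (status == BGWH_STOPPED || status == BGWH_POSTMASTER_DIED)
            return false;
    }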
    /* Otherwise, things still look OK. */
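    return true;
}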