/*	$NetBSD: kern_sig.c,v 1.410 2025/03/13 12:48:21 riastradh Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008, 2019, 2023 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
 */

/*
 * Signal subsystem.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.410 2025/03/13 12:48:21 riastradh Exp $");

#include "opt_execfmt.h"
#include "opt_ptrace.h"
#include "opt_dtrace.h"
#include "opt_compat_sunos.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_netbsd32.h"
#include "opt_pax.h"

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/systm.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/pool.h>
#include <sys/ucontext.h>
#include <sys/exec.h>
#include <sys/kauth.h>
#include <sys/acct.h>
#include <sys/callout.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/module.h>
#include <sys/sdt.h>
#include <sys/exec_elf.h>
#include <sys/compat_stub.h>

#ifdef PAX_SEGVGUARD
#include <sys/pax.h>
#endif /* PAX_SEGVGUARD */

#include <uvm/uvm_extern.h>

/* Many hard-coded assumptions that there are <= 4 x 32bit signal mask bits */
__CTASSERT(NSIG <= 128);

#define	SIGQUEUE_MAX	32
static pool_cache_t	sigacts_cache	__read_mostly;
static pool_cache_t	ksiginfo_cache	__read_mostly;
static callout_t	proc_stop_ch	__cacheline_aligned;

sigset_t		contsigmask	__cacheline_aligned;
sigset_t		stopsigmask	__cacheline_aligned;
static sigset_t		vforksigmask	__cacheline_aligned;
sigset_t		sigcantmask	__cacheline_aligned;

static void	proc_stop(struct proc *, int);
static void	proc_stop_done(struct proc *, int);
static void	proc_stop_callout(void *);
static int	sigchecktrace(void);
static int	sigpost(struct lwp *, sig_t, int, int);
static int	sigput(sigpend_t *, struct proc *, ksiginfo_t *);
static int	sigunwait(struct proc *, const ksiginfo_t *);
static void	sigswitch(int, int, bool);
static void	sigswitch_unlock_and_switch_away(struct lwp *);

static void	sigacts_poolpage_free(struct pool *, void *);
static void	*sigacts_poolpage_alloc(struct pool *, int);

/*
 * DTrace SDT provider definitions
 */
SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, kernel, , signal__send,
    "struct lwp *",	/* target thread */
    "struct proc *",	/* target process */
    "int");		/* signal */
SDT_PROBE_DEFINE3(proc, kernel, , signal__discard,
    "struct lwp *",	/* target thread */
    "struct proc *",	/* target process */
    "int");		/* signal */
SDT_PROBE_DEFINE3(proc, kernel, , signal__handle,
    "int",		/* signal */
    "ksiginfo_t *",	/* signal info */
    "void (*)(void)");	/* handler address */


static struct pool_allocator sigactspool_allocator = {
	.pa_alloc = sigacts_poolpage_alloc,
	.pa_free = sigacts_poolpage_free
};

#ifdef DEBUG
int	kern_logsigexit = 1;
#else
int	kern_logsigexit = 0;
#endif

static const char logcoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
static const char lognocoredump[] =
    "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";

static kauth_listener_t signal_listener;

static int
signal_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p;
	int result, signum;

	result = KAUTH_RESULT_DEFER;
	p = arg0;
	signum = (int)(unsigned long)arg1;

	if (action != KAUTH_PROCESS_SIGNAL)
		return result;

	if (kauth_cred_uidmatch(cred, p->p_cred) ||
	    (signum == SIGCONT && (curproc->p_session == p->p_session)))
		result = KAUTH_RESULT_ALLOW;

	return result;
}

static int
sigacts_ctor(void *arg __unused, void *obj, int flags __unused)
{
	memset(obj, 0, sizeof(struct sigacts));
	return 0;
}

/*
 * signal_init:
 *
 *	Initialize global signal-related data structures.
 */
void
signal_init(void)
{

	sigactspool_allocator.pa_pagesz = (PAGE_SIZE)*2;

	sigacts_cache = pool_cache_init(sizeof(struct sigacts), 0, 0, 0,
	    "sigacts", sizeof(struct sigacts) > PAGE_SIZE ?
	    &sigactspool_allocator : NULL, IPL_NONE, sigacts_ctor, NULL, NULL);
	ksiginfo_cache = pool_cache_init(sizeof(ksiginfo_t), 0, 0, 0,
	    "ksiginfo", NULL, IPL_VM, NULL, NULL, NULL);

	callout_init(&proc_stop_ch, CALLOUT_MPSAFE);
	callout_setfunc(&proc_stop_ch, proc_stop_callout, NULL);

	signal_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
	    signal_listener_cb, NULL);
}

/*
 * sigacts_poolpage_alloc:
 *
 *	Allocate a page for the sigacts memory pool.
 */
static void *
sigacts_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map,
	    PAGE_SIZE * 2, PAGE_SIZE * 2,
	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
	    | UVM_KMF_WIRED);
}

/*
 * sigacts_poolpage_free:
 *
 *	Free a page on behalf of the sigacts memory pool.
 */
static void
sigacts_poolpage_free(struct pool *pp, void *v)
{

	uvm_km_free(kernel_map, (vaddr_t)v, PAGE_SIZE * 2, UVM_KMF_WIRED);
}

/*
 * sigactsinit:
 *
 *	Create an initial sigacts structure, using the same signal state
 *	as the specified process.  If 'share' is set, share the sigacts by
 *	holding a reference, otherwise just copy it from the parent.
 */
struct sigacts *
sigactsinit(struct proc *pp, int share)
{
	struct sigacts *ps = pp->p_sigacts, *ps2;

	if (__predict_false(share)) {
		atomic_inc_uint(&ps->sa_refcnt);
		return ps;
	}
	ps2 = pool_cache_get(sigacts_cache, PR_WAITOK);
	mutex_init(&ps2->sa_mutex, MUTEX_DEFAULT, IPL_SCHED);
	ps2->sa_refcnt = 1;

	mutex_enter(&ps->sa_mutex);
	memcpy(ps2->sa_sigdesc, ps->sa_sigdesc, sizeof(ps2->sa_sigdesc));
	mutex_exit(&ps->sa_mutex);
	return ps2;
}
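
/*
 * Editorial sketch (not part of the original source): fork is the
 * expected consumer of sigactsinit().  Assuming the usual NetBSD fork1()
 * flags, the call site looks roughly like:
 *
 *	p2->p_sigacts = sigactsinit(p1, flags & FORK_SHARESIGS);
 *
 * so a shared sigacts merely gains a reference, while a private copy
 * starts with its own mutex and a refcount of 1, as implemented above.
 */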

/*
 * sigactsunshare:
 *
 *	Make this process not share its sigacts, maintaining all signal state.
 */
void
sigactsunshare(struct proc *p)
{
	struct sigacts *ps, *oldps = p->p_sigacts;

	if (__predict_true(oldps->sa_refcnt == 1))
		return;

	ps = pool_cache_get(sigacts_cache, PR_WAITOK);
	mutex_init(&ps->sa_mutex, MUTEX_DEFAULT, IPL_SCHED);
	memcpy(ps->sa_sigdesc, oldps->sa_sigdesc, sizeof(ps->sa_sigdesc));
	ps->sa_refcnt = 1;

	p->p_sigacts = ps;
	sigactsfree(oldps);
}

/*
 * sigactsfree:
 *
 *	Release a sigacts structure.
 */
void
sigactsfree(struct sigacts *ps)
{

	membar_release();
	if (atomic_dec_uint_nv(&ps->sa_refcnt) == 0) {
		membar_acquire();
		mutex_destroy(&ps->sa_mutex);
		pool_cache_put(sigacts_cache, ps);
	}
}

/*
 * siginit:
 *
 *	Initialize signal state for process 0; set to ignore signals that
 *	are ignored by default and disable the signal stack.  Locking not
 *	required as the system is still cold.
 */
void
siginit(struct proc *p)
{
	struct lwp *l;
	struct sigacts *ps;
	int signo, prop;

	ps = p->p_sigacts;
	sigemptyset(&contsigmask);
	sigemptyset(&stopsigmask);
	sigemptyset(&vforksigmask);
	sigemptyset(&sigcantmask);
	for (signo = 1; signo < NSIG; signo++) {
		prop = sigprop[signo];
		if (prop & SA_CONT)
			sigaddset(&contsigmask, signo);
		if (prop & SA_STOP)
			sigaddset(&stopsigmask, signo);
		if (prop & SA_STOP && signo != SIGSTOP)
			sigaddset(&vforksigmask, signo);
		if (prop & SA_CANTMASK)
			sigaddset(&sigcantmask, signo);
		if (prop & SA_IGNORE && signo != SIGCONT)
			sigaddset(&p->p_sigctx.ps_sigignore, signo);
		sigemptyset(&SIGACTION_PS(ps, signo).sa_mask);
		SIGACTION_PS(ps, signo).sa_flags = SA_RESTART;
	}
	sigemptyset(&p->p_sigctx.ps_sigcatch);
	p->p_sflag &= ~PS_NOCLDSTOP;

	ksiginfo_queue_init(&p->p_sigpend.sp_info);
	sigemptyset(&p->p_sigpend.sp_set);

	/*
	 * Reset per LWP state.
	 */
	l = LIST_FIRST(&p->p_lwps);
	l->l_sigwaited = NULL;
	l->l_sigstk = SS_INIT;
	ksiginfo_queue_init(&l->l_sigpend.sp_info);
	sigemptyset(&l->l_sigpend.sp_set);

	/* One reference. */
	ps->sa_refcnt = 1;
}

/*
 * execsigs:
 *
 *	Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	struct lwp *l;
	int signo, prop;
	sigset_t tset;
	ksiginfoq_t kq;

	KASSERT(p->p_nlwps == 1);

	sigactsunshare(p);
	ps = p->p_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held through
	 * l->l_sigmask (unless they were caught, and are now ignored
	 * by default).
	 *
	 * No need to lock yet, the process has only one LWP and
	 * at this point the sigacts are private to the process.
	 */
	sigemptyset(&tset);
	for (signo = 1; signo < NSIG; signo++) {
		if (sigismember(&p->p_sigctx.ps_sigcatch, signo)) {
			prop = sigprop[signo];
			if (prop & SA_IGNORE) {
				if ((prop & SA_CONT) == 0)
					sigaddset(&p->p_sigctx.ps_sigignore,
					    signo);
				sigaddset(&tset, signo);
			}
			SIGACTION_PS(ps, signo).sa_handler = SIG_DFL;
		}
		sigemptyset(&SIGACTION_PS(ps, signo).sa_mask);
		SIGACTION_PS(ps, signo).sa_flags = SA_RESTART;
	}
	ksiginfo_queue_init(&kq);

	mutex_enter(p->p_lock);
	sigclearall(p, &tset, &kq);
	sigemptyset(&p->p_sigctx.ps_sigcatch);

	/*
	 * Reset the "no zombies if child dies" flag, as Solaris does.
	 */
	p->p_flag &= ~(PK_NOCLDWAIT | PK_CLDSIGIGN);
	if (SIGACTION_PS(ps, SIGCHLD).sa_handler == SIG_IGN)
		SIGACTION_PS(ps, SIGCHLD).sa_handler = SIG_DFL;

	/*
	 * Reset per-LWP state.
	 */
	l = LIST_FIRST(&p->p_lwps);
	l->l_sigwaited = NULL;
	l->l_sigstk = SS_INIT;
	ksiginfo_queue_init(&l->l_sigpend.sp_info);
	sigemptyset(&l->l_sigpend.sp_set);
	mutex_exit(p->p_lock);

	ksiginfo_queue_drain(&kq);
}

/*
 * ksiginfo_alloc:
 *
 *	Allocate a new ksiginfo structure from the pool, and optionally copy
 *	an existing one.  If the existing ksiginfo_t is from the pool, and
 *	has not been queued somewhere, then just return it.  Additionally,
 *	if the existing ksiginfo_t does not contain any information beyond
 *	the signal number, then just return it.
 */
ksiginfo_t *
ksiginfo_alloc(struct proc *p, ksiginfo_t *ok, int flags)
{
	ksiginfo_t *kp;

	if (ok != NULL) {
		if ((ok->ksi_flags & (KSI_QUEUED | KSI_FROMPOOL)) ==
		    KSI_FROMPOOL)
			return ok;
		if (KSI_EMPTY_P(ok))
			return ok;
	}

	kp = pool_cache_get(ksiginfo_cache, flags);
	if (kp == NULL) {
#ifdef DIAGNOSTIC
		printf("Out of memory allocating ksiginfo for pid %d\n",
		    p->p_pid);
#endif
		return NULL;
	}

	if (ok != NULL) {
		memcpy(kp, ok, sizeof(*kp));
		kp->ksi_flags &= ~KSI_QUEUED;
	} else
		KSI_INIT_EMPTY(kp);

	kp->ksi_flags |= KSI_FROMPOOL;

	return kp;
}
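
/*
 * Editorial sketch (not part of the original source): callers pair
 * ksiginfo_alloc() with ksiginfo_free(), passing pool_cache flags
 * (PR_WAITOK or PR_NOWAIT).  kpsignal2() below does, in effect:
 *
 *	if ((kp = ksiginfo_alloc(p, ksi, PR_NOWAIT)) == NULL)
 *		goto discard;
 *	...
 *	ksiginfo_free(kp);	// no-op if kp ended up queued
 */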

/*
 * ksiginfo_free:
 *
 *	If the given ksiginfo_t is from the pool and has not been queued,
 *	then free it.
 */
void
ksiginfo_free(ksiginfo_t *kp)
{

	if ((kp->ksi_flags & (KSI_QUEUED | KSI_FROMPOOL)) != KSI_FROMPOOL)
		return;
	pool_cache_put(ksiginfo_cache, kp);
}

/*
 * ksiginfo_queue_drain:
 *
 *	Drain a non-empty ksiginfo_t queue.
 */
void
ksiginfo_queue_drain0(ksiginfoq_t *kq)
{
	ksiginfo_t *ksi;

	KASSERT(!TAILQ_EMPTY(kq));

	while (!TAILQ_EMPTY(kq)) {
		ksi = TAILQ_FIRST(kq);
		TAILQ_REMOVE(kq, ksi, ksi_list);
		pool_cache_put(ksiginfo_cache, ksi);
	}
}

static int
siggetinfo(sigpend_t *sp, ksiginfo_t *out, int signo)
{
	ksiginfo_t *ksi, *nksi;

	if (sp == NULL)
		goto out;

	/* Find siginfo and copy it out. */
	int count = 0;
	TAILQ_FOREACH_SAFE(ksi, &sp->sp_info, ksi_list, nksi) {
		if (ksi->ksi_signo != signo)
			continue;
		if (count++ > 0) /* Only remove the first, count all of them */
			continue;
		TAILQ_REMOVE(&sp->sp_info, ksi, ksi_list);
		KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);
		KASSERT((ksi->ksi_flags & KSI_QUEUED) != 0);
		ksi->ksi_flags &= ~KSI_QUEUED;
		if (out != NULL) {
			memcpy(out, ksi, sizeof(*out));
			out->ksi_flags &= ~(KSI_FROMPOOL | KSI_QUEUED);
		}
		ksiginfo_free(ksi);
	}
	if (count)
		return count;

out:
	/* If there is no siginfo, then manufacture it. */
	if (out != NULL) {
		KSI_INIT(out);
		out->ksi_info._signo = signo;
		out->ksi_info._code = SI_NOINFO;
	}
	return 0;
}

/*
 * sigget:
 *
 *	Fetch the first pending signal from a set.  Optionally, also fetch
 *	or manufacture a ksiginfo element.  Returns the number of the first
 *	pending signal, or zero.
 */
int
sigget(sigpend_t *sp, ksiginfo_t *out, int signo, const sigset_t *mask)
{
	sigset_t tset;
	int count;

	/* If there's no pending set, the signal is from the debugger. */
	if (sp == NULL)
		goto out;

	/* Construct mask from signo and 'mask'. */
	if (signo == 0) {
		if (mask != NULL) {
			tset = *mask;
			__sigandset(&sp->sp_set, &tset);
		} else
			tset = sp->sp_set;

		/* If there are no signals pending, return. */
		if ((signo = firstsig(&tset)) == 0)
			goto out;
	} else {
		KASSERT(sigismember(&sp->sp_set, signo));
	}

	sigdelset(&sp->sp_set, signo);
out:
	count = siggetinfo(sp, out, signo);
	if (count > 1)
		sigaddset(&sp->sp_set, signo);
	return signo;
}
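
/*
 * Editorial sketch (not part of the original source): sigget() is the
 * dequeue primitive for sigtimedwait()-style waiters.  A caller holding
 * p->p_lock might, roughly:
 *
 *	ksiginfo_t ksi;
 *	int signo;
 *
 *	signo = sigget(&l->l_sigpend, &ksi, 0, &l->l_sigwaitset);
 *	if (signo == 0)
 *		signo = sigget(&p->p_sigpend, &ksi, 0, &l->l_sigwaitset);
 *
 * where a zero 'signo' argument means "first pending signal in 'mask'".
 */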

/*
 * sigput:
 *
 *	Append a new ksiginfo element to the list of pending ksiginfo's.
 */
static int
sigput(sigpend_t *sp, struct proc *p, ksiginfo_t *ksi)
{
	ksiginfo_t *kp;

	KASSERT(mutex_owned(p->p_lock));
	KASSERT((ksi->ksi_flags & KSI_QUEUED) == 0);

	sigaddset(&sp->sp_set, ksi->ksi_signo);

	/*
	 * If there is no siginfo, we are done.
	 */
	if (KSI_EMPTY_P(ksi))
		return 0;

	KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);

	size_t count = 0;
	TAILQ_FOREACH(kp, &sp->sp_info, ksi_list) {
		count++;
		if (ksi->ksi_signo >= SIGRTMIN && ksi->ksi_signo <= SIGRTMAX)
			continue;
		if (kp->ksi_signo == ksi->ksi_signo) {
			KSI_COPY(ksi, kp);
			kp->ksi_flags |= KSI_QUEUED;
			return 0;
		}
	}

	if (count >= SIGQUEUE_MAX) {
#ifdef DIAGNOSTIC
		printf("%s(%d): Signal queue is full signal=%d\n",
		    p->p_comm, p->p_pid, ksi->ksi_signo);
#endif
		return EAGAIN;
	}
	ksi->ksi_flags |= KSI_QUEUED;
	TAILQ_INSERT_TAIL(&sp->sp_info, ksi, ksi_list);

	return 0;
}
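
/*
 * Editorial note: sigput() coalesces non-realtime signals -- a second
 * instance of the same signo overwrites the queued siginfo -- while
 * signals in the SIGRTMIN..SIGRTMAX range skip the coalescing check and
 * queue separate entries, bounded by SIGQUEUE_MAX entries per pending
 * set in total.
 */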

/*
 * sigclear:
 *
 *	Clear all pending signals in the specified set.
 */
void
sigclear(sigpend_t *sp, const sigset_t *mask, ksiginfoq_t *kq)
{
	ksiginfo_t *ksi, *next;

	if (mask == NULL)
		sigemptyset(&sp->sp_set);
	else
		sigminusset(mask, &sp->sp_set);

	TAILQ_FOREACH_SAFE(ksi, &sp->sp_info, ksi_list, next) {
		if (mask == NULL || sigismember(mask, ksi->ksi_signo)) {
			TAILQ_REMOVE(&sp->sp_info, ksi, ksi_list);
			KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);
			KASSERT((ksi->ksi_flags & KSI_QUEUED) != 0);
			TAILQ_INSERT_TAIL(kq, ksi, ksi_list);
		}
	}
}

/*
 * sigclearall:
 *
 *	Clear all pending signals in the specified set from a process and
 *	its LWPs.
 */
void
sigclearall(struct proc *p, const sigset_t *mask, ksiginfoq_t *kq)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	sigclear(&p->p_sigpend, mask, kq);

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		sigclear(&l->l_sigpend, mask, kq);
	}
}
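
/*
 * Editorial sketch (not part of the original source): the ksiginfo
 * records are unlinked onto a caller-supplied queue under p_lock, and
 * only returned to the pool after the lock is released, so that
 * pool_cache_put() never runs with p_lock held.  The canonical caller
 * pattern (see execsigs() above) is:
 *
 *	ksiginfoq_t kq;
 *
 *	ksiginfo_queue_init(&kq);
 *	mutex_enter(p->p_lock);
 *	sigclearall(p, NULL, &kq);	// NULL mask: clear everything
 *	mutex_exit(p->p_lock);
 *	ksiginfo_queue_drain(&kq);
 */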

/*
 * sigispending:
 *
 *	Return the first signal number if there are pending signals for the
 *	current LWP.  May be called unlocked provided that LW_PENDSIG is set,
 *	and that the signal has been posted to the appropriate queue before
 *	LW_PENDSIG is set.
 *
 *	This should only ever be called with (l == curlwp), unless the
 *	result does not matter (procfs, sysctl).
 */
int
sigispending(struct lwp *l, int signo)
{
	struct proc *p = l->l_proc;
	sigset_t tset;

	membar_consumer();

	tset = l->l_sigpend.sp_set;
	sigplusset(&p->p_sigpend.sp_set, &tset);
	sigminusset(&p->p_sigctx.ps_sigignore, &tset);
	sigminusset(&l->l_sigmask, &tset);

	if (signo == 0) {
		return firstsig(&tset);
	}
	return sigismember(&tset, signo) ? signo : 0;
}
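
/*
 * Editorial sketch (not part of the original source): the unlocked form
 * of this check is what the syscall/trap return path relies on --
 * LW_PENDSIG is only set after the signal is visible on a pending queue,
 * so something like:
 *
 *	if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))
 *		// take the slow path: mutex_enter(p->p_lock),
 *		// issignal()/postsig() loop, mutex_exit(p->p_lock)
 *
 * is safe without holding p_lock, per the comment above.
 */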

void
getucontext(struct lwp *l, ucontext_t *ucp)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));

	ucp->uc_flags = 0;
	ucp->uc_link = l->l_ctxlink;
	ucp->uc_sigmask = l->l_sigmask;
	ucp->uc_flags |= _UC_SIGMASK;

	/*
	 * The (unsupplied) definition of the `current execution stack'
	 * in the System V Interface Definition appears to allow returning
	 * the main context stack.
	 */
	if ((l->l_sigstk.ss_flags & SS_ONSTACK) == 0) {
		ucp->uc_stack.ss_sp = (void *)l->l_proc->p_stackbase;
		ucp->uc_stack.ss_size = ctob(l->l_proc->p_vmspace->vm_ssize);
		ucp->uc_stack.ss_flags = 0;	/* XXX, def. is Very Fishy */
	} else {
		/* Simply copy alternate signal execution stack. */
		ucp->uc_stack = l->l_sigstk;
	}
	ucp->uc_flags |= _UC_STACK;
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
	mutex_enter(p->p_lock);
}

int
setucontext(struct lwp *l, const ucontext_t *ucp)
{
	struct proc *p = l->l_proc;
	int error;

	KASSERT(mutex_owned(p->p_lock));

	if ((ucp->uc_flags & _UC_SIGMASK) != 0) {
		error = sigprocmask1(l, SIG_SETMASK, &ucp->uc_sigmask, NULL);
		if (error != 0)
			return error;
	}

	mutex_exit(p->p_lock);
	error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags);
	mutex_enter(p->p_lock);
	if (error != 0)
		return error;

	l->l_ctxlink = ucp->uc_link;

	/*
	 * If there was stack information, update whether or not we are
	 * still running on an alternate signal stack.
	 */
	if ((ucp->uc_flags & _UC_STACK) != 0) {
		if (ucp->uc_stack.ss_flags & SS_ONSTACK)
			l->l_sigstk.ss_flags |= SS_ONSTACK;
		else
			l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	}

	return 0;
}

/*
 * killpg1: common code for kill process group/broadcast kill.
 */
int
killpg1(struct lwp *l, ksiginfo_t *ksi, int pgid, int all)
{
	struct proc *p, *cp;
	kauth_cred_t pc;
	struct pgrp *pgrp;
	int nfound;
	int signo = ksi->ksi_signo;

	cp = l->l_proc;
	pc = l->l_cred;
	nfound = 0;

	mutex_enter(&proc_lock);
	if (all) {
		/*
		 * Broadcast.
		 */
		PROCLIST_FOREACH(p, &allproc) {
			if (p->p_pid <= 1 || p == cp ||
			    (p->p_flag & PK_SYSTEM) != 0)
				continue;
			mutex_enter(p->p_lock);
			if (kauth_authorize_process(pc,
			    KAUTH_PROCESS_SIGNAL, p, KAUTH_ARG(signo), NULL,
			    NULL) == 0) {
				nfound++;
				if (signo)
					kpsignal2(p, ksi);
			}
			mutex_exit(p->p_lock);
		}
	} else {
		if (pgid == 0)
			/* Zero pgid means send to my process group. */
			pgrp = cp->p_pgrp;
		else {
			pgrp = pgrp_find(pgid);
			if (pgrp == NULL)
				goto out;
		}
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 || p->p_flag & PK_SYSTEM)
				continue;
			mutex_enter(p->p_lock);
			if (kauth_authorize_process(pc, KAUTH_PROCESS_SIGNAL,
			    p, KAUTH_ARG(signo), NULL, NULL) == 0) {
				nfound++;
				if (signo && P_ZOMBIE(p) == 0)
					kpsignal2(p, ksi);
			}
			mutex_exit(p->p_lock);
		}
	}
out:
	mutex_exit(&proc_lock);
	return nfound ? 0 : ESRCH;
}

/*
 * Send a signal to a process group.  If checkctty is set, limit to members
 * which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
	ksiginfo_t ksi;

	KASSERT(!cpu_intr_p());
	KASSERT(mutex_owned(&proc_lock));

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = sig;
	kpgsignal(pgrp, &ksi, NULL, checkctty);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{
	struct proc *p;

	KASSERT(!cpu_intr_p());
	KASSERT(mutex_owned(&proc_lock));
	KASSERT(pgrp != NULL);

	LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
		if (checkctty == 0 || p->p_lflag & PL_CONTROLT)
			kpsignal(p, ksi, data);
}
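
/*
 * Editorial sketch (not part of the original source): the tty layer is
 * the classic caller here, delivering an interrupt character roughly as:
 *
 *	mutex_enter(&proc_lock);
 *	pgsignal(tp->t_pgrp, SIGINT, 1);	// 1: members with a ctty only
 *	mutex_exit(&proc_lock);
 *
 * (assuming 'tp' is the tty; the real tty code also checks that t_pgrp
 * is non-NULL before calling).
 */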

/*
 * Send a signal caused by a trap to the current LWP.  If it will be caught
 * immediately, deliver it with correct code.  Otherwise, post it normally.
 */
void
trapsignal(struct lwp *l, ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int signo = ksi->ksi_signo;
	sigset_t *mask;
	sig_t action;

	KASSERT(KSI_TRAP_P(ksi));

	ksi->ksi_lid = l->l_lid;
	p = l->l_proc;

	KASSERT(!cpu_intr_p());
	mutex_enter(&proc_lock);
	mutex_enter(p->p_lock);

repeat:
	/*
	 * If we are exiting, demise now.
	 *
	 * This avoids notifying tracer and deadlocking.
	 */
	if (__predict_false(ISSET(p->p_sflag, PS_WEXIT))) {
		mutex_exit(p->p_lock);
		mutex_exit(&proc_lock);
		lwp_exit(l);
		panic("trapsignal");
		/* NOTREACHED */
	}

	/*
	 * The process is already stopping.
	 */
	if ((p->p_sflag & PS_STOPPING) != 0) {
		mutex_exit(&proc_lock);
		sigswitch_unlock_and_switch_away(l);
		mutex_enter(&proc_lock);
		mutex_enter(p->p_lock);
		goto repeat;
	}

	mask = &l->l_sigmask;
	ps = p->p_sigacts;
	action = SIGACTION_PS(ps, signo).sa_handler;

	if (ISSET(p->p_slflag, PSL_TRACED) &&
	    !(p->p_pptr == p->p_opptr && ISSET(p->p_lflag, PL_PPWAIT)) &&
	    p->p_xsig != SIGKILL &&
	    !sigismember(&p->p_sigpend.sp_set, SIGKILL)) {
		p->p_xsig = signo;
		p->p_sigctx.ps_faked = true;
		p->p_sigctx.ps_lwp = ksi->ksi_lid;
		p->p_sigctx.ps_info = ksi->ksi_info;
		sigswitch(0, signo, true);

		if (ktrpoint(KTR_PSIG)) {
			if (p->p_emul->e_ktrpsig)
				p->p_emul->e_ktrpsig(signo, action, mask, ksi);
			else
				ktrpsig(signo, action, mask, ksi);
		}
		return;
	}

	const bool caught = sigismember(&p->p_sigctx.ps_sigcatch, signo);
	const bool masked = sigismember(mask, signo);
	if (caught && !masked) {
		mutex_exit(&proc_lock);
		l->l_ru.ru_nsignals++;
		kpsendsig(l, ksi, mask);
		mutex_exit(p->p_lock);

		if (ktrpoint(KTR_PSIG)) {
			if (p->p_emul->e_ktrpsig)
				p->p_emul->e_ktrpsig(signo, action, mask, ksi);
			else
				ktrpsig(signo, action, mask, ksi);
		}
		return;
	}

	/*
	 * If the signal is masked or ignored, then unmask it and
	 * reset it to the default action so that the process or
	 * its tracer will be notified.
	 */
	const bool ignored = action == SIG_IGN;
	if (masked || ignored) {
		mutex_enter(&ps->sa_mutex);
		sigdelset(mask, signo);
		sigdelset(&p->p_sigctx.ps_sigcatch, signo);
		sigdelset(&p->p_sigctx.ps_sigignore, signo);
		sigdelset(&SIGACTION_PS(ps, signo).sa_mask, signo);
		SIGACTION_PS(ps, signo).sa_handler = SIG_DFL;
		mutex_exit(&ps->sa_mutex);
	}

	kpsignal2(p, ksi);
	mutex_exit(p->p_lock);
	mutex_exit(&proc_lock);
}

/*
 * Fill in signal information and signal the parent for a child status change.
 */
void
child_psignal(struct proc *p, int mask)
{
	ksiginfo_t ksi;
	struct proc *q;
	int xsig;

	KASSERT(mutex_owned(&proc_lock));
	KASSERT(mutex_owned(p->p_lock));

	xsig = p->p_xsig;

	KSI_INIT(&ksi);
	ksi.ksi_signo = SIGCHLD;
	ksi.ksi_code = (xsig == SIGCONT ? CLD_CONTINUED : CLD_STOPPED);
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(p->p_cred);
	ksi.ksi_status = xsig;
	ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
	ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;

	q = p->p_pptr;

	mutex_exit(p->p_lock);
	mutex_enter(q->p_lock);

	if ((q->p_sflag & mask) == 0)
		kpsignal2(q, &ksi);

	mutex_exit(q->p_lock);
	mutex_enter(p->p_lock);
}

void
psignal(struct proc *p, int signo)
{
	ksiginfo_t ksi;

	KASSERT(!cpu_intr_p());
	KASSERT(mutex_owned(&proc_lock));

	KSI_INIT_EMPTY(&ksi);
	ksi.ksi_signo = signo;
	mutex_enter(p->p_lock);
	kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
}

void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{
	fdfile_t *ff;
	file_t *fp;
	fdtab_t *dt;

	KASSERT(!cpu_intr_p());
	KASSERT(mutex_owned(&proc_lock));

	if ((p->p_sflag & PS_WEXIT) == 0 && data) {
		size_t fd;
		filedesc_t *fdp = p->p_fd;

		/* XXXSMP locking */
		ksi->ksi_fd = -1;
		dt = atomic_load_consume(&fdp->fd_dt);
		for (fd = 0; fd < dt->dt_nfiles; fd++) {
			if ((ff = dt->dt_ff[fd]) == NULL)
				continue;
			if ((fp = atomic_load_consume(&ff->ff_file)) == NULL)
				continue;
			if (fp->f_data == data) {
				ksi->ksi_fd = fd;
				break;
			}
		}
	}
	mutex_enter(p->p_lock);
	kpsignal2(p, ksi);
	mutex_exit(p->p_lock);
}

/*
 * sigismasked:
 *
 *	Returns true if signal is ignored or masked for the specified LWP.
 */
int
sigismasked(struct lwp *l, int sig)
{
	struct proc *p = l->l_proc;

	return sigismember(&p->p_sigctx.ps_sigignore, sig) ||
	    sigismember(&l->l_sigmask, sig);
}

/*
 * sigpost:
 *
 *	Post a pending signal to an LWP.  Returns non-zero if the LWP may
 *	be able to take the signal.
 */
static int
sigpost(struct lwp *l, sig_t action, int prop, int sig)
{
	int rv, masked;
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));

	/*
	 * If the LWP is on the way out, sigclear() will be busy draining all
	 * pending signals.  Don't give it more.
	 */
	if (l->l_stat == LSZOMB)
		return 0;

	SDT_PROBE(proc, kernel, , signal__send, l, p, sig, 0, 0);

	lwp_lock(l);
	if (__predict_false((l->l_flag & LW_DBGSUSPEND) != 0)) {
		if ((prop & SA_KILL) != 0)
			l->l_flag &= ~LW_DBGSUSPEND;
		else {
			lwp_unlock(l);
			return 0;
		}
	}

	/*
	 * Have the LWP check for signals.  This ensures that even if no LWP
	 * is found to take the signal immediately, it should be taken soon.
	 */
	signotify(l);

	/*
	 * SIGCONT can be masked, but if the LWP is stopped, it needs to be
	 * restarted.  Note: SIGKILL and SIGSTOP cannot be masked.
	 */
	masked = sigismember(&l->l_sigmask, sig);
	if (masked && ((prop & SA_CONT) == 0 || l->l_stat != LSSTOP)) {
		lwp_unlock(l);
		return 0;
	}

	/*
	 * If killing the process, make it run fast.
	 */
	if (__predict_false((prop & SA_KILL) != 0) &&
	    action == SIG_DFL && l->l_priority < MAXPRI_USER) {
		KASSERT(l->l_class == SCHED_OTHER);
		lwp_changepri(l, MAXPRI_USER);
	}

	/*
	 * If the LWP is running or on a run queue, then we win.  If it's
	 * sleeping interruptibly, wake it and make it take the signal.  If
	 * the sleep isn't interruptible, then the chances are it will get
	 * to see the signal soon anyhow.  If suspended, it can't take the
	 * signal right now.  If it's LWP private or for all LWPs, save it
	 * for later; otherwise punt.
	 */
	rv = 0;

	switch (l->l_stat) {
	case LSRUN:
	case LSONPROC:
		rv = 1;
		break;

	case LSSLEEP:
		if ((l->l_flag & LW_SINTR) != 0) {
			/* setrunnable() will release the lock. */
			setrunnable(l);
			return 1;
		}
		break;

	case LSSUSPENDED:
		if ((prop & SA_KILL) != 0 && (l->l_flag & LW_WCORE) != 0) {
			/* lwp_continue() will release the lock. */
			lwp_continue(l);
			return 1;
		}
		break;

	case LSSTOP:
		if ((prop & SA_STOP) != 0)
			break;

		/*
		 * If the LWP is stopped and we are sending a continue
		 * signal, then start it again.
		 */
		if ((prop & SA_CONT) != 0) {
			if (l->l_wchan != NULL) {
				l->l_stat = LSSLEEP;
				p->p_nrlwps++;
				rv = 1;
				break;
			}
			/* setrunnable() will release the lock. */
			setrunnable(l);
			return 1;
		} else if (l->l_wchan == NULL || (l->l_flag & LW_SINTR) != 0) {
			/* setrunnable() will release the lock. */
			setrunnable(l);
			return 1;
		}
		break;

	default:
		break;
	}

	lwp_unlock(l);
	return rv;
}

/*
 * Notify an LWP that it has a pending signal.
 */
void
signotify(struct lwp *l)
{
	KASSERT(lwp_locked(l, NULL));

	l->l_flag |= LW_PENDSIG;
	lwp_need_userret(l);
}
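
/*
 * Editorial note: signotify() is the producer half of the unlocked check
 * in sigispending() -- the signal must already be visible on a pending
 * queue (kpsignal2() issues membar_producer() first) before LW_PENDSIG
 * is set and the LWP is nudged toward its userret path.
 */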

/*
 * Find an LWP within process p that is waiting on signal ksi, and hand
 * it on.
 */
static int
sigunwait(struct proc *p, const ksiginfo_t *ksi)
{
	struct lwp *l;
	int signo;

	KASSERT(mutex_owned(p->p_lock));

	signo = ksi->ksi_signo;

	if (ksi->ksi_lid != 0) {
		/*
		 * Signal came via _lwp_kill().  Find the LWP and see if
		 * it's interested.
		 */
		if ((l = lwp_find(p, ksi->ksi_lid)) == NULL)
			return 0;
		if (l->l_sigwaited == NULL ||
		    !sigismember(&l->l_sigwaitset, signo))
			return 0;
	} else {
		/*
		 * Look for any LWP that may be interested.
		 */
		LIST_FOREACH(l, &p->p_sigwaiters, l_sigwaiter) {
			KASSERT(l->l_sigwaited != NULL);
			if (sigismember(&l->l_sigwaitset, signo))
				break;
		}
	}

	if (l != NULL) {
		l->l_sigwaited->ksi_info = ksi->ksi_info;
		l->l_sigwaited = NULL;
		LIST_REMOVE(l, l_sigwaiter);
		cv_signal(&l->l_sigcv);
		return 1;
	}

	return 0;
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
int
kpsignal2(struct proc *p, ksiginfo_t *ksi)
{
	int prop, signo = ksi->ksi_signo;
	struct lwp *l = NULL;
	ksiginfo_t *kp;
	lwpid_t lid;
	sig_t action;
	bool toall;
	bool traced;
	int error = 0;

	KASSERT(!cpu_intr_p());
	KASSERT(mutex_owned(&proc_lock));
	KASSERT(mutex_owned(p->p_lock));
	KASSERT((ksi->ksi_flags & KSI_QUEUED) == 0);
	KASSERT(signo > 0);
	KASSERT(signo < NSIG);

	/*
	 * If the process is being created by fork, is a zombie or is
	 * exiting, then just drop the signal here and bail out.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
		return 0;

	/*
	 * Notify any interested parties of the signal.
	 */
	KNOTE(&p->p_klist, NOTE_SIGNAL | signo);

	/*
	 * Some signals including SIGKILL must act on the entire process.
	 */
	kp = NULL;
	prop = sigprop[signo];
	toall = ((prop & SA_TOALL) != 0);
	lid = toall ? 0 : ksi->ksi_lid;
	traced = ISSET(p->p_slflag, PSL_TRACED) &&
	    !sigismember(&p->p_sigctx.ps_sigpass, signo);

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (traced) {
		action = SIG_DFL;

		if (lid == 0) {
			/*
			 * If the process is being traced and the signal
			 * is being caught, make sure to save any ksiginfo.
			 */
			if ((kp = ksiginfo_alloc(p, ksi, PR_NOWAIT)) == NULL)
				goto discard;
			if ((error = sigput(&p->p_sigpend, p, kp)) != 0)
				goto out;
		}
	} else {

		/*
		 * If the signal is being ignored, then drop it.  Note: we
		 * don't set SIGCONT in ps_sigignore, and if it is set to
		 * SIG_IGN, action will be SIG_DFL here.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signo))
			goto discard;

		else if (sigismember(&p->p_sigctx.ps_sigcatch, signo))
			action = SIG_CATCH;
		else {
			action = SIG_DFL;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
				goto discard;

			if (prop & SA_KILL && p->p_nice > NZERO)
				p->p_nice = NZERO;
		}
	}

	/*
	 * If stopping or continuing a process, discard any pending
	 * signals that would do the inverse.
	 */
	if ((prop & (SA_CONT | SA_STOP)) != 0) {
		ksiginfoq_t kq;

		ksiginfo_queue_init(&kq);
		if ((prop & SA_CONT) != 0)
			sigclear(&p->p_sigpend, &stopsigmask, &kq);
		if ((prop & SA_STOP) != 0)
			sigclear(&p->p_sigpend, &contsigmask, &kq);
		ksiginfo_queue_drain(&kq);	/* XXXSMP */
	}

	/*
	 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
	 * please!), check if any LWPs are waiting on it.  If yes, pass on
	 * the signal info.  The signal won't be processed further here.
	 */
	if ((prop & SA_CANTMASK) == 0 && !LIST_EMPTY(&p->p_sigwaiters) &&
	    p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0 &&
	    sigunwait(p, ksi))
		goto discard;

	/*
	 * XXXSMP Should be allocated by the caller, we're holding locks
	 * here.
	 */
	if (kp == NULL && (kp = ksiginfo_alloc(p, ksi, PR_NOWAIT)) == NULL)
		goto discard;

	/*
	 * LWP private signals are easy - just find the LWP and post
	 * the signal to it.
	 */
	if (lid != 0) {
		l = lwp_find(p, lid);
		if (l != NULL) {
			if ((error = sigput(&l->l_sigpend, p, kp)) != 0)
				goto out;
			membar_producer();
			if (sigpost(l, action, prop, kp->ksi_signo) != 0)
				signo = -1;
		}
		goto out;
	}

	/*
	 * Some signals go to all LWPs, even if posted with _lwp_kill()
	 * or for an SA process.
	 */
	if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) {
		if (traced)
			goto deliver;

		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) != 0 && action == SIG_DFL)
			goto out;
	} else {
		/*
		 * Process is stopped or stopping.
		 * - If traced, then no action is needed, unless killing.
		 * - Run the process only if sending SIGCONT or SIGKILL.
		 */
		if (traced && signo != SIGKILL) {
			goto out;
		}
		if ((prop & SA_CONT) != 0 || signo == SIGKILL) {
			/*
			 * Re-adjust p_nstopchild if the process was
			 * stopped but not yet collected by its parent.
			 */
			if (p->p_stat == SSTOP && !p->p_waited)
				p->p_pptr->p_nstopchild--;
			p->p_stat = SACTIVE;
			p->p_sflag &= ~PS_STOPPING;
			if (traced) {
				KASSERT(signo == SIGKILL);
				goto deliver;
			}
			/*
			 * Do not make signal pending if SIGCONT is default.
			 *
			 * If the process catches SIGCONT, let it handle the
			 * signal itself (if waiting on event - process runs,
			 * otherwise continues sleeping).
			 */
			if ((prop & SA_CONT) != 0) {
				p->p_xsig = SIGCONT;
				p->p_sflag |= PS_CONTINUED;
				child_psignal(p, 0);
				if (action == SIG_DFL) {
					KASSERT(signo != SIGKILL);
					goto deliver;
				}
			}
		} else if ((prop & SA_STOP) != 0) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			goto out;
		}
	}
	/*
	 * Make signal pending.
	 */
	KASSERT(!traced);
	if ((error = sigput(&p->p_sigpend, p, kp)) != 0)
		goto out;
deliver:
	/*
	 * Before we set LW_PENDSIG on any LWP, ensure that the signal is
	 * visible on the per process list (for sigispending()).  This
	 * is unlikely to be needed in practice, but...
	 */
	membar_producer();

	/*
	 * Try to find an LWP that can take the signal.
	 */
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (sigpost(l, action, prop, kp->ksi_signo) && !toall)
			break;
	}
	signo = -1;
out:
	/*
	 * If the ksiginfo wasn't used, then bin it.  XXXSMP freeing memory
	 * with locks held.  The caller should take care of this.
	 */
	ksiginfo_free(kp);
	if (signo == -1)
		return error;
discard:
	SDT_PROBE(proc, kernel, , signal__discard, l, p, signo, 0, 0);
	return error;
}

void
kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));
	(*p->p_emul->e_sendsig)(ksi, mask);
}
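
/*
 * Editorial note: for native processes, p_emul->e_sendsig is expected to
 * point at sendsig() below; compat emulations substitute their own
 * delivery routine through the same hook.
 */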

/*
 * Stop any LWPs sleeping interruptibly.
 */
static void
proc_stop_lwps(struct proc *p)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));
	KASSERT((p->p_sflag & PS_STOPPING) != 0);

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		if (l->l_stat == LSSLEEP && (l->l_flag & LW_SINTR) != 0) {
			l->l_stat = LSSTOP;
			p->p_nrlwps--;
		}
		lwp_unlock(l);
	}
}

/*
 * Finish stopping of a process.  Mark it stopped and notify the parent.
 *
 * p_lock is dropped briefly while the parent is signalled (see the
 * child_psignal() call below).
 */
static void
proc_stop_done(struct proc *p, int ppmask)
{

	KASSERT(mutex_owned(&proc_lock));
	KASSERT(mutex_owned(p->p_lock));
	KASSERT((p->p_sflag & PS_STOPPING) != 0);
	KASSERT(p->p_nrlwps == 0 || p->p_nrlwps == 1);
	KASSERT(p->p_nrlwps == 0 || p == curproc);

	p->p_sflag &= ~PS_STOPPING;
	p->p_stat = SSTOP;
	p->p_waited = 0;
	p->p_pptr->p_nstopchild++;

	/* child_psignal drops p_lock briefly. */
	child_psignal(p, ppmask);
	cv_broadcast(&p->p_pptr->p_waitcv);
}

/*
 * Stop the current process and switch away to the debugger, notifying
 * it of an event specific to traced processes.
 */
void
eventswitch(int code, int pe_report_event, int entity)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps;
	sigset_t *mask;
	sig_t action;
	ksiginfo_t ksi;
	const int signo = SIGTRAP;

	KASSERT(mutex_owned(&proc_lock));
	KASSERT(mutex_owned(p->p_lock));
	KASSERT(p->p_pptr != initproc);
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(ISSET(p->p_slflag, PSL_TRACED));
	KASSERT(!ISSET(l->l_flag, LW_SYSTEM));
	KASSERT(p->p_nrlwps > 0);
	KASSERT((code == TRAP_CHLD) || (code == TRAP_LWP) ||
	    (code == TRAP_EXEC));
	KASSERT((code != TRAP_CHLD) || (entity > 1));	/* prevent pid1 */
	KASSERT((code != TRAP_LWP) || (entity > 0));

repeat:
	/*
	 * If we are exiting, demise now.
	 *
	 * This avoids notifying tracer and deadlocking.
	 */
	if (__predict_false(ISSET(p->p_sflag, PS_WEXIT))) {
		mutex_exit(p->p_lock);
		mutex_exit(&proc_lock);

		if (pe_report_event == PTRACE_LWP_EXIT) {
			/* Avoid double lwp_exit() and panic. */
			return;
		}

		lwp_exit(l);
		panic("eventswitch");
		/* NOTREACHED */
	}

	/*
	 * If we are no longer traced, abandon this event signal.
	 *
	 * This avoids killing a process after detaching the debugger.
	 */
	if (__predict_false(!ISSET(p->p_slflag, PSL_TRACED))) {
		mutex_exit(p->p_lock);
		mutex_exit(&proc_lock);
		return;
	}

	/*
	 * If there's a pending SIGKILL, process it immediately.
	 */
	if (p->p_xsig == SIGKILL ||
	    sigismember(&p->p_sigpend.sp_set, SIGKILL)) {
		mutex_exit(p->p_lock);
		mutex_exit(&proc_lock);
		return;
	}

	/*
	 * The process is already stopping.
	 */
	if ((p->p_sflag & PS_STOPPING) != 0) {
		mutex_exit(&proc_lock);
		sigswitch_unlock_and_switch_away(l);
		mutex_enter(&proc_lock);
		mutex_enter(p->p_lock);
		goto repeat;
	}

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_lid = l->l_lid;
	ksi.ksi_signo = signo;
	ksi.ksi_code = code;
	ksi.ksi_pe_report_event = pe_report_event;

	CTASSERT(sizeof(ksi.ksi_pe_other_pid) == sizeof(ksi.ksi_pe_lwp));
	ksi.ksi_pe_other_pid = entity;

	/* Needed for ktrace */
	ps = p->p_sigacts;
	action = SIGACTION_PS(ps, signo).sa_handler;
	mask = &l->l_sigmask;

	p->p_xsig = signo;
	p->p_sigctx.ps_faked = true;
	p->p_sigctx.ps_lwp = ksi.ksi_lid;
	p->p_sigctx.ps_info = ksi.ksi_info;

	sigswitch(0, signo, true);

	if (code == TRAP_CHLD) {
		mutex_enter(&proc_lock);
		while (l->l_vforkwaiting)
			cv_wait(&l->l_waitcv, &proc_lock);
		mutex_exit(&proc_lock);
	}

	if (ktrpoint(KTR_PSIG)) {
		if (p->p_emul->e_ktrpsig)
			p->p_emul->e_ktrpsig(signo, action, mask, &ksi);
		else
			ktrpsig(signo, action, mask, &ksi);
	}
}

void
eventswitchchild(struct proc *p, int code, int pe_report_event)
{
	mutex_enter(&proc_lock);
	mutex_enter(p->p_lock);
	if ((p->p_slflag & (PSL_TRACED|PSL_TRACEDCHILD)) !=
	    (PSL_TRACED|PSL_TRACEDCHILD)) {
		mutex_exit(p->p_lock);
		mutex_exit(&proc_lock);
		return;
	}
	eventswitch(code, pe_report_event, p->p_oppid);
}

/*
 * Stop the current process and switch away when being stopped or traced.
 */
static void
sigswitch(int ppmask, int signo, bool proc_lock_held)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(p->p_nrlwps > 0);

	if (proc_lock_held) {
		KASSERT(mutex_owned(&proc_lock));
	} else {
		KASSERT(!mutex_owned(&proc_lock));
	}

	/*
	 * On entry we know that the process needs to stop.  If it's
	 * the result of a 'sideways' stop signal that has been sourced
	 * through issignal(), then stop other LWPs in the process too.
	 */
	if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) {
		KASSERT(signo != 0);
		proc_stop(p, signo);
		KASSERT(p->p_nrlwps > 0);
	}

	/*
	 * If we are the last live LWP, and the stop was a result of
	 * a new signal, then signal the parent.
	 */
	if ((p->p_sflag & PS_STOPPING) != 0) {
		if (!proc_lock_held && !mutex_tryenter(&proc_lock)) {
			mutex_exit(p->p_lock);
			mutex_enter(&proc_lock);
			mutex_enter(p->p_lock);
		}

		if (p->p_nrlwps == 1 && (p->p_sflag & PS_STOPPING) != 0) {
			/*
			 * Note that proc_stop_done() can drop
			 * p->p_lock briefly.
			 */
			proc_stop_done(p, ppmask);
		}

		mutex_exit(&proc_lock);
	}

	sigswitch_unlock_and_switch_away(l);
}

/*
 * Unlock and switch away.
 */
static void
sigswitch_unlock_and_switch_away(struct lwp *l)
{
	struct proc *p;

	p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));
	KASSERT(!mutex_owned(&proc_lock));

	KASSERT(l->l_stat == LSONPROC);
	KASSERT(p->p_nrlwps > 0);
	KASSERT(l->l_blcnt == 0);

	if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
		p->p_nrlwps--;
		lwp_lock(l);
		KASSERT(l->l_stat == LSONPROC || l->l_stat == LSSLEEP);
		l->l_stat = LSSTOP;
		lwp_unlock(l);
	}

	mutex_exit(p->p_lock);
	lwp_lock(l);
	spc_lock(l->l_cpu);
	mi_switch(l);
}

/*
 * Check for a signal from the debugger.
 */
static int
sigchecktrace(void)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	int signo;

	KASSERT(mutex_owned(p->p_lock));

	/* If there's a pending SIGKILL, process it immediately. */
	if (sigismember(&p->p_sigpend.sp_set, SIGKILL))
		return 0;

	/*
	 * If we are no longer being traced, or the parent didn't
	 * give us a signal, or we're stopping, look for more signals.
	 */
	if ((p->p_slflag & PSL_TRACED) == 0 || p->p_xsig == 0 ||
	    (p->p_sflag & PS_STOPPING) != 0)
		return 0;

	/*
	 * If the new signal is being masked, look for other signals.
	 * `p->p_sigctx.ps_siglist |= mask' is done in setrunnable().
	 */
	signo = p->p_xsig;
	p->p_xsig = 0;
	if (sigismember(&l->l_sigmask, signo)) {
		signo = 0;
	}
	return signo;
}
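
/*
 * Editorial sketch (not part of the original source): p_xsig is where
 * the debugger's verdict lands.  Assuming the usual ptrace(2) flow, a
 * tracer that resumes the child with e.g.
 *
 *	ptrace(PT_CONTINUE, pid, (void *)1, SIGTERM);
 *
 * causes SIGTERM to be recorded in p_xsig, and sigchecktrace() then
 * hands it to the resuming LWP unless the LWP has it masked.
 */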

/*
 * If the current process has received a signal (one that should be caught
 * or should cause termination, and that should interrupt the current
 * syscall), return the signal number.
 *
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap.
 *
 * We will also return -1 if the process is exiting and the current LWP must
 * follow suit.
 */
int
issignal(struct lwp *l)
{
	struct proc *p;
	int siglwp, signo, prop;
	sigpend_t *sp;
	sigset_t ss;
	bool traced;

	p = l->l_proc;
	sp = NULL;
	signo = 0;

	KASSERT(p == curproc);
	KASSERT(mutex_owned(p->p_lock));

	for (;;) {
		/* Discard any signals that we have decided not to take. */
		if (signo != 0) {
			(void)sigget(sp, NULL, signo, NULL);
		}

		/*
		 * If the process is stopped/stopping, then stop ourselves
		 * now that we're on the kernel/userspace boundary.  When
		 * we awaken, check for a signal from the debugger.
		 */
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			sigswitch_unlock_and_switch_away(l);
			mutex_enter(p->p_lock);
			continue;
		} else if (p->p_stat == SACTIVE)
			signo = sigchecktrace();
		else
			signo = 0;

		/* Signals from the debugger are "out of band". */
		sp = NULL;

		/*
		 * If the debugger didn't provide a signal, find a pending
		 * signal from our set.  Check per-LWP signals first, and
		 * then per-process.
		 */
		if (signo == 0) {
			sp = &l->l_sigpend;
			ss = sp->sp_set;
			siglwp = l->l_lid;
			if ((p->p_lflag & PL_PPWAIT) != 0)
				sigminusset(&vforksigmask, &ss);
			sigminusset(&l->l_sigmask, &ss);

			if ((signo = firstsig(&ss)) == 0) {
				sp = &p->p_sigpend;
				ss = sp->sp_set;
				siglwp = 0;
				if ((p->p_lflag & PL_PPWAIT) != 0)
					sigminusset(&vforksigmask, &ss);
				sigminusset(&l->l_sigmask, &ss);

				if ((signo = firstsig(&ss)) == 0) {
					/*
					 * No signal pending - clear the
					 * indicator and bail out.
					 */
					lwp_lock(l);
					l->l_flag &= ~LW_PENDSIG;
					lwp_unlock(l);
					sp = NULL;
					break;
				}
			}
		}

		traced = ISSET(p->p_slflag, PSL_TRACED) &&
		    !sigismember(&p->p_sigctx.ps_sigpass, signo);

		if (sp) {
			/*
			 * Overwrite process' signal context to correspond
			 * to the currently reported LWP.  This is necessary
			 * for PT_GET_SIGINFO to report the correct signal
			 * when multiple LWPs have pending signals.  We do
			 * this only when the signal comes from the queue;
			 * for signals created by the debugger we assume it
			 * set the correct siginfo.
			 */
			ksiginfo_t *ksi = TAILQ_FIRST(&sp->sp_info);
			if (ksi) {
				p->p_sigctx.ps_lwp = ksi->ksi_lid;
				p->p_sigctx.ps_info = ksi->ksi_info;
			} else {
				p->p_sigctx.ps_lwp = siglwp;
				memset(&p->p_sigctx.ps_info, 0,
				    sizeof(p->p_sigctx.ps_info));
				p->p_sigctx.ps_info._signo = signo;
				p->p_sigctx.ps_info._code = SI_NOINFO;
			}
		}

		/*
		 * We should see pending but ignored signals only if
		 * we are being traced.
		 */
		if (sigismember(&p->p_sigctx.ps_sigignore, signo) &&
		    !traced) {
			/* Discard the signal. */
			continue;
		}

		/*
		 * If traced, always stop, and stay stopped until released
		 * by the debugger.  If our parent is our debugger waiting
		 * for us and we vforked, don't hang as we could deadlock.
		 */
		if (traced && signo != SIGKILL &&
		    !(ISSET(p->p_lflag, PL_PPWAIT) &&
		     (p->p_pptr == p->p_opptr))) {
			/*
			 * Take the signal, but don't remove it from the
			 * siginfo queue, because the debugger can send
			 * it later.
			 */
			if (sp)
				sigdelset(&sp->sp_set, signo);
			p->p_xsig = signo;

			/* Handling of signal trace */
			sigswitch(0, signo, false);
			mutex_enter(p->p_lock);

			/* Check for a signal from the debugger. */
			if ((signo = sigchecktrace()) == 0)
				continue;

			/* Signals from the debugger are "out of band". */
			sp = NULL;
		}

		prop = sigprop[signo];

		/*
		 * Decide whether the signal should be returned.
		 */
		switch ((long)SIGACTION(p, signo).sa_handler) {
		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf_nolog("Process (pid %d) got sig %d\n",
				    p->p_pid, signo);
#endif
				continue;
			}

			/*
			 * If there is a pending stop signal to process with
			 * default action, stop here, then clear the signal.
			 * However, if the process is a member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				/*
				 * XXX Don't hold proc_lock for p_lflag,
				 * but it's not a big deal.
				 */
				if ((traced &&
				    !(ISSET(p->p_lflag, PL_PPWAIT) &&
				     (p->p_pptr == p->p_opptr))) ||
				    ((p->p_lflag & PL_ORPHANPG) != 0 &&
				    prop & SA_TTYSTOP)) {
					/* Ignore the signal. */
					continue;
				}
				/* Take the signal. */
				(void)sigget(sp, NULL, signo, NULL);
				p->p_xsig = signo;
				p->p_sflag &= ~PS_CONTINUED;
				signo = 0;
				sigswitch(PS_NOCLDSTOP, p->p_xsig, false);
				mutex_enter(p->p_lock);
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				continue;
			}
			break;

		case (long)SIG_IGN:
#ifdef DEBUG_ISSIGNAL
			/*
			 * Masking above should prevent us from ever trying
			 * to take action on an ignored signal other than
			 * SIGCONT, unless the process is traced.
			 */
			if ((prop & SA_CONT) == 0 && !traced)
				printf_nolog("issignal\n");
#endif
			continue;

		default:
			/*
			 * This signal has an action, let postsig() process
			 * it.
			 */
			break;
		}

		break;
	}

	l->l_sigpendset = sp;
	return signo;
}
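
/*
 * Editorial sketch (not part of the original source): issignal() and
 * postsig() are consumed as a pair from the userret path, roughly:
 *
 *	mutex_enter(p->p_lock);
 *	while ((signo = issignal(l)) != 0)
 *		postsig(signo);	// may drop and re-take p_lock
 *	mutex_exit(p->p_lock);
 *
 * issignal() records the chosen pending set in l->l_sigpendset so that
 * postsig() knows which queue to dequeue the siginfo from.
 */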
2045
2046 /*
2047 * Take the action for the specified signal
2048 * from the current set of pending signals.
2049 */
2050 void
2051 postsig(int signo)
2052{
2053 struct lwp *l;
2054 struct proc *p;
2055 struct sigacts *ps;
2056 sig_t action;
2057 sigset_t *returnmask;
2058 ksiginfo_t ksi;
2059
2060 l = curlwp;
2061 p = l->l_proc;
2062 ps = p->p_sigacts;
2063
2064 KASSERT(mutex_owned(p->p_lock));
2065 KASSERT(signo > 0);
2066
2067 /*
2068 * Set the new mask value and also defer further occurrences of this
2069 * signal.
2070 *
2071 * Special case: user has done a sigsuspend. Here the current mask is
2072 * not of interest, but rather the mask from before the sigsuspend is
2073 * what we want restored after the signal processing is completed.
2074 */
2075 if (l->l_sigrestore) {
2076 returnmask = &l->l_sigoldmask;
2077 l->l_sigrestore = 0;
2078 } else
2079 returnmask = &l->l_sigmask;
2080
2081 /*
2082 * Commit to taking the signal before releasing the mutex.
2083 */
2084 action = SIGACTION_PS(ps, signo).sa_handler;
2085 l->l_ru.ru_nsignals++;
2086 if (l->l_sigpendset == NULL) {
2087 /* From the debugger */
2088 if (p->p_sigctx.ps_faked &&
2089 signo == p->p_sigctx.ps_info._signo) {
2090 KSI_INIT(&ksi);
2091 ksi.ksi_info = p->p_sigctx.ps_info;
2092 ksi.ksi_lid = p->p_sigctx.ps_lwp;
2093 p->p_sigctx.ps_faked = false;
2094 } else {
2095 if (!siggetinfo(&l->l_sigpend, &ksi, signo))
2096 (void)siggetinfo(&p->p_sigpend, &ksi, signo);
2097 }
2098 } else
2099 sigget(l->l_sigpendset, &ksi, signo, NULL);
2100
2101 if (ktrpoint(KTR_PSIG)) {
2102 mutex_exit(p->p_lock);
2103 if (p->p_emul->e_ktrpsig)
2104 p->p_emul->e_ktrpsig(signo, action,
2105 returnmask, &ksi);
2106 else
2107 ktrpsig(signo, action, returnmask, &ksi);
2108 mutex_enter(p->p_lock);
2109 }
2110
2111 SDT_PROBE(proc, kernel, , signal__handle, signo, &ksi, action, 0, 0);
2112
2113 if (action == SIG_DFL) {
2114 /*
2115 * Default action, where the default is to kill
2116 * the process. (Other cases were ignored above.)
2117 */
2118 sigexit(l, signo);
2119 return;
2120 }
2121
2122 /*
2123 * If we get here, the signal must be caught.
2124 */
2125#ifdef DIAGNOSTIC
2126 if (action == SIG_IGN || sigismember(&l->l_sigmask, signo))
2127 panic("postsig action");
2128#endif
2129
2130 kpsendsig(l, &ksi, returnmask);
2131}
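
/*
 * The l_sigrestore case above is what makes sigsuspend(2) atomic: the
 * mask given to sigsuspend() is installed only for the wait, and the
 * mask from before the call (l_sigoldmask) is what is restored around
 * the handler.  A minimal userland sketch, assuming POSIX semantics:
 *
 *	sigset_t block, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, NULL);	// SIGUSR1 now blocked
 *	sigemptyset(&waitmask);
 *	sigsuspend(&waitmask);		// atomically unblock and wait
 *	// On return, the old mask (SIGUSR1 blocked) is in effect again.
 */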
2132
2133 /*
2134 * sendsig:
2135 *
2136 * Default signal delivery method for NetBSD.
2137 */
2138 void
2139 sendsig(const struct ksiginfo *ksi, const sigset_t *mask)
2140{
2141 struct sigacts *sa;
2142 int sig;
2143
2144 sig = ksi->ksi_signo;
2145 sa = curproc->p_sigacts;
2146
2147 switch (sa->sa_sigdesc[sig].sd_vers) {
2148 case __SIGTRAMP_SIGCODE_VERSION:
2149#ifdef __HAVE_STRUCT_SIGCONTEXT
2150 case __SIGTRAMP_SIGCONTEXT_VERSION_MIN ...
2151 __SIGTRAMP_SIGCONTEXT_VERSION_MAX:
2152		/* Compat for NetBSD 1.6 and earlier. */
2153 MODULE_HOOK_CALL_VOID(sendsig_sigcontext_16_hook, (ksi, mask),
2154 break);
2155 return;
2156#endif /* __HAVE_STRUCT_SIGCONTEXT */
2157 case __SIGTRAMP_SIGINFO_VERSION_MIN ...
2158 __SIGTRAMP_SIGINFO_VERSION_MAX:
2159 sendsig_siginfo(ksi, mask);
2160 return;
2161 default:
2162 break;
2163 }
2164
2165 printf("sendsig: bad version %d\n", sa->sa_sigdesc[sig].sd_vers);
2166 sigexit(curlwp, SIGILL);
2167}
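
/*
 * Which case of the switch above is taken depends on the trampoline
 * version recorded when the handler was registered; modern userland
 * registers a siginfo trampoline, so delivery normally goes through
 * sendsig_siginfo().  A minimal userland sketch of registering such a
 * handler, assuming POSIX sigaction(2):
 *
 *	static void
 *	handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		// si is filled in from the ksiginfo passed to sendsig()
 *	}
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */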
2168
2169 /*
2170 * sendsig_reset:
2171 *
2172	 *	Reset the signal action.  Called from emulation-specific sendsig()
2173 * before unlocking to deliver the signal.
2174 */
2175 void
2176 sendsig_reset(struct lwp *l, int signo)
2177{
2178 struct proc *p = l->l_proc;
2179 struct sigacts *ps = p->p_sigacts;
2180
2181 KASSERT(mutex_owned(p->p_lock));
2182
2183 p->p_sigctx.ps_lwp = 0;
2184 memset(&p->p_sigctx.ps_info, 0, sizeof(p->p_sigctx.ps_info));
2185
2186 mutex_enter(&ps->sa_mutex);
2187 sigplusset(&SIGACTION_PS(ps, signo).sa_mask, &l->l_sigmask);
2188 if (SIGACTION_PS(ps, signo).sa_flags & SA_RESETHAND) {
2189 sigdelset(&p->p_sigctx.ps_sigcatch, signo);
2190 if (signo != SIGCONT && sigprop[signo] & SA_IGNORE)
2191 sigaddset(&p->p_sigctx.ps_sigignore, signo);
2192 SIGACTION_PS(ps, signo).sa_handler = SIG_DFL;
2193 }
2194 mutex_exit(&ps->sa_mutex);
2195}
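
/*
 * The SA_RESETHAND branch above provides signal(2)-style "one shot"
 * behaviour: after one delivery the disposition reverts to SIG_DFL.
 * A minimal userland sketch, assuming POSIX sigaction(2):
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = handler;
 *	sa.sa_flags = SA_RESETHAND;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGINT, &sa, NULL);
 *	// The first SIGINT runs handler(); a second one takes the
 *	// default action, because sa_handler was reset to SIG_DFL.
 */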
2196
2197 /*
2198	 * Kill the given process for the stated reason.
2199 */
2200 void
2201 killproc(struct proc *p, const char *why)
2202{
2203
2204 KASSERT(mutex_owned(&proc_lock));
2205
2206 log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
2207 uprintf_locked("sorry, pid %d was killed: %s\n", p->p_pid, why);
2208 psignal(p, SIGKILL);
2209}
2210
2211 /*
2212 * Force the current process to exit with the specified signal, dumping core
2213 * if appropriate. We bypass the normal tests for masked and caught
2214 * signals, allowing unrecoverable failures to terminate the process without
2215 * changing signal state. Mark the accounting record with the signal
2216 * termination. If dumping core, save the signal number for the debugger.
2217	 * Calls exit1() and does not return.
2218 */
2219 void
2220 sigexit(struct lwp *l, int signo)
2221{
2222 int exitsig, error, docore;
2223 struct proc *p;
2224 struct lwp *t;
2225
2226 p = l->l_proc;
2227
2228 KASSERT(mutex_owned(p->p_lock));
2229 KASSERT(l->l_blcnt == 0);
2230
2231 /*
2232 * Don't permit coredump() multiple times in the same process.
2233 * Call back into sigexit, where we will be suspended until
2234 * the deed is done. Note that this is a recursive call, but
2235 * LW_WCORE will prevent us from coming back this way.
2236 */
2237 if ((p->p_sflag & PS_WCORE) != 0) {
2238 lwp_lock(l);
2239 l->l_flag |= (LW_WCORE | LW_WEXIT | LW_WSUSPEND);
2240 lwp_need_userret(l);
2241 lwp_unlock(l);
2242 mutex_exit(p->p_lock);
2243 lwp_userret(l);
2244 panic("sigexit 1");
2245 /* NOTREACHED */
2246 }
2247
2248 /* If process is already on the way out, then bail now. */
2249 if ((p->p_sflag & PS_WEXIT) != 0) {
2250 mutex_exit(p->p_lock);
2251 lwp_exit(l);
2252 panic("sigexit 2");
2253 /* NOTREACHED */
2254 }
2255
2256 /*
2257 * Prepare all other LWPs for exit. If dumping core, suspend them
2258 * so that their registers are available long enough to be dumped.
2259 */
2260 if ((docore = (sigprop[signo] & SA_CORE)) != 0) {
2261 p->p_sflag |= PS_WCORE;
2262 for (;;) {
2263 LIST_FOREACH(t, &p->p_lwps, l_sibling) {
2264 lwp_lock(t);
2265 if (t == l) {
2266 t->l_flag &=
2267 ~(LW_WSUSPEND | LW_DBGSUSPEND);
2268 lwp_unlock(t);
2269 continue;
2270 }
2271 t->l_flag |= (LW_WCORE | LW_WEXIT);
2272 lwp_need_userret(t);
2273 lwp_suspend(l, t);
2274 }
2275
2276 if (p->p_nrlwps == 1)
2277 break;
2278
2279 /*
2280 * Kick any LWPs sitting in lwp_wait1(), and wait
2281 * for everyone else to stop before proceeding.
2282 */
2283 p->p_nlwpwait++;
2284 cv_broadcast(&p->p_lwpcv);
2285 cv_wait(&p->p_lwpcv, p->p_lock);
2286 p->p_nlwpwait--;
2287 }
2288 }
2289
2290 exitsig = signo;
2291 p->p_acflag |= AXSIG;
2292 memset(&p->p_sigctx.ps_info, 0, sizeof(p->p_sigctx.ps_info));
2293 p->p_sigctx.ps_info._signo = signo;
2294 p->p_sigctx.ps_info._code = SI_NOINFO;
2295
2296 if (docore) {
2297 mutex_exit(p->p_lock);
2298 MODULE_HOOK_CALL(coredump_hook, (l, NULL), enosys(), error);
2299
2300 if (kern_logsigexit) {
2301 int uid = l->l_cred ?
2302 (int)kauth_cred_geteuid(l->l_cred) : -1;
2303
2304 if (error)
2305 log(LOG_INFO, lognocoredump, p->p_pid,
2306 p->p_comm, uid, signo, error);
2307 else
2308 log(LOG_INFO, logcoredump, p->p_pid,
2309 p->p_comm, uid, signo);
2310 }
2311
2312#ifdef PAX_SEGVGUARD
2313 rw_enter(&exec_lock, RW_WRITER);
2314 pax_segvguard(l, p->p_textvp, p->p_comm, true);
2315 rw_exit(&exec_lock);
2316#endif /* PAX_SEGVGUARD */
2317
2318 /* Acquire the sched state mutex. exit1() will release it. */
2319 mutex_enter(p->p_lock);
2320 if (error == 0)
2321 p->p_sflag |= PS_COREDUMP;
2322 }
2323
2324 /* No longer dumping core. */
2325 p->p_sflag &= ~PS_WCORE;
2326
2327 exit1(l, 0, exitsig);
2328 /* NOTREACHED */
2329}
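
/*
 * The PS_COREDUMP flag set above is folded into the wait status by
 * exit1(), so the parent can observe the dump.  A minimal userland
 * sketch, assuming the BSD WCOREDUMP() wait macro:
 *
 *	#include <sys/wait.h>
 *	#include <stdlib.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0)
 *		abort();	// SIGABRT has SA_CORE set in sigprop[]
 *	int status;
 *	waitpid(pid, &status, 0);
 *	if (WIFSIGNALED(status) && WCOREDUMP(status))
 *		;		// child was killed by a signal and dumped core
 */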
2330
2331 /*
2332	 * Since the "real" code may (or may not) be present in a loadable module,
2333	 * we provide routines here which call the module hooks.
2334 */
2335
2336 int
2337 coredump_netbsd(struct lwp *l, struct coredump_iostate *iocookie)
2338{
2339
2340 int retval;
2341
2342 MODULE_HOOK_CALL(coredump_netbsd_hook, (l, iocookie), ENOSYS, retval);
2343 return retval;
2344}
2345
2346 int
2347 coredump_netbsd32(struct lwp *l, struct coredump_iostate *iocookie)
2348{
2349
2350 int retval;
2351
2352 MODULE_HOOK_CALL(coredump_netbsd32_hook, (l, iocookie), ENOSYS, retval);
2353 return retval;
2354}
2355
2356 int
2357 coredump_elf32(struct lwp *l, struct coredump_iostate *iocookie)
2358{
2359 int retval;
2360
2361 MODULE_HOOK_CALL(coredump_elf32_hook, (l, iocookie), ENOSYS, retval);
2362 return retval;
2363}
2364
2365 int
2366 coredump_elf64(struct lwp *l, struct coredump_iostate *iocookie)
2367{
2368 int retval;
2369
2370 MODULE_HOOK_CALL(coredump_elf64_hook, (l, iocookie), ENOSYS, retval);
2371 return retval;
2372}
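
/*
 * All four wrappers above share one shape: call through a hook that
 * the coredump module installs when loaded, and fail with ENOSYS when
 * it is absent.  A stripped-down sketch of the idea (hypothetical
 * names; the real MODULE_HOOK_CALL machinery also handles safe
 * set/unset of the hook):
 *
 *	int (*hook)(struct lwp *, struct coredump_iostate *);
 *
 *	int
 *	call_hook(struct lwp *l, struct coredump_iostate *io)
 *	{
 *		if (hook == NULL)
 *			return ENOSYS;	// module not loaded
 *		return (*hook)(l, io);
 *	}
 */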
2373
2374 /*
2375	 * Put process 'p' into the stopped state and optionally notify the parent.
2376 */
2377 void
2378 proc_stop(struct proc *p, int signo)
2379{
2380 struct lwp *l;
2381
2382 KASSERT(mutex_owned(p->p_lock));
2383
2384 /*
2385 * First off, set the stopping indicator and bring all sleeping
2386 * LWPs to a halt so they are included in p->p_nrlwps. We mustn't
2387 * unlock between here and the p->p_nrlwps check below.
2388 */
2389 p->p_sflag |= PS_STOPPING;
2390 membar_producer();
2391
2392 proc_stop_lwps(p);
2393
2394 /*
2395 * If there are no LWPs available to take the signal, then we
2396 * signal the parent process immediately. Otherwise, the last
2397 * LWP to stop will take care of it.
2398 */
2399
2400 if (p->p_nrlwps == 0) {
2401 proc_stop_done(p, PS_NOCLDSTOP);
2402 } else {
2403 /*
2404 * Have the remaining LWPs come to a halt, and trigger
2405 * proc_stop_callout() to ensure that they do.
2406 */
2407 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
2408 sigpost(l, SIG_DFL, SA_STOP, signo);
2409 }
2410 callout_schedule(&proc_stop_ch, 1);
2411 }
2412}
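
/*
 * The parent notification issued from proc_stop_done() is what wait(2)
 * reports as a job-control stop.  A minimal userland sketch, assuming
 * POSIX waitpid(2) with WUNTRACED:
 *
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *
 *	kill(pid, SIGSTOP);
 *	int status;
 *	waitpid(pid, &status, WUNTRACED);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
 *		;	// the child has reached SSTOP
 */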
2413
2414 /*
2415 * When stopping a process, we do not immediately set sleeping LWPs stopped,
2416 * but wait for them to come to a halt at the kernel-user boundary. This is
2417 * to allow LWPs to release any locks that they may hold before stopping.
2418 *
2419	 * Non-interruptible sleeps can be long, and there is the potential for an
2420	 * LWP to begin sleeping interruptibly soon after the process has been set
2421 * stopping (PS_STOPPING). These LWPs will not notice that the process is
2422 * stopping, and so complete halt of the process and the return of status
2423 * information to the parent could be delayed indefinitely.
2424 *
2425 * To handle this race, proc_stop_callout() runs once per tick while there
2426 * are stopping processes in the system. It sets LWPs that are sleeping
2427	 * interruptibly into the LSSTOP state.
2428 *
2429 * Note that we are not concerned about keeping all LWPs stopped while the
2430 * process is stopped: stopped LWPs can awaken briefly to handle signals.
2431 * What we do need to ensure is that all LWPs in a stopping process have
2432 * stopped at least once, so that notification can be sent to the parent
2433 * process.
2434 */
2435 static void
2436 proc_stop_callout(void *cookie)
2437{
2438 bool more, restart;
2439 struct proc *p;
2440
2441 (void)cookie;
2442
2443 do {
2444 restart = false;
2445 more = false;
2446
2447 mutex_enter(&proc_lock);
2448 PROCLIST_FOREACH(p, &allproc) {
2449 mutex_enter(p->p_lock);
2450
2451 if ((p->p_sflag & PS_STOPPING) == 0) {
2452 mutex_exit(p->p_lock);
2453 continue;
2454 }
2455
2456			/* Stop any LWPs sleeping interruptibly. */
2457 proc_stop_lwps(p);
2458 if (p->p_nrlwps == 0) {
2459 /*
2460 * We brought the process to a halt.
2461 * Mark it as stopped and notify the
2462 * parent.
2463 *
2464 * Note that proc_stop_done() will
2465 * drop p->p_lock briefly.
2466 * Arrange to restart and check
2467 * all processes again.
2468 */
2469 restart = true;
2470 proc_stop_done(p, PS_NOCLDSTOP);
2471 } else
2472 more = true;
2473
2474 mutex_exit(p->p_lock);
2475 if (restart)
2476 break;
2477 }
2478 mutex_exit(&proc_lock);
2479 } while (restart);
2480
2481 /*
2482 * If we noted processes that are stopping but still have
2483 * running LWPs, then arrange to check again in 1 tick.
2484 */
2485 if (more)
2486 callout_schedule(&proc_stop_ch, 1);
2487}
2488
2489 /*
2490 * Given a process in state SSTOP, set the state back to SACTIVE and
2491 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
2492 */
2493 void
2494 proc_unstop(struct proc *p)
2495{
2496 struct lwp *l;
2497 int sig;
2498
2499 KASSERT(mutex_owned(&proc_lock));
2500 KASSERT(mutex_owned(p->p_lock));
2501
2502 p->p_stat = SACTIVE;
2503 p->p_sflag &= ~PS_STOPPING;
2504 sig = p->p_xsig;
2505
2506 if (!p->p_waited)
2507 p->p_pptr->p_nstopchild--;
2508
2509 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
2510 lwp_lock(l);
2511 if (l->l_stat != LSSTOP || (l->l_flag & LW_DBGSUSPEND) != 0) {
2512 lwp_unlock(l);
2513 continue;
2514 }
2515 if (l->l_wchan == NULL) {
2516 setrunnable(l);
2517 continue;
2518 }
2519 if (sig && (l->l_flag & LW_SINTR) != 0) {
2520 setrunnable(l);
2521 sig = 0;
2522 } else {
2523 l->l_stat = LSSLEEP;
2524 p->p_nrlwps++;
2525 lwp_unlock(l);
2526 }
2527 }
2528}
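
/*
 * proc_unstop() is the kernel half of SIGCONT handling; the resulting
 * transition can be observed from userland.  A minimal sketch,
 * assuming waitpid(2) supports WCONTINUED:
 *
 *	kill(pid, SIGCONT);
 *	int status;
 *	waitpid(pid, &status, WCONTINUED);
 *	if (WIFCONTINUED(status))
 *		;	// the child left SSTOP and is SACTIVE again
 */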
2529
2530 void
2531 proc_stoptrace(int trapno, int sysnum, const register_t args[],
2532 const register_t *ret, int error)
2533{
2534 struct lwp *l = curlwp;
2535 struct proc *p = l->l_proc;
2536 struct sigacts *ps;
2537 sigset_t *mask;
2538 sig_t action;
2539 ksiginfo_t ksi;
2540 size_t i, sy_narg;
2541 const int signo = SIGTRAP;
2542
2543 KASSERT((trapno == TRAP_SCE) || (trapno == TRAP_SCX));
2544 KASSERT(p->p_pptr != initproc);
2545 KASSERT(ISSET(p->p_slflag, PSL_TRACED));
2546 KASSERT(ISSET(p->p_slflag, PSL_SYSCALL));
2547
2548 sy_narg = p->p_emul->e_sysent[sysnum].sy_narg;
2549
2550 KSI_INIT_TRAP(&ksi);
2551 ksi.ksi_lid = l->l_lid;
2552 ksi.ksi_signo = signo;
2553 ksi.ksi_code = trapno;
2554
2555 ksi.ksi_sysnum = sysnum;
2556 if (trapno == TRAP_SCE) {
2557 ksi.ksi_retval[0] = 0;
2558 ksi.ksi_retval[1] = 0;
2559 ksi.ksi_error = 0;
2560 } else {
2561 ksi.ksi_retval[0] = ret[0];
2562 ksi.ksi_retval[1] = ret[1];
2563 ksi.ksi_error = error;
2564 }
2565
2566 memset(ksi.ksi_args, 0, sizeof(ksi.ksi_args));
2567
2568 for (i = 0; i < sy_narg; i++)
2569 ksi.ksi_args[i] = args[i];
2570
2571 mutex_enter(p->p_lock);
2572
2573 repeat:
2574 /*
2575	 * If the process is already exiting, bail out now.
2576	 *
2577	 * This avoids notifying the tracer and deadlocking.
2578 */
2579 if (__predict_false(ISSET(p->p_sflag, PS_WEXIT))) {
2580 mutex_exit(p->p_lock);
2581 lwp_exit(l);
2582 panic("proc_stoptrace");
2583 /* NOTREACHED */
2584 }
2585
2586 /*
2587 * If there's a pending SIGKILL process it immediately.
2588	 * If there's a pending SIGKILL, process it immediately.
2589 if (p->p_xsig == SIGKILL ||
2590 sigismember(&p->p_sigpend.sp_set, SIGKILL)) {
2591 mutex_exit(p->p_lock);
2592 return;
2593 }
2594
2595 /*
2596 * If we are no longer traced, abandon this event signal.
2597 *
2598	 * This avoids killing a process after the debugger has detached.
2599 */
2600 if (__predict_false(!ISSET(p->p_slflag, PSL_TRACED))) {
2601 mutex_exit(p->p_lock);
2602 return;
2603 }
2604
2605 /*
2606 * The process is already stopping.
2607 */
2608 if ((p->p_sflag & PS_STOPPING) != 0) {
2609 sigswitch_unlock_and_switch_away(l);
2610 mutex_enter(p->p_lock);
2611 goto repeat;
2612 }
2613
2614 /* Needed for ktrace */
2615 ps = p->p_sigacts;
2616 action = SIGACTION_PS(ps, signo).sa_handler;
2617 mask = &l->l_sigmask;
2618
2619 p->p_xsig = signo;
2620 p->p_sigctx.ps_lwp = ksi.ksi_lid;
2621 p->p_sigctx.ps_info = ksi.ksi_info;
2622 sigswitch(0, signo, false);
2623
2624 if (ktrpoint(KTR_PSIG)) {
2625 if (p->p_emul->e_ktrpsig)
2626 p->p_emul->e_ktrpsig(signo, action, mask, &ksi);
2627 else
2628 ktrpsig(signo, action, mask, &ksi);
2629 }
2630}
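
/*
 * A debugger reaches proc_stoptrace() by asking for syscall tracing.
 * A minimal userland sketch, assuming NetBSD's ptrace(2) PT_SYSCALL
 * request:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	ptrace(PT_SYSCALL, pid, (void *)1, 0);	// run to next entry/exit
 *	int status;
 *	waitpid(pid, &status, 0);
 *	// The child stops with SIGTRAP; the siginfo code is TRAP_SCE on
 *	// syscall entry or TRAP_SCX on exit, as set up above.
 */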
2631
2632 static int
2633 filt_sigattach(struct knote *kn)
2634{
2635 struct proc *p = curproc;
2636
2637 kn->kn_obj = p;
2638 kn->kn_flags |= EV_CLEAR; /* automatically set */
2639
2640 mutex_enter(p->p_lock);
2641 klist_insert(&p->p_klist, kn);
2642 mutex_exit(p->p_lock);
2643
2644 return 0;
2645}
2646
2647 static void
2648 filt_sigdetach(struct knote *kn)
2649{
2650 struct proc *p = kn->kn_obj;
2651
2652 mutex_enter(p->p_lock);
2653 klist_remove(&p->p_klist, kn);
2654 mutex_exit(p->p_lock);
2655}
2656
2657 /*
2658 * Signal knotes are shared with proc knotes, so we apply a mask to
2659 * the hint in order to differentiate them from process hints. This
2660 * could be avoided by using a signal-specific knote list, but probably
2661 * isn't worth the trouble.
2662 */
2663 static int
2664 filt_signal(struct knote *kn, long hint)
2665{
2666
2667 if (hint & NOTE_SIGNAL) {
2668 hint &= ~NOTE_SIGNAL;
2669
2670 if (kn->kn_id == hint)
2671 kn->kn_data++;
2672 }
2673 return (kn->kn_data != 0);
2674}
2675
2676 const struct filterops sig_filtops = {
2677 .f_flags = FILTEROP_MPSAFE,
2678 .f_attach = filt_sigattach,
2679 .f_detach = filt_sigdetach,
2680 .f_event = filt_signal,
2681};
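
/*
 * These filterops back EVFILT_SIGNAL.  A minimal userland sketch,
 * assuming the standard kqueue(2)/kevent(2) API:
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *	EV_SET(&ev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// register the filter
 *	// Later kevent() calls return an event whose data field carries
 *	// the delivery count accumulated by filt_signal() above.
 */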
2682