/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _LINUX_WAIT_H_
#define _LINUX_WAIT_H_

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

#include <asm/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>

#define SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active)

#define might_sleep() \
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")

#define might_sleep_if(cond) do { \
	if (cond) { might_sleep(); } \
} while (0)

struct wait_queue;
struct wait_queue_head;

#define wait_queue_entry wait_queue

typedef struct wait_queue wait_queue_t;
typedef struct wait_queue_entry wait_queue_entry_t;
typedef struct wait_queue_head wait_queue_head_t;

typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);

/*
 * Many API consumers directly reference these fields and those of
 * wait_queue_head.
 */
struct wait_queue {
	unsigned int flags;	/* always 0 */
	void *private;
	wait_queue_func_t *func;
	union {
		struct list_head task_list;	/* < v4.13 */
		struct list_head entry;		/* >= v4.13 */
	};
};

struct wait_queue_head {
	spinlock_t lock;
	union {
		struct list_head task_list;	/* < v4.13 */
		struct list_head head;		/* >= v4.13 */
	};
};
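
/*
 * Illustrative sketch (not part of the KPI): a typical "is anybody
 * waiting?" check that references the wait_queue_head fields directly,
 * as many Linux API consumers do.  The function name is hypothetical;
 * note that the union members alias the same storage, so "head" and
 * "task_list" name the same list.
 *
 *	static void
 *	example_notify(wait_queue_head_t *wqh)
 *	{
 *		spin_lock(&wqh->lock);
 *		if (!list_empty(&wqh->head))
 *			wake_up_locked(wqh);
 *		spin_unlock(&wqh->lock);
 *	}
 */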

/*
 * This function is referenced by at least one DRM driver, so it may not be
 * renamed and furthermore must be the default wait queue callback.
 */
extern wait_queue_func_t autoremove_wake_function;
extern wait_queue_func_t default_wake_function;

#define DEFINE_WAIT_FUNC(name, function)			\
	wait_queue_t name = {					\
		.private = current,				\
		.func = function,				\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list) \
	}

#define DEFINE_WAIT(name) \
	DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DECLARE_WAITQUEUE(name, task)				\
	wait_queue_t name = {					\
		.private = task,				\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list) \
	}

#define DECLARE_WAIT_QUEUE_HEAD(name)				\
	wait_queue_head_t name = {				\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list), \
	};							\
	MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)

#define init_waitqueue_head(wqh) do {				\
	mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"),	\
	    NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS);		\
	INIT_LIST_HEAD(&(wqh)->task_list);			\
} while (0)

#define __init_waitqueue_head(wqh, name, lk) init_waitqueue_head(wqh)

void linux_init_wait_entry(wait_queue_t *, int);
void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);

#define init_wait_entry(wq, flags)				\
	linux_init_wait_entry(wq, flags)
#define wake_up(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 1, false)
#define wake_up_all(wqh)					\
	linux_wake_up(wqh, TASK_NORMAL, 0, false)
#define wake_up_locked(wqh)					\
	linux_wake_up(wqh, TASK_NORMAL, 1, true)
#define wake_up_all_locked(wqh)					\
	linux_wake_up(wqh, TASK_NORMAL, 0, true)
#define wake_up_interruptible(wqh)				\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
#define wake_up_interruptible_all(wqh)				\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)

int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int,
	unsigned int, spinlock_t *);
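
/*
 * Illustrative sketch (not part of the KPI), assuming a hypothetical
 * driver softc: the waker updates the condition first and only then
 * calls wake_up(), mirroring the usual Linux producer/consumer
 * waitqueue pattern.
 *
 *	struct example_softc {
 *		wait_queue_head_t wqh;	// init_waitqueue_head() at attach
 *		bool ready;
 *	};
 *
 *	static void
 *	example_complete(struct example_softc *sc)
 *	{
 *		sc->ready = true;
 *		wake_up(&sc->wqh);
 *	}
 */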

/*
 * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
 * cond is true after timeout, remaining jiffies (> 0) if cond is true before
 * timeout.
 */
#define __wait_event_common(wqh, cond, timeout, state, lock) ({	\
	DEFINE_WAIT(__wq);						\
	const int __timeout = ((int)(timeout)) < 1 ? 1 : (timeout);	\
	int __start = ticks;						\
	int __ret = 0;							\
									\
	for (;;) {							\
		linux_prepare_to_wait(&(wqh), &__wq, state);		\
		if (cond)						\
			break;						\
		__ret = linux_wait_event_common(&(wqh), &__wq,		\
		    __timeout, state, lock);				\
		if (__ret != 0)						\
			break;						\
	}								\
	linux_finish_wait(&(wqh), &__wq);				\
	if (__timeout != MAX_SCHEDULE_TIMEOUT) {			\
		if (__ret == -EWOULDBLOCK)				\
			__ret = !!(cond);				\
		else if (__ret != -ERESTARTSYS) {			\
			__ret = __timeout + __start - ticks;		\
			/* range check return value */			\
			if (__ret < 1)					\
				__ret = 1;				\
			else if (__ret > __timeout)			\
				__ret = __timeout;			\
		}							\
	}								\
	__ret;								\
})

#define wait_event(wqh, cond) do {					\
	(void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, NULL);				\
} while (0)

#define wait_event_timeout(wqh, cond, timeout) ({			\
	__wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE,	\
	    NULL);							\
})

#define wait_event_killable(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define wait_event_interruptible(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define wait_event_interruptible_timeout(wqh, cond, timeout) ({	\
	__wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE,	\
	    NULL);							\
})
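
/*
 * Illustrative sketch (not part of the KPI), reusing the hypothetical
 * example_softc above (a wait_queue_head_t plus a bool "ready" flag):
 * how a sleeper typically interprets the return values documented for
 * __wait_event_common().
 *
 *	static int
 *	example_wait_ready(struct example_softc *sc)
 *	{
 *		int ret;
 *
 *		ret = wait_event_interruptible_timeout(sc->wqh,
 *		    sc->ready, HZ);
 *		if (ret == -ERESTARTSYS)
 *			return -EINTR;		// interrupted by a signal
 *		if (ret == 0)
 *			return -ETIMEDOUT;	// timed out, condition still false
 *		return 0;			// ret > 0: condition became true
 *	}
 */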

/*
 * Wait queue is already locked.
 */
#define wait_event_interruptible_locked(wqh, cond) ({			\
	int __ret;							\
									\
	spin_unlock(&(wqh).lock);					\
	__ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_INTERRUPTIBLE, NULL);					\
	spin_lock(&(wqh).lock);						\
	__ret;								\
})

/*
 * The passed spinlock is held when testing the condition.
 */
#define wait_event_interruptible_lock_irq(wqh, cond, lock) ({		\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, &(lock));				\
})

/*
 * The passed spinlock is held when testing the condition.
 */
#define wait_event_lock_irq(wqh, cond, lock) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_UNINTERRUPTIBLE, &(lock));				\
})

static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add(&wq->task_list, &wqh->task_list);
}

static inline void
add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__add_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

static inline void
__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add_tail(&wq->task_list, &wqh->task_list);
}

static inline void
__add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wq)
{
	list_add_tail(&wq->entry, &wqh->head);
}

static inline void
__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_del(&wq->task_list);
}

static inline void
remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__remove_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

bool linux_waitqueue_active(wait_queue_head_t *);

#define waitqueue_active(wqh) linux_waitqueue_active(wqh)

void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);

#define prepare_to_wait(wqh, wq, state) linux_prepare_to_wait(wqh, wq, state)
#define finish_wait(wqh, wq) linux_finish_wait(wqh, wq)

void linux_wake_up_bit(void *, int);
int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int);
void linux_wake_up_atomic_t(atomic_t *);
int linux_wait_on_atomic_t(atomic_t *, unsigned int);

#define wake_up_bit(word, bit) linux_wake_up_bit(word, bit)
#define wait_on_bit(word, bit, state)					\
	linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT)
#define wait_on_bit_timeout(word, bit, state, timeout)			\
	linux_wait_on_bit_timeout(word, bit, state, timeout)
#define wake_up_atomic_t(a) linux_wake_up_atomic_t(a)

/*
 * All existing callers have a cb that just schedule()s. To avoid adding
 * complexity, just emulate that internally. The prototype is different so
 * that callers must be manually modified; a cb that does something other
 * than call schedule() will require special treatment.
 */
#define wait_on_atomic_t(a, state) linux_wait_on_atomic_t(a, state)

struct task_struct;
bool linux_wake_up_state(struct task_struct *, unsigned int);

#define wake_up_process(task) linux_wake_up_state(task, TASK_NORMAL)
#define wake_up_state(task, state) linux_wake_up_state(task, state)

#endif /* _LINUX_WAIT_H_ */