/*********************************************************
 * Copyright (C) 2004 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 *
 *********************************************************/
#ifndef __COMPAT_COMPLETION_H__
#   define __COMPAT_COMPLETION_H__

/*
 * The kernel's completion objects were made available for module use in 2.4.9.
 *
 * Between 2.4.0 and 2.4.9, we implement completions on our own using
 * waitqueues and counters. This was done so that we could safely support
 * functions like complete_all(), which cannot be implemented using semaphores.
 *
 * Prior to that, the waitqueue API is substantially different, and since none
 * of our modules that are built against older kernels need complete_all(),
 * we fall back on a simple semaphore-based implementation.
 */
/*
 * Native completions: the kernel exports everything we need.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 9)

#include <linux/completion.h>
#define compat_completion struct completion
#define compat_init_completion(comp) init_completion(comp)
#define COMPAT_DECLARE_COMPLETION DECLARE_COMPLETION
#define compat_wait_for_completion(comp) wait_for_completion(comp)
#define compat_complete(comp) complete(comp)

/* complete_all() was exported in 2.6.6. */
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6)
#  include "compat_wait.h"
#  include "compat_list.h"
#  include "compat_spinlock.h"
#  include "compat_sched.h"
/*
 * Open-coded complete_all(): bump the counter by a huge amount so that every
 * current and future waiter observes done != 0, then wake each task queued on
 * the completion's waitqueue by hand, all under the waitqueue lock.
 * NOTE(review): UINT_MAX/2 matches the kernel's own complete_all() idiom —
 * large enough that concurrent waiters cannot drain it to zero.
 */
#  define compat_complete_all(x)                                              \
   do {                                                                       \
      struct list_head *currLinks;                                            \
      spin_lock(&(x)->wait.lock);                                             \
      (x)->done += UINT_MAX/2;                                                \
                                                                              \
      list_for_each(currLinks, &(x)->wait.task_list) {                        \
         wait_queue_t *currQueue = list_entry(currLinks, wait_queue_t, task_list); \
         wake_up_process(currQueue->task);                                    \
      }                                                                       \
      spin_unlock(&(x)->wait.lock);                                           \
   } while (0)
# else
#  define compat_complete_all complete_all
# endif
/*
 * Completions via waitqueues.
 */
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)

/*
 * Kernel completions in 2.4.9 and beyond use a counter and a waitqueue, and
 * our implementation is quite similar. Because __wake_up_common() is not
 * exported, our implementations of compat_complete() and compat_complete_all()
 * are somewhat racy: the counter is incremented outside of the waitqueue's
 * lock.
 *
 * As a result, our completion cannot guarantee in-order wake ups. For example,
 * suppose thread A is entering compat_complete(), thread B is sleeping inside
 * compat_wait_for_completion(), and thread C is just now entering
 * compat_wait_for_completion(). If Thread A is scheduled first and increments
 * the counter, then gets swapped out, thread C may get scheduled and will
 * quickly go through compat_wait_for_completion() (since done != 0) while
 * thread B continues to sleep, even though thread B should have been the one
 * to wake up.
 */

#include <asm/current.h>
#include "compat_sched.h"
#include "compat_list.h"
#include <linux/smp_lock.h> // for lock_kernel()/unlock_kernel()
#include "compat_wait.h"
96
typedef struct compat_completion {
101
#define compat_init_completion(comp) do { \
103
init_waitqueue_head(&(comp)->wq); \
105
#define COMPAT_DECLARE_COMPLETION(comp) \
106
compat_completion comp = { \
108
.wq = __WAIT_QUEUE_HEAD_INITIALIZER((comp).wq), \
112
* Locking and unlocking the kernel lock here ensures that the thread
113
* is no longer running in module code: compat_complete_and_exit
114
* performs the sequence { lock_kernel(); up(comp); compat_exit(); }, with
115
* the final unlock_kernel performed implicitly by the resident kernel
118
#define compat_wait_for_completion(comp) do { \
119
spin_lock_irq(&(comp)->wq.lock); \
120
if (!(comp)->done) { \
121
DECLARE_WAITQUEUE(wait, current); \
122
wait.flags |= WQ_FLAG_EXCLUSIVE; \
123
__add_wait_queue_tail(&(comp)->wq, &wait); \
125
__set_current_state(TASK_UNINTERRUPTIBLE); \
126
spin_unlock_irq(&(comp)->wq.lock); \
128
spin_lock_irq(&(comp)->wq.lock); \
129
} while (!(comp)->done); \
130
__remove_wait_queue(&(comp)->wq, &wait); \
133
spin_unlock_irq(&(comp)->wq.lock); \
138
/* XXX: I don't think I need to touch the BKL. */
139
#define compat_complete(comp) do { \
140
unsigned long flags; \
141
spin_lock_irqsave(&(comp)->wq.lock, flags); \
143
spin_unlock_irqrestore(&(comp)->wq.lock, flags); \
144
wake_up(&(comp)->wq); \
147
#define compat_complete_all(comp) do { \
148
unsigned long flags; \
149
spin_lock_irqsave(&(comp)->wq.lock, flags); \
150
(comp)->done += UINT_MAX / 2; \
151
spin_unlock_irqrestore(&(comp)->wq.lock, flags); \
152
wake_up_all(&(comp)->wq); \
156
* Completions via semaphores.
160
#include "compat_semaphore.h"
161
#define compat_completion struct semaphore
162
#define compat_init_completion(comp) init_MUTEX_LOCKED(comp)
163
#define COMPAT_DECLARE_COMPLETION(comp) DECLARE_MUTEX_LOCKED(comp)
165
#define compat_wait_for_completion(comp) do { \
171
#define compat_complete(comp) up(comp)
175
#endif /* __COMPAT_COMPLETION_H__ */