Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
From: Steven Rostedt <rostedt@goodmis.org>
Date: Fri, 02 Mar 2012 10:36:57 -0500

Tasks can block on hotplug.lock in pin_current_cpu(), but their state
might be != RUNNING. So the mutex wakeup will set the state
unconditionally to RUNNING. That might cause spurious unexpected
wakeups. We could provide a state preserving mutex_lock() function,
but this is semantically backwards. So instead we convert the
hotplug.lock() to a spinlock for RT, which has the state preserving
semantics already.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
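
[Editor's note] Reduced to a minimal sketch, the hazard described above
looks like this (illustrative only, not part of the patch; it uses the
pre-patch mutex form of hotplug.lock):

	/* Task has already set a special sleep state for some other wait: */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	mutex_lock(&cpu_hotplug.lock);	/* blocks; on wakeup the mutex
					 * code leaves us TASK_RUNNING */
	mutex_unlock(&cpu_hotplug.lock);
	schedule();			/* may return immediately */
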
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fa40834..c25b5ff 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -46,7 +46,12 @@ static int cpu_hotplug_disabled;
 
 static struct {
 	struct task_struct *active_writer;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/* Makes the lock keep the task's state */
+	spinlock_t lock;
+#else
 	struct mutex lock; /* Synchronizes accesses to refcount, */
+#endif
 	/*
 	 * Also blocks the new readers during
 	 * an ongoing cpu hotplug operation.
@@ -58,6 +63,14 @@ static struct {
 	.refcount = 0,
 };
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock() spin_lock(&cpu_hotplug.lock)
+# define hotplug_unlock() spin_unlock(&cpu_hotplug.lock)
+#else
+# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
+# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
+#endif
+
 struct hotplug_pcp {
 	struct task_struct *unplug;
 	int refcount;
@@ -87,8 +100,8 @@ retry:
 		return;
 	}
 	preempt_enable();
-	mutex_lock(&cpu_hotplug.lock);
-	mutex_unlock(&cpu_hotplug.lock);
+	hotplug_lock();
+	hotplug_unlock();
 	preempt_disable();
 	goto retry;
 }
@@ -161,9 +174,9 @@ void get_online_cpus(void)
 	might_sleep();
 	if (cpu_hotplug.active_writer == current)
 		return;
-	mutex_lock(&cpu_hotplug.lock);
+	hotplug_lock();
 	cpu_hotplug.refcount++;
-	mutex_unlock(&cpu_hotplug.lock);
+	hotplug_unlock();
 
 }
 EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -172,10 +185,10 @@ void put_online_cpus(void)
 {
 	if (cpu_hotplug.active_writer == current)
 		return;
-	mutex_lock(&cpu_hotplug.lock);
+	hotplug_lock();
 	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
 		wake_up_process(cpu_hotplug.active_writer);
-	mutex_unlock(&cpu_hotplug.lock);
+	hotplug_unlock();
 
 }
 EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -207,11 +220,11 @@ static void cpu_hotplug_begin(void)
 	cpu_hotplug.active_writer = current;
 
 	for (;;) {
-		mutex_lock(&cpu_hotplug.lock);
+		hotplug_lock();
 		if (likely(!cpu_hotplug.refcount))
 			break;
 		__set_current_state(TASK_UNINTERRUPTIBLE);
-		mutex_unlock(&cpu_hotplug.lock);
+		hotplug_unlock();
 		schedule();
 	}
 }
@@ -219,7 +232,7 @@ static void cpu_hotplug_begin(void)
 static void cpu_hotplug_done(void)
 {
 	cpu_hotplug.active_writer = NULL;
-	mutex_unlock(&cpu_hotplug.lock);
+	hotplug_unlock();
 }
 
 #else /* #if CONFIG_HOTPLUG_CPU */
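
[Editor's note] Why the RT spinlock is safe here: under
CONFIG_PREEMPT_RT_FULL a spinlock_t is itself a sleeping, rtmutex-based
lock, but its slow path saves the caller's task state before waiting and
restores it once the lock is taken. A task that enters pin_current_cpu()
with state != TASK_RUNNING therefore gets that state back instead of
being forced to TASK_RUNNING. Roughly (a conceptual sketch only;
try_acquire() and enqueue_self() are hypothetical stand-ins, not the
real rtmutex internals):

	static void rt_spin_lock_slowpath_sketch(spinlock_t *lock)
	{
		unsigned long saved_state = current->state;	/* caller's state */

		while (!try_acquire(lock)) {		/* hypothetical helper */
			enqueue_self(lock);		/* hypothetical helper */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
		}
		__set_current_state(saved_state);	/* caller's state survives */
	}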