--- /dev/null
+++ linux-2.6.32-2.6.32/debian/patches/bugfix/x86/x86-mm-Fix-pgd_lock-deadlock.patch
It's forbidden to take the page_table_lock with irqs disabled:
if there's contention, the IPIs (for tlb flushes) sent with the
page_table_lock held will never run, leading to a deadlock.

Nobody takes the pgd_lock from irq context, so the _irqsave can be
removed.
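
As an illustration (a sketch for this review, not part of the upstream
commit; flush_tlb_others() stands in for whichever TLB flush the lock
holder performs), the deadlock takes two CPUs:

	/*
	 * CPU 0                                CPU 1
	 * -----                                -----
	 * spin_lock(&pgd_lock);
	 *                                      spin_lock_irqsave(&pgd_lock, flags);
	 *                                      ... spins with irqs disabled ...
	 * flush_tlb_others(mask, mm, va);
	 *   -> sends a flush IPI and waits
	 *      for every target CPU to ack
	 *
	 * CPU 1 never services the IPI because its irqs are off, so CPU 0
	 * never completes the flush and never drops pgd_lock, while CPU 1
	 * keeps spinning on the lock: deadlock.
	 */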

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: <stable@kernel.org>
LKML-Reference: <201102162345.p1GNjMjm021738@imap1.linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Git-commit: a79e53d85683c6dd9f99c90511028adc2043031f
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -223,15 +223,14 @@ void vmalloc_sync_all(void)
 	     address >= TASK_SIZE && address < FIXADDR_TOP;
 	     address += PMD_SIZE) {
 
-		unsigned long flags;
 		struct page *page;
 
-		spin_lock_irqsave(&pgd_lock, flags);
+		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			if (!vmalloc_sync_one(page_address(page), address))
 				break;
 		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock(&pgd_lock);
 	}
 }
 
@@ -331,13 +330,12 @@ void vmalloc_sync_all(void)
 	     address += PGDIR_SIZE) {
 
 		const pgd_t *pgd_ref = pgd_offset_k(address);
-		unsigned long flags;
 		struct page *page;
 
 		if (pgd_none(*pgd_ref))
 			continue;
 
-		spin_lock_irqsave(&pgd_lock, flags);
+		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
@@ -346,7 +344,7 @@ void vmalloc_sync_all(void)
 			else
 				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock(&pgd_lock);
 	}
 }
 
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -56,12 +56,10 @@ static unsigned long direct_pages_count[
 
 void update_page_count(int level, unsigned long pages)
 {
-	unsigned long flags;
-
 	/* Protect against CPA */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	direct_pages_count[level] += pages;
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 static void split_page_count(int level)
@@ -354,7 +352,7 @@ static int
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
+	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
 	pgprot_t old_prot, new_prot;
 	int i, do_split = 1;
@@ -363,7 +361,7 @@ try_preserve_large_page(pte_t *kpte, uns
 	if (cpa->force_split)
 		return 1;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up already:
@@ -458,14 +456,14 @@ try_preserve_large_page(pte_t *kpte, uns
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return do_split;
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
-	unsigned long flags, pfn, pfninc = 1;
+	unsigned long pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
@@ -479,7 +477,7 @@ static int split_large_page(pte_t *kpte,
 	if (!base)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -551,7 +549,7 @@ out_unlock:
 	 */
 	if (base)
 		__free_page(base);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return 0;
 }
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -110,14 +110,12 @@ static void pgd_ctor(pgd_t *pgd)
 
 static void pgd_dtor(pgd_t *pgd)
 {
-	unsigned long flags; /* can be called from interrupt context */
-
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	pgd_list_del(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 /*
@@ -248,7 +246,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 	pmd_t *pmds[PREALLOCATED_PMDS];
-	unsigned long flags;
 
 	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
@@ -268,12 +265,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	 * respect to anything walking the pgd_list, so that they
 	 * never see a partially populated pgd.
 	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	pgd_ctor(pgd);
 	pgd_prepopulate_pmd(mm, pgd, pmds);
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return pgd;
 
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -988,10 +988,9 @@ static void xen_pgd_pin(struct mm_struct
  */
 void xen_mm_pin_all(void)
 {
-	unsigned long flags;
 	struct page *page;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (!PagePinned(page)) {
@@ -1000,7 +999,7 @@ void xen_mm_pin_all(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 /*
@@ -1101,10 +1100,9 @@ static void xen_pgd_unpin(struct mm_stru
  */
 void xen_mm_unpin_all(void)
 {
-	unsigned long flags;
 	struct page *page;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (PageSavePinned(page)) {
@@ -1114,7 +1112,7 @@ void xen_mm_unpin_all(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
--- linux-2.6.32-2.6.32/debian/patches/series/37-extra
+++ linux-2.6.32-2.6.32/debian/patches/series/37-extra
@@ -12,6 +12,7 @@
 + features/all/vserver/vs2.3.0.36.29.7.patch featureset=vserver
 + features/all/vserver/vserver-complete-fix-for-CVE-2010-4243.patch featureset=vserver
 
+- bugfix/x86/x86-mm-Fix-pgd_lock-deadlock.patch featureset=xen
 + features/all/xen/pvops.patch featureset=xen
 + features/all/xen/xen-netfront-make-smartpoll-optional-and-default-off.patch featureset=xen
 + features/all/xen/xen-grant-table-do-not-truncate-machine-address-on-g.patch featureset=xen
--- linux-2.6.32-2.6.32/debian/patches/series/38
+++ linux-2.6.32-2.6.32/debian/patches/series/38
@@ -5,3 +5,5 @@
 + bugfix/x86/92_23258_kvm-clock-reset.patch
 + bugfix/x86/92_5f4e3f882731c65b5d64a2ff743fda96eaebb9ee.patch
 + bugfix/x86/92_7c4c0f4fd5c3e82234c0ab61c7e7ffdb8f3af07b.patch
+
++ bugfix/x86/x86-mm-Fix-pgd_lock-deadlock.patch
