From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <[email protected]>
Date: Mon, 17 Sep 2018 16:29:09 +0200
Subject: [PATCH] x86/mm/cpa: Rework static_protections()

static_protections() is pretty unreadable. Split it up into separate checks
for each protection area.
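
As a rough sketch (simplified from the hunk below, not a verbatim copy),
the reworked function then just composes the per-area helpers:

	static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
						  unsigned long pfn)
	{
		pgprotval_t forbidden;

		/* Operate on the virtual address */
		forbidden = protect_kernel_text(address);
		forbidden |= protect_kernel_text_ro(address);

		/* Check the PFN directly */
		forbidden |= protect_pci_bios(pfn);
		forbidden |= protect_rodata(pfn);

		return __pgprot(pgprot_val(prot) & ~forbidden);
	}

Each helper returns the protection bits which are forbidden for its area
(e.g. _PAGE_NX for kernel text, _PAGE_RW for .rodata), or 0 when the
address/pfn is outside the area it guards, so the checks compose with a
plain bitwise OR.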

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Dave Hansen <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Bin Yang <[email protected]>
Cc: Mark Gross <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Cc: Zhang Ning <[email protected]>
Signed-off-by: Lili Li <[email protected]>
---
 arch/x86/mm/pageattr.c | 162 +++++++++++++++++++++++++----------------
 1 file changed, 98 insertions(+), 64 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index c9f98fdf1310..44964ceaea96 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -286,84 +286,118 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
 	}
 }
 
+#ifdef CONFIG_PCI_BIOS
 /*
- * Certain areas of memory on x86 require very specific protection flags,
- * for example the BIOS area or kernel text. Callers don't always get this
- * right (again, ioremap() on BIOS memory is not uncommon) so this function
- * checks and fixes these known static required protection bits.
+ * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
+ * based config access (CONFIG_PCI_GOBIOS) support.
  */
-static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
-					  unsigned long pfn)
-{
-	pgprot_t forbidden = __pgprot(0);
+#define BIOS_PFN	PFN_DOWN(BIOS_BEGIN)
+#define BIOS_PFN_END	PFN_DOWN(BIOS_END)
 
-	/*
-	 * The BIOS area between 640k and 1Mb needs to be executable for
-	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
-	 */
-#ifdef CONFIG_PCI_BIOS
-	if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
-		pgprot_val(forbidden) |= _PAGE_NX;
+static pgprotval_t protect_pci_bios(unsigned long pfn)
+{
+	if (pcibios_enabled && within(pfn, BIOS_PFN, BIOS_PFN_END))
+		return _PAGE_NX;
+	return 0;
+}
+#else
+static pgprotval_t protect_pci_bios(unsigned long pfn)
+{
+	return 0;
+}
 #endif
 
-	/*
-	 * The kernel text needs to be executable for obvious reasons
-	 * Does not cover __inittext since that is gone later on. On
-	 * 64bit we do not enforce !NX on the low mapping
-	 */
-	if (within(address, (unsigned long)_text, (unsigned long)_etext))
-		pgprot_val(forbidden) |= _PAGE_NX;
+/*
+ * The .rodata section needs to be read-only. Using the pfn catches all
+ * aliases. This also includes __ro_after_init, so do not enforce until
+ * kernel_set_to_readonly is true.
+ */
+static pgprotval_t protect_rodata(unsigned long pfn)
+{
+	unsigned long start_pfn = __pa_symbol(__start_rodata) >> PAGE_SHIFT;
+	unsigned long end_pfn = __pa_symbol(__end_rodata) >> PAGE_SHIFT;
 
-	/*
-	 * The .rodata section needs to be read-only. Using the pfn
-	 * catches all aliases. This also includes __ro_after_init,
-	 * so do not enforce until kernel_set_to_readonly is true.
-	 */
-	if (kernel_set_to_readonly &&
-	    within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
-		   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
-		pgprot_val(forbidden) |= _PAGE_RW;
+	if (kernel_set_to_readonly && within(pfn, start_pfn, end_pfn))
+		return _PAGE_RW;
+	return 0;
+}
+
+/*
+ * Protect kernel text against becoming non executable by forbidding
+ * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext)
+ * out of which the kernel actually executes. Do not protect the low
+ * mapping.
+ *
+ * This does not cover __inittext since that is gone after boot.
+ */
+static pgprotval_t protect_kernel_text(unsigned long address)
+{
+	if (within(address, (unsigned long)_text, (unsigned long)_etext))
+		return _PAGE_NX;
+	return 0;
+}
 
 #if defined(CONFIG_X86_64)
+/*
+ * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
+ * kernel text mappings for the large page aligned text, rodata sections
+ * will be always read-only. For the kernel identity mappings covering the
+ * holes caused by this alignment can be anything that user asks.
+ *
+ * This will preserve the large page mappings for kernel text/data at no
+ * extra cost.
+ */
+static pgprotval_t protect_kernel_text_ro(unsigned long address)
+{
+	unsigned long end = (unsigned long)__end_rodata_hpage_align;
+	unsigned long start = (unsigned long)_text;
+	unsigned int level;
+
+	if (!kernel_set_to_readonly || !within(address, start, end))
+		return 0;
 	/*
-	 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
-	 * kernel text mappings for the large page aligned text, rodata sections
-	 * will be always read-only. For the kernel identity mappings covering
-	 * the holes caused by this alignment can be anything that user asks.
+	 * Don't enforce the !RW mapping for the kernel text mapping, if
+	 * the current mapping is already using small page mapping. No
+	 * need to work hard to preserve large page mappings in this case.
 	 *
-	 * This will preserve the large page mappings for kernel text/data
-	 * at no extra cost.
+	 * This also fixes the Linux Xen paravirt guest boot failure caused
+	 * by unexpected read-only mappings for kernel identity
+	 * mappings. In this paravirt guest case, the kernel text mapping
+	 * and the kernel identity mapping share the same page-table pages,
+	 * so the protections for kernel text and identity mappings have to
+	 * be the same.
 	 */
-	if (kernel_set_to_readonly &&
-	    within(address, (unsigned long)_text,
-		   (unsigned long)__end_rodata_hpage_align)) {
-		unsigned int level;
-
-		/*
-		 * Don't enforce the !RW mapping for the kernel text mapping,
-		 * if the current mapping is already using small page mapping.
-		 * No need to work hard to preserve large page mappings in this
-		 * case.
-		 *
-		 * This also fixes the Linux Xen paravirt guest boot failure
-		 * (because of unexpected read-only mappings for kernel identity
-		 * mappings). In this paravirt guest case, the kernel text
-		 * mapping and the kernel identity mapping share the same
-		 * page-table pages. Thus we can't really use different
-		 * protections for the kernel text and identity mappings. Also,
-		 * these shared mappings are made of small page mappings.
-		 * Thus this don't enforce !RW mapping for small page kernel
-		 * text mapping logic will help Linux Xen parvirt guest boot
-		 * as well.
-		 */
-		if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
-			pgprot_val(forbidden) |= _PAGE_RW;
-	}
+	if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
+		return _PAGE_RW;
+	return 0;
+}
+#else
+static pgprotval_t protect_kernel_text_ro(unsigned long address)
+{
+	return 0;
+}
 #endif
 
-	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+/*
+ * Certain areas of memory on x86 require very specific protection flags,
+ * for example the BIOS area or kernel text. Callers don't always get this
+ * right (again, ioremap() on BIOS memory is not uncommon) so this function
+ * checks and fixes these known static required protection bits.
+ */
+static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+					  unsigned long pfn)
+{
+	pgprotval_t forbidden;
+
+	/* Operate on the virtual address */
+	forbidden = protect_kernel_text(address);
+	forbidden |= protect_kernel_text_ro(address);
 
-	return prot;
+	/* Check the PFN directly */
+	forbidden |= protect_pci_bios(pfn);
+	forbidden |= protect_rodata(pfn);
+
+	return __pgprot(pgprot_val(prot) & ~forbidden);
 }
 
 /*
--
https://clearlinux.org
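
For reference, the within() range check used by all of the helpers above is
a pre-existing helper in arch/x86/mm/pageattr.c and is not part of this
diff; it is roughly:

	static inline int
	within(unsigned long addr, unsigned long start, unsigned long end)
	{
		/* start is inclusive, end is exclusive */
		return addr >= start && addr < end;
	}

The half-open semantics are why the checks can pass page-aligned end
bounds such as BIOS_PFN_END directly.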