
Contents of /updates/6/kernel/current/PATCHES/patches/stable-x86-kaslr-fix-the-vaddr_end-mess.patch



Revision 1190848
Sat Jan 6 13:15:18 2018 UTC by tmb
File size: 5818 byte(s)
- update to 4.14.12
- add current -stable queue
- add BFQ performance updates from upstream
- enable PAGE_TABLE_ISOLATION on all x86_64 kernels
 (can be disabled at boot time with pti=off on the kernel command line)
- iwlwifi: pcie: fix DMA memory mapping / unmapping
- update conflicts on microcode
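
For reference, disabling PTI at boot on a GRUB2-based setup (as Mageia uses) would look roughly like the sketch below; the file path and the regeneration command are common defaults assumed here for illustration, not something this changelog specifies:

  # /etc/default/grub: append pti=off to the existing kernel command line
  GRUB_CMDLINE_LINUX="... pti=off"

  # then regenerate the bootloader configuration, e.g.:
  grub2-mkconfig -o /boot/grub2/grub.cfg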


From 1dddd25125112ba49706518ac9077a1026a18f37 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 4 Jan 2018 12:32:03 +0100
Subject: x86/kaslr: Fix the vaddr_end mess

From: Thomas Gleixner <tglx@linutronix.de>

commit 1dddd25125112ba49706518ac9077a1026a18f37 upstream.

vaddr_end for KASLR is only documented in the KASLR code itself and is
adjusted depending on config options. So it's not surprising that a change
of the memory layout causes KASLR to have the wrong vaddr_end. This can map
arbitrary stuff into other areas causing hard to understand problems.

Remove the whole ifdef magic and define the start of the cpu_entry_area to
be the end of the KASLR vaddr range.

Add documentation to that effect.

Fixes: 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap")
Reported-by: Benjamin Gilbert <benjamin.gilbert@coreos.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Benjamin Gilbert <benjamin.gilbert@coreos.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Garnier <thgarnie@google.com>,
Cc: Alexander Kuleshov <kuleshovmail@gmail.com>
Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801041320360.1771@nanos
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 Documentation/x86/x86_64/mm.txt         |  6 ++++++
 arch/x86/include/asm/pgtable_64_types.h |  8 +++++++-
 arch/x86/mm/kaslr.c                     | 32 +++++++++-----------------------
 3 files changed, 22 insertions(+), 24 deletions(-)

--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40
 ... unused hole ...
 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
+ vaddr_end for KASLR
 fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
 fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
@@ -37,6 +38,7 @@ ffd4000000000000 - ffd5ffffffffffff (=49
 ... unused hole ...
 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
+ vaddr_end for KASLR
 fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
 ... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
@@ -71,3 +73,7 @@ during EFI runtime calls.
 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
+
+Be very careful vs. KASLR when changing anything here. The KASLR address
+range must not overlap with anything except the KASAN shadow area, which is
+correct as KASAN disables KASLR.
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -75,7 +75,13 @@ typedef struct { pteval_t pte; } pte_t;
 #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE - 1))

-/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
+/*
+ * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
+ *
+ * Be very careful vs. KASLR when changing anything here. The KASLR address
+ * range must not overlap with anything except the KASAN shadow area, which
+ * is correct as KASAN disables KASLR.
+ */
 #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)

 #ifdef CONFIG_X86_5LEVEL
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -34,25 +34,14 @@
 #define TB_SHIFT 40

 /*
- * Virtual address start and end range for randomization. The end changes base
- * on configuration to have the highest amount of space for randomization.
- * It increases the possible random position for each randomized region.
+ * Virtual address start and end range for randomization.
  *
- * You need to add an if/def entry if you introduce a new memory region
- * compatible with KASLR. Your entry must be in logical order with memory
- * layout. For example, ESPFIX is before EFI because its virtual address is
- * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
- * ensure that this order is correct and won't be changed.
+ * The end address could depend on more configuration options to make the
+ * highest amount of space for randomization available, but that's too hard
+ * to keep straight and caused issues already.
  */
 static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
-
-#if defined(CONFIG_X86_ESPFIX64)
-static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
-#elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_END;
-#else
-static const unsigned long vaddr_end = __START_KERNEL_map;
-#endif
+static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

 /* Default values */
 unsigned long page_offset_base = __PAGE_OFFSET_BASE;
@@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void)
 	unsigned long remain_entropy;

 	/*
-	 * All these BUILD_BUG_ON checks ensures the memory layout is
-	 * consistent with the vaddr_start/vaddr_end variables.
+	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
+	 * with the vaddr_start/vaddr_end variables. These checks are very
+	 * limited....
 	 */
 	BUILD_BUG_ON(vaddr_start >= vaddr_end);
-	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
-		     vaddr_end >= EFI_VA_END);
-	BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
-		      IS_ENABLED(CONFIG_EFI)) &&
-		     vaddr_end >= __START_KERNEL_map);
+	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
 	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

 	if (!kaslr_memory_enabled())
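
For context on the mechanism: BUILD_BUG_ON() is the kernel's compile-time assertion, which is why pinning vaddr_end to CPU_ENTRY_AREA_BASE turns any future layout overlap into a build failure rather than runtime corruption. Below is a minimal user-space sketch of the same trick; the macro body is the classic negative-array-size form (the in-kernel definition differs across versions), and the address constants are the 4.14-era 4-level values, assumed here for illustration:

  #include <stdio.h>

  /*
   * Simplified stand-in for the kernel's BUILD_BUG_ON(): when cond is
   * true the array size goes negative and compilation fails, so layout
   * mistakes surface at build time.
   */
  #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

  #define PAGE_OFFSET_BASE    0xffff880000000000UL /* 4-level __PAGE_OFFSET_BASE */
  #define CPU_ENTRY_AREA_BASE 0xfffffe0000000000UL /* per the mm.txt hunk above */
  #define START_KERNEL_MAP    0xffffffff80000000UL /* __START_KERNEL_map */

  int main(void)
  {
          /* Mirrors the checks the patch keeps: the KASLR range must be
           * non-empty and must end below the kernel text mapping. */
          BUILD_BUG_ON(PAGE_OFFSET_BASE >= CPU_ENTRY_AREA_BASE);
          BUILD_BUG_ON(CPU_ENTRY_AREA_BASE > START_KERNEL_MAP);

          printf("KASLR vaddr range: %#lx - %#lx\n",
                 PAGE_OFFSET_BASE, CPU_ENTRY_AREA_BASE);
          return 0;
  }

With the old ifdef chain, vaddr_end silently changed with CONFIG_X86_ESPFIX64/CONFIG_EFI; one named constant plus these asserts is what makes the layout self-checking.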
