Contents of /backports/8/kernel/current/SOURCES/arm64-mte-avoid-the-racy-walk-of-the-vma-list-during-core-dump.patch

Revision 1932881
Sat Jan 14 11:15:03 2023 UTC by tmb
File size: 4456 bytes
- update to 6.1.6
  * drop merged patches
- add current -stable queue
- Revert "mm/compaction: fix set skip in fast_find_migrateblock"
- wifi: brcmfmac: fix regression for Broadcom PCIe wifi devices


From 4f4c549feb4ecca95ae9abb88887b941d196f83a Mon Sep 17 00:00:00 2001
From: Catalin Marinas <catalin.marinas@arm.com>
Date: Thu, 22 Dec 2022 18:12:51 +0000
Subject: arm64: mte: Avoid the racy walk of the vma list during core dump

From: Catalin Marinas <catalin.marinas@arm.com>

commit 4f4c549feb4ecca95ae9abb88887b941d196f83a upstream.

The MTE coredump code in arch/arm64/kernel/elfcore.c iterates over the
vma list without the mmap_lock held. This can race with another process
or userfaultfd concurrently modifying the vma list. Change the
for_each_mte_vma macro and its callers to instead use the vma snapshot
taken by dump_vma_snapshot() and stored in the cprm object.

Fixes: 6dd8b1a0b6cb ("arm64: mte: Dump the MTE tags in the core file")
Cc: <stable@vger.kernel.org> # 5.18.x
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Seth Jenkins <sethjenkins@google.com>
Suggested-by: Seth Jenkins <sethjenkins@google.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20221222181251.1345752-4-catalin.marinas@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/arm64/kernel/elfcore.c | 56 ++++++++++++++++++++------------------------
 1 file changed, 26 insertions(+), 30 deletions(-)

--- a/arch/arm64/kernel/elfcore.c
+++ b/arch/arm64/kernel/elfcore.c
@@ -8,28 +8,27 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-#define for_each_mte_vma(vmi, vma)					\
+#define for_each_mte_vma(cprm, i, m)					\
 	if (system_supports_mte())					\
-		for_each_vma(vmi, vma)					\
-			if (vma->vm_flags & VM_MTE)
+		for (i = 0, m = cprm->vma_meta;				\
+		     i < cprm->vma_count;				\
+		     i++, m = cprm->vma_meta + i)			\
+			if (m->flags & VM_MTE)
 
-static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
+static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
 {
-	if (vma->vm_flags & VM_DONTDUMP)
-		return 0;
-
-	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
+	return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
 }
 
 /* Derived from dump_user_range(); start/end must be page-aligned */
 static int mte_dump_tag_range(struct coredump_params *cprm,
-			      unsigned long start, unsigned long end)
+			      unsigned long start, unsigned long len)
 {
 	int ret = 1;
 	unsigned long addr;
 	void *tags = NULL;
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
+	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
 		struct page *page = get_dump_page(addr);
 
 		/*
@@ -78,11 +77,11 @@ static int mte_dump_tag_range(struct cor
 
 Elf_Half elf_core_extra_phdrs(void)
 {
-	struct vm_area_struct *vma;
+	int i;
+	struct core_vma_metadata *m;
 	int vma_count = 0;
-	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(vmi, vma)
+	for_each_mte_vma(cprm, i, m)
 		vma_count++;
 
 	return vma_count;
@@ -90,18 +89,18 @@ Elf_Half elf_core_extra_phdrs(void)
 
 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, current->mm, 0);
+	int i;
+	struct core_vma_metadata *m;
 
-	for_each_mte_vma(vmi, vma) {
+	for_each_mte_vma(cprm, i, m) {
 		struct elf_phdr phdr;
 
 		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
 		phdr.p_offset = offset;
-		phdr.p_vaddr = vma->vm_start;
+		phdr.p_vaddr = m->start;
 		phdr.p_paddr = 0;
-		phdr.p_filesz = mte_vma_tag_dump_size(vma);
-		phdr.p_memsz = vma->vm_end - vma->vm_start;
+		phdr.p_filesz = mte_vma_tag_dump_size(m);
+		phdr.p_memsz = m->end - m->start;
 		offset += phdr.p_filesz;
 		phdr.p_flags = 0;
 		phdr.p_align = 0;
@@ -115,26 +114,23 @@ int elf_core_write_extra_phdrs(struct co
 
 size_t elf_core_extra_data_size(void)
 {
-	struct vm_area_struct *vma;
+	int i;
+	struct core_vma_metadata *m;
 	size_t data_size = 0;
-	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(vmi, vma)
-		data_size += mte_vma_tag_dump_size(vma);
+	for_each_mte_vma(cprm, i, m)
+		data_size += mte_vma_tag_dump_size(m);
 
 	return data_size;
 }
 
 int elf_core_write_extra_data(struct coredump_params *cprm)
 {
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, current->mm, 0);
-
-	for_each_mte_vma(vmi, vma) {
-		if (vma->vm_flags & VM_DONTDUMP)
-			continue;
+	int i;
+	struct core_vma_metadata *m;
 
-		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
+	for_each_mte_vma(cprm, i, m) {
+		if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
 			return 0;
 	}
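
Note: below is a minimal user-space sketch of the snapshot-based iteration
this patch introduces, for illustration only. The struct layouts, the VM_MTE
value and the system_supports_mte() stub are simplified stand-ins, not the
kernel's definitions (the real ones live in include/linux/coredump.h and the
arm64 arch code).

/*
 * Sketch of the patched for_each_mte_vma: walk a fixed-size snapshot
 * of vma metadata instead of the live vma list.
 */
#include <stdio.h>

#define VM_MTE 0x1UL			/* stand-in flag bit, not the real value */

struct core_vma_metadata {		/* simplified stand-in */
	unsigned long start, end, flags, dump_size;
};

struct coredump_params {		/* simplified stand-in */
	int vma_count;
	struct core_vma_metadata *vma_meta;
};

static int system_supports_mte(void) { return 1; }	/* stub: assume MTE */

/* Same shape as the patched macro: iterate the snapshot, filter on VM_MTE. */
#define for_each_mte_vma(cprm, i, m)				\
	if (system_supports_mte())				\
		for (i = 0, m = (cprm)->vma_meta;		\
		     i < (cprm)->vma_count;			\
		     i++, m = (cprm)->vma_meta + i)		\
			if (m->flags & VM_MTE)

int main(void)
{
	struct core_vma_metadata meta[] = {
		{ 0x1000, 0x3000, VM_MTE, 0x2000 },
		{ 0x4000, 0x5000, 0,      0x1000 },	/* no VM_MTE: skipped */
		{ 0x6000, 0x7000, VM_MTE, 0x1000 },
	};
	struct coredump_params cprm = { 3, meta };
	struct core_vma_metadata *m;
	int i, count = 0;

	for_each_mte_vma(&cprm, i, m)
		count++;

	printf("MTE vmas in snapshot: %d\n", count);	/* prints 2 */
	return 0;
}

Because every caller now walks the cprm->vma_meta snapshot rather than the
live vma list, the elf_core_extra_* helpers need no mmap_lock of their own:
per the commit message, the snapshot is taken by dump_vma_snapshot() and
stored in the cprm object before the dump is written out.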
143
