From 61cc4534b6550997c97a03759ab46b29d44c0017 Mon Sep 17 00:00:00 2001
From: Waiman Long <longman@redhat.com>
Date: Sun, 2 Jan 2022 21:35:58 -0500
Subject: locking/lockdep: Avoid potential access of invalid memory in
 lock_class

It was found that reading /proc/lockdep after a lockdep splat may
cause an access to freed memory if lockdep_unregister_key() is called
after the splat but before /proc/lockdep is accessed [1]. This happens
because the graph_lock() call in lockdep_unregister_key() fails once
the splat process has cleared debug_locks.
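
For context, graph_lock() backs off once debug_locks has been cleared; a
minimal sketch of its shape, paraphrased from kernel/locking/lockdep.c
(not verbatim, and details vary across kernel versions):

    static int graph_lock(void)
    {
    	lockdep_lock();
    	/*
    	 * If another CPU has already detected a bug and cleared
    	 * debug_locks, back off so the graph is not modified while
    	 * the splat is being printed.
    	 */
    	if (!debug_locks) {
    		lockdep_unlock();
    		return 0;
    	}
    	return 1;
    }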

After lockdep_unregister_key() is called, the lock name may be freed
while the corresponding lock_class structure still has a reference to
it. That dangling pointer will then be dereferenced when a user reads
/proc/lockdep, and a use-after-free (UAF) error will be reported if
KASAN is enabled.

To fix this problem, lockdep_unregister_key() is now modified to always
search for a matching key irrespective of the debug_locks state and to
zap the corresponding lock class if a matching one is found.

[1] https://lore.kernel.org/lkml/77f05c15-81b6-bddd-9650-80d5f23fe330@i-love.sakura.ne.jp/
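
The failure mode, as an assumed timeline (a sketch reconstructed from the
description above; not verbatim kernel code):

    /*
     * CPU 0 (splat)                 CPU 1 (e.g. module unload)
     * -------------                 --------------------------
     * debug_locks_off();
     *                               lockdep_unregister_key(&key);
     *                                 graph_lock() fails (!debug_locks),
     *                                 so the lock_class entries that
     *                                 reference this key are never zapped
     *                               kfree() of the object holding the
     *                               key and the lock name
     *
     * A later read of /proc/lockdep walks the stale lock_class entries
     * and dereferences the freed name pointer -> KASAN reports the UAF.
     */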

Fixes: 8b39adbee805 ("locking/lockdep: Make lockdep_unregister_key() honor 'debug_locks' again")
Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lkml.kernel.org/r/20220103023558.1377055-1-longman@redhat.com
---
 kernel/locking/lockdep.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 89b3df51fd988..2e6892ec3756b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -6287,7 +6287,13 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		lockdep_reset_lock_reg(lock);
 }
 
-/* Unregister a dynamically allocated key. */
+/*
+ * Unregister a dynamically allocated key.
+ *
+ * Unlike lockdep_register_key(), a search is always done to find a matching
+ * key irrespective of debug_locks to avoid potential invalid access to freed
+ * memory in lock_class entry.
+ */
 void lockdep_unregister_key(struct lock_class_key *key)
 {
 	struct hlist_head *hash_head = keyhashentry(key);
@@ -6302,10 +6308,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
+	lockdep_lock();
 
-	pf = get_pending_free();
 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
 		if (k == key) {
 			hlist_del_rcu(&k->hash_entry);
@@ -6313,11 +6317,13 @@ void lockdep_unregister_key(struct lock_class_key *key)
 			break;
 		}
 	}
-	WARN_ON_ONCE(!found);
-	__lockdep_free_key_range(pf, key, 1);
-	call_rcu_zapped(pf);
-	graph_unlock();
-out_irq:
+	WARN_ON_ONCE(!found && debug_locks);
+	if (found) {
+		pf = get_pending_free();
+		__lockdep_free_key_range(pf, key, 1);
+		call_rcu_zapped(pf);
+	}
+	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
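
For reference, this is the API pair a user of dynamically allocated lock
keys is expected to call; a minimal sketch, where my_dev and its helpers
are hypothetical:

    #include <linux/lockdep.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_dev {			/* hypothetical example structure */
    	spinlock_t lock;
    	struct lock_class_key key;	/* dynamically allocated lock key */
    };

    static struct my_dev *my_dev_create(void)
    {
    	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

    	if (!dev)
    		return NULL;
    	lockdep_register_key(&dev->key);
    	spin_lock_init(&dev->lock);
    	lockdep_set_class(&dev->lock, &dev->key);
    	return dev;
    }

    static void my_dev_destroy(struct my_dev *dev)
    {
    	/*
    	 * Unregister the key before freeing the memory that contains
    	 * it; with this fix, the matching lock_class entries are
    	 * zapped even after a splat has cleared debug_locks.
    	 */
    	lockdep_unregister_key(&dev->key);
    	kfree(dev);
    }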
--