From 9b25bd0368d562d1929059e8eb9de4102567b923 Mon Sep 17 00:00:00 2001
From: Paolo Valente <paolo.valente@linaro.org>
Date: Mon, 4 Dec 2017 11:42:05 +0100
Subject: [PATCH] block, bfq: remove batches of confusing ifdefs

Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
one reported in [1], plus a similar one in another function. This
commit removes both batches, in the way suggested in [1].

[1] https://www.spinics.net/lists/linux-block/msg20043.html

Fixes: a33801e8b473 ("block, bfq: move debug blkio stats behind CONFIG_DEBUG_BLK_CGROUP")
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Tested-by: Luca Miccio <lucmiccio@gmail.com>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/bfq-iosched.c | 127 +++++++++++++++++++++++++++++-----------------------
 1 file changed, 72 insertions(+), 55 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index e33c5c4c9856..7bd789da7a29 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -3743,35 +3743,16 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	return rq;
 }
 
-static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
-{
-	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
-	struct request *rq;
 #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	struct bfq_queue *in_serv_queue, *bfqq;
-	bool waiting_rq, idle_timer_disabled;
-#endif
-
-	spin_lock_irq(&bfqd->lock);
-
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	in_serv_queue = bfqd->in_service_queue;
-	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
-
-	rq = __bfq_dispatch_request(hctx);
-
-	idle_timer_disabled =
-		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
-
-#else
-	rq = __bfq_dispatch_request(hctx);
-#endif
-	spin_unlock_irq(&bfqd->lock);
+static void bfq_update_dispatch_stats(struct request_queue *q,
+				      struct request *rq,
+				      struct bfq_queue *in_serv_queue,
+				      bool idle_timer_disabled)
+{
+	struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
 
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	bfqq = rq ? RQ_BFQQ(rq) : NULL;
 	if (!idle_timer_disabled && !bfqq)
-		return rq;
+		return;
 
 	/*
 	 * rq and bfqq are guaranteed to exist until this function
@@ -3786,7 +3767,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	 * In addition, the following queue lock guarantees that
 	 * bfqq_group(bfqq) exists as well.
 	 */
-	spin_lock_irq(hctx->queue->queue_lock);
+	spin_lock_irq(q->queue_lock);
 	if (idle_timer_disabled)
 		/*
 		 * Since the idle timer has been disabled,
@@ -3805,9 +3786,37 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 		bfqg_stats_set_start_empty_time(bfqg);
 		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
 	}
-	spin_unlock_irq(hctx->queue->queue_lock);
+	spin_unlock_irq(q->queue_lock);
+}
+#else
+static inline void bfq_update_dispatch_stats(struct request_queue *q,
+					     struct request *rq,
+					     struct bfq_queue *in_serv_queue,
+					     bool idle_timer_disabled) {}
 #endif
 
+static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+{
+	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+	struct request *rq;
+	struct bfq_queue *in_serv_queue;
+	bool waiting_rq, idle_timer_disabled;
+
+	spin_lock_irq(&bfqd->lock);
+
+	in_serv_queue = bfqd->in_service_queue;
+	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+
+	rq = __bfq_dispatch_request(hctx);
+
+	idle_timer_disabled =
+		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+
+	spin_unlock_irq(&bfqd->lock);
+
+	bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
+				  idle_timer_disabled);
+
 	return rq;
 }
 
@@ -4335,16 +4344,46 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 	return idle_timer_disabled;
 }
 
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+static void bfq_update_insert_stats(struct request_queue *q,
+				    struct bfq_queue *bfqq,
+				    bool idle_timer_disabled,
+				    unsigned int cmd_flags)
+{
+	if (!bfqq)
+		return;
+
+	/*
+	 * bfqq still exists, because it can disappear only after
+	 * either it is merged with another queue, or the process it
+	 * is associated with exits. But both actions must be taken by
+	 * the same process currently executing this flow of
+	 * instructions.
+	 *
+	 * In addition, the following queue lock guarantees that
+	 * bfqq_group(bfqq) exists as well.
+	 */
+	spin_lock_irq(q->queue_lock);
+	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
+	if (idle_timer_disabled)
+		bfqg_stats_update_idle_time(bfqq_group(bfqq));
+	spin_unlock_irq(q->queue_lock);
+}
+#else
+static inline void bfq_update_insert_stats(struct request_queue *q,
+					   struct bfq_queue *bfqq,
+					   bool idle_timer_disabled,
+					   unsigned int cmd_flags) {}
+#endif
+
 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 			       bool at_head)
 {
 	struct request_queue *q = hctx->queue;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
 	bool idle_timer_disabled = false;
 	unsigned int cmd_flags;
-#endif
 
 	spin_lock_irq(&bfqd->lock);
 	if (blk_mq_sched_try_insert_merge(q, rq)) {
@@ -4363,7 +4402,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		else
 			list_add_tail(&rq->queuelist, &bfqd->dispatch);
 	} else {
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
 		/*
 		 * Update bfqq, because, if a queue merge has occurred
@@ -4371,9 +4409,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		 * redirected into a new queue.
 		 */
 		bfqq = RQ_BFQQ(rq);
-#else
-		__bfq_insert_request(bfqd, rq);
-#endif
 
 		if (rq_mergeable(rq)) {
 			elv_rqhash_add(q, rq);
@@ -4382,35 +4417,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		}
 	}
 
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 	/*
 	 * Cache cmd_flags before releasing scheduler lock, because rq
 	 * may disappear afterwards (for example, because of a request
 	 * merge).
 	 */
 	cmd_flags = rq->cmd_flags;
-#endif
+
 	spin_unlock_irq(&bfqd->lock);
 
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-	if (!bfqq)
-		return;
-	/*
-	 * bfqq still exists, because it can disappear only after
-	 * either it is merged with another queue, or the process it
-	 * is associated with exits. But both actions must be taken by
-	 * the same process currently executing this flow of
-	 * instruction.
-	 *
-	 * In addition, the following queue lock guarantees that
-	 * bfqq_group(bfqq) exists as well.
-	 */
-	spin_lock_irq(q->queue_lock);
-	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
-	if (idle_timer_disabled)
-		bfqg_stats_update_idle_time(bfqq_group(bfqq));
-	spin_unlock_irq(q->queue_lock);
-#endif
+	bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
+				cmd_flags);
 }
 
 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
-- 
2.15.1
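
For reference, the shape of the refactoring in isolation: the
config-dependent bookkeeping moves into a helper that is compiled only
when the relevant options are set, with an empty static inline stub
otherwise, so every call site stays unconditional and the call compiles
away entirely in the stub case. Below is a minimal, self-contained C
sketch of that pattern; CONFIG_FOO_STATS and the foo_* names are
hypothetical stand-ins for the CONFIG_BFQ_GROUP_IOSCHED /
CONFIG_DEBUG_BLK_CGROUP pair and the bfq helpers, not code from the
patch.

#include <stdio.h>

#if defined(CONFIG_FOO_STATS)
/* Real bookkeeping, compiled only when the config option is set. */
static void foo_update_stats(int nr_dispatched)
{
	printf("dispatched %d request(s)\n", nr_dispatched);
}
#else
/* Empty stub: the unconditional call below compiles away entirely. */
static inline void foo_update_stats(int nr_dispatched) {}
#endif

static int foo_dispatch(void)
{
	int nr = 1;		/* stand-in for the actual dispatch work */

	foo_update_stats(nr);	/* single call site, no ifdefs here */
	return nr;
}

int main(void)
{
	return foo_dispatch() == 1 ? 0 : 1;
}

Building the sketch with and without -DCONFIG_FOO_STATS shows both
halves compile cleanly while the caller stays identical.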
|