From 430c2546a4d8503d963bab99abf0083e98019052 Mon Sep 17 00:00:00 2001
From: Shaul Triebitz <shaul.triebitz@intel.com>
Date: Wed, 6 Jun 2018 17:20:58 +0300
Subject: [PATCH 027/145] iwlwifi: pcie: avoid empty free RB queue

[ Upstream commit 868a1e863f95183f00809363fefba6d4f5bcd116 ]

If all free RB queues are empty, the driver will never restock the
free RB queue. That's because the restocking happens in the Rx flow,
and if the free queue is empty there will be no Rx.

Although there's a background worker (a.k.a. allocator) allocating
memory for RBs so that the Rx handler can restock them, the worker may
run only after the free queue has become empty (and then it is too
late for restocking as explained above).

There is a solution for that called 'emergency': If the number of used
RB's reaches half the amount of all RB's, the Rx handler will not wait
for the allocator but immediately allocate memory for the used RB's
and restock the free queue.
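
Before this patch, that emergency check was done per Rx queue, using
only that queue's own counters; this is the line the iwl_pcie_rx_handle()
hunk below removes:

	if (unlikely(rxq->used_count == rxq->queue_size / 2))
		emergency = true;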

But, since the used RB's count is kept per queue, it may happen that
the used RB's are spread between the queues such that the emergency
check fails for each individual queue, while the driver still runs out
of RBs (causing the symptom described above).

To fix it, move to emergency mode if the sum of *all* used RBs (for
all Rx queues) reaches half the amount of all RB's.
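
For illustration (numbers purely hypothetical): with two Rx queues of
512 RBs each, the old per-queue check only declared an emergency once a
single queue had 256 used RBs, so two queues sitting at 200 used RBs
each never tripped it, even though 400 RBs in total were waiting to be
refilled. The hunks below implement the fix via the allocator's global
pending count rather than a per-queue used_count; roughly, simplified
from the actual diff:

	/* RBDs still waiting for page allocation, across all Rx queues:
	 * each pending allocator request covers RX_CLAIM_REQ_ALLOC RBDs */
	u32 rb_pending_alloc =
		atomic_read(&rba->req_pending) * RX_CLAIM_REQ_ALLOC;

	if (rb_pending_alloc >= rxq->queue_size / 2 && !emergency) {
		iwl_pcie_rx_move_to_allocator(rxq, rba);
		emergency = true;
	}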

Signed-off-by: Shaul Triebitz <shaul.triebitz@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 32 +++++++++++++-------
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index d017aa2a0a8b..d4a31e014c82 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1144,6 +1144,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 	kfree(trans_pcie->rxq);
 }
 
+static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
+					  struct iwl_rb_allocator *rba)
+{
+	spin_lock(&rba->lock);
+	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+	spin_unlock(&rba->lock);
+}
+
 /*
  * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
  *
@@ -1175,9 +1183,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
 		/* Move the 2 RBDs to the allocator ownership.
 		 Allocator has another 6 from pool for the request completion*/
-		spin_lock(&rba->lock);
-		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-		spin_unlock(&rba->lock);
+		iwl_pcie_rx_move_to_allocator(rxq, rba);
 
 		atomic_inc(&rba->req_pending);
 		queue_work(rba->alloc_wq, &rba->rx_alloc);
@@ -1396,10 +1402,18 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 	IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
 
 	while (i != r) {
+		struct iwl_rb_allocator *rba = &trans_pcie->rba;
 		struct iwl_rx_mem_buffer *rxb;
-
-		if (unlikely(rxq->used_count == rxq->queue_size / 2))
+		/* number of RBDs still waiting for page allocation */
+		u32 rb_pending_alloc =
+			atomic_read(&trans_pcie->rba.req_pending) *
+			RX_CLAIM_REQ_ALLOC;
+
+		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
+			     !emergency)) {
+			iwl_pcie_rx_move_to_allocator(rxq, rba);
 			emergency = true;
+		}
 
 		rxb = iwl_pcie_get_rxb(trans, rxq, i);
 		if (!rxb)
@@ -1421,17 +1435,13 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 			iwl_pcie_rx_allocator_get(trans, rxq);
 
 		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
-			struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
 			/* Add the remaining empty RBDs for allocator use */
-			spin_lock(&rba->lock);
-			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-			spin_unlock(&rba->lock);
+			iwl_pcie_rx_move_to_allocator(rxq, rba);
 		} else if (emergency) {
 			count++;
 			if (count == 8) {
 				count = 0;
-				if (rxq->used_count < rxq->queue_size / 3)
+				if (rb_pending_alloc < rxq->queue_size / 3)
 					emergency = false;
 
 				rxq->read = i;
-- 
2.19.1