1 |
From 238231dd6198e7e3b089de1565428aa112d62b19 Mon Sep 17 00:00:00 2001 |
2 |
From: Ping-Ke Shih <pkshih@realtek.com> |
3 |
Date: Fri, 8 Oct 2021 11:56:24 +0800 |
4 |
Subject: [PATCH 21/24] rtw89: add ser to recover error reported by firmware |
5 |
|
6 |
SER stands for System Error Recovery. When the firmware detects an error, it
7 |
interrupts the driver. The driver reads the SER event code from a register and
8 |
performs the SER process as follows:
9 |
|
10 |
1. driver stops TX
11 |
2. driver stops DMA
12 |
3. driver stops RX
13 |
4. reset TRX variables and counters
14 |
5. Inform FW that the driver reset is finished
15 |
FW also resets itself upon receiving the driver's notification
16 |
6. Wait for FW to confirm that its reset is finished
17 |
7. Enable DMA |
18 |
8. driver resumes RX
19 |
9. driver resumes TX
20 |
|
21 |
Signed-off-by: Ping-Ke Shih <pkshih@realtek.com> |
22 |
Signed-off-by: Kalle Valo <kvalo@codeaurora.org> |
23 |
Link: https://lore.kernel.org/r/20211008035627.19463-22-pkshih@realtek.com |
24 |
--- |
25 |
drivers/net/wireless/realtek/rtw89/ser.c | 491 +++++++++++++++++++++++ |
26 |
drivers/net/wireless/realtek/rtw89/ser.h | 15 + |
27 |
2 files changed, 506 insertions(+) |
28 |
create mode 100644 drivers/net/wireless/realtek/rtw89/ser.c |
29 |
create mode 100644 drivers/net/wireless/realtek/rtw89/ser.h |
30 |
|
31 |
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c |
32 |
new file mode 100644 |
33 |
index 000000000000..837cdc366a61 |
34 |
--- /dev/null |
35 |
+++ b/drivers/net/wireless/realtek/rtw89/ser.c |
36 |
@@ -0,0 +1,491 @@ |
37 |
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause |
38 |
+/* Copyright(c) 2019-2020 Realtek Corporation |
39 |
+ */ |
40 |
+ |
41 |
+#include "cam.h" |
42 |
+#include "debug.h" |
43 |
+#include "mac.h" |
44 |
+#include "ps.h" |
45 |
+#include "ser.h" |
46 |
+#include "util.h" |
47 |
+ |
48 |
+#define SER_RECFG_TIMEOUT 1000 |
49 |
+ |
50 |
+enum ser_evt { |
51 |
+ SER_EV_NONE, |
52 |
+ SER_EV_STATE_IN, |
53 |
+ SER_EV_STATE_OUT, |
54 |
+ SER_EV_L1_RESET, /* M1 */ |
55 |
+ SER_EV_DO_RECOVERY, /* M3 */ |
56 |
+ SER_EV_MAC_RESET_DONE, /* M5 */ |
57 |
+ SER_EV_L2_RESET, |
58 |
+ SER_EV_L2_RECFG_DONE, |
59 |
+ SER_EV_L2_RECFG_TIMEOUT, |
60 |
+ SER_EV_M3_TIMEOUT, |
61 |
+ SER_EV_FW_M5_TIMEOUT, |
62 |
+ SER_EV_L0_RESET, |
63 |
+ SER_EV_MAXX |
64 |
+}; |
65 |
+ |
66 |
+enum ser_state { |
67 |
+ SER_IDLE_ST, |
68 |
+ SER_RESET_TRX_ST, |
69 |
+ SER_DO_HCI_ST, |
70 |
+ SER_L2_RESET_ST, |
71 |
+ SER_ST_MAX_ST |
72 |
+}; |
73 |
+ |
74 |
+struct ser_msg { |
75 |
+ struct list_head list; |
76 |
+ u8 event; |
77 |
+}; |
78 |
+ |
79 |
+struct state_ent { |
80 |
+ u8 state; |
81 |
+ char *name; |
82 |
+ void (*st_func)(struct rtw89_ser *ser, u8 event); |
83 |
+}; |
84 |
+ |
85 |
+struct event_ent { |
86 |
+ u8 event; |
87 |
+ char *name; |
88 |
+}; |
89 |
+ |
90 |
+static char *ser_ev_name(struct rtw89_ser *ser, u8 event) |
91 |
+{ |
92 |
+ if (event < SER_EV_MAXX) |
93 |
+ return ser->ev_tbl[event].name; |
94 |
+ |
95 |
+ return "err_ev_name"; |
96 |
+} |
97 |
+ |
98 |
+static char *ser_st_name(struct rtw89_ser *ser) |
99 |
+{ |
100 |
+ if (ser->state < SER_ST_MAX_ST) |
101 |
+ return ser->st_tbl[ser->state].name; |
102 |
+ |
103 |
+ return "err_st_name"; |
104 |
+} |
105 |
+ |
106 |
+static void ser_state_run(struct rtw89_ser *ser, u8 evt) |
107 |
+{ |
108 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
109 |
+ |
110 |
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n", |
111 |
+ ser_st_name(ser), ser_ev_name(ser, evt)); |
112 |
+ |
113 |
+ rtw89_leave_lps(rtwdev); |
114 |
+ ser->st_tbl[ser->state].st_func(ser, evt); |
115 |
+} |
116 |
+ |
117 |
+static void ser_state_goto(struct rtw89_ser *ser, u8 new_state) |
118 |
+{ |
119 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
120 |
+ |
121 |
+ if (ser->state == new_state || new_state >= SER_ST_MAX_ST) |
122 |
+ return; |
123 |
+ ser_state_run(ser, SER_EV_STATE_OUT); |
124 |
+ |
125 |
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n", |
126 |
+ ser_st_name(ser), ser->st_tbl[new_state].name); |
127 |
+ |
128 |
+ ser->state = new_state; |
129 |
+ ser_state_run(ser, SER_EV_STATE_IN); |
130 |
+} |
131 |
+ |
132 |
+static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser) |
133 |
+{ |
134 |
+ struct ser_msg *msg; |
135 |
+ |
136 |
+ spin_lock_irq(&ser->msg_q_lock); |
137 |
+ msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list); |
138 |
+ if (msg) |
139 |
+ list_del(&msg->list); |
140 |
+ spin_unlock_irq(&ser->msg_q_lock); |
141 |
+ |
142 |
+ return msg; |
143 |
+} |
144 |
+ |
145 |
+static void rtw89_ser_hdl_work(struct work_struct *work) |
146 |
+{ |
147 |
+ struct ser_msg *msg; |
148 |
+ struct rtw89_ser *ser = container_of(work, struct rtw89_ser, |
149 |
+ ser_hdl_work); |
150 |
+ |
151 |
+ while ((msg = __rtw89_ser_dequeue_msg(ser))) { |
152 |
+ ser_state_run(ser, msg->event); |
153 |
+ kfree(msg); |
154 |
+ } |
155 |
+} |
156 |
+ |
157 |
+static int ser_send_msg(struct rtw89_ser *ser, u8 event) |
158 |
+{ |
159 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
160 |
+ struct ser_msg *msg = NULL; |
161 |
+ |
162 |
+ if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags)) |
163 |
+ return -EIO; |
164 |
+ |
165 |
+ msg = kmalloc(sizeof(*msg), GFP_ATOMIC); |
166 |
+ if (!msg) |
167 |
+ return -ENOMEM; |
168 |
+ |
169 |
+ msg->event = event; |
170 |
+ |
171 |
+ spin_lock_irq(&ser->msg_q_lock); |
172 |
+ list_add(&msg->list, &ser->msg_q); |
173 |
+ spin_unlock_irq(&ser->msg_q_lock); |
174 |
+ |
175 |
+ ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work); |
176 |
+ return 0; |
177 |
+} |
178 |
+ |
179 |
+static void rtw89_ser_alarm_work(struct work_struct *work) |
180 |
+{ |
181 |
+ struct rtw89_ser *ser = container_of(work, struct rtw89_ser, |
182 |
+ ser_alarm_work.work); |
183 |
+ |
184 |
+ ser_send_msg(ser, ser->alarm_event); |
185 |
+ ser->alarm_event = SER_EV_NONE; |
186 |
+} |
187 |
+ |
188 |
+static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event) |
189 |
+{ |
190 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
191 |
+ |
192 |
+ if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags)) |
193 |
+ return; |
194 |
+ |
195 |
+ ser->alarm_event = event; |
196 |
+ ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work, |
197 |
+ msecs_to_jiffies(ms)); |
198 |
+} |
199 |
+ |
200 |
+static void ser_del_alarm(struct rtw89_ser *ser) |
201 |
+{ |
202 |
+ cancel_delayed_work(&ser->ser_alarm_work); |
203 |
+ ser->alarm_event = SER_EV_NONE; |
204 |
+} |
205 |
+ |
206 |
+/* driver function */ |
207 |
+static void drv_stop_tx(struct rtw89_ser *ser) |
208 |
+{ |
209 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
210 |
+ |
211 |
+ ieee80211_stop_queues(rtwdev->hw); |
212 |
+ set_bit(RTW89_SER_DRV_STOP_TX, ser->flags); |
213 |
+} |
214 |
+ |
215 |
+static void drv_stop_rx(struct rtw89_ser *ser) |
216 |
+{ |
217 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
218 |
+ |
219 |
+ clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags); |
220 |
+ set_bit(RTW89_SER_DRV_STOP_RX, ser->flags); |
221 |
+} |
222 |
+ |
223 |
+static void drv_trx_reset(struct rtw89_ser *ser) |
224 |
+{ |
225 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
226 |
+ |
227 |
+ rtw89_hci_reset(rtwdev); |
228 |
+} |
229 |
+ |
230 |
+static void drv_resume_tx(struct rtw89_ser *ser) |
231 |
+{ |
232 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
233 |
+ |
234 |
+ if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags)) |
235 |
+ return; |
236 |
+ |
237 |
+ ieee80211_wake_queues(rtwdev->hw); |
238 |
+ clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags); |
239 |
+} |
240 |
+ |
241 |
+static void drv_resume_rx(struct rtw89_ser *ser) |
242 |
+{ |
243 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
244 |
+ |
245 |
+ if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags)) |
246 |
+ return; |
247 |
+ |
248 |
+ set_bit(RTW89_FLAG_RUNNING, rtwdev->flags); |
249 |
+ clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags); |
250 |
+} |
251 |
+ |
252 |
+static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) |
253 |
+{ |
254 |
+ rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port); |
255 |
+ rtwvif->net_type = RTW89_NET_TYPE_NO_LINK; |
256 |
+ rtwvif->trigger = false; |
257 |
+} |
258 |
+ |
259 |
+static void ser_reset_mac_binding(struct rtw89_dev *rtwdev) |
260 |
+{ |
261 |
+ struct rtw89_vif *rtwvif; |
262 |
+ |
263 |
+ rtw89_cam_reset_keys(rtwdev); |
264 |
+ rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM); |
265 |
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) |
266 |
+ ser_reset_vif(rtwdev, rtwvif); |
267 |
+} |
268 |
+ |
269 |
+/* hal function */ |
270 |
+static int hal_enable_dma(struct rtw89_ser *ser) |
271 |
+{ |
272 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
273 |
+ int ret; |
274 |
+ |
275 |
+ if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags)) |
276 |
+ return 0; |
277 |
+ |
278 |
+ if (!rtwdev->hci.ops->mac_lv1_rcvy) |
279 |
+ return -EIO; |
280 |
+ |
281 |
+ ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2); |
282 |
+ if (!ret) |
283 |
+ clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags); |
284 |
+ |
285 |
+ return ret; |
286 |
+} |
287 |
+ |
288 |
+static int hal_stop_dma(struct rtw89_ser *ser) |
289 |
+{ |
290 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
291 |
+ int ret; |
292 |
+ |
293 |
+ if (!rtwdev->hci.ops->mac_lv1_rcvy) |
294 |
+ return -EIO; |
295 |
+ |
296 |
+ ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1); |
297 |
+ if (!ret) |
298 |
+ set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags); |
299 |
+ |
300 |
+ return ret; |
301 |
+} |
302 |
+ |
303 |
+static void hal_send_m2_event(struct rtw89_ser *ser) |
304 |
+{ |
305 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
306 |
+ |
307 |
+ rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN); |
308 |
+} |
309 |
+ |
310 |
+static void hal_send_m4_event(struct rtw89_ser *ser) |
311 |
+{ |
312 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
313 |
+ |
314 |
+ rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN); |
315 |
+} |
316 |
+ |
317 |
+/* state handler */ |
318 |
+static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt) |
319 |
+{ |
320 |
+ switch (evt) { |
321 |
+ case SER_EV_STATE_IN: |
322 |
+ break; |
323 |
+ case SER_EV_L1_RESET: |
324 |
+ ser_state_goto(ser, SER_RESET_TRX_ST); |
325 |
+ break; |
326 |
+ case SER_EV_L2_RESET: |
327 |
+ ser_state_goto(ser, SER_L2_RESET_ST); |
328 |
+ break; |
329 |
+ case SER_EV_STATE_OUT: |
330 |
+ default: |
331 |
+ break; |
332 |
+ } |
333 |
+} |
334 |
+ |
335 |
+static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt) |
336 |
+{ |
337 |
+ switch (evt) { |
338 |
+ case SER_EV_STATE_IN: |
339 |
+ drv_stop_tx(ser); |
340 |
+ |
341 |
+ if (hal_stop_dma(ser)) { |
342 |
+ ser_state_goto(ser, SER_L2_RESET_ST); |
343 |
+ break; |
344 |
+ } |
345 |
+ |
346 |
+ drv_stop_rx(ser); |
347 |
+ drv_trx_reset(ser); |
348 |
+ |
349 |
+ /* wait m3 */ |
350 |
+ hal_send_m2_event(ser); |
351 |
+ |
352 |
+ /* set alarm to prevent FW response timeout */ |
353 |
+ ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT); |
354 |
+ break; |
355 |
+ |
356 |
+ case SER_EV_DO_RECOVERY: |
357 |
+ ser_state_goto(ser, SER_DO_HCI_ST); |
358 |
+ break; |
359 |
+ |
360 |
+ case SER_EV_M3_TIMEOUT: |
361 |
+ ser_state_goto(ser, SER_L2_RESET_ST); |
362 |
+ break; |
363 |
+ |
364 |
+ case SER_EV_STATE_OUT: |
365 |
+ ser_del_alarm(ser); |
366 |
+ hal_enable_dma(ser); |
367 |
+ drv_resume_rx(ser); |
368 |
+ drv_resume_tx(ser); |
369 |
+ break; |
370 |
+ |
371 |
+ default: |
372 |
+ break; |
373 |
+ } |
374 |
+} |
375 |
+ |
376 |
+static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt) |
377 |
+{ |
378 |
+ switch (evt) { |
379 |
+ case SER_EV_STATE_IN: |
380 |
+ /* wait m5 */ |
381 |
+ hal_send_m4_event(ser); |
382 |
+ |
383 |
+ /* prevent FW response timeout */ |
384 |
+ ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT); |
385 |
+ break; |
386 |
+ |
387 |
+ case SER_EV_FW_M5_TIMEOUT: |
388 |
+ ser_state_goto(ser, SER_L2_RESET_ST); |
389 |
+ break; |
390 |
+ |
391 |
+ case SER_EV_MAC_RESET_DONE: |
392 |
+ ser_state_goto(ser, SER_IDLE_ST); |
393 |
+ break; |
394 |
+ |
395 |
+ case SER_EV_STATE_OUT: |
396 |
+ ser_del_alarm(ser); |
397 |
+ break; |
398 |
+ |
399 |
+ default: |
400 |
+ break; |
401 |
+ } |
402 |
+} |
403 |
+ |
404 |
+static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt) |
405 |
+{ |
406 |
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); |
407 |
+ |
408 |
+ switch (evt) { |
409 |
+ case SER_EV_STATE_IN: |
410 |
+ mutex_lock(&rtwdev->mutex); |
411 |
+ ser_reset_mac_binding(rtwdev); |
412 |
+ rtw89_core_stop(rtwdev); |
413 |
+ mutex_unlock(&rtwdev->mutex); |
414 |
+ |
415 |
+ ieee80211_restart_hw(rtwdev->hw); |
416 |
+ ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT); |
417 |
+ break; |
418 |
+ |
419 |
+ case SER_EV_L2_RECFG_TIMEOUT: |
420 |
+ rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n"); |
421 |
+ fallthrough; |
422 |
+ case SER_EV_L2_RECFG_DONE: |
423 |
+ ser_state_goto(ser, SER_IDLE_ST); |
424 |
+ break; |
425 |
+ |
426 |
+ case SER_EV_STATE_OUT: |
427 |
+ ser_del_alarm(ser); |
428 |
+ break; |
429 |
+ |
430 |
+ default: |
431 |
+ break; |
432 |
+ } |
433 |
+} |
434 |
+ |
435 |
+static struct event_ent ser_ev_tbl[] = { |
436 |
+ {SER_EV_NONE, "SER_EV_NONE"}, |
437 |
+ {SER_EV_STATE_IN, "SER_EV_STATE_IN"}, |
438 |
+ {SER_EV_STATE_OUT, "SER_EV_STATE_OUT"}, |
439 |
+ {SER_EV_L1_RESET, "SER_EV_L1_RESET"}, |
440 |
+ {SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"}, |
441 |
+ {SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"}, |
442 |
+ {SER_EV_L2_RESET, "SER_EV_L2_RESET"}, |
443 |
+ {SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"}, |
444 |
+ {SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"}, |
445 |
+ {SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"}, |
446 |
+ {SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"}, |
447 |
+ {SER_EV_L0_RESET, "SER_EV_L0_RESET"}, |
448 |
+ {SER_EV_MAXX, "SER_EV_MAX"} |
449 |
+}; |
450 |
+ |
451 |
+static struct state_ent ser_st_tbl[] = { |
452 |
+ {SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl}, |
453 |
+ {SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl}, |
454 |
+ {SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl}, |
455 |
+ {SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl} |
456 |
+}; |
457 |
+ |
458 |
+int rtw89_ser_init(struct rtw89_dev *rtwdev) |
459 |
+{ |
460 |
+ struct rtw89_ser *ser = &rtwdev->ser; |
461 |
+ |
462 |
+ memset(ser, 0, sizeof(*ser)); |
463 |
+ INIT_LIST_HEAD(&ser->msg_q); |
464 |
+ ser->state = SER_IDLE_ST; |
465 |
+ ser->st_tbl = ser_st_tbl; |
466 |
+ ser->ev_tbl = ser_ev_tbl; |
467 |
+ |
468 |
+ bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS); |
469 |
+ spin_lock_init(&ser->msg_q_lock); |
470 |
+ INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work); |
471 |
+ INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work); |
472 |
+ return 0; |
473 |
+} |
474 |
+ |
475 |
+int rtw89_ser_deinit(struct rtw89_dev *rtwdev) |
476 |
+{ |
477 |
+ struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser; |
478 |
+ |
479 |
+ set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags); |
480 |
+ cancel_delayed_work_sync(&ser->ser_alarm_work); |
481 |
+ cancel_work_sync(&ser->ser_hdl_work); |
482 |
+ clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags); |
483 |
+ return 0; |
484 |
+} |
485 |
+ |
486 |
+void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev) |
487 |
+{ |
488 |
+ ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE); |
489 |
+} |
490 |
+ |
491 |
+int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err) |
492 |
+{ |
493 |
+ u8 event = SER_EV_NONE; |
494 |
+ |
495 |
+ rtw89_info(rtwdev, "ser event = 0x%04x\n", err); |
496 |
+ |
497 |
+ switch (err) { |
498 |
+ case MAC_AX_ERR_L1_ERR_DMAC: |
499 |
+ case MAC_AX_ERR_L0_PROMOTE_TO_L1: |
500 |
+ event = SER_EV_L1_RESET; /* M1 */ |
501 |
+ break; |
502 |
+ case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE: |
503 |
+ event = SER_EV_DO_RECOVERY; /* M3 */ |
504 |
+ break; |
505 |
+ case MAC_AX_ERR_L1_RESET_RECOVERY_DONE: |
506 |
+ event = SER_EV_MAC_RESET_DONE; /* M5 */ |
507 |
+ break; |
508 |
+ case MAC_AX_ERR_L0_ERR_CMAC0: |
509 |
+ case MAC_AX_ERR_L0_ERR_CMAC1: |
510 |
+ case MAC_AX_ERR_L0_RESET_DONE: |
511 |
+ event = SER_EV_L0_RESET; |
512 |
+ break; |
513 |
+ default: |
514 |
+ if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 || |
515 |
+ (err >= MAC_AX_ERR_L2_ERR_AH_DMA && |
516 |
+ err <= MAC_AX_GET_ERR_MAX)) |
517 |
+ event = SER_EV_L2_RESET; |
518 |
+ break; |
519 |
+ } |
520 |
+ |
521 |
+ if (event == SER_EV_NONE) |
522 |
+ return -EINVAL; |
523 |
+ |
524 |
+ ser_send_msg(&rtwdev->ser, event); |
525 |
+ return 0; |
526 |
+} |
527 |
+EXPORT_SYMBOL(rtw89_ser_notify); |
528 |
diff --git a/drivers/net/wireless/realtek/rtw89/ser.h b/drivers/net/wireless/realtek/rtw89/ser.h |
529 |
new file mode 100644 |
530 |
index 000000000000..6b8e62019942 |
531 |
--- /dev/null |
532 |
+++ b/drivers/net/wireless/realtek/rtw89/ser.h |
533 |
@@ -0,0 +1,15 @@ |
534 |
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause |
535 |
+ * Copyright(c) 2019-2020 Realtek Corporation |
536 |
+ */ |
537 |
+#ifndef __SER_H__ |
538 |
+#define __SER_H__ |
539 |
+ |
540 |
+#include "core.h" |
541 |
+ |
542 |
+int rtw89_ser_init(struct rtw89_dev *rtwdev); |
543 |
+int rtw89_ser_deinit(struct rtw89_dev *rtwdev); |
544 |
+int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err); |
545 |
+void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev); |
546 |
+ |
547 |
+#endif /* __SER_H__*/ |
548 |
+ |
549 |
-- |
550 |
2.33.0 |
551 |
|