/[packages]/backports/8/kernel/current/SOURCES/hid-Add-support-for-Intel-Touch-Host-Controller.patch
ViewVC logotype

Contents of /backports/8/kernel/current/SOURCES/hid-Add-support-for-Intel-Touch-Host-Controller.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1935597 - (show annotations) (download)
Tue Jan 24 10:18:02 2023 UTC (14 months, 4 weeks ago) by tmb
File size: 53965 byte(s)
- update to 6.1.8
  * drop merged patches
- hid: Add support for Intel Precise Touch and Stylus
- hid: Add support for Intel Touch Host Controller
- update defconfigs for Microsoft Surface devices
- enable more hid devices


1 From dcfd07acf4535a37bb4cfdc290ecab7878aa1cdc Mon Sep 17 00:00:00 2001
2 From: Dorian Stoll <dorian.stoll@tmsp.io>
3 Date: Sun, 11 Dec 2022 12:10:54 +0100
4 Subject: [PATCH] hid: Add support for Intel Touch Host Controller
5
6 Based on quo/ithc-linux@e1c3978
7
8 Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
9 Patchset: ithc
10 ---
11 drivers/hid/Kconfig | 2 +
12 drivers/hid/Makefile | 1 +
13 drivers/hid/ithc/Kbuild | 6 +
14 drivers/hid/ithc/Kconfig | 12 +
15 drivers/hid/ithc/ithc-debug.c | 96 ++++++
16 drivers/hid/ithc/ithc-dma.c | 258 ++++++++++++++++
17 drivers/hid/ithc/ithc-dma.h | 67 +++++
18 drivers/hid/ithc/ithc-main.c | 534 ++++++++++++++++++++++++++++++++++
19 drivers/hid/ithc/ithc-regs.c | 64 ++++
20 drivers/hid/ithc/ithc-regs.h | 186 ++++++++++++
21 drivers/hid/ithc/ithc.h | 60 ++++
22 11 files changed, 1286 insertions(+)
23 create mode 100644 drivers/hid/ithc/Kbuild
24 create mode 100644 drivers/hid/ithc/Kconfig
25 create mode 100644 drivers/hid/ithc/ithc-debug.c
26 create mode 100644 drivers/hid/ithc/ithc-dma.c
27 create mode 100644 drivers/hid/ithc/ithc-dma.h
28 create mode 100644 drivers/hid/ithc/ithc-main.c
29 create mode 100644 drivers/hid/ithc/ithc-regs.c
30 create mode 100644 drivers/hid/ithc/ithc-regs.h
31 create mode 100644 drivers/hid/ithc/ithc.h
32
33 diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
34 index 1523ccdf73b5..b7030071ac0c 100644
35 --- a/drivers/hid/Kconfig
36 +++ b/drivers/hid/Kconfig
37 @@ -1292,4 +1292,6 @@ source "drivers/hid/surface-hid/Kconfig"
38
39 source "drivers/hid/ipts/Kconfig"
40
41 +source "drivers/hid/ithc/Kconfig"
42 +
43 endmenu
44 diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
45 index e48300bcea9b..8ef7308b0f9d 100644
46 --- a/drivers/hid/Makefile
47 +++ b/drivers/hid/Makefile
48 @@ -166,3 +166,4 @@ obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
49 obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
50
51 obj-$(CONFIG_HID_IPTS) += ipts/
52 +obj-$(CONFIG_HID_ITHC) += ithc/
53 diff --git a/drivers/hid/ithc/Kbuild b/drivers/hid/ithc/Kbuild
54 new file mode 100644
55 index 000000000000..aea83f2ac07b
56 --- /dev/null
57 +++ b/drivers/hid/ithc/Kbuild
58 @@ -0,0 +1,6 @@
59 +obj-$(CONFIG_HID_ITHC) := ithc.o
60 +
61 +ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-debug.o
62 +
63 +ccflags-y := -std=gnu11 -Wno-declaration-after-statement
64 +
65 diff --git a/drivers/hid/ithc/Kconfig b/drivers/hid/ithc/Kconfig
66 new file mode 100644
67 index 000000000000..ede713023609
68 --- /dev/null
69 +++ b/drivers/hid/ithc/Kconfig
70 @@ -0,0 +1,12 @@
71 +config HID_ITHC
72 + tristate "Intel Touch Host Controller"
73 + depends on PCI
74 + depends on HID
75 + help
76 + Say Y here if your system has a touchscreen using Intel's
77 + Touch Host Controller (ITHC / IPTS) technology.
78 +
79 + If unsure, say N.
80 +
81 + To compile this driver as a module, choose M here: the
82 + module will be called ithc.
83 diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c
84 new file mode 100644
85 index 000000000000..57bf125c45bd
86 --- /dev/null
87 +++ b/drivers/hid/ithc/ithc-debug.c
88 @@ -0,0 +1,96 @@
89 +#include "ithc.h"
90 +
91 +void ithc_log_regs(struct ithc *ithc) {
92 + if (!ithc->prev_regs) return;
93 + u32 __iomem *cur = (__iomem void*)ithc->regs;
94 + u32 *prev = (void*)ithc->prev_regs;
95 + for (int i = 1024; i < sizeof *ithc->regs / 4; i++) {
96 + u32 x = readl(cur + i);
97 + if (x != prev[i]) {
98 + pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x);
99 + prev[i] = x;
100 + }
101 + }
102 +}
103 +
104 +static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len, loff_t *offset) {
105 + struct ithc *ithc = file_inode(f)->i_private;
106 + char cmd[256];
107 + if (!ithc || !ithc->pci) return -ENODEV;
108 + if (!len) return -EINVAL;
109 + if (len >= sizeof cmd) return -EINVAL;
110 + if (copy_from_user(cmd, buf, len)) return -EFAULT;
111 + cmd[len] = 0;
112 + if (cmd[len-1] == '\n') cmd[len-1] = 0;
113 + pci_info(ithc->pci, "debug command: %s\n", cmd);
114 + u32 n = 0;
115 + const char *s = cmd + 1;
116 + u32 a[32];
117 + while (*s && *s != '\n') {
118 + if (n >= ARRAY_SIZE(a)) return -EINVAL;
119 + if (*s++ != ' ') return -EINVAL;
120 + char *e;
121 + a[n++] = simple_strtoul(s, &e, 0);
122 + if (e == s) return -EINVAL;
123 + s = e;
124 + }
125 + ithc_log_regs(ithc);
126 + switch(cmd[0]) {
127 + case 'x': // reset
128 + ithc_reset(ithc);
129 + break;
130 + case 'w': // write register: offset mask value
131 + if (n != 3 || (a[0] & 3)) return -EINVAL;
132 + pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n", a[0], a[2], a[1]);
133 + bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]);
134 + break;
135 + case 'r': // read register: offset
136 + if (n != 1 || (a[0] & 3)) return -EINVAL;
137 + pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0], readl(((__iomem u32 *)ithc->regs) + a[0] / 4));
138 + break;
139 + case 's': // spi command: cmd offset len data...
140 + // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
141 + // set touch cfg: s 6 12 4 XX
142 + if (n < 3 || a[2] > (n - 3) * 4) return -EINVAL;
143 + pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]);
144 + if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3))
145 + for (u32 i = 0; i < (a[2] + 3) / 4; i++) pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]);
146 + break;
147 + case 'd': // dma command: cmd len data...
148 + // get report descriptor: d 7 8 0 0
149 + // enable multitouch: d 3 2 0x0105
150 + if (n < 2 || a[1] > (n - 2) * 4) return -EINVAL;
151 + pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]);
152 + if (ithc_dma_tx(ithc, a[0], a[1], a + 2)) pci_err(ithc->pci, "dma tx failed\n");
153 + break;
154 + default:
155 + return -EINVAL;
156 + }
157 + ithc_log_regs(ithc);
158 + return len;
159 +}
160 +
161 +static const struct file_operations ithc_debugfops_cmd = {
162 + .owner = THIS_MODULE,
163 + .write = ithc_debugfs_cmd_write,
164 +};
165 +
166 +static void ithc_debugfs_devres_release(struct device *dev, void *res) {
167 + struct dentry **dbgm = res;
168 + if (*dbgm) debugfs_remove_recursive(*dbgm);
169 +}
170 +
171 +int ithc_debug_init(struct ithc *ithc) {
172 + struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof *dbgm, GFP_KERNEL);
173 + if (!dbgm) return -ENOMEM;
174 + devres_add(&ithc->pci->dev, dbgm);
175 + struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL);
176 + if (IS_ERR(dbg)) return PTR_ERR(dbg);
177 + *dbgm = dbg;
178 +
179 + struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd);
180 + if (IS_ERR(cmd)) return PTR_ERR(cmd);
181 +
182 + return 0;
183 +}
184 +
185 diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c
186 new file mode 100644
187 index 000000000000..7e89b3496918
188 --- /dev/null
189 +++ b/drivers/hid/ithc/ithc-dma.c
190 @@ -0,0 +1,258 @@
191 +#include "ithc.h"
192 +
193 +static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p, unsigned num_buffers, unsigned num_pages, enum dma_data_direction dir) {
194 + p->num_pages = num_pages;
195 + p->dir = dir;
196 + p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE);
197 + p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL);
198 + if (!p->addr) return -ENOMEM;
199 + if (p->dma_addr & (PAGE_SIZE - 1)) return -EFAULT;
200 + return 0;
201 +}
202 +
203 +struct ithc_sg_table {
204 + void *addr;
205 + struct sg_table sgt;
206 + enum dma_data_direction dir;
207 +};
208 +static void ithc_dma_sgtable_free(struct sg_table *sgt) {
209 + struct scatterlist *sg;
210 + int i;
211 + for_each_sgtable_sg(sgt, sg, i) {
212 + struct page *p = sg_page(sg);
213 + if (p) __free_page(p);
214 + }
215 + sg_free_table(sgt);
216 +}
217 +static void ithc_dma_data_devres_release(struct device *dev, void *res) {
218 + struct ithc_sg_table *sgt = res;
219 + if (sgt->addr) vunmap(sgt->addr);
220 + dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0);
221 + ithc_dma_sgtable_free(&sgt->sgt);
222 +}
223 +
224 +static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b) {
225 + // We don't use dma_alloc_coherent for data buffers, because they don't have to be contiguous (we can use one PRD per page) or coherent (they are unidirectional).
226 + // Instead we use an sg_table of individually allocated pages (5.13 has dma_alloc_noncontiguous for this, but we'd like to support 5.10 for now).
227 + struct page *pages[16];
228 + if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages)) return -EINVAL;
229 + b->active_idx = -1;
230 + struct ithc_sg_table *sgt = devres_alloc(ithc_dma_data_devres_release, sizeof *sgt, GFP_KERNEL);
231 + if (!sgt) return -ENOMEM;
232 + sgt->dir = prds->dir;
233 + if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) {
234 + struct scatterlist *sg;
235 + int i;
236 + bool ok = true;
237 + for_each_sgtable_sg(&sgt->sgt, sg, i) {
238 + struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); // don't need __GFP_DMA for PCI DMA
239 + if (!p) { ok = false; break; }
240 + sg_set_page(sg, p, PAGE_SIZE, 0);
241 + }
242 + if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) {
243 + devres_add(&ithc->pci->dev, sgt);
244 + b->sgt = &sgt->sgt;
245 + b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL);
246 + if (!b->addr) return -ENOMEM;
247 + return 0;
248 + }
249 + ithc_dma_sgtable_free(&sgt->sgt);
250 + }
251 + devres_free(sgt);
252 + return -ENOMEM;
253 +}
254 +
255 +static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
256 + struct ithc_phys_region_desc *prd = prds->addr;
257 + prd += idx * prds->num_pages;
258 + if (b->active_idx >= 0) { pci_err(ithc->pci, "buffer already active\n"); return -EINVAL; }
259 + b->active_idx = idx;
260 + if (prds->dir == DMA_TO_DEVICE) {
261 + if (b->data_size > PAGE_SIZE) return -EINVAL;
262 + prd->addr = sg_dma_address(b->sgt->sgl) >> 10;
263 + prd->size = b->data_size | PRD_FLAG_END;
264 + flush_kernel_vmap_range(b->addr, b->data_size);
265 + } else if (prds->dir == DMA_FROM_DEVICE) {
266 + struct scatterlist *sg;
267 + int i;
268 + for_each_sgtable_dma_sg(b->sgt, sg, i) {
269 + prd->addr = sg_dma_address(sg) >> 10;
270 + prd->size = sg_dma_len(sg);
271 + prd++;
272 + }
273 + prd[-1].size |= PRD_FLAG_END;
274 + }
275 + dma_wmb(); // for the prds
276 + dma_sync_sgtable_for_device(&ithc->pci->dev, b->sgt, prds->dir);
277 + return 0;
278 +}
279 +
280 +static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
281 + struct ithc_phys_region_desc *prd = prds->addr;
282 + prd += idx * prds->num_pages;
283 + if (b->active_idx != idx) { pci_err(ithc->pci, "wrong buffer index\n"); return -EINVAL; }
284 + b->active_idx = -1;
285 + if (prds->dir == DMA_FROM_DEVICE) {
286 + dma_rmb(); // for the prds
287 + b->data_size = 0;
288 + struct scatterlist *sg;
289 + int i;
290 + for_each_sgtable_dma_sg(b->sgt, sg, i) {
291 + unsigned size = prd->size;
292 + b->data_size += size & PRD_SIZE_MASK;
293 + if (size & PRD_FLAG_END) break;
294 + if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) { pci_err(ithc->pci, "truncated prd\n"); break; }
295 + prd++;
296 + }
297 + invalidate_kernel_vmap_range(b->addr, b->data_size);
298 + }
299 + dma_sync_sgtable_for_cpu(&ithc->pci->dev, b->sgt, prds->dir);
300 + return 0;
301 +}
302 +
303 +int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname) {
304 + struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
305 + mutex_init(&rx->mutex);
306 + u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes);
307 + unsigned num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
308 + pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n", NUM_RX_BUF, buf_size, num_pages);
309 + CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE);
310 + for (unsigned i = 0; i < NUM_RX_BUF; i++)
311 + CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]);
312 + writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2);
313 + lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr);
314 + writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs);
315 + writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds);
316 + u8 head = readb(&ithc->regs->dma_rx[channel].head);
317 + if (head) { pci_err(ithc->pci, "head is nonzero (%u)\n", head); return -EIO; }
318 + for (unsigned i = 0; i < NUM_RX_BUF; i++)
319 + CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i);
320 + writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail);
321 + return 0;
322 +}
323 +void ithc_dma_rx_enable(struct ithc *ithc, u8 channel) {
324 + bitsb_set(&ithc->regs->dma_rx[channel].control, DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA);
325 + CHECK(waitl, ithc, &ithc->regs->dma_rx[1].status, DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);
326 +}
327 +
328 +int ithc_dma_tx_init(struct ithc *ithc) {
329 + struct ithc_dma_tx *tx = &ithc->dma_tx;
330 + mutex_init(&tx->mutex);
331 + tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes);
332 + unsigned num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
333 + pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n", tx->max_size, num_pages);
334 + CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE);
335 + CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf);
336 + lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr);
337 + writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds);
338 + CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
339 + return 0;
340 +}
341 +
342 +static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data, u8 channel, u8 buf) {
343 + if (buf >= NUM_RX_BUF) {
344 + pci_err(ithc->pci, "invalid dma ringbuffer index\n");
345 + return -EINVAL;
346 + }
347 + ithc_set_active(ithc);
348 + u32 len = data->data_size;
349 + struct ithc_dma_rx_header *hdr = data->addr;
350 + u8 *hiddata = (void *)(hdr + 1);
351 + if (len >= sizeof *hdr && hdr->code == DMA_RX_CODE_RESET) {
352 + CHECK(ithc_reset, ithc);
353 + } else if (len < sizeof *hdr || len != sizeof *hdr + hdr->data_size) {
354 + if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
355 + // When the CPU enters a low power state during DMA, we can get truncated messages.
356 + // Typically this will be a single touch HID report that is only 1 byte, or a multitouch report that is 257 bytes.
357 + // See also ithc_set_active().
358 + } else {
359 + pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n", channel, buf, len, hdr->code, hdr->data_size);
360 + print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
361 + }
362 + } else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) {
363 + CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8);
364 + WRITE_ONCE(ithc->hid_parse_done, true);
365 + wake_up(&ithc->wait_hid_parse);
366 + } else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
367 + CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1);
368 + } else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) {
369 + bool done = false;
370 + mutex_lock(&ithc->hid_get_feature_mutex);
371 + if (ithc->hid_get_feature_buf) {
372 + if (hdr->data_size < ithc->hid_get_feature_size) ithc->hid_get_feature_size = hdr->data_size;
373 + memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size);
374 + ithc->hid_get_feature_buf = NULL;
375 + done = true;
376 + }
377 + mutex_unlock(&ithc->hid_get_feature_mutex);
378 + if (done) wake_up(&ithc->wait_hid_get_feature);
379 + else CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT, hiddata, hdr->data_size, 1);
380 + } else {
381 + pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n", channel, buf, len, hdr->code);
382 + print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
383 + }
384 + return 0;
385 +}
386 +
387 +static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) {
388 + struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
389 + unsigned n = rx->num_received;
390 + u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head);
391 + while (1) {
392 + u8 tail = n % NUM_RX_BUF;
393 + u8 tail_wrap = tail | ((n / NUM_RX_BUF) & 1 ? 0 : DMA_RX_WRAP_FLAG);
394 + writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail);
395 + // ringbuffer is full if tail_wrap == head_wrap
396 + // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG
397 + if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG)) return 0;
398 +
399 + // take the buffer that the device just filled
400 + struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF];
401 + CHECK_RET(ithc_dma_data_buffer_get, ithc, &rx->prds, b, tail);
402 + rx->num_received = ++n;
403 +
404 + // process data
405 + CHECK(ithc_dma_rx_process_buf, ithc, b, channel, tail);
406 +
407 + // give the buffer back to the device
408 + CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail);
409 + }
410 +}
411 +int ithc_dma_rx(struct ithc *ithc, u8 channel) {
412 + struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
413 + mutex_lock(&rx->mutex);
414 + int ret = ithc_dma_rx_unlocked(ithc, channel);
415 + mutex_unlock(&rx->mutex);
416 + return ret;
417 +}
418 +
419 +static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
420 + pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize);
421 + struct ithc_dma_tx_header *hdr;
422 + u8 padding = datasize & 3 ? 4 - (datasize & 3) : 0;
423 + unsigned fullsize = sizeof *hdr + datasize + padding;
424 + if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE) return -EINVAL;
425 + CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
426 +
427 + ithc->dma_tx.buf.data_size = fullsize;
428 + hdr = ithc->dma_tx.buf.addr;
429 + hdr->code = cmdcode;
430 + hdr->data_size = datasize;
431 + u8 *dest = (void *)(hdr + 1);
432 + memcpy(dest, data, datasize);
433 + dest += datasize;
434 + for (u8 p = 0; p < padding; p++) *dest++ = 0;
435 + CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
436 +
437 + bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND);
438 + CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
439 + writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status);
440 + return 0;
441 +}
442 +int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
443 + mutex_lock(&ithc->dma_tx.mutex);
444 + int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data);
445 + mutex_unlock(&ithc->dma_tx.mutex);
446 + return ret;
447 +}
448 +
449 diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h
450 new file mode 100644
451 index 000000000000..d9f2c19a13f3
452 --- /dev/null
453 +++ b/drivers/hid/ithc/ithc-dma.h
454 @@ -0,0 +1,67 @@
455 +#define PRD_SIZE_MASK 0xffffff
456 +#define PRD_FLAG_END 0x1000000
457 +#define PRD_FLAG_SUCCESS 0x2000000
458 +#define PRD_FLAG_ERROR 0x4000000
459 +
460 +struct ithc_phys_region_desc {
461 + u64 addr; // physical addr/1024
462 + u32 size; // num bytes, PRD_FLAG_END marks last prd for data split over multiple prds
463 + u32 unused;
464 +};
465 +
466 +#define DMA_RX_CODE_INPUT_REPORT 3
467 +#define DMA_RX_CODE_FEATURE_REPORT 4
468 +#define DMA_RX_CODE_REPORT_DESCRIPTOR 5
469 +#define DMA_RX_CODE_RESET 7
470 +
471 +struct ithc_dma_rx_header {
472 + u32 code;
473 + u32 data_size;
474 + u32 _unknown[14];
475 +};
476 +
477 +#define DMA_TX_CODE_SET_FEATURE 3
478 +#define DMA_TX_CODE_GET_FEATURE 4
479 +#define DMA_TX_CODE_OUTPUT_REPORT 5
480 +#define DMA_TX_CODE_GET_REPORT_DESCRIPTOR 7
481 +
482 +struct ithc_dma_tx_header {
483 + u32 code;
484 + u32 data_size;
485 +};
486 +
487 +struct ithc_dma_prd_buffer {
488 + void *addr;
489 + dma_addr_t dma_addr;
490 + u32 size;
491 + u32 num_pages; // per data buffer
492 + enum dma_data_direction dir;
493 +};
494 +
495 +struct ithc_dma_data_buffer {
496 + void *addr;
497 + struct sg_table *sgt;
498 + int active_idx;
499 + u32 data_size;
500 +};
501 +
502 +struct ithc_dma_tx {
503 + struct mutex mutex;
504 + u32 max_size;
505 + struct ithc_dma_prd_buffer prds;
506 + struct ithc_dma_data_buffer buf;
507 +};
508 +
509 +struct ithc_dma_rx {
510 + struct mutex mutex;
511 + u32 num_received;
512 + struct ithc_dma_prd_buffer prds;
513 + struct ithc_dma_data_buffer bufs[NUM_RX_BUF];
514 +};
515 +
516 +int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname);
517 +void ithc_dma_rx_enable(struct ithc *ithc, u8 channel);
518 +int ithc_dma_tx_init(struct ithc *ithc);
519 +int ithc_dma_rx(struct ithc *ithc, u8 channel);
520 +int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *cmddata);
521 +
522 diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c
523 new file mode 100644
524 index 000000000000..09512b9cb4d3
525 --- /dev/null
526 +++ b/drivers/hid/ithc/ithc-main.c
527 @@ -0,0 +1,534 @@
528 +#include "ithc.h"
529 +
530 +MODULE_DESCRIPTION("Intel Touch Host Controller driver");
531 +MODULE_LICENSE("Dual BSD/GPL");
532 +
533 +// Lakefield
534 +#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0
535 +#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1
536 +// Tiger Lake
537 +#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0
538 +#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1
539 +#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 0x43d0
540 +#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1
541 +// Alder Lake
542 +#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8
543 +#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9
544 +#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0
545 +#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1
546 +#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0
547 +#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1
548 +// Raptor Lake
549 +#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58
550 +#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59
551 +// Meteor Lake
552 +#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48
553 +#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 0x7e4a
554 +
555 +static const struct pci_device_id ithc_pci_tbl[] = {
556 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) },
557 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) },
558 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1) },
559 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2) },
560 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1) },
561 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2) },
562 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1) },
563 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2) },
564 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1) },
565 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2) },
566 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1) },
567 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2) },
568 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1) },
569 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) },
570 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) },
571 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) },
572 + {}
573 +};
574 +MODULE_DEVICE_TABLE(pci, ithc_pci_tbl);
575 +
576 +// Module parameters
577 +
578 +static bool ithc_use_polling = false;
579 +module_param_named(poll, ithc_use_polling, bool, 0);
580 +MODULE_PARM_DESC(poll, "Use polling instead of interrupts");
581 +
582 +static bool ithc_use_rx0 = false;
583 +module_param_named(rx0, ithc_use_rx0, bool, 0);
584 +MODULE_PARM_DESC(rx0, "Use DMA RX channel 0");
585 +
586 +static bool ithc_use_rx1 = true;
587 +module_param_named(rx1, ithc_use_rx1, bool, 0);
588 +MODULE_PARM_DESC(rx1, "Use DMA RX channel 1");
589 +
590 +static bool ithc_log_regs_enabled = false;
591 +module_param_named(logregs, ithc_log_regs_enabled, bool, 0);
592 +MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)");
593 +
594 +// Sysfs attributes
595 +
596 +static bool ithc_is_config_valid(struct ithc *ithc) {
597 + return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC;
598 +}
599 +
600 +static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) {
601 + struct ithc *ithc = dev_get_drvdata(dev);
602 + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
603 + return sprintf(buf, "0x%04x", ithc->config.vendor_id);
604 +}
605 +static DEVICE_ATTR_RO(vendor);
606 +static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf) {
607 + struct ithc *ithc = dev_get_drvdata(dev);
608 + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
609 + return sprintf(buf, "0x%04x", ithc->config.product_id);
610 +}
611 +static DEVICE_ATTR_RO(product);
612 +static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) {
613 + struct ithc *ithc = dev_get_drvdata(dev);
614 + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
615 + return sprintf(buf, "%u", ithc->config.revision);
616 +}
617 +static DEVICE_ATTR_RO(revision);
618 +static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) {
619 + struct ithc *ithc = dev_get_drvdata(dev);
620 + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
621 + u32 v = ithc->config.fw_version;
622 + return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff);
623 +}
624 +static DEVICE_ATTR_RO(fw_version);
625 +
626 +static const struct attribute_group *ithc_attribute_groups[] = {
627 + &(const struct attribute_group){
628 + .name = DEVNAME,
629 + .attrs = (struct attribute *[]){
630 + &dev_attr_vendor.attr,
631 + &dev_attr_product.attr,
632 + &dev_attr_revision.attr,
633 + &dev_attr_fw_version.attr,
634 + NULL
635 + },
636 + },
637 + NULL
638 +};
639 +
640 +// HID setup
641 +
642 +static int ithc_hid_start(struct hid_device *hdev) { return 0; }
643 +static void ithc_hid_stop(struct hid_device *hdev) { }
644 +static int ithc_hid_open(struct hid_device *hdev) { return 0; }
645 +static void ithc_hid_close(struct hid_device *hdev) { }
646 +
647 +static int ithc_hid_parse(struct hid_device *hdev) {
648 + struct ithc *ithc = hdev->driver_data;
649 + u64 val = 0;
650 + WRITE_ONCE(ithc->hid_parse_done, false);
651 + CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof val, &val);
652 + if (!wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done), msecs_to_jiffies(1000))) return -ETIMEDOUT;
653 + return 0;
654 +}
655 +
656 +static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) {
657 + struct ithc *ithc = hdev->driver_data;
658 + if (!buf || !len) return -EINVAL;
659 + u32 code;
660 + if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_OUTPUT_REPORT;
661 + else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_SET_FEATURE;
662 + else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) code = DMA_TX_CODE_GET_FEATURE;
663 + else {
664 + pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n", rtype, reqtype, reportnum);
665 + return -EINVAL;
666 + }
667 + buf[0] = reportnum;
668 + if (reqtype == HID_REQ_GET_REPORT) {
669 + mutex_lock(&ithc->hid_get_feature_mutex);
670 + ithc->hid_get_feature_buf = buf;
671 + ithc->hid_get_feature_size = len;
672 + mutex_unlock(&ithc->hid_get_feature_mutex);
673 + int r = CHECK(ithc_dma_tx, ithc, code, 1, buf);
674 + if (!r) {
675 + r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature, !ithc->hid_get_feature_buf, msecs_to_jiffies(1000));
676 + if (!r) r = -ETIMEDOUT;
677 + else if (r < 0) r = -EINTR;
678 + else r = 0;
679 + }
680 + mutex_lock(&ithc->hid_get_feature_mutex);
681 + ithc->hid_get_feature_buf = NULL;
682 + if (!r) r = ithc->hid_get_feature_size;
683 + mutex_unlock(&ithc->hid_get_feature_mutex);
684 + return r;
685 + }
686 + CHECK_RET(ithc_dma_tx, ithc, code, len, buf);
687 + return 0;
688 +}
689 +
690 +static struct hid_ll_driver ithc_ll_driver = {
691 + .start = ithc_hid_start,
692 + .stop = ithc_hid_stop,
693 + .open = ithc_hid_open,
694 + .close = ithc_hid_close,
695 + .parse = ithc_hid_parse,
696 + .raw_request = ithc_hid_raw_request,
697 +};
698 +
699 +static void ithc_hid_devres_release(struct device *dev, void *res) {
700 + struct hid_device **hidm = res;
701 + if (*hidm) hid_destroy_device(*hidm);
702 +}
703 +
704 +static int ithc_hid_init(struct ithc *ithc) {
705 + struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof *hidm, GFP_KERNEL);
706 + if (!hidm) return -ENOMEM;
707 + devres_add(&ithc->pci->dev, hidm);
708 + struct hid_device *hid = hid_allocate_device();
709 + if (IS_ERR(hid)) return PTR_ERR(hid);
710 + *hidm = hid;
711 +
712 + strscpy(hid->name, DEVFULLNAME, sizeof(hid->name));
713 + strscpy(hid->phys, ithc->phys, sizeof(hid->phys));
714 + hid->ll_driver = &ithc_ll_driver;
715 + hid->bus = BUS_PCI;
716 + hid->vendor = ithc->config.vendor_id;
717 + hid->product = ithc->config.product_id;
718 + hid->version = 0x100;
719 + hid->dev.parent = &ithc->pci->dev;
720 + hid->driver_data = ithc;
721 +
722 + ithc->hid = hid;
723 + return 0;
724 +}
725 +
726 +// Interrupts/polling
727 +
728 +static void ithc_activity_timer_callback(struct timer_list *t) {
729 + struct ithc *ithc = container_of(t, struct ithc, activity_timer);
730 + cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
731 +}
732 +
733 +void ithc_set_active(struct ithc *ithc) {
734 + // When CPU usage is very low, the CPU can enter various low power states (C2-C10).
735 + // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_UNKNOWN_12 will be set when this happens.
736 + // The amount of truncated messages can become very high, resulting in user-visible effects (laggy/stuttering cursor).
737 + // To avoid this, we use a CPU latency QoS request to prevent the CPU from entering low power states during touch interactions.
738 + cpu_latency_qos_update_request(&ithc->activity_qos, 0);
739 + mod_timer(&ithc->activity_timer, jiffies + msecs_to_jiffies(1000));
740 +}
741 +
742 +static int ithc_set_device_enabled(struct ithc *ithc, bool enable) {
743 + u32 x = ithc->config.touch_cfg = (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2
744 + | (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0);
745 + return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE, offsetof(struct ithc_device_config, touch_cfg), sizeof x, &x);
746 +}
747 +
748 +static void ithc_disable_interrupts(struct ithc *ithc) {
749 + writel(0, &ithc->regs->error_control);
750 + bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0);
751 + bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
752 + bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
753 + bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0);
754 +}
755 +
756 +static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned channel) {
757 + writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA, &ithc->regs->dma_rx[channel].status);
758 +}
759 +
760 +static void ithc_clear_interrupts(struct ithc *ithc) {
761 + writel(0xffffffff, &ithc->regs->error_flags);
762 + writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status);
763 + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
764 + ithc_clear_dma_rx_interrupts(ithc, 0);
765 + ithc_clear_dma_rx_interrupts(ithc, 1);
766 + writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2, &ithc->regs->dma_tx.status);
767 +}
768 +
769 +static void ithc_process(struct ithc *ithc) {
770 + ithc_log_regs(ithc);
771 +
772 + // read and clear error bits
773 + u32 err = readl(&ithc->regs->error_flags);
774 + if (err) {
775 + if (err & ~ERROR_FLAG_DMA_UNKNOWN_12) pci_err(ithc->pci, "error flags: 0x%08x\n", err);
776 + writel(err, &ithc->regs->error_flags);
777 + }
778 +
779 + // process DMA rx
780 + if (ithc_use_rx0) {
781 + ithc_clear_dma_rx_interrupts(ithc, 0);
782 + ithc_dma_rx(ithc, 0);
783 + }
784 + if (ithc_use_rx1) {
785 + ithc_clear_dma_rx_interrupts(ithc, 1);
786 + ithc_dma_rx(ithc, 1);
787 + }
788 +
789 + ithc_log_regs(ithc);
790 +}
791 +
792 +static irqreturn_t ithc_interrupt_thread(int irq, void *arg) {
793 + struct ithc *ithc = arg;
794 + pci_dbg(ithc->pci, "IRQ! err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n",
795 + readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags),
796 + readb(&ithc->regs->spi_cmd.control), readl(&ithc->regs->spi_cmd.status),
797 + readb(&ithc->regs->dma_rx[0].control), readl(&ithc->regs->dma_rx[0].status),
798 + readb(&ithc->regs->dma_rx[1].control), readl(&ithc->regs->dma_rx[1].status),
799 + readb(&ithc->regs->dma_tx.control), readl(&ithc->regs->dma_tx.status));
800 + ithc_process(ithc);
801 + return IRQ_HANDLED;
802 +}
803 +
804 +static int ithc_poll_thread(void *arg) {
805 + struct ithc *ithc = arg;
806 + unsigned sleep = 100;
807 + while (!kthread_should_stop()) {
808 + u32 n = ithc->dma_rx[1].num_received;
809 + ithc_process(ithc);
810 + if (n != ithc->dma_rx[1].num_received) sleep = 20;
811 + else sleep = min(200u, sleep + (sleep >> 4) + 1);
812 + msleep_interruptible(sleep);
813 + }
814 + return 0;
815 +}
816 +
817 +// Device initialization and shutdown
818 +
819 +static void ithc_disable(struct ithc *ithc) {
820 + bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE);
821 + CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED);
822 + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
823 + bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND, 0);
824 + bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
825 + bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_ENABLE, 0);
826 + bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_ENABLE, 0);
827 + ithc_disable_interrupts(ithc);
828 + ithc_clear_interrupts(ithc);
829 +}
830 +
831 +static int ithc_init_device(struct ithc *ithc) {
832 + ithc_log_regs(ithc);
833 + bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0;
834 + ithc_disable(ithc);
835 + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY);
836 + ithc_set_spi_config(ithc, 10, 0);
837 + bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000); // seems to help with reading config
838 +
839 + if (was_enabled) if (msleep_interruptible(100)) return -EINTR;
840 + bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
841 + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0);
842 + for (int retries = 0; ; retries++) {
843 + ithc_log_regs(ithc);
844 + bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET);
845 + if (!waitl(ithc, &ithc->regs->state, 0xf, 2)) break;
846 + if (retries > 5) {
847 + pci_err(ithc->pci, "too many retries, failed to reset device\n");
848 + return -ETIMEDOUT;
849 + }
850 + pci_err(ithc->pci, "invalid state, retrying reset\n");
851 + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
852 + if (msleep_interruptible(1000)) return -EINTR;
853 + }
854 + ithc_log_regs(ithc);
855 +
856 + CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4);
857 +
858 + // read config
859 + for (int retries = 0; ; retries++) {
860 + ithc_log_regs(ithc);
861 + memset(&ithc->config, 0, sizeof ithc->config);
862 + CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof ithc->config, &ithc->config);
863 + u32 *p = (void *)&ithc->config;
864 + pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
865 + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
866 + if (ithc_is_config_valid(ithc)) break;
867 + if (retries > 10) {
868 + pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n", ithc->config.device_id);
869 + return -EIO;
870 + }
871 + pci_err(ithc->pci, "failed to read config, retrying\n");
872 + if (msleep_interruptible(100)) return -EINTR;
873 + }
874 + ithc_log_regs(ithc);
875 +
876 + CHECK_RET(ithc_set_spi_config, ithc, DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config), DEVCFG_SPI_MODE(ithc->config.spi_config));
877 + CHECK_RET(ithc_set_device_enabled, ithc, true);
878 + ithc_log_regs(ithc);
879 + return 0;
880 +}
881 +
882 +int ithc_reset(struct ithc *ithc) {
883 + // FIXME This should probably do devres_release_group()+ithc_start(). But because this is called during DMA
884 + // processing, that would have to be done asynchronously (schedule_work()?). And with extra locking?
885 + pci_err(ithc->pci, "reset\n");
886 + CHECK(ithc_init_device, ithc);
887 + if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0);
888 + if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1);
889 + ithc_log_regs(ithc);
890 + pci_dbg(ithc->pci, "reset completed\n");
891 + return 0;
892 +}
893 +
894 +static void ithc_stop(void *res) {
895 + struct ithc *ithc = res;
896 + pci_dbg(ithc->pci, "stopping\n");
897 + ithc_log_regs(ithc);
898 + if (ithc->poll_thread) CHECK(kthread_stop, ithc->poll_thread);
899 + if (ithc->irq >= 0) disable_irq(ithc->irq);
900 + CHECK(ithc_set_device_enabled, ithc, false);
901 + ithc_disable(ithc);
902 + del_timer_sync(&ithc->activity_timer);
903 + cpu_latency_qos_remove_request(&ithc->activity_qos);
904 + // clear dma config
905 + for(unsigned i = 0; i < 2; i++) {
906 + CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0);
907 + lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr);
908 + writeb(0, &ithc->regs->dma_rx[i].num_bufs);
909 + writeb(0, &ithc->regs->dma_rx[i].num_prds);
910 + }
911 + lo_hi_writeq(0, &ithc->regs->dma_tx.addr);
912 + writeb(0, &ithc->regs->dma_tx.num_prds);
913 + ithc_log_regs(ithc);
914 + pci_dbg(ithc->pci, "stopped\n");
915 +}
916 +
917 +static void ithc_clear_drvdata(void *res) {
918 + struct pci_dev *pci = res;
919 + pci_set_drvdata(pci, NULL);
920 +}
921 +
922 +static int ithc_start(struct pci_dev *pci) {
923 + pci_dbg(pci, "starting\n");
924 + if (pci_get_drvdata(pci)) {
925 + pci_err(pci, "device already initialized\n");
926 + return -EINVAL;
927 + }
928 + if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL)) return -ENOMEM;
929 +
930 + struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof *ithc, GFP_KERNEL);
931 + if (!ithc) return -ENOMEM;
932 + ithc->irq = -1;
933 + ithc->pci = pci;
934 + snprintf(ithc->phys, sizeof ithc->phys, "pci-%s/" DEVNAME, pci_name(pci));
935 + init_waitqueue_head(&ithc->wait_hid_parse);
936 + init_waitqueue_head(&ithc->wait_hid_get_feature);
937 + mutex_init(&ithc->hid_get_feature_mutex);
938 + pci_set_drvdata(pci, ithc);
939 + CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci);
940 + if (ithc_log_regs_enabled) ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof *ithc->prev_regs, GFP_KERNEL);
941 +
942 + CHECK_RET(pcim_enable_device, pci);
943 + pci_set_master(pci);
944 + CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs");
945 + CHECK_RET(dma_set_mask_and_coherent, &pci->dev, DMA_BIT_MASK(64));
946 + CHECK_RET(pci_set_power_state, pci, PCI_D0);
947 + ithc->regs = pcim_iomap_table(pci)[0];
948 +
949 + if (!ithc_use_polling) {
950 + CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
951 + ithc->irq = CHECK(pci_irq_vector, pci, 0);
952 + if (ithc->irq < 0) return ithc->irq;
953 + }
954 +
955 + CHECK_RET(ithc_init_device, ithc);
956 + CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups);
957 + if (ithc_use_rx0) CHECK_RET(ithc_dma_rx_init, ithc, 0, ithc_use_rx1 ? DEVNAME "0" : DEVNAME);
958 + if (ithc_use_rx1) CHECK_RET(ithc_dma_rx_init, ithc, 1, ithc_use_rx0 ? DEVNAME "1" : DEVNAME);
959 + CHECK_RET(ithc_dma_tx_init, ithc);
960 +
961 + CHECK_RET(ithc_hid_init, ithc);
962 +
963 + cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
964 + timer_setup(&ithc->activity_timer, ithc_activity_timer_callback, 0);
965 +
966 + // add ithc_stop callback AFTER setting up DMA buffers, so that polling/irqs/DMA are disabled BEFORE the buffers are freed
967 + CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc);
968 +
969 + if (ithc_use_polling) {
970 + pci_info(pci, "using polling instead of irq\n");
971 + // use a thread instead of simple timer because we want to be able to sleep
972 + ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll");
973 + if (IS_ERR(ithc->poll_thread)) {
974 + int err = PTR_ERR(ithc->poll_thread);
975 + ithc->poll_thread = NULL;
976 + return err;
977 + }
978 + } else {
979 + CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL, ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc);
980 + }
981 +
982 + if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0);
983 + if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1);
984 +
985 + // hid_add_device can only be called after irq/polling is started and DMA is enabled, because it calls ithc_hid_parse which reads the report descriptor via DMA
986 + CHECK_RET(hid_add_device, ithc->hid);
987 +
988 + CHECK(ithc_debug_init, ithc);
989 +
990 + pci_dbg(pci, "started\n");
991 + return 0;
992 +}
993 +
994 +static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id) {
995 + pci_dbg(pci, "device probe\n");
996 + return ithc_start(pci);
997 +}
998 +
999 +static void ithc_remove(struct pci_dev *pci) {
1000 + pci_dbg(pci, "device remove\n");
1001 + // all cleanup is handled by devres
1002 +}
1003 +
1004 +static int ithc_suspend(struct device *dev) {
1005 + struct pci_dev *pci = to_pci_dev(dev);
1006 + pci_dbg(pci, "pm suspend\n");
1007 + devres_release_group(dev, ithc_start);
1008 + return 0;
1009 +}
1010 +
1011 +static int ithc_resume(struct device *dev) {
1012 + struct pci_dev *pci = to_pci_dev(dev);
1013 + pci_dbg(pci, "pm resume\n");
1014 + return ithc_start(pci);
1015 +}
1016 +
1017 +static int ithc_freeze(struct device *dev) {
1018 + struct pci_dev *pci = to_pci_dev(dev);
1019 + pci_dbg(pci, "pm freeze\n");
1020 + devres_release_group(dev, ithc_start);
1021 + return 0;
1022 +}
1023 +
1024 +static int ithc_thaw(struct device *dev) {
1025 + struct pci_dev *pci = to_pci_dev(dev);
1026 + pci_dbg(pci, "pm thaw\n");
1027 + return ithc_start(pci);
1028 +}
1029 +
1030 +static int ithc_restore(struct device *dev) {
1031 + struct pci_dev *pci = to_pci_dev(dev);
1032 + pci_dbg(pci, "pm restore\n");
1033 + return ithc_start(pci);
1034 +}
1035 +
1036 +static struct pci_driver ithc_driver = {
1037 + .name = DEVNAME,
1038 + .id_table = ithc_pci_tbl,
1039 + .probe = ithc_probe,
1040 + .remove = ithc_remove,
1041 + .driver.pm = &(const struct dev_pm_ops) {
1042 + .suspend = ithc_suspend,
1043 + .resume = ithc_resume,
1044 + .freeze = ithc_freeze,
1045 + .thaw = ithc_thaw,
1046 + .restore = ithc_restore,
1047 + },
1048 + //.dev_groups = ithc_attribute_groups, // could use this (since 5.14), however the attributes won't have valid values until config has been read anyway
1049 +};
1050 +
1051 +static int __init ithc_init(void) {
1052 + return pci_register_driver(&ithc_driver);
1053 +}
1054 +
1055 +static void __exit ithc_exit(void) {
1056 + pci_unregister_driver(&ithc_driver);
1057 +}
1058 +
1059 +module_init(ithc_init);
1060 +module_exit(ithc_exit);
1061 +
1062 diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c
1063 new file mode 100644
1064 index 000000000000..85d567b05761
1065 --- /dev/null
1066 +++ b/drivers/hid/ithc/ithc-regs.c
1067 @@ -0,0 +1,64 @@
1068 +#include "ithc.h"
1069 +
1070 +#define reg_num(r) (0x1fff & (u16)(__force u64)(r))
1071 +
1072 +void bitsl(__iomem u32 *reg, u32 mask, u32 val) {
1073 + if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask);
1074 + writel((readl(reg) & ~mask) | (val & mask), reg);
1075 +}
1076 +
1077 +void bitsb(__iomem u8 *reg, u8 mask, u8 val) {
1078 + if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask);
1079 + writeb((readb(reg) & ~mask) | (val & mask), reg);
1080 +}
1081 +
1082 +int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val) {
1083 + pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val);
1084 + u32 x;
1085 + if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
1086 + pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val);
1087 + return -ETIMEDOUT;
1088 + }
1089 + pci_dbg(ithc->pci, "done waiting\n");
1090 + return 0;
1091 +}
1092 +
1093 +int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val) {
1094 + pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val);
1095 + u8 x;
1096 + if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
1097 + pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val);
1098 + return -ETIMEDOUT;
1099 + }
1100 + pci_dbg(ithc->pci, "done waiting\n");
1101 + return 0;
1102 +}
1103 +
1104 +int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode) {
1105 + pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode);
1106 + if (mode == 3) mode = 2;
1107 + bitsl(&ithc->regs->spi_config,
1108 + SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff),
1109 + SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed));
1110 + return 0;
1111 +}
1112 +
1113 +int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data) {
1114 + pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset);
1115 + if (size > sizeof ithc->regs->spi_cmd.data) return -EINVAL;
1116 + CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
1117 + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
1118 + writeb(command, &ithc->regs->spi_cmd.code);
1119 + writew(size, &ithc->regs->spi_cmd.size);
1120 + writel(offset, &ithc->regs->spi_cmd.offset);
1121 + u32 *p = data, n = (size + 3) / 4;
1122 + for (u32 i = 0; i < n; i++) writel(p[i], &ithc->regs->spi_cmd.data[i]);
1123 + bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND);
1124 + CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
1125 + if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE) return -EIO;
1126 + if (readw(&ithc->regs->spi_cmd.size) != size) return -EMSGSIZE;
1127 + for (u32 i = 0; i < n; i++) p[i] = readl(&ithc->regs->spi_cmd.data[i]);
1128 + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
1129 + return 0;
1130 +}
1131 +
1132 diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h
1133 new file mode 100644
1134 index 000000000000..1a96092ed7ee
1135 --- /dev/null
1136 +++ b/drivers/hid/ithc/ithc-regs.h
1137 @@ -0,0 +1,186 @@
1138 +#define CONTROL_QUIESCE BIT(1)
1139 +#define CONTROL_IS_QUIESCED BIT(2)
1140 +#define CONTROL_NRESET BIT(3)
1141 +#define CONTROL_READY BIT(29)
1142 +
1143 +#define SPI_CONFIG_MODE(x) (((x) & 3) << 2)
1144 +#define SPI_CONFIG_SPEED(x) (((x) & 7) << 4)
1145 +#define SPI_CONFIG_UNKNOWN_18(x) (((x) & 3) << 18)
1146 +#define SPI_CONFIG_SPEED2(x) (((x) & 0xf) << 20) // high bit = high speed mode?
1147 +
1148 +#define ERROR_CONTROL_UNKNOWN_0 BIT(0)
1149 +#define ERROR_CONTROL_DISABLE_DMA BIT(1) // clears DMA_RX_CONTROL_ENABLE when a DMA error occurs
1150 +#define ERROR_CONTROL_UNKNOWN_2 BIT(2)
1151 +#define ERROR_CONTROL_UNKNOWN_3 BIT(3)
1152 +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_9 BIT(9)
1153 +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_10 BIT(10)
1154 +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_12 BIT(12)
1155 +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_13 BIT(13)
1156 +#define ERROR_CONTROL_UNKNOWN_16(x) (((x) & 0xff) << 16) // spi error code irq?
1157 +#define ERROR_CONTROL_SET_DMA_STATUS BIT(29) // sets DMA_RX_STATUS_ERROR when a DMA error occurs
1158 +
1159 +#define ERROR_STATUS_DMA BIT(28)
1160 +#define ERROR_STATUS_SPI BIT(30)
1161 +
1162 +#define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9)
1163 +#define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10)
1164 +#define ERROR_FLAG_DMA_UNKNOWN_12 BIT(12) // set when we receive a truncated DMA message
1165 +#define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13)
1166 +#define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16)
1167 +#define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17)
1168 +#define ERROR_FLAG_SPI_INTRA_PACKET_TIMEOUT BIT(18)
1169 +#define ERROR_FLAG_SPI_INVALID_RESPONSE BIT(19)
1170 +#define ERROR_FLAG_SPI_HS_RX_TIMEOUT BIT(20)
1171 +#define ERROR_FLAG_SPI_TOUCH_IC_INIT BIT(21)
1172 +
1173 +#define SPI_CMD_CONTROL_SEND BIT(0) // cleared by device when sending is complete
1174 +#define SPI_CMD_CONTROL_IRQ BIT(1)
1175 +
1176 +#define SPI_CMD_CODE_READ 4
1177 +#define SPI_CMD_CODE_WRITE 6
1178 +
1179 +#define SPI_CMD_STATUS_DONE BIT(0)
1180 +#define SPI_CMD_STATUS_ERROR BIT(1)
1181 +#define SPI_CMD_STATUS_BUSY BIT(3)
1182 +
1183 +#define DMA_TX_CONTROL_SEND BIT(0) // cleared by device when sending is complete
1184 +#define DMA_TX_CONTROL_IRQ BIT(3)
1185 +
1186 +#define DMA_TX_STATUS_DONE BIT(0)
1187 +#define DMA_TX_STATUS_ERROR BIT(1)
1188 +#define DMA_TX_STATUS_UNKNOWN_2 BIT(2)
1189 +#define DMA_TX_STATUS_UNKNOWN_3 BIT(3) // busy?
1190 +
1191 +#define DMA_RX_CONTROL_ENABLE BIT(0)
1192 +#define DMA_RX_CONTROL_IRQ_UNKNOWN_1 BIT(1) // rx1 only?
1193 +#define DMA_RX_CONTROL_IRQ_ERROR BIT(3) // rx1 only?
1194 +#define DMA_RX_CONTROL_IRQ_UNKNOWN_4 BIT(4) // rx0 only?
1195 +#define DMA_RX_CONTROL_IRQ_DATA BIT(5)
1196 +
1197 +#define DMA_RX_CONTROL2_UNKNOWN_5 BIT(5) // rx0 only?
1198 +#define DMA_RX_CONTROL2_RESET BIT(7) // resets ringbuffer indices
1199 +
1200 +#define DMA_RX_WRAP_FLAG BIT(7)
1201 +
1202 +#define DMA_RX_STATUS_ERROR BIT(3)
1203 +#define DMA_RX_STATUS_UNKNOWN_4 BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms)
1204 +#define DMA_RX_STATUS_HAVE_DATA BIT(5)
1205 +#define DMA_RX_STATUS_ENABLED BIT(8)
1206 +
1207 +#define COUNTER_RESET BIT(31)
1208 +
1209 +struct ithc_registers {
1210 + /* 0000 */ u32 _unknown_0000[1024];
1211 + /* 1000 */ u32 _unknown_1000;
1212 + /* 1004 */ u32 _unknown_1004;
1213 + /* 1008 */ u32 control_bits;
1214 + /* 100c */ u32 _unknown_100c;
1215 + /* 1010 */ u32 spi_config;
1216 + /* 1014 */ u32 _unknown_1014[3];
1217 + /* 1020 */ u32 error_control;
1218 + /* 1024 */ u32 error_status; // write to clear
1219 + /* 1028 */ u32 error_flags; // write to clear
1220 + /* 102c */ u32 _unknown_102c[5];
1221 + struct {
1222 + /* 1040 */ u8 control;
1223 + /* 1041 */ u8 code;
1224 + /* 1042 */ u16 size;
1225 + /* 1044 */ u32 status; // write to clear
1226 + /* 1048 */ u32 offset;
1227 + /* 104c */ u32 data[16];
1228 + /* 108c */ u32 _unknown_108c;
1229 + } spi_cmd;
1230 + struct {
1231 + /* 1090 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
1232 + /* 1098 */ u8 control;
1233 + /* 1099 */ u8 _unknown_1099;
1234 + /* 109a */ u8 _unknown_109a;
1235 + /* 109b */ u8 num_prds;
1236 + /* 109c */ u32 status; // write to clear
1237 + } dma_tx;
1238 + /* 10a0 */ u32 _unknown_10a0[7];
1239 + /* 10bc */ u32 state; // is 0xe0000402 (dev config val 0) after CONTROL_NRESET, 0xe0000461 after first touch, 0xe0000401 after DMA_RX_CODE_RESET
1240 + /* 10c0 */ u32 _unknown_10c0[8];
1241 + /* 10e0 */ u32 _unknown_10e0_counters[3];
1242 + /* 10ec */ u32 _unknown_10ec[5];
1243 + struct {
1244 + /* 1100/1200 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
1245 + /* 1108/1208 */ u8 num_bufs;
1246 + /* 1109/1209 */ u8 num_prds;
1247 + /* 110a/120a */ u16 _unknown_110a;
1248 + /* 110c/120c */ u8 control;
1249 + /* 110d/120d */ u8 head;
1250 + /* 110e/120e */ u8 tail;
1251 + /* 110f/120f */ u8 control2;
1252 + /* 1110/1210 */ u32 status; // write to clear
1253 + /* 1114/1214 */ u32 _unknown_1114;
1254 + /* 1118/1218 */ u64 _unknown_1118_guc_addr;
1255 + /* 1120/1220 */ u32 _unknown_1120_guc;
1256 + /* 1124/1224 */ u32 _unknown_1124_guc;
1257 + /* 1128/1228 */ u32 unknown_init_bits; // bit 2 = guc related, bit 3 = rx1 related, bit 4 = guc related
1258 + /* 112c/122c */ u32 _unknown_112c;
1259 + /* 1130/1230 */ u64 _unknown_1130_guc_addr;
1260 + /* 1138/1238 */ u32 _unknown_1138_guc;
1261 + /* 113c/123c */ u32 _unknown_113c;
1262 + /* 1140/1240 */ u32 _unknown_1140_guc;
1263 + /* 1144/1244 */ u32 _unknown_1144[23];
1264 + /* 11a0/12a0 */ u32 _unknown_11a0_counters[6];
1265 + /* 11b8/12b8 */ u32 _unknown_11b8[18];
1266 + } dma_rx[2];
1267 +};
1268 +static_assert(sizeof(struct ithc_registers) == 0x1300);
1269 +
1270 +#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6)
1271 +#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6)
1272 +
1273 +#define DEVCFG_TOUCH_MASK 0x3f
1274 +#define DEVCFG_TOUCH_ENABLE BIT(0)
1275 +#define DEVCFG_TOUCH_UNKNOWN_1 BIT(1)
1276 +#define DEVCFG_TOUCH_UNKNOWN_2 BIT(2)
1277 +#define DEVCFG_TOUCH_UNKNOWN_3 BIT(3)
1278 +#define DEVCFG_TOUCH_UNKNOWN_4 BIT(4)
1279 +#define DEVCFG_TOUCH_UNKNOWN_5 BIT(5)
1280 +#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6)
1281 +
1282 +#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC"
1283 +
1284 +#define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode?
1285 +#define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3)
1286 +#define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f)
1287 +#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20)
1288 +#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
1289 +#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
1290 +#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
1291 +#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
1292 +#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7)
1293 +#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31)
1294 +
1295 +struct ithc_device_config {
1296 + u32 _unknown_00; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
1297 + u32 _unknown_04; // 04 = 0x00000000
1298 + u32 dma_buf_sizes; // 08 = 0x000a00ff
1299 + u32 touch_cfg; // 0c = 0x0000001c
1300 + u32 _unknown_10; // 10 = 0x0000001c
1301 + u32 device_id; // 14 = 0x43495424 = "$TIC"
1302 + u32 spi_config; // 18 = 0xfda00a2e
1303 + u16 vendor_id; // 1c = 0x045e = Microsoft Corp.
1304 + u16 product_id; // 1e = 0x0c1a
1305 + u32 revision; // 20 = 0x00000001
1306 + u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139
1307 + u32 _unknown_28; // 28 = 0x00000000
1308 + u32 fw_mode; // 2c = 0x00000000
1309 + u32 _unknown_30; // 30 = 0x00000000
1310 + u32 _unknown_34; // 34 = 0x0404035e (u8,u8,u8,u8 = version?)
1311 + u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
1312 + u32 _unknown_3c; // 3c = 0x00000002
1313 +};
1314 +
1315 +void bitsl(__iomem u32 *reg, u32 mask, u32 val);
1316 +void bitsb(__iomem u8 *reg, u8 mask, u8 val);
1317 +#define bitsl_set(reg, x) bitsl(reg, x, x)
1318 +#define bitsb_set(reg, x) bitsb(reg, x, x)
1319 +int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val);
1320 +int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val);
1321 +int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode);
1322 +int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data);
1323 +
1324 diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
1325 new file mode 100644
1326 index 000000000000..6a9b0d480bc1
1327 --- /dev/null
1328 +++ b/drivers/hid/ithc/ithc.h
1329 @@ -0,0 +1,60 @@
1330 +#include <linux/module.h>
1331 +#include <linux/input.h>
1332 +#include <linux/hid.h>
1333 +#include <linux/dma-mapping.h>
1334 +#include <linux/highmem.h>
1335 +#include <linux/pci.h>
1336 +#include <linux/io-64-nonatomic-lo-hi.h>
1337 +#include <linux/iopoll.h>
1338 +#include <linux/delay.h>
1339 +#include <linux/kthread.h>
1340 +#include <linux/miscdevice.h>
1341 +#include <linux/debugfs.h>
1342 +#include <linux/poll.h>
1343 +#include <linux/timer.h>
1344 +#include <linux/pm_qos.h>
1345 +
1346 +#define DEVNAME "ithc"
1347 +#define DEVFULLNAME "Intel Touch Host Controller"
1348 +
1349 +#undef pr_fmt
1350 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1351 +
1352 +#define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; })
1353 +#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while(0)
1354 +
1355 +#define NUM_RX_BUF 16
1356 +
1357 +struct ithc;
1358 +
1359 +#include "ithc-regs.h"
1360 +#include "ithc-dma.h"
1361 +
1362 +struct ithc {
1363 + char phys[32];
1364 + struct pci_dev *pci;
1365 + int irq;
1366 + struct task_struct *poll_thread;
1367 + struct pm_qos_request activity_qos;
1368 + struct timer_list activity_timer;
1369 +
1370 + struct hid_device *hid;
1371 + bool hid_parse_done;
1372 + wait_queue_head_t wait_hid_parse;
1373 + wait_queue_head_t wait_hid_get_feature;
1374 + struct mutex hid_get_feature_mutex;
1375 + void *hid_get_feature_buf;
1376 + size_t hid_get_feature_size;
1377 +
1378 + struct ithc_registers __iomem *regs;
1379 + struct ithc_registers *prev_regs; // for debugging
1380 + struct ithc_device_config config;
1381 + struct ithc_dma_rx dma_rx[2];
1382 + struct ithc_dma_tx dma_tx;
1383 +};
1384 +
1385 +int ithc_reset(struct ithc *ithc);
1386 +void ithc_set_active(struct ithc *ithc);
1387 +int ithc_debug_init(struct ithc *ithc);
1388 +void ithc_log_regs(struct ithc *ithc);
1389 +

  ViewVC Help
Powered by ViewVC 1.1.30