0005-ithc.patch

From b8fd34edd85322e3106a5aa6ddc375673a023c40 Mon Sep 17 00:00:00 2001
From: Dorian Stoll <dorian.stoll@tmsp.io>
Date: Sun, 11 Dec 2022 12:03:38 +0100
Subject: [PATCH] iommu: intel: Disable source id verification for ITHC
Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
Patchset: ithc
---
drivers/iommu/intel/irq_remapping.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index a1b987335b31..970805409470 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -390,6 +390,22 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
data.busmatch_count = 0;
pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
+ /*
+ * The Intel Touch Host Controller is at 00:10.6, but for some reason
+ * the MSI interrupts have request id 01:05.0.
+ * Disable id verification to work around this.
+ * FIXME Find proper fix or turn this into a quirk.
+ */
+ if (dev->vendor == PCI_VENDOR_ID_INTEL && (dev->class >> 8) == PCI_CLASS_INPUT_PEN) {
+ switch(dev->device) {
+ case 0x98d0: case 0x98d1: // LKF
+ case 0xa0d0: case 0xa0d1: // TGL LP
+ case 0x43d0: case 0x43d1: // TGL H
+ set_irte_sid(irte, SVT_NO_VERIFY, SQ_ALL_16, 0);
+ return 0;
+ }
+ }
+
/*
* DMA alias provides us with a PCI device and alias. The only case
* where the it will return an alias on a different bus than the
--
2.41.0
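
For illustration, here is roughly why source-id verification fails for these devices. With verification enabled (SVT_VERIFY_SID_SQ), the IOMMU compares the 16-bit requester id (bus << 8 | devfn) carried in the interrupt message against the id programmed into the IRTE. A minimal sketch using the kernel's PCI_DEVID/PCI_DEVFN macros (illustration only, not part of the patch) shows the two ids can never match:

#include <linux/pci.h>

/* Illustration: the THC is enumerated at 00:10.6, so this is the id the
 * IRTE would normally be programmed to verify against. */
static u16 ithc_enumerated_sid(void)
{
	return PCI_DEVID(0x00, PCI_DEVFN(0x10, 6)); /* = 0x0086 */
}

/* ...but the hardware sends its MSIs with requester id 01:05.0, so a
 * verified IRTE would reject every interrupt. Hence SVT_NO_VERIFY. */
static u16 ithc_msi_sid(void)
{
	return PCI_DEVID(0x01, PCI_DEVFN(0x05, 0)); /* = 0x0128 */
}
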
From 357c5b10962caedb479db99d1383a31f9e77111f Mon Sep 17 00:00:00 2001
From: Dorian Stoll <dorian.stoll@tmsp.io>
Date: Sun, 11 Dec 2022 12:10:54 +0100
Subject: [PATCH] hid: Add support for Intel Touch Host Controller
Based on quo/ithc-linux@55803a2
Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
Patchset: ithc
---
drivers/hid/Kconfig | 2 +
drivers/hid/Makefile | 1 +
drivers/hid/ithc/Kbuild | 6 +
drivers/hid/ithc/Kconfig | 12 +
drivers/hid/ithc/ithc-debug.c | 96 ++++++
drivers/hid/ithc/ithc-dma.c | 258 ++++++++++++++++
drivers/hid/ithc/ithc-dma.h | 67 +++++
drivers/hid/ithc/ithc-main.c | 534 ++++++++++++++++++++++++++++++++++
drivers/hid/ithc/ithc-regs.c | 64 ++++
drivers/hid/ithc/ithc-regs.h | 186 ++++++++++++
drivers/hid/ithc/ithc.h | 60 ++++
11 files changed, 1286 insertions(+)
create mode 100644 drivers/hid/ithc/Kbuild
create mode 100644 drivers/hid/ithc/Kconfig
create mode 100644 drivers/hid/ithc/ithc-debug.c
create mode 100644 drivers/hid/ithc/ithc-dma.c
create mode 100644 drivers/hid/ithc/ithc-dma.h
create mode 100644 drivers/hid/ithc/ithc-main.c
create mode 100644 drivers/hid/ithc/ithc-regs.c
create mode 100644 drivers/hid/ithc/ithc-regs.h
create mode 100644 drivers/hid/ithc/ithc.h
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index dcedd5b90f35..847c6b07914d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1318,4 +1318,6 @@ source "drivers/hid/surface-hid/Kconfig"
source "drivers/hid/ipts/Kconfig"
+source "drivers/hid/ithc/Kconfig"
+
endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 285e12d95b0e..a3ff62e922f1 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -169,3 +169,4 @@ obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
obj-$(CONFIG_HID_IPTS) += ipts/
+obj-$(CONFIG_HID_ITHC) += ithc/
diff --git a/drivers/hid/ithc/Kbuild b/drivers/hid/ithc/Kbuild
new file mode 100644
index 000000000000..aea83f2ac07b
--- /dev/null
+++ b/drivers/hid/ithc/Kbuild
@@ -0,0 +1,6 @@
+obj-$(CONFIG_HID_ITHC) := ithc.o
+
+ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-debug.o
+
+ccflags-y := -std=gnu11 -Wno-declaration-after-statement
+
diff --git a/drivers/hid/ithc/Kconfig b/drivers/hid/ithc/Kconfig
new file mode 100644
index 000000000000..ede713023609
--- /dev/null
+++ b/drivers/hid/ithc/Kconfig
@@ -0,0 +1,12 @@
+config HID_ITHC
+ tristate "Intel Touch Host Controller"
+ depends on PCI
+ depends on HID
+ help
+ Say Y here if your system has a touchscreen using Intel's
+ Touch Host Controller (ITHC / IPTS) technology.
+
+ If unsure say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ithc.
diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c
new file mode 100644
index 000000000000..57bf125c45bd
--- /dev/null
+++ b/drivers/hid/ithc/ithc-debug.c
@@ -0,0 +1,96 @@
+#include "ithc.h"
+
+void ithc_log_regs(struct ithc *ithc) {
+ if (!ithc->prev_regs) return;
+ u32 __iomem *cur = (__iomem void*)ithc->regs;
+ u32 *prev = (void*)ithc->prev_regs;
+ for (int i = 1024; i < sizeof *ithc->regs / 4; i++) {
+ u32 x = readl(cur + i);
+ if (x != prev[i]) {
+ pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x);
+ prev[i] = x;
+ }
+ }
+}
+
+static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len, loff_t *offset) {
+ struct ithc *ithc = file_inode(f)->i_private;
+ char cmd[256];
+ if (!ithc || !ithc->pci) return -ENODEV;
+ if (!len) return -EINVAL;
+ if (len >= sizeof cmd) return -EINVAL;
+ if (copy_from_user(cmd, buf, len)) return -EFAULT;
+ cmd[len] = 0;
+ if (cmd[len-1] == '\n') cmd[len-1] = 0;
+ pci_info(ithc->pci, "debug command: %s\n", cmd);
+ u32 n = 0;
+ const char *s = cmd + 1;
+ u32 a[32];
+ while (*s && *s != '\n') {
+ if (n >= ARRAY_SIZE(a)) return -EINVAL;
+ if (*s++ != ' ') return -EINVAL;
+ char *e;
+ a[n++] = simple_strtoul(s, &e, 0);
+ if (e == s) return -EINVAL;
+ s = e;
+ }
+ ithc_log_regs(ithc);
+ switch(cmd[0]) {
+ case 'x': // reset
+ ithc_reset(ithc);
+ break;
+ case 'w': // write register: offset mask value
+ if (n != 3 || (a[0] & 3)) return -EINVAL;
+ pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n", a[0], a[2], a[1]);
+ bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]);
+ break;
+ case 'r': // read register: offset
+ if (n != 1 || (a[0] & 3)) return -EINVAL;
+ pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0], readl(((__iomem u32 *)ithc->regs) + a[0] / 4));
+ break;
+ case 's': // spi command: cmd offset len data...
+ // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ // set touch cfg: s 6 12 4 XX
+ if (n < 3 || a[2] > (n - 3) * 4) return -EINVAL;
+ pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]);
+ if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3))
+ for (u32 i = 0; i < (a[2] + 3) / 4; i++) pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]);
+ break;
+ case 'd': // dma command: cmd len data...
+ // get report descriptor: d 7 8 0 0
+ // enable multitouch: d 3 2 0x0105
+ if (n < 2 || a[1] > (n - 2) * 4) return -EINVAL;
+ pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]);
+ if (ithc_dma_tx(ithc, a[0], a[1], a + 2)) pci_err(ithc->pci, "dma tx failed\n");
+ break;
+ default:
+ return -EINVAL;
+ }
+ ithc_log_regs(ithc);
+ return len;
+}
+
+static const struct file_operations ithc_debugfops_cmd = {
+ .owner = THIS_MODULE,
+ .write = ithc_debugfs_cmd_write,
+};
+
+static void ithc_debugfs_devres_release(struct device *dev, void *res) {
+ struct dentry **dbgm = res;
+ if (*dbgm) debugfs_remove_recursive(*dbgm);
+}
+
+int ithc_debug_init(struct ithc *ithc) {
+ struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof *dbgm, GFP_KERNEL);
+ if (!dbgm) return -ENOMEM;
+ devres_add(&ithc->pci->dev, dbgm);
+ struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL);
+ if (IS_ERR(dbg)) return PTR_ERR(dbg);
+ *dbgm = dbg;
+
+ struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd);
+ if (IS_ERR(cmd)) return PTR_ERR(cmd);
+
+ return 0;
+}
+
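The debugfs interface above takes one command letter followed by space-separated numbers, parsed with simple_strtoul() in base 0, so hex values with a 0x prefix work. A hypothetical user-space sketch, assuming DEVNAME expands to "ithc" (so the file is /sys/kernel/debug/ithc/cmd) and using an arbitrary register offset purely for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* ithc_debug_init() creates <debugfs>/<DEVNAME>/cmd, write-only (0220) */
	int fd = open("/sys/kernel/debug/ithc/cmd", O_WRONLY);
	if (fd < 0) { perror("open"); return 1; }
	/* 'r' = read register; the result is printed to the kernel log */
	const char *cmd = "r 0x48\n";
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		perror("write");
	close(fd);
	return 0;
}
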
diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c
new file mode 100644
index 000000000000..7e89b3496918
--- /dev/null
+++ b/drivers/hid/ithc/ithc-dma.c
@@ -0,0 +1,258 @@
+#include "ithc.h"
+
+static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p, unsigned num_buffers, unsigned num_pages, enum dma_data_direction dir) {
+ p->num_pages = num_pages;
+ p->dir = dir;
+ p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE);
+ p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL);
+ if (!p->addr) return -ENOMEM;
+ if (p->dma_addr & (PAGE_SIZE - 1)) return -EFAULT;
+ return 0;
+}
+
+struct ithc_sg_table {
+ void *addr;
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+static void ithc_dma_sgtable_free(struct sg_table *sgt) {
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_sg(sgt, sg, i) {
+ struct page *p = sg_page(sg);
+ if (p) __free_page(p);
+ }
+ sg_free_table(sgt);
+}
+static void ithc_dma_data_devres_release(struct device *dev, void *res) {
+ struct ithc_sg_table *sgt = res;
+ if (sgt->addr) vunmap(sgt->addr);
+ dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0);
+ ithc_dma_sgtable_free(&sgt->sgt);
+}
+
+static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b) {
+ // We don't use dma_alloc_coherent for data buffers, because they don't have to be contiguous (we can use one PRD per page) or coherent (they are unidirectional).
+ // Instead we use an sg_table of individually allocated pages (5.13 has dma_alloc_noncontiguous for this, but we'd like to support 5.10 for now).
+ struct page *pages[16];
+ if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages)) return -EINVAL;
+ b->active_idx = -1;
+ struct ithc_sg_table *sgt = devres_alloc(ithc_dma_data_devres_release, sizeof *sgt, GFP_KERNEL);
+ if (!sgt) return -ENOMEM;
+ sgt->dir = prds->dir;
+ if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) {
+ struct scatterlist *sg;
+ int i;
+ bool ok = true;
+ for_each_sgtable_sg(&sgt->sgt, sg, i) {
+ struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); // don't need __GFP_DMA for PCI DMA
+ if (!p) { ok = false; break; }
+ sg_set_page(sg, p, PAGE_SIZE, 0);
+ }
+ if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) {
+ devres_add(&ithc->pci->dev, sgt);
+ b->sgt = &sgt->sgt;
+ b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL);
+ if (!b->addr) return -ENOMEM;
+ return 0;
+ }
+ ithc_dma_sgtable_free(&sgt->sgt);
+ }
+ devres_free(sgt);
+ return -ENOMEM;
+}
+
+static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
+ struct ithc_phys_region_desc *prd = prds->addr;
+ prd += idx * prds->num_pages;
+ if (b->active_idx >= 0) { pci_err(ithc->pci, "buffer already active\n"); return -EINVAL; }
+ b->active_idx = idx;
+ if (prds->dir == DMA_TO_DEVICE) {
+ if (b->data_size > PAGE_SIZE) return -EINVAL;
+ prd->addr = sg_dma_address(b->sgt->sgl) >> 10;
+ prd->size = b->data_size | PRD_FLAG_END;
+ flush_kernel_vmap_range(b->addr, b->data_size);
+ } else if (prds->dir == DMA_FROM_DEVICE) {
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_dma_sg(b->sgt, sg, i) {
+ prd->addr = sg_dma_address(sg) >> 10;
+ prd->size = sg_dma_len(sg);
+ prd++;
+ }
+ prd[-1].size |= PRD_FLAG_END;
+ }
+ dma_wmb(); // for the prds
+ dma_sync_sgtable_for_device(&ithc->pci->dev, b->sgt, prds->dir);
+ return 0;
+}
+
+static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
+ struct ithc_phys_region_desc *prd = prds->addr;
+ prd += idx * prds->num_pages;
+ if (b->active_idx != idx) { pci_err(ithc->pci, "wrong buffer index\n"); return -EINVAL; }
+ b->active_idx = -1;
+ if (prds->dir == DMA_FROM_DEVICE) {
+ dma_rmb(); // for the prds
+ b->data_size = 0;
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_dma_sg(b->sgt, sg, i) {
+ unsigned size = prd->size;
+ b->data_size += size & PRD_SIZE_MASK;
+ if (size & PRD_FLAG_END) break;
+ if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) { pci_err(ithc->pci, "truncated prd\n"); break; }
+ prd++;
+ }
+ invalidate_kernel_vmap_range(b->addr, b->data_size);
+ }
+ dma_sync_sgtable_for_cpu(&ithc->pci->dev, b->sgt, prds->dir);
+ return 0;
+}
+
+int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname) {
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ mutex_init(&rx->mutex);
+ u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes);
+ unsigned num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n", NUM_RX_BUF, buf_size, num_pages);
+ CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE);
+ for (unsigned i = 0; i < NUM_RX_BUF; i++)
+ CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]);
+ writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2);
+ lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr);
+ writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs);
+ writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds);
+ u8 head = readb(&ithc->regs->dma_rx[channel].head);
+ if (head) { pci_err(ithc->pci, "head is nonzero (%u)\n", head); return -EIO; }
+ for (unsigned i = 0; i < NUM_RX_BUF; i++)
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i);
+ writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail);
+ return 0;
+}
+void ithc_dma_rx_enable(struct ithc *ithc, u8 channel) {
+ bitsb_set(&ithc->regs->dma_rx[channel].control, DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA);
+ CHECK(waitl, ithc, &ithc->regs->dma_rx[channel].status, DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);
+}
+
+int ithc_dma_tx_init(struct ithc *ithc) {
+ struct ithc_dma_tx *tx = &ithc->dma_tx;
+ mutex_init(&tx->mutex);
+ tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes);
+ unsigned num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n", tx->max_size, num_pages);
+ CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE);
+ CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf);
+ lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr);
+ writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds);
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+ return 0;
+}
+
+static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data, u8 channel, u8 buf) {
+ if (buf >= NUM_RX_BUF) {
+ pci_err(ithc->pci, "invalid dma ringbuffer index\n");
+ return -EINVAL;
+ }
+ ithc_set_active(ithc);
+ u32 len = data->data_size;
+ struct ithc_dma_rx_header *hdr = data->addr;
+ u8 *hiddata = (void *)(hdr + 1);
+ if (len >= sizeof *hdr && hdr->code == DMA_RX_CODE_RESET) {
+ CHECK(ithc_reset, ithc);
+ } else if (len < sizeof *hdr || len != sizeof *hdr + hdr->data_size) {
+ if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
+ // When the CPU enters a low power state during DMA, we can get truncated messages.
+ // Typically this will be a single touch HID report that is only 1 byte, or a multitouch report that is 257 bytes.
+ // See also ithc_set_active().
+ } else {
+ pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n", channel, buf, len, hdr->code, hdr->data_size);
+ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
+ }
+ } else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) {
+ CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8);
+ WRITE_ONCE(ithc->hid_parse_done, true);
+ wake_up(&ithc->wait_hid_parse);
+ } else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
+ CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1);
+ } else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) {
+ bool done = false;
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ if (ithc->hid_get_feature_buf) {
+ if (hdr->data_size < ithc->hid_get_feature_size) ithc->hid_get_feature_size = hdr->data_size;
+ memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size);
+ ithc->hid_get_feature_buf = NULL;
+ done = true;
+ }
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+ if (done) wake_up(&ithc->wait_hid_get_feature);
+ else CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT, hiddata, hdr->data_size, 1);
+ } else {
+ pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n", channel, buf, len, hdr->code);
+ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
+ }
+ return 0;
+}
+
+static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) {
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ unsigned n = rx->num_received;
+ u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head);
+ while (1) {
+ u8 tail = n % NUM_RX_BUF;
+ u8 tail_wrap = tail | ((n / NUM_RX_BUF) & 1 ? 0 : DMA_RX_WRAP_FLAG);
+ writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail);
+ // ringbuffer is full if tail_wrap == head_wrap
+ // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG
+ if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG)) return 0;
+
+ // take the buffer that the device just filled
+ struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF];
+ CHECK_RET(ithc_dma_data_buffer_get, ithc, &rx->prds, b, tail);
+ rx->num_received = ++n;
+
+ // process data
+ CHECK(ithc_dma_rx_process_buf, ithc, b, channel, tail);
+
+ // give the buffer back to the device
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail);
+ }
+}
+int ithc_dma_rx(struct ithc *ithc, u8 channel) {
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ mutex_lock(&rx->mutex);
+ int ret = ithc_dma_rx_unlocked(ithc, channel);
+ mutex_unlock(&rx->mutex);
+ return ret;
+}
+
+static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
+ pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize);
+ struct ithc_dma_tx_header *hdr;
+ u8 padding = datasize & 3 ? 4 - (datasize & 3) : 0;
+ unsigned fullsize = sizeof *hdr + datasize + padding;
+ if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE) return -EINVAL;
+ CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+
+ ithc->dma_tx.buf.data_size = fullsize;
+ hdr = ithc->dma_tx.buf.addr;
+ hdr->code = cmdcode;
+ hdr->data_size = datasize;
+ u8 *dest = (void *)(hdr + 1);
+ memcpy(dest, data, datasize);
+ dest += datasize;
+ for (u8 p = 0; p < padding; p++) *dest++ = 0;
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+
+ bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND);
+ CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
+ writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status);
+ return 0;
+}
+int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
+ mutex_lock(&ithc->dma_tx.mutex);
+ int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data);
+ mutex_unlock(&ithc->dma_tx.mutex);
+ return ret;
+}
+
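The head/tail comparison in ithc_dma_rx_unlocked() relies on a wrap flag that toggles each time an index wraps around the ring. A short sketch of the two comparisons (illustration only, not part of the patch; DMA_RX_WRAP_FLAG is the flag bit defined in ithc-regs.h):

/* Head and tail are slot indices carrying an extra flag bit that toggles
 * on every wrap. This disambiguates the two cases where the indices are
 * equal: same flag bit means the device has filled every slot (full),
 * opposite flag bits mean there is nothing left to consume (empty). */
static bool ithc_rx_ring_empty(u8 head_wrap, u8 tail_wrap)
{
	return tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG);
}

static bool ithc_rx_ring_full(u8 head_wrap, u8 tail_wrap)
{
	return tail_wrap == head_wrap;
}
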
diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h
new file mode 100644
index 000000000000..d9f2c19a13f3
--- /dev/null
+++ b/drivers/hid/ithc/ithc-dma.h
@@ -0,0 +1,67 @@
+#define PRD_SIZE_MASK 0xffffff
+#define PRD_FLAG_END 0x1000000
+#define PRD_FLAG_SUCCESS 0x2000000
+#define PRD_FLAG_ERROR 0x4000000
+
+struct ithc_phys_region_desc {
+ u64 addr; // physical addr/1024
+ u32 size; // num bytes, PRD_FLAG_END marks last prd for data split over multiple prds
+ u32 unused;
+};
+
+#define DMA_RX_CODE_INPUT_REPORT 3
+#define DMA_RX_CODE_FEATURE_REPORT 4
+#define DMA_RX_CODE_REPORT_DESCRIPTOR 5
+#define DMA_RX_CODE_RESET 7
+
+struct ithc_dma_rx_header {
+ u32 code;
+ u32 data_size;
+ u32 _unknown[14];
+};
+
+#define DMA_TX_CODE_SET_FEATURE 3
+#define DMA_TX_CODE_GET_FEATURE 4
+#define DMA_TX_CODE_OUTPUT_REPORT 5
+#define DMA_TX_CODE_GET_REPORT_DESCRIPTOR 7
+
+struct ithc_dma_tx_header {
+ u32 code;
+ u32 data_size;
+};
+
+struct ithc_dma_prd_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ u32 size;
+ u32 num_pages; // per data buffer
+ enum dma_data_direction dir;
+};
+
+struct ithc_dma_data_buffer {
+ void *addr;
+ struct sg_table *sgt;
+ int active_idx;
+ u32 data_size;
+};
+
+struct ithc_dma_tx {
+ struct mutex mutex;
+ u32 max_size;
+ struct ithc_dma_prd_buffer prds;
+ struct ithc_dma_data_buffer buf;
+};
+
+struct ithc_dma_rx {
+ struct mutex mutex;
+ u32 num_received;
+ struct ithc_dma_prd_buffer prds;
+ struct ithc_dma_data_buffer bufs[NUM_RX_BUF];
+};
+
+int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname);
+void ithc_dma_rx_enable(struct ithc *ithc, u8 channel);
+int ithc_dma_tx_init(struct ithc *ithc);
+int ithc_dma_rx(struct ithc *ithc, u8 channel);
+int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *cmddata);
+
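Each data buffer is described to the hardware by one PRD per page, using the layout above. A minimal sketch of how ithc-dma.c encodes a single descriptor (illustration only, restating the put/get logic shown earlier):

static void ithc_prd_fill_example(struct ithc_phys_region_desc *prd,
		dma_addr_t addr, u32 len, bool last)
{
	prd->addr = addr >> 10;          /* physical address divided by 1024 */
	prd->size = len & PRD_SIZE_MASK; /* byte count in the low 24 bits */
	if (last)
		prd->size |= PRD_FLAG_END; /* marks the final prd of a buffer */
}
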
diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c
new file mode 100644
index 000000000000..09512b9cb4d3
--- /dev/null
+++ b/drivers/hid/ithc/ithc-main.c
@@ -0,0 +1,534 @@
+#include "ithc.h"
+
+MODULE_DESCRIPTION("Intel Touch Host Controller driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+// Lakefield
+#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0
+#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1
+// Tiger Lake
+#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0
+#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1
+#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 0x43d0
+#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1
+// Alder Lake
+#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8
+#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9
+#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0
+#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1
+#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0
+#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1
+// Raptor Lake
+#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58
+#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59
+// Meteor Lake
+#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48
+#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 0x7e4a
+
+static const struct pci_device_id ithc_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, ithc_pci_tbl);
+
+// Module parameters
+
+static bool ithc_use_polling = false;
+module_param_named(poll, ithc_use_polling, bool, 0);
+MODULE_PARM_DESC(poll, "Use polling instead of interrupts");
+
+static bool ithc_use_rx0 = false;
+module_param_named(rx0, ithc_use_rx0, bool, 0);
+MODULE_PARM_DESC(rx0, "Use DMA RX channel 0");
+
+static bool ithc_use_rx1 = true;
+module_param_named(rx1, ithc_use_rx1, bool, 0);
+MODULE_PARM_DESC(rx1, "Use DMA RX channel 1");
+
+static bool ithc_log_regs_enabled = false;
+module_param_named(logregs, ithc_log_regs_enabled, bool, 0);
+MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)");
+
+// Sysfs attributes
+
+static bool ithc_is_config_valid(struct ithc *ithc) {
+ return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC;
+}
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
+ return sprintf(buf, "0x%04x", ithc->config.vendor_id);
+}
+static DEVICE_ATTR_RO(vendor);
+static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
+ return sprintf(buf, "0x%04x", ithc->config.product_id);
+}
+static DEVICE_ATTR_RO(product);
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
+ return sprintf(buf, "%u", ithc->config.revision);
+}
+static DEVICE_ATTR_RO(revision);
+static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
+ u32 v = ithc->config.fw_version;
+ return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static const struct attribute_group *ithc_attribute_groups[] = {
+ &(const struct attribute_group){
+ .name = DEVNAME,
+ .attrs = (struct attribute *[]){
+ &dev_attr_vendor.attr,
+ &dev_attr_product.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_fw_version.attr,
+ NULL
+ },
+ },
+ NULL
+};
+
+// HID setup
+
+static int ithc_hid_start(struct hid_device *hdev) { return 0; }
+static void ithc_hid_stop(struct hid_device *hdev) { }
+static int ithc_hid_open(struct hid_device *hdev) { return 0; }
+static void ithc_hid_close(struct hid_device *hdev) { }
+
+static int ithc_hid_parse(struct hid_device *hdev) {
+ struct ithc *ithc = hdev->driver_data;
+ u64 val = 0;
+ WRITE_ONCE(ithc->hid_parse_done, false);
+ CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof val, &val);
+ if (!wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done), msecs_to_jiffies(1000))) return -ETIMEDOUT;
+ return 0;
+}
+
+static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) {
+ struct ithc *ithc = hdev->driver_data;
+ if (!buf || !len) return -EINVAL;
+ u32 code;
+ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_OUTPUT_REPORT;
+ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_SET_FEATURE;
+ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) code = DMA_TX_CODE_GET_FEATURE;
+ else {
+ pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n", rtype, reqtype, reportnum);
+ return -EINVAL;
+ }
+ buf[0] = reportnum;
+ if (reqtype == HID_REQ_GET_REPORT) {
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ ithc->hid_get_feature_buf = buf;
+ ithc->hid_get_feature_size = len;
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+ int r = CHECK(ithc_dma_tx, ithc, code, 1, buf);
+ if (!r) {
+ r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature, !ithc->hid_get_feature_buf, msecs_to_jiffies(1000));
+ if (!r) r = -ETIMEDOUT;
+ else if (r < 0) r = -EINTR;
+ else r = 0;
+ }
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ ithc->hid_get_feature_buf = NULL;
+ if (!r) r = ithc->hid_get_feature_size;
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+ return r;
+ }
+ CHECK_RET(ithc_dma_tx, ithc, code, len, buf);
+ return 0;
+}
+
+static struct hid_ll_driver ithc_ll_driver = {
+ .start = ithc_hid_start,
+ .stop = ithc_hid_stop,
+ .open = ithc_hid_open,
+ .close = ithc_hid_close,
+ .parse = ithc_hid_parse,
+ .raw_request = ithc_hid_raw_request,
+};
+
+static void ithc_hid_devres_release(struct device *dev, void *res) {
+ struct hid_device **hidm = res;
+ if (*hidm) hid_destroy_device(*hidm);
+}
+
+static int ithc_hid_init(struct ithc *ithc) {
+ struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof *hidm, GFP_KERNEL);
+ if (!hidm) return -ENOMEM;
+ devres_add(&ithc->pci->dev, hidm);
+ struct hid_device *hid = hid_allocate_device();
+ if (IS_ERR(hid)) return PTR_ERR(hid);
+ *hidm = hid;
+
+ strscpy(hid->name, DEVFULLNAME, sizeof(hid->name));
+ strscpy(hid->phys, ithc->phys, sizeof(hid->phys));
+ hid->ll_driver = &ithc_ll_driver;
+ hid->bus = BUS_PCI;
+ hid->vendor = ithc->config.vendor_id;
+ hid->product = ithc->config.product_id;
+ hid->version = 0x100;
+ hid->dev.parent = &ithc->pci->dev;
+ hid->driver_data = ithc;
+
+ ithc->hid = hid;
+ return 0;
+}
+
+// Interrupts/polling
+
+static void ithc_activity_timer_callback(struct timer_list *t) {
+ struct ithc *ithc = container_of(t, struct ithc, activity_timer);
+ cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
+}
+
+void ithc_set_active(struct ithc *ithc) {
+ // When CPU usage is very low, the CPU can enter various low power states (C2-C10).
+ // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_UNKNOWN_12 will be set when this happens.
+ // The amount of truncated messages can become very high, resulting in user-visible effects (laggy/stuttering cursor).
+ // To avoid this, we use a CPU latency QoS request to prevent the CPU from entering low power states during touch interactions.
+ cpu_latency_qos_update_request(&ithc->activity_qos, 0);
+ mod_timer(&ithc->activity_timer, jiffies + msecs_to_jiffies(1000));
+}
+
+static int ithc_set_device_enabled(struct ithc *ithc, bool enable) {
+ u32 x = ithc->config.touch_cfg = (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2
+ | (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0);
+ return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE, offsetof(struct ithc_device_config, touch_cfg), sizeof x, &x);
+}
+
+static void ithc_disable_interrupts(struct ithc *ithc) {
+ writel(0, &ithc->regs->error_control);
+ bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0);
+ bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
+ bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
+ bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0);
+}
+
+static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned channel) {
+ writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA, &ithc->regs->dma_rx[channel].status);
+}
+
+static void ithc_clear_interrupts(struct ithc *ithc) {
+ writel(0xffffffff, &ithc->regs->error_flags);
+ writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status);
+ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
+ ithc_clear_dma_rx_interrupts(ithc, 0);
+ ithc_clear_dma_rx_interrupts(ithc, 1);
+ writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2, &ithc->regs->dma_tx.status);
+}
+
+static void ithc_process(struct ithc *ithc) {
+ ithc_log_regs(ithc);
+
+ // read and clear error bits
+ u32 err = readl(&ithc->regs->error_flags);
+ if (err) {
+ if (err & ~ERROR_FLAG_DMA_UNKNOWN_12) pci_err(ithc->pci, "error flags: 0x%08x\n", err);
+ writel(err, &ithc->regs->error_flags);
+ }
+
+ // process DMA rx
+ if (ithc_use_rx0) {
+ ithc_clear_dma_rx_interrupts(ithc, 0);
+ ithc_dma_rx(ithc, 0);
+ }
+ if (ithc_use_rx1) {
+ ithc_clear_dma_rx_interrupts(ithc, 1);
+ ithc_dma_rx(ithc, 1);
+ }
+
+ ithc_log_regs(ithc);
+}
+
+static irqreturn_t ithc_interrupt_thread(int irq, void *arg) {
+ struct ithc *ithc = arg;
+ pci_dbg(ithc->pci, "IRQ! err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n",
+ readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags),
+ readb(&ithc->regs->spi_cmd.control), readl(&ithc->regs->spi_cmd.status),
+ readb(&ithc->regs->dma_rx[0].control), readl(&ithc->regs->dma_rx[0].status),
+ readb(&ithc->regs->dma_rx[1].control), readl(&ithc->regs->dma_rx[1].status),
+ readb(&ithc->regs->dma_tx.control), readl(&ithc->regs->dma_tx.status));
+ ithc_process(ithc);
+ return IRQ_HANDLED;
+}
+
+static int ithc_poll_thread(void *arg) {
+ struct ithc *ithc = arg;
+ unsigned sleep = 100;
+ while (!kthread_should_stop()) {
+ u32 n = ithc->dma_rx[1].num_received;
+ ithc_process(ithc);
+ if (n != ithc->dma_rx[1].num_received) sleep = 20;
+ else sleep = min(200u, sleep + (sleep >> 4) + 1);
+ msleep_interruptible(sleep);
+ }
+ return 0;
+}
+
+// Device initialization and shutdown
+
+static void ithc_disable(struct ithc *ithc) {
+ bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE);
+ CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED);
+ bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
+ bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND, 0);
+ bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
+ bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_ENABLE, 0);
+ bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_ENABLE, 0);
+ ithc_disable_interrupts(ithc);
+ ithc_clear_interrupts(ithc);
+}
+
+static int ithc_init_device(struct ithc *ithc) {
+ ithc_log_regs(ithc);
+ bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0;
+ ithc_disable(ithc);
+ CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY);
+ ithc_set_spi_config(ithc, 10, 0);
+ bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000); // seems to help with reading config
+
+ if (was_enabled) if (msleep_interruptible(100)) return -EINTR;
+ bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
+ CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0);
+ for (int retries = 0; ; retries++) {
+ ithc_log_regs(ithc);
+ bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET);
+ if (!waitl(ithc, &ithc->regs->state, 0xf, 2)) break;
+ if (retries > 5) {
+ pci_err(ithc->pci, "too many retries, failed to reset device\n");
+ return -ETIMEDOUT;
+ }
+ pci_err(ithc->pci, "invalid state, retrying reset\n");
+ bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
+ if (msleep_interruptible(1000)) return -EINTR;
+ }
+ ithc_log_regs(ithc);
+
+ CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4);
+
+ // read config
+ for (int retries = 0; ; retries++) {
+ ithc_log_regs(ithc);
+ memset(&ithc->config, 0, sizeof ithc->config);
+ CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof ithc->config, &ithc->config);
+ u32 *p = (void *)&ithc->config;
+ pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+ if (ithc_is_config_valid(ithc)) break;
+ if (retries > 10) {
+ pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n", ithc->config.device_id);
+ return -EIO;
+ }
+ pci_err(ithc->pci, "failed to read config, retrying\n");
+ if (msleep_interruptible(100)) return -EINTR;
+ }
+ ithc_log_regs(ithc);
+
+ CHECK_RET(ithc_set_spi_config, ithc, DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config), DEVCFG_SPI_MODE(ithc->config.spi_config));
+ CHECK_RET(ithc_set_device_enabled, ithc, true);
+ ithc_log_regs(ithc);
+ return 0;
+}
+
+int ithc_reset(struct ithc *ithc) {
+ // FIXME This should probably do devres_release_group()+ithc_start(). But because this is called during DMA
+ // processing, that would have to be done asynchronously (schedule_work()?). And with extra locking?
+ pci_err(ithc->pci, "reset\n");
+ CHECK(ithc_init_device, ithc);
+ if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0);
+ if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1);
+ ithc_log_regs(ithc);
+ pci_dbg(ithc->pci, "reset completed\n");
+ return 0;
+}
+
+static void ithc_stop(void *res) {
+ struct ithc *ithc = res;
+ pci_dbg(ithc->pci, "stopping\n");
+ ithc_log_regs(ithc);
+ if (ithc->poll_thread) CHECK(kthread_stop, ithc->poll_thread);
+ if (ithc->irq >= 0) disable_irq(ithc->irq);
+ CHECK(ithc_set_device_enabled, ithc, false);
+ ithc_disable(ithc);
+ del_timer_sync(&ithc->activity_timer);
+ cpu_latency_qos_remove_request(&ithc->activity_qos);
+ // clear dma config
+ for(unsigned i = 0; i < 2; i++) {
+ CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0);
+ lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr);
+ writeb(0, &ithc->regs->dma_rx[i].num_bufs);
+ writeb(0, &ithc->regs->dma_rx[i].num_prds);
+ }
+ lo_hi_writeq(0, &ithc->regs->dma_tx.addr);
+ writeb(0, &ithc->regs->dma_tx.num_prds);
+ ithc_log_regs(ithc);
+ pci_dbg(ithc->pci, "stopped\n");
+}
+
+static void ithc_clear_drvdata(void *res) {
+ struct pci_dev *pci = res;
+ pci_set_drvdata(pci, NULL);
+}
+
+static int ithc_start(struct pci_dev *pci) {
+ pci_dbg(pci, "starting\n");
+ if (pci_get_drvdata(pci)) {
+ pci_err(pci, "device already initialized\n");
+ return -EINVAL;
+ }
+ if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL)) return -ENOMEM;
+
+ struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof *ithc, GFP_KERNEL);
+ if (!ithc) return -ENOMEM;
+ ithc->irq = -1;
+ ithc->pci = pci;
+ snprintf(ithc->phys, sizeof ithc->phys, "pci-%s/" DEVNAME, pci_name(pci));
+ init_waitqueue_head(&ithc->wait_hid_parse);
+ init_waitqueue_head(&ithc->wait_hid_get_feature);
+ mutex_init(&ithc->hid_get_feature_mutex);
+ pci_set_drvdata(pci, ithc);
+ CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci);
+ if (ithc_log_regs_enabled) ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof *ithc->prev_regs, GFP_KERNEL);
+
+ CHECK_RET(pcim_enable_device, pci);
+ pci_set_master(pci);
+ CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs");
+ CHECK_RET(dma_set_mask_and_coherent, &pci->dev, DMA_BIT_MASK(64));
+ CHECK_RET(pci_set_power_state, pci, PCI_D0);
+ ithc->regs = pcim_iomap_table(pci)[0];
+
+ if (!ithc_use_polling) {
+ CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ ithc->irq = CHECK(pci_irq_vector, pci, 0);
+ if (ithc->irq < 0) return ithc->irq;
+ }
+
+ CHECK_RET(ithc_init_device, ithc);
+ CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups);
+ if (ithc_use_rx0) CHECK_RET(ithc_dma_rx_init, ithc, 0, ithc_use_rx1 ? DEVNAME "0" : DEVNAME);
+ if (ithc_use_rx1) CHECK_RET(ithc_dma_rx_init, ithc, 1, ithc_use_rx0 ? DEVNAME "1" : DEVNAME);
+ CHECK_RET(ithc_dma_tx_init, ithc);
+
+ CHECK_RET(ithc_hid_init, ithc);
+
+ cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
+ timer_setup(&ithc->activity_timer, ithc_activity_timer_callback, 0);
+
+ // add ithc_stop callback AFTER setting up DMA buffers, so that polling/irqs/DMA are disabled BEFORE the buffers are freed
+ CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc);
+
+ if (ithc_use_polling) {
+ pci_info(pci, "using polling instead of irq\n");
+ // use a thread instead of simple timer because we want to be able to sleep
+ ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll");
+ if (IS_ERR(ithc->poll_thread)) {
+ int err = PTR_ERR(ithc->poll_thread);
+ ithc->poll_thread = NULL;
+ return err;
+ }
+ } else {
+ CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL, ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc);
+ }
+
+ if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0);
+ if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1);
+
+ // hid_add_device can only be called after irq/polling is started and DMA is enabled, because it calls ithc_hid_parse which reads the report descriptor via DMA
+ CHECK_RET(hid_add_device, ithc->hid);
+
+ CHECK(ithc_debug_init, ithc);
+
+ pci_dbg(pci, "started\n");
+ return 0;
+}
+
+static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id) {
+ pci_dbg(pci, "device probe\n");
+ return ithc_start(pci);
+}
+
+static void ithc_remove(struct pci_dev *pci) {
+ pci_dbg(pci, "device remove\n");
+ // all cleanup is handled by devres
+}
+
+static int ithc_suspend(struct device *dev) {
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm suspend\n");
+ devres_release_group(dev, ithc_start);
+ return 0;
+}
+
+static int ithc_resume(struct device *dev) {
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm resume\n");
+ return ithc_start(pci);
+}
+
+static int ithc_freeze(struct device *dev) {
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm freeze\n");
+ devres_release_group(dev, ithc_start);
+ return 0;
+}
+
+static int ithc_thaw(struct device *dev) {
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm thaw\n");
+ return ithc_start(pci);
+}
+
+static int ithc_restore(struct device *dev) {
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm restore\n");
+ return ithc_start(pci);
+}
+
+static struct pci_driver ithc_driver = {
+ .name = DEVNAME,
+ .id_table = ithc_pci_tbl,
+ .probe = ithc_probe,
+ .remove = ithc_remove,
+ .driver.pm = &(const struct dev_pm_ops) {
+ .suspend = ithc_suspend,
+ .resume = ithc_resume,
+ .freeze = ithc_freeze,
+ .thaw = ithc_thaw,
+ .restore = ithc_restore,
+ },
+ //.dev_groups = ithc_attribute_groups, // could use this (since 5.14), however the attributes won't have valid values until config has been read anyway
+};
+
+static int __init ithc_init(void) {
+ return pci_register_driver(&ithc_driver);
+}
+
+static void __exit ithc_exit(void) {
+ pci_unregister_driver(&ithc_driver);
+}
+
+module_init(ithc_init);
+module_exit(ithc_exit);
+
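Note the lifetime scheme in ithc-main.c: everything ithc_start() allocates is registered in a devres group keyed by the function pointer itself, so the suspend/freeze hooks can tear the whole driver down with a single devres_release_group() call, and resume simply re-runs ithc_start(). Because devres callbacks run in reverse registration order, registering ithc_stop after the DMA buffers guarantees it runs before they are freed. A reduced sketch of the pattern (illustration only; example_start and some_stop_action are hypothetical names):

static void some_stop_action(void *data)
{
	/* disable irqs/dma here, while buffers are still alive */
}

static int example_start(struct device *dev)
{
	if (!devres_open_group(dev, example_start, GFP_KERNEL))
		return -ENOMEM;
	void *buf = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL); /* freed last */
	if (!buf)
		return -ENOMEM;
	/* registered last, so it runs first on release, before buf is freed */
	return devm_add_action_or_reset(dev, some_stop_action, buf);
}

static int example_suspend(struct device *dev)
{
	devres_release_group(dev, example_start); /* unwinds all of the above */
	return 0;
}
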
  1093. diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c
  1094. new file mode 100644
  1095. index 000000000000..85d567b05761
  1096. --- /dev/null
  1097. +++ b/drivers/hid/ithc/ithc-regs.c
  1098. @@ -0,0 +1,64 @@
+#include "ithc.h"
+
+#define reg_num(r) (0x1fff & (u16)(__force u64)(r))
+
+void bitsl(__iomem u32 *reg, u32 mask, u32 val) {
+	if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask);
+	writel((readl(reg) & ~mask) | (val & mask), reg);
+}
+
+void bitsb(__iomem u8 *reg, u8 mask, u8 val) {
+	if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask);
+	writeb((readb(reg) & ~mask) | (val & mask), reg);
+}
+
+int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val) {
+	pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val);
+	u32 x;
+	if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
+		pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val);
+		return -ETIMEDOUT;
+	}
+	pci_dbg(ithc->pci, "done waiting\n");
+	return 0;
+}
+
+int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val) {
+	pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val);
+	u8 x;
+	if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
+		pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val);
+		return -ETIMEDOUT;
+	}
+	pci_dbg(ithc->pci, "done waiting\n");
+	return 0;
+}
+
+int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode) {
+	pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode);
+	if (mode == 3) mode = 2;
+	bitsl(&ithc->regs->spi_config,
+		SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff),
+		SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed));
+	return 0;
+}
+
+int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data) {
+	pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset);
+	if (size > sizeof ithc->regs->spi_cmd.data) return -EINVAL;
+	CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
+	writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
+	writeb(command, &ithc->regs->spi_cmd.code);
+	writew(size, &ithc->regs->spi_cmd.size);
+	writel(offset, &ithc->regs->spi_cmd.offset);
+	u32 *p = data, n = (size + 3) / 4;
+	for (u32 i = 0; i < n; i++) writel(p[i], &ithc->regs->spi_cmd.data[i]);
+	bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND);
+	CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
+	if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE) return -EIO;
+	if (readw(&ithc->regs->spi_cmd.size) != size) return -EMSGSIZE;
+	for (u32 i = 0; i < n; i++) p[i] = readl(&ithc->regs->spi_cmd.data[i]);
+	writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
+	return 0;
+}
+
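// Usage sketch (annotation, not part of the patch): how a caller might use
// ithc_spi_command() to fetch the device config through the SPI command
// registers. The offset-0 read into struct ithc_device_config is an
// assumption for illustration; error logging and early return come from the
// CHECK_RET wrapper defined in ithc.h.
static int ithc_read_config_sketch(struct ithc *ithc) {
	CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0,
		sizeof(ithc->config), &ithc->config);
	return 0;
}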
diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h
new file mode 100644
index 000000000000..1a96092ed7ee
--- /dev/null
+++ b/drivers/hid/ithc/ithc-regs.h
@@ -0,0 +1,186 @@
+#define CONTROL_QUIESCE BIT(1)
+#define CONTROL_IS_QUIESCED BIT(2)
+#define CONTROL_NRESET BIT(3)
+#define CONTROL_READY BIT(29)
+
+#define SPI_CONFIG_MODE(x) (((x) & 3) << 2)
+#define SPI_CONFIG_SPEED(x) (((x) & 7) << 4)
+#define SPI_CONFIG_UNKNOWN_18(x) (((x) & 3) << 18)
+#define SPI_CONFIG_SPEED2(x) (((x) & 0xf) << 20) // high bit = high speed mode?
+
+#define ERROR_CONTROL_UNKNOWN_0 BIT(0)
+#define ERROR_CONTROL_DISABLE_DMA BIT(1) // clears DMA_RX_CONTROL_ENABLE when a DMA error occurs
+#define ERROR_CONTROL_UNKNOWN_2 BIT(2)
+#define ERROR_CONTROL_UNKNOWN_3 BIT(3)
+#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_9 BIT(9)
+#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_10 BIT(10)
+#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_12 BIT(12)
+#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_13 BIT(13)
+#define ERROR_CONTROL_UNKNOWN_16(x) (((x) & 0xff) << 16) // spi error code irq?
+#define ERROR_CONTROL_SET_DMA_STATUS BIT(29) // sets DMA_RX_STATUS_ERROR when a DMA error occurs
+
+#define ERROR_STATUS_DMA BIT(28)
+#define ERROR_STATUS_SPI BIT(30)
+
+#define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9)
+#define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10)
+#define ERROR_FLAG_DMA_UNKNOWN_12 BIT(12) // set when we receive a truncated DMA message
+#define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13)
+#define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16)
+#define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17)
+#define ERROR_FLAG_SPI_INTRA_PACKET_TIMEOUT BIT(18)
+#define ERROR_FLAG_SPI_INVALID_RESPONSE BIT(19)
+#define ERROR_FLAG_SPI_HS_RX_TIMEOUT BIT(20)
+#define ERROR_FLAG_SPI_TOUCH_IC_INIT BIT(21)
+
+#define SPI_CMD_CONTROL_SEND BIT(0) // cleared by device when sending is complete
+#define SPI_CMD_CONTROL_IRQ BIT(1)
+
+#define SPI_CMD_CODE_READ 4
+#define SPI_CMD_CODE_WRITE 6
+
+#define SPI_CMD_STATUS_DONE BIT(0)
+#define SPI_CMD_STATUS_ERROR BIT(1)
+#define SPI_CMD_STATUS_BUSY BIT(3)
+
+#define DMA_TX_CONTROL_SEND BIT(0) // cleared by device when sending is complete
+#define DMA_TX_CONTROL_IRQ BIT(3)
+
+#define DMA_TX_STATUS_DONE BIT(0)
+#define DMA_TX_STATUS_ERROR BIT(1)
+#define DMA_TX_STATUS_UNKNOWN_2 BIT(2)
+#define DMA_TX_STATUS_UNKNOWN_3 BIT(3) // busy?
+
+#define DMA_RX_CONTROL_ENABLE BIT(0)
+#define DMA_RX_CONTROL_IRQ_UNKNOWN_1 BIT(1) // rx1 only?
+#define DMA_RX_CONTROL_IRQ_ERROR BIT(3) // rx1 only?
+#define DMA_RX_CONTROL_IRQ_UNKNOWN_4 BIT(4) // rx0 only?
+#define DMA_RX_CONTROL_IRQ_DATA BIT(5)
+
+#define DMA_RX_CONTROL2_UNKNOWN_5 BIT(5) // rx0 only?
+#define DMA_RX_CONTROL2_RESET BIT(7) // resets ringbuffer indices
+
+#define DMA_RX_WRAP_FLAG BIT(7)
+
+#define DMA_RX_STATUS_ERROR BIT(3)
+#define DMA_RX_STATUS_UNKNOWN_4 BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms)
+#define DMA_RX_STATUS_HAVE_DATA BIT(5)
+#define DMA_RX_STATUS_ENABLED BIT(8)
+
+#define COUNTER_RESET BIT(31)
+
+struct ithc_registers {
+	/* 0000 */ u32 _unknown_0000[1024];
+	/* 1000 */ u32 _unknown_1000;
+	/* 1004 */ u32 _unknown_1004;
+	/* 1008 */ u32 control_bits;
+	/* 100c */ u32 _unknown_100c;
+	/* 1010 */ u32 spi_config;
+	/* 1014 */ u32 _unknown_1014[3];
+	/* 1020 */ u32 error_control;
+	/* 1024 */ u32 error_status; // write to clear
+	/* 1028 */ u32 error_flags; // write to clear
+	/* 102c */ u32 _unknown_102c[5];
+	struct {
+		/* 1040 */ u8 control;
+		/* 1041 */ u8 code;
+		/* 1042 */ u16 size;
+		/* 1044 */ u32 status; // write to clear
+		/* 1048 */ u32 offset;
+		/* 104c */ u32 data[16];
+		/* 108c */ u32 _unknown_108c;
+	} spi_cmd;
+	struct {
+		/* 1090 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
+		/* 1098 */ u8 control;
+		/* 1099 */ u8 _unknown_1099;
+		/* 109a */ u8 _unknown_109a;
+		/* 109b */ u8 num_prds;
+		/* 109c */ u32 status; // write to clear
+	} dma_tx;
+	/* 10a0 */ u32 _unknown_10a0[7];
+	/* 10bc */ u32 state; // is 0xe0000402 (dev config val 0) after CONTROL_NRESET, 0xe0000461 after first touch, 0xe0000401 after DMA_RX_CODE_RESET
+	/* 10c0 */ u32 _unknown_10c0[8];
+	/* 10e0 */ u32 _unknown_10e0_counters[3];
+	/* 10ec */ u32 _unknown_10ec[5];
+	struct {
+		/* 1100/1200 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
+		/* 1108/1208 */ u8 num_bufs;
+		/* 1109/1209 */ u8 num_prds;
+		/* 110a/120a */ u16 _unknown_110a;
+		/* 110c/120c */ u8 control;
+		/* 110d/120d */ u8 head;
+		/* 110e/120e */ u8 tail;
+		/* 110f/120f */ u8 control2;
+		/* 1110/1210 */ u32 status; // write to clear
+		/* 1114/1214 */ u32 _unknown_1114;
+		/* 1118/1218 */ u64 _unknown_1118_guc_addr;
+		/* 1120/1220 */ u32 _unknown_1120_guc;
+		/* 1124/1224 */ u32 _unknown_1124_guc;
+		/* 1128/1228 */ u32 unknown_init_bits; // bit 2 = guc related, bit 3 = rx1 related, bit 4 = guc related
+		/* 112c/122c */ u32 _unknown_112c;
+		/* 1130/1230 */ u64 _unknown_1130_guc_addr;
+		/* 1138/1238 */ u32 _unknown_1138_guc;
+		/* 113c/123c */ u32 _unknown_113c;
+		/* 1140/1240 */ u32 _unknown_1140_guc;
+		/* 1144/1244 */ u32 _unknown_1144[23];
+		/* 11a0/12a0 */ u32 _unknown_11a0_counters[6];
+		/* 11b8/12b8 */ u32 _unknown_11b8[18];
+	} dma_rx[2];
+};
static_assert(sizeof(struct ithc_registers) == 0x1300);
+
+#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6)
+#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6)
+
+#define DEVCFG_TOUCH_MASK 0x3f
+#define DEVCFG_TOUCH_ENABLE BIT(0)
+#define DEVCFG_TOUCH_UNKNOWN_1 BIT(1)
+#define DEVCFG_TOUCH_UNKNOWN_2 BIT(2)
+#define DEVCFG_TOUCH_UNKNOWN_3 BIT(3)
+#define DEVCFG_TOUCH_UNKNOWN_4 BIT(4)
+#define DEVCFG_TOUCH_UNKNOWN_5 BIT(5)
+#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6)
+
+#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC"
+
+#define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode?
+#define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3)
+#define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f)
+#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20)
+#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
+#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
+#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
+#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
+#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7)
+#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31)
+
+struct ithc_device_config {
+	u32 _unknown_00; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
+	u32 _unknown_04; // 04 = 0x00000000
+	u32 dma_buf_sizes; // 08 = 0x000a00ff
+	u32 touch_cfg; // 0c = 0x0000001c
+	u32 _unknown_10; // 10 = 0x0000001c
+	u32 device_id; // 14 = 0x43495424 = "$TIC"
+	u32 spi_config; // 18 = 0xfda00a2e
+	u16 vendor_id; // 1c = 0x045e = Microsoft Corp.
+	u16 product_id; // 1e = 0x0c1a
+	u32 revision; // 20 = 0x00000001
+	u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139
+	u32 _unknown_28; // 28 = 0x00000000
+	u32 fw_mode; // 2c = 0x00000000
+	u32 _unknown_30; // 30 = 0x00000000
+	u32 _unknown_34; // 34 = 0x0404035e (u8,u8,u8,u8 = version?)
+	u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
+	u32 _unknown_3c; // 3c = 0x00000002
+};
+
+void bitsl(__iomem u32 *reg, u32 mask, u32 val);
+void bitsb(__iomem u8 *reg, u8 mask, u8 val);
+#define bitsl_set(reg, x) bitsl(reg, x, x)
+#define bitsb_set(reg, x) bitsb(reg, x, x)
+int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val);
+int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val);
+int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode);
+int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data);
+
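// Worked example (annotation, not part of the patch): decoding the
// dma_buf_sizes value 0x000a00ff noted in the config comments above.
//   rx: ((0x000a00ff & 0x3fff) + 1) << 6 = (0xff + 1) * 64 = 16384 bytes
//   tx: (((0x000a00ff >> 14) & 0x3ff) + 1) << 6 = (0x28 + 1) * 64 = 2624 bytes
// Both can be checked at compile time:
static_assert(DEVCFG_DMA_RX_SIZE(0x000a00ff) == 16384);
static_assert(DEVCFG_DMA_TX_SIZE(0x000a00ff) == 2624);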
diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
new file mode 100644
index 000000000000..6a9b0d480bc1
--- /dev/null
+++ b/drivers/hid/ithc/ithc.h
@@ -0,0 +1,60 @@
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
+#include <linux/poll.h>
+#include <linux/timer.h>
+#include <linux/pm_qos.h>
+
+#define DEVNAME "ithc"
+#define DEVFULLNAME "Intel Touch Host Controller"
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; })
+#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while(0)
+
+#define NUM_RX_BUF 16
+
+struct ithc;
+
+#include "ithc-regs.h"
+#include "ithc-dma.h"
+
+struct ithc {
+	char phys[32];
+	struct pci_dev *pci;
+	int irq;
+	struct task_struct *poll_thread;
+	struct pm_qos_request activity_qos;
+	struct timer_list activity_timer;
+
+	struct hid_device *hid;
+	bool hid_parse_done;
+	wait_queue_head_t wait_hid_parse;
+	wait_queue_head_t wait_hid_get_feature;
+	struct mutex hid_get_feature_mutex;
+	void *hid_get_feature_buf;
+	size_t hid_get_feature_size;
+
+	struct ithc_registers __iomem *regs;
+	struct ithc_registers *prev_regs; // for debugging
+	struct ithc_device_config config;
+	struct ithc_dma_rx dma_rx[2];
+	struct ithc_dma_tx dma_tx;
+};
+
+int ithc_reset(struct ithc *ithc);
+void ithc_set_active(struct ithc *ithc);
+int ithc_debug_init(struct ithc *ithc);
+void ithc_log_regs(struct ithc *ithc);
+
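// Usage sketch (annotation, not part of the patch) for the CHECK/CHECK_RET
// macros above: CHECK calls fn(args...), logs any negative return value via
// pci_err(), and yields that value; CHECK_RET additionally returns it from
// the enclosing function. Both require a variable named "ithc" in scope.
// ithc_example_setup() and the speed/mode arguments are hypothetical,
// chosen only to illustrate the two error-handling styles.
static int ithc_example_setup(struct ithc *ithc) {
	CHECK_RET(ithc_set_spi_config, ithc, 2, 0); // fatal: propagate failure
	CHECK(ithc_debug_init, ithc); // non-fatal: log and continue
	return 0;
}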
--
2.41.0