0005-ithc.patch

From 8aadfc38967cb2804446c8bdae851377651e6248 Mon Sep 17 00:00:00 2001
From: Dorian Stoll <dorian.stoll@tmsp.io>
Date: Sun, 11 Dec 2022 12:03:38 +0100
Subject: [PATCH] iommu: intel: Disable source id verification for ITHC
Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
Patchset: ithc
---
drivers/iommu/intel/irq_remapping.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 29b9e55dcf26..986e91c813ae 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -386,6 +386,22 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 data.busmatch_count = 0;
 pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
+ /*
+ * The Intel Touch Host Controller is at 00:10.6, but for some reason
+ * the MSI interrupts have request id 01:05.0.
+ * Disable id verification to work around this.
+ * FIXME Find proper fix or turn this into a quirk.
+ */
+ if (dev->vendor == PCI_VENDOR_ID_INTEL && (dev->class >> 8) == PCI_CLASS_INPUT_PEN) {
+ switch(dev->device) {
+ case 0x98d0: case 0x98d1: // LKF
+ case 0xa0d0: case 0xa0d1: // TGL LP
+ case 0x43d0: case 0x43d1: // TGL H
+ set_irte_sid(irte, SVT_NO_VERIFY, SQ_ALL_16, 0);
+ return 0;
+ }
+ }
+
 /*
 * DMA alias provides us with a PCI device and alias. The only case
 * where the it will return an alias on a different bus than the
--
2.43.0
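
An aside on the FIXME above: if the stray 01:05.0 requester id turns out to be stable across platforms, a narrower variant could pin the IRTE to that exact source id instead of disabling verification entirely. A hypothetical, untested sketch of what the body of the switch cases could do instead:

    /* Hypothetical alternative to SVT_NO_VERIFY: verify against the fixed
     * 01:05.0 requester id observed for the ITHC MSIs. Untested; the stray
     * id may not be stable across platforms. */
    set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                 PCI_DEVID(1, PCI_DEVFN(5, 0)));
    return 0;

The surrounding vendor/class/device matching would stay exactly as in the hunk above.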
From fe08b40d122fdb102c2cc4876d2d68ac19d74ae3 Mon Sep 17 00:00:00 2001
From: quo <tuple@list.ru>
Date: Sun, 11 Dec 2022 12:10:54 +0100
Subject: [PATCH] hid: Add support for Intel Touch Host Controller
Based on quo/ithc-linux@0b8b45d
Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
Patchset: ithc
---
drivers/hid/Kconfig | 2 +
drivers/hid/Makefile | 1 +
drivers/hid/ithc/Kbuild | 6 +
drivers/hid/ithc/Kconfig | 12 +
drivers/hid/ithc/ithc-debug.c | 130 ++++
drivers/hid/ithc/ithc-dma.c | 373 +++++++++++++++++
drivers/hid/ithc/ithc-dma.h | 69 ++++
drivers/hid/ithc/ithc-main.c | 728 ++++++++++++++++++++++++++++++++++
drivers/hid/ithc/ithc-regs.c | 96 +++++
drivers/hid/ithc/ithc-regs.h | 189 +++++++++
drivers/hid/ithc/ithc.h | 67 ++++
11 files changed, 1673 insertions(+)
create mode 100644 drivers/hid/ithc/Kbuild
create mode 100644 drivers/hid/ithc/Kconfig
create mode 100644 drivers/hid/ithc/ithc-debug.c
create mode 100644 drivers/hid/ithc/ithc-dma.c
create mode 100644 drivers/hid/ithc/ithc-dma.h
create mode 100644 drivers/hid/ithc/ithc-main.c
create mode 100644 drivers/hid/ithc/ithc-regs.c
create mode 100644 drivers/hid/ithc/ithc-regs.h
create mode 100644 drivers/hid/ithc/ithc.h
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 86c6c815bd5b..a87c3c6911fb 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1343,4 +1343,6 @@ source "drivers/hid/surface-hid/Kconfig"
 source "drivers/hid/ipts/Kconfig"
+source "drivers/hid/ithc/Kconfig"
+
 endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 2ef21b257d0b..e94b79727b48 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -171,3 +171,4 @@ obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
 obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
 obj-$(CONFIG_HID_IPTS) += ipts/
+obj-$(CONFIG_HID_ITHC) += ithc/
diff --git a/drivers/hid/ithc/Kbuild b/drivers/hid/ithc/Kbuild
new file mode 100644
index 000000000000..aea83f2ac07b
--- /dev/null
+++ b/drivers/hid/ithc/Kbuild
@@ -0,0 +1,6 @@
+obj-$(CONFIG_HID_ITHC) := ithc.o
+
+ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-debug.o
+
+ccflags-y := -std=gnu11 -Wno-declaration-after-statement
+
diff --git a/drivers/hid/ithc/Kconfig b/drivers/hid/ithc/Kconfig
new file mode 100644
index 000000000000..ede713023609
--- /dev/null
+++ b/drivers/hid/ithc/Kconfig
@@ -0,0 +1,12 @@
+config HID_ITHC
+ tristate "Intel Touch Host Controller"
+ depends on PCI
+ depends on HID
+ help
+ Say Y here if your system has a touchscreen using Intel's
+ Touch Host Controller (ITHC / IPTS) technology.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ithc.
diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c
new file mode 100644
index 000000000000..1f1f1e33f2e5
--- /dev/null
+++ b/drivers/hid/ithc/ithc-debug.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+
+#include "ithc.h"
+
+void ithc_log_regs(struct ithc *ithc)
+{
+ if (!ithc->prev_regs)
+ return;
+ u32 __iomem *cur = (__iomem void *)ithc->regs;
+ u32 *prev = (void *)ithc->prev_regs;
+ for (int i = 1024; i < sizeof(*ithc->regs) / 4; i++) {
+ u32 x = readl(cur + i);
+ if (x != prev[i]) {
+ pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x);
+ prev[i] = x;
+ }
+ }
+}
+
+static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len,
+ loff_t *offset)
+{
+ // Debug commands consist of a single letter followed by a list of numbers (decimal or
+ // hexadecimal, space-separated).
+ struct ithc *ithc = file_inode(f)->i_private;
+ char cmd[256];
+ if (!ithc || !ithc->pci)
+ return -ENODEV;
+ if (!len)
+ return -EINVAL;
+ if (len >= sizeof(cmd))
+ return -EINVAL;
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+ cmd[len] = 0;
+ if (cmd[len-1] == '\n')
+ cmd[len-1] = 0;
+ pci_info(ithc->pci, "debug command: %s\n", cmd);
+
+ // Parse the list of arguments into a u32 array.
+ u32 n = 0;
+ const char *s = cmd + 1;
+ u32 a[32];
+ while (*s && *s != '\n') {
+ if (n >= ARRAY_SIZE(a))
+ return -EINVAL;
+ if (*s++ != ' ')
+ return -EINVAL;
+ char *e;
+ a[n++] = simple_strtoul(s, &e, 0);
+ if (e == s)
+ return -EINVAL;
+ s = e;
+ }
+ ithc_log_regs(ithc);
+
+ // Execute the command.
+ switch (cmd[0]) {
+ case 'x': // reset
+ ithc_reset(ithc);
+ break;
+ case 'w': // write register: offset mask value
+ if (n != 3 || (a[0] & 3))
+ return -EINVAL;
+ pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n",
+ a[0], a[2], a[1]);
+ bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]);
+ break;
+ case 'r': // read register: offset
+ if (n != 1 || (a[0] & 3))
+ return -EINVAL;
+ pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0],
+ readl(((__iomem u32 *)ithc->regs) + a[0] / 4));
+ break;
+ case 's': // spi command: cmd offset len data...
+ // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ // set touch cfg: s 6 12 4 XX
+ if (n < 3 || a[2] > (n - 3) * 4)
+ return -EINVAL;
+ pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]);
+ if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3))
+ for (u32 i = 0; i < (a[2] + 3) / 4; i++)
+ pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]);
+ break;
+ case 'd': // dma command: cmd len data...
+ // get report descriptor: d 7 8 0 0
+ // enable multitouch: d 3 2 0x0105
+ if (n < 2 || a[1] > (n - 2) * 4)
+ return -EINVAL;
+ pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]);
+ if (ithc_dma_tx(ithc, a[0], a[1], a + 2))
+ pci_err(ithc->pci, "dma tx failed\n");
+ break;
+ default:
+ return -EINVAL;
+ }
+ ithc_log_regs(ithc);
+ return len;
+}
+
+static const struct file_operations ithc_debugfops_cmd = {
+ .owner = THIS_MODULE,
+ .write = ithc_debugfs_cmd_write,
+};
+
+static void ithc_debugfs_devres_release(struct device *dev, void *res)
+{
+ struct dentry **dbgm = res;
+ if (*dbgm)
+ debugfs_remove_recursive(*dbgm);
+}
+
+int ithc_debug_init(struct ithc *ithc)
+{
+ struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof(*dbgm), GFP_KERNEL);
+ if (!dbgm)
+ return -ENOMEM;
+ devres_add(&ithc->pci->dev, dbgm);
+ struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL);
+ if (IS_ERR(dbg))
+ return PTR_ERR(dbg);
+ *dbgm = dbg;
+
+ struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd);
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
+
+ return 0;
+}
+
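A usage note on the debug interface above: it can be driven from userspace with a plain write(2). A minimal sketch, assuming DEVNAME (defined in ithc.h, not shown in this excerpt) expands to "ithc" and debugfs is mounted at the usual /sys/kernel/debug; the offset 0x1000 is an arbitrary example value:

    // Sketch: issue one 'r' (read register) debug command. The result is
    // printed to the kernel log (see the pci_info call in the 'r' handler).
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *cmd = "r 0x1000\n"; // offset must be 4-byte aligned
        int fd = open("/sys/kernel/debug/ithc/cmd", O_WRONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
            perror("write");
        close(fd);
        return 0;
    }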
diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c
new file mode 100644
index 000000000000..ffb8689b8a78
--- /dev/null
+++ b/drivers/hid/ithc/ithc-dma.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+
+#include "ithc.h"
+
+// The THC uses tables of PRDs (physical region descriptors) to describe the TX and RX data buffers.
+// Each PRD contains the DMA address and size of a block of DMA memory, and some status flags.
+// This allows each data buffer to consist of multiple non-contiguous blocks of memory.
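+// (Illustrative example, not from any datasheet: a 6 KiB buffer made of two
+// non-contiguous pages needs two PRDs; each addr field holds the page's DMA
+// address >> 10, and the second PRD carries size 2048 | PRD_FLAG_END.)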
+
+static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p,
+ unsigned int num_buffers, unsigned int num_pages, enum dma_data_direction dir)
+{
+ p->num_pages = num_pages;
+ p->dir = dir;
+ // We allocate enough space to have one PRD per data buffer page, however if the data
+ // buffer pages happen to be contiguous, we can describe the buffer using fewer PRDs, so
+ // some will remain unused (which is fine).
+ p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE);
+ p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL);
+ if (!p->addr)
+ return -ENOMEM;
+ if (p->dma_addr & (PAGE_SIZE - 1))
+ return -EFAULT;
+ return 0;
+}
+
+// Devres managed sg_table wrapper.
+struct ithc_sg_table {
+ void *addr;
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+static void ithc_dma_sgtable_free(struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_sg(sgt, sg, i) {
+ struct page *p = sg_page(sg);
+ if (p)
+ __free_page(p);
+ }
+ sg_free_table(sgt);
+}
+static void ithc_dma_data_devres_release(struct device *dev, void *res)
+{
+ struct ithc_sg_table *sgt = res;
+ if (sgt->addr)
+ vunmap(sgt->addr);
+ dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0);
+ ithc_dma_sgtable_free(&sgt->sgt);
+}
+
+static int ithc_dma_data_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
+ struct ithc_dma_data_buffer *b)
+{
+ // We don't use dma_alloc_coherent() for data buffers, because they don't have to be
+ // coherent (they are unidirectional) or contiguous (we can use one PRD per page).
+ // We could use dma_alloc_noncontiguous(), however this still always allocates a single
+ // DMA mapped segment, which is more restrictive than what we need.
+ // Instead we use an sg_table of individually allocated pages.
+ struct page *pages[16];
+ if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages))
+ return -EINVAL;
+ b->active_idx = -1;
+ struct ithc_sg_table *sgt = devres_alloc(
+ ithc_dma_data_devres_release, sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return -ENOMEM;
+ sgt->dir = prds->dir;
+
+ if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) {
+ struct scatterlist *sg;
+ int i;
+ bool ok = true;
+ for_each_sgtable_sg(&sgt->sgt, sg, i) {
+ // NOTE: don't need __GFP_DMA for PCI DMA
+ struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!p) {
+ ok = false;
+ break;
+ }
+ sg_set_page(sg, p, PAGE_SIZE, 0);
+ }
+ if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) {
+ devres_add(&ithc->pci->dev, sgt);
+ b->sgt = &sgt->sgt;
+ b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL);
+ if (!b->addr)
+ return -ENOMEM;
+ return 0;
+ }
+ ithc_dma_sgtable_free(&sgt->sgt);
+ }
+ devres_free(sgt);
+ return -ENOMEM;
+}
+
+static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
+ struct ithc_dma_data_buffer *b, unsigned int idx)
+{
+ // Give a buffer to the THC.
+ struct ithc_phys_region_desc *prd = prds->addr;
+ prd += idx * prds->num_pages;
+ if (b->active_idx >= 0) {
+ pci_err(ithc->pci, "buffer already active\n");
+ return -EINVAL;
+ }
+ b->active_idx = idx;
+ if (prds->dir == DMA_TO_DEVICE) {
+ // TX buffer: Caller should have already filled the data buffer, so just fill
+ // the PRD and flush.
+ // (TODO: Support multi-page TX buffers. So far no device seems to use or need
+ // these though.)
+ if (b->data_size > PAGE_SIZE)
+ return -EINVAL;
+ prd->addr = sg_dma_address(b->sgt->sgl) >> 10;
+ prd->size = b->data_size | PRD_FLAG_END;
+ flush_kernel_vmap_range(b->addr, b->data_size);
+ } else if (prds->dir == DMA_FROM_DEVICE) {
+ // RX buffer: Reset PRDs.
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_dma_sg(b->sgt, sg, i) {
+ prd->addr = sg_dma_address(sg) >> 10;
+ prd->size = sg_dma_len(sg);
+ prd++;
+ }
+ prd[-1].size |= PRD_FLAG_END;
+ }
+ dma_wmb(); // for the prds
+ dma_sync_sgtable_for_device(&ithc->pci->dev, b->sgt, prds->dir);
+ return 0;
+}
+
+static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
+ struct ithc_dma_data_buffer *b, unsigned int idx)
+{
+ // Take a buffer from the THC.
+ struct ithc_phys_region_desc *prd = prds->addr;
+ prd += idx * prds->num_pages;
+ // This is purely a sanity check. We don't strictly need the idx parameter for this
+ // function, because it should always be the same as active_idx, unless we have a bug.
+ if (b->active_idx != idx) {
+ pci_err(ithc->pci, "wrong buffer index\n");
+ return -EINVAL;
+ }
+ b->active_idx = -1;
+ if (prds->dir == DMA_FROM_DEVICE) {
+ // RX buffer: Calculate actual received data size from PRDs.
+ dma_rmb(); // for the prds
+ b->data_size = 0;
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_dma_sg(b->sgt, sg, i) {
+ unsigned int size = prd->size;
+ b->data_size += size & PRD_SIZE_MASK;
+ if (size & PRD_FLAG_END)
+ break;
+ if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) {
+ pci_err(ithc->pci, "truncated prd\n");
+ break;
+ }
+ prd++;
+ }
+ invalidate_kernel_vmap_range(b->addr, b->data_size);
+ }
+ dma_sync_sgtable_for_cpu(&ithc->pci->dev, b->sgt, prds->dir);
+ return 0;
+}
+
+int ithc_dma_rx_init(struct ithc *ithc, u8 channel)
+{
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ mutex_init(&rx->mutex);
+
+ // Allocate buffers.
+ u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes);
+ unsigned int num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n",
+ NUM_RX_BUF, buf_size, num_pages);
+ CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE);
+ for (unsigned int i = 0; i < NUM_RX_BUF; i++)
+ CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]);
+
+ // Init registers.
+ writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2);
+ lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr);
+ writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs);
+ writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds);
+ u8 head = readb(&ithc->regs->dma_rx[channel].head);
+ if (head) {
+ pci_err(ithc->pci, "head is nonzero (%u)\n", head);
+ return -EIO;
+ }
+
+ // Init buffers.
+ for (unsigned int i = 0; i < NUM_RX_BUF; i++)
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i);
+
+ writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail);
+ return 0;
+}
+
+void ithc_dma_rx_enable(struct ithc *ithc, u8 channel)
+{
+ bitsb_set(&ithc->regs->dma_rx[channel].control,
+ DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA);
+ CHECK(waitl, ithc, &ithc->regs->dma_rx[channel].status,
+ DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);
+}
+
+int ithc_dma_tx_init(struct ithc *ithc)
+{
+ struct ithc_dma_tx *tx = &ithc->dma_tx;
+ mutex_init(&tx->mutex);
+
+ // Allocate buffers.
+ tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes);
+ unsigned int num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n",
+ tx->max_size, num_pages);
+ CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE);
+ CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf);
+
+ // Init registers.
+ lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr);
+ writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds);
+
+ // Init buffers.
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+ return 0;
+}
+
+static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data,
+ u8 channel, u8 buf)
+{
+ if (buf >= NUM_RX_BUF) {
+ pci_err(ithc->pci, "invalid dma ringbuffer index\n");
+ return -EINVAL;
+ }
+ u32 len = data->data_size;
+ struct ithc_dma_rx_header *hdr = data->addr;
+ u8 *hiddata = (void *)(hdr + 1);
+ if (len >= sizeof(*hdr) && hdr->code == DMA_RX_CODE_RESET) {
+ // The THC sends a reset request when we need to reinitialize the device.
+ // This usually only happens if we send an invalid command or put the device
+ // in a bad state.
+ CHECK(ithc_reset, ithc);
+ } else if (len < sizeof(*hdr) || len != sizeof(*hdr) + hdr->data_size) {
+ if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
+ // When the CPU enters a low power state during DMA, we can get truncated
+ // messages. For Surface devices, this will typically be a single touch
+ // report that is only 1 byte, or a multitouch report that is 257 bytes.
+ // See also ithc_set_active().
+ } else {
+ pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n",
+ channel, buf, len, hdr->code, hdr->data_size);
+ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
+ hdr, min(len, 0x400u), 0);
+ }
+ } else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) {
+ // Response to a 'get report descriptor' request.
+ // The actual descriptor is preceded by 8 nul bytes.
+ CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8);
+ WRITE_ONCE(ithc->hid_parse_done, true);
+ wake_up(&ithc->wait_hid_parse);
+ } else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
+ // Standard HID input report containing touch data.
+ CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1);
+ } else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) {
+ // Response to a 'get feature' request.
+ bool done = false;
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ if (ithc->hid_get_feature_buf) {
+ if (hdr->data_size < ithc->hid_get_feature_size)
+ ithc->hid_get_feature_size = hdr->data_size;
+ memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size);
+ ithc->hid_get_feature_buf = NULL;
+ done = true;
+ }
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+ if (done) {
+ wake_up(&ithc->wait_hid_get_feature);
+ } else {
+ // Received data without a matching request, or the request already
+ // timed out. (XXX What's the correct thing to do here?)
+ CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT,
+ hiddata, hdr->data_size, 1);
+ }
+ } else {
+ pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n",
+ channel, buf, len, hdr->code);
+ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
+ hdr, min(len, 0x400u), 0);
+ }
+ return 0;
+}
+
+static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel)
+{
+ // Process all filled RX buffers from the ringbuffer.
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ unsigned int n = rx->num_received;
+ u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head);
+ while (1) {
+ u8 tail = n % NUM_RX_BUF;
+ u8 tail_wrap = tail | ((n / NUM_RX_BUF) & 1 ? 0 : DMA_RX_WRAP_FLAG);
+ writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail);
+ // ringbuffer is full if tail_wrap == head_wrap
+ // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG
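+ // Worked example: right after ithc_dma_rx_init(), head_wrap is 0 and the tail
+ // register was written as DMA_RX_WRAP_FLAG, so tail_wrap == head_wrap ^
+ // DMA_RX_WRAP_FLAG holds and the ring reads as empty; once the device fills
+ // buffer 0 and advances its head, the equality breaks and we process buffer 0.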
+ if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG))
+ return 0;
+
+ // take the buffer that the device just filled
+ struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF];
+ CHECK_RET(ithc_dma_data_buffer_get, ithc, &rx->prds, b, tail);
+ rx->num_received = ++n;
+
+ // process data
+ CHECK(ithc_dma_rx_process_buf, ithc, b, channel, tail);
+
+ // give the buffer back to the device
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail);
+ }
+}
+int ithc_dma_rx(struct ithc *ithc, u8 channel)
+{
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ mutex_lock(&rx->mutex);
+ int ret = ithc_dma_rx_unlocked(ithc, channel);
+ mutex_unlock(&rx->mutex);
+ return ret;
+}
+
+static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data)
+{
+ ithc_set_active(ithc, 100 * USEC_PER_MSEC);
+
+ // Send a single TX buffer to the THC.
+ pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize);
+ struct ithc_dma_tx_header *hdr;
+ // Data must be padded to next 4-byte boundary.
+ u8 padding = datasize & 3 ? 4 - (datasize & 3) : 0;
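+ // (e.g. datasize 5 -> padding 3, datasize 8 -> padding 0, so fullsize stays 4-byte aligned)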
+ unsigned int fullsize = sizeof(*hdr) + datasize + padding;
+ if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE)
+ return -EINVAL;
+ CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+
+ // Fill the TX buffer with header and data.
+ ithc->dma_tx.buf.data_size = fullsize;
+ hdr = ithc->dma_tx.buf.addr;
+ hdr->code = cmdcode;
+ hdr->data_size = datasize;
+ u8 *dest = (void *)(hdr + 1);
+ memcpy(dest, data, datasize);
+ dest += datasize;
+ for (u8 p = 0; p < padding; p++)
+ *dest++ = 0;
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+
+ // Let the THC process the buffer.
+ bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND);
+ CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
+ writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status);
+ return 0;
+}
+int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data)
+{
+ mutex_lock(&ithc->dma_tx.mutex);
+ int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data);
+ mutex_unlock(&ithc->dma_tx.mutex);
+ return ret;
+}
+
diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h
new file mode 100644
index 000000000000..93652e4476bf
--- /dev/null
+++ b/drivers/hid/ithc/ithc-dma.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#define PRD_SIZE_MASK 0xffffff
+#define PRD_FLAG_END 0x1000000
+#define PRD_FLAG_SUCCESS 0x2000000
+#define PRD_FLAG_ERROR 0x4000000
+
+struct ithc_phys_region_desc {
+ u64 addr; // physical addr/1024
+ u32 size; // num bytes, PRD_FLAG_END marks last prd for data split over multiple prds
+ u32 unused;
+};
+
+#define DMA_RX_CODE_INPUT_REPORT 3
+#define DMA_RX_CODE_FEATURE_REPORT 4
+#define DMA_RX_CODE_REPORT_DESCRIPTOR 5
+#define DMA_RX_CODE_RESET 7
+
+struct ithc_dma_rx_header {
+ u32 code;
+ u32 data_size;
+ u32 _unknown[14];
+};
+
+#define DMA_TX_CODE_SET_FEATURE 3
+#define DMA_TX_CODE_GET_FEATURE 4
+#define DMA_TX_CODE_OUTPUT_REPORT 5
+#define DMA_TX_CODE_GET_REPORT_DESCRIPTOR 7
+
+struct ithc_dma_tx_header {
+ u32 code;
+ u32 data_size;
+};
+
+struct ithc_dma_prd_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ u32 size;
+ u32 num_pages; // per data buffer
+ enum dma_data_direction dir;
+};
+
+struct ithc_dma_data_buffer {
+ void *addr;
+ struct sg_table *sgt;
+ int active_idx;
+ u32 data_size;
+};
+
+struct ithc_dma_tx {
+ struct mutex mutex;
+ u32 max_size;
+ struct ithc_dma_prd_buffer prds;
+ struct ithc_dma_data_buffer buf;
+};
+
+struct ithc_dma_rx {
+ struct mutex mutex;
+ u32 num_received;
+ struct ithc_dma_prd_buffer prds;
+ struct ithc_dma_data_buffer bufs[NUM_RX_BUF];
+};
+
+int ithc_dma_rx_init(struct ithc *ithc, u8 channel);
+void ithc_dma_rx_enable(struct ithc *ithc, u8 channel);
+int ithc_dma_tx_init(struct ithc *ithc);
+int ithc_dma_rx(struct ithc *ithc, u8 channel);
+int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *cmddata);
+
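To make the TX framing implied by ithc_dma_tx_unlocked() concrete: a buffer is the 8-byte ithc_dma_tx_header followed by the payload, zero-padded to a 4-byte boundary. A standalone sketch (hypothetical helper that mirrors the driver logic, not code taken from the patch):

    #include <stdint.h>
    #include <string.h>

    struct ithc_dma_tx_header {
        uint32_t code;
        uint32_t data_size;
    };

    // Frame one TX command into buf; returns the padded total size.
    size_t ithc_frame_tx(void *buf, uint32_t code, const void *data, uint32_t len)
    {
        struct ithc_dma_tx_header *hdr = buf;
        uint8_t pad = len & 3 ? 4 - (len & 3) : 0;
        hdr->code = code;
        hdr->data_size = len; // header records the unpadded payload size
        memcpy(hdr + 1, data, len);
        memset((uint8_t *)(hdr + 1) + len, 0, pad); // zero the padding bytes
        return sizeof(*hdr) + len + pad;
    }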
diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c
new file mode 100644
index 000000000000..87ed4aa70fda
--- /dev/null
+++ b/drivers/hid/ithc/ithc-main.c
@@ -0,0 +1,728 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+
+#include "ithc.h"
+
+MODULE_DESCRIPTION("Intel Touch Host Controller driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+// Lakefield
+#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0
+#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1
+// Tiger Lake
+#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0
+#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1
+#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 0x43d0
+#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1
+// Alder Lake
+#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8
+#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9
+#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0
+#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1
+#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0
+#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1
+// Raptor Lake
+#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58
+#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59
+// Meteor Lake
+#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48
+#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 0x7e4a
+
+static const struct pci_device_id ithc_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) },
+ // XXX So far the THC seems to be the only Intel PCI device with PCI_CLASS_INPUT_PEN,
+ // so instead of the device list we could just do:
+ // { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = PCI_CLASS_INPUT_PEN, .class_mask = ~0, },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, ithc_pci_tbl);
+
+// Module parameters
+
+static bool ithc_use_polling = false;
+module_param_named(poll, ithc_use_polling, bool, 0);
+MODULE_PARM_DESC(poll, "Use polling instead of interrupts");
+
+// Since all known devices seem to use only channel 1, by default we disable channel 0.
+static bool ithc_use_rx0 = false;
+module_param_named(rx0, ithc_use_rx0, bool, 0);
+MODULE_PARM_DESC(rx0, "Use DMA RX channel 0");
+
+static bool ithc_use_rx1 = true;
+module_param_named(rx1, ithc_use_rx1, bool, 0);
+MODULE_PARM_DESC(rx1, "Use DMA RX channel 1");
+
+// Values below 250 seem to work well on the SP7+. If this is set too high, you may observe cursor stuttering.
+static int ithc_dma_latency_us = 200;
+module_param_named(dma_latency_us, ithc_dma_latency_us, int, 0);
+MODULE_PARM_DESC(dma_latency_us, "Determines the CPU latency QoS value for DMA transfers (in microseconds), -1 to disable latency QoS");
+
+// Values above 1700 seem to work well on the SP7+. If this is set too low, you may observe cursor stuttering.
+static unsigned int ithc_dma_early_us = 2000;
+module_param_named(dma_early_us, ithc_dma_early_us, uint, 0);
+MODULE_PARM_DESC(dma_early_us, "Determines how early the CPU latency QoS value is applied before the next expected IRQ (in microseconds)");
+
+static bool ithc_log_regs_enabled = false;
+module_param_named(logregs, ithc_log_regs_enabled, bool, 0);
+MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)");
+
+// Sysfs attributes
+
+static bool ithc_is_config_valid(struct ithc *ithc)
+{
+ return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC;
+}
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc))
+ return -ENODEV;
+ return sprintf(buf, "0x%04x", ithc->config.vendor_id);
+}
+static DEVICE_ATTR_RO(vendor);
+static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc))
+ return -ENODEV;
+ return sprintf(buf, "0x%04x", ithc->config.product_id);
+}
+static DEVICE_ATTR_RO(product);
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc))
+ return -ENODEV;
+ return sprintf(buf, "%u", ithc->config.revision);
+}
+static DEVICE_ATTR_RO(revision);
+static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc))
+ return -ENODEV;
+ u32 v = ithc->config.fw_version;
+ return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static const struct attribute_group *ithc_attribute_groups[] = {
+ &(const struct attribute_group){
+ .name = DEVNAME,
+ .attrs = (struct attribute *[]){
+ &dev_attr_vendor.attr,
+ &dev_attr_product.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_fw_version.attr,
+ NULL
+ },
+ },
+ NULL
+};
+
+// HID setup
+
+static int ithc_hid_start(struct hid_device *hdev) { return 0; }
+static void ithc_hid_stop(struct hid_device *hdev) { }
+static int ithc_hid_open(struct hid_device *hdev) { return 0; }
+static void ithc_hid_close(struct hid_device *hdev) { }
+
+static int ithc_hid_parse(struct hid_device *hdev)
+{
+ struct ithc *ithc = hdev->driver_data;
+ u64 val = 0;
+ WRITE_ONCE(ithc->hid_parse_done, false);
+ for (int retries = 0; ; retries++) {
+ CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof(val), &val);
+ if (wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done),
+ msecs_to_jiffies(200)))
+ return 0;
+ if (retries > 5) {
+ pci_err(ithc->pci, "failed to read report descriptor\n");
+ return -ETIMEDOUT;
+ }
+ pci_warn(ithc->pci, "failed to read report descriptor, retrying\n");
+ }
+}
+
+static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf,
+ size_t len, unsigned char rtype, int reqtype)
+{
+ struct ithc *ithc = hdev->driver_data;
+ if (!buf || !len)
+ return -EINVAL;
+ u32 code;
+ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) {
+ code = DMA_TX_CODE_OUTPUT_REPORT;
+ } else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) {
+ code = DMA_TX_CODE_SET_FEATURE;
+ } else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) {
+ code = DMA_TX_CODE_GET_FEATURE;
+ } else {
+ pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n",
+ rtype, reqtype, reportnum);
+ return -EINVAL;
+ }
+ buf[0] = reportnum;
+
+ if (reqtype == HID_REQ_GET_REPORT) {
+ // Prepare for response.
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ ithc->hid_get_feature_buf = buf;
+ ithc->hid_get_feature_size = len;
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+
+ // Transmit 'get feature' request.
+ int r = CHECK(ithc_dma_tx, ithc, code, 1, buf);
+ if (!r) {
+ r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature,
+ !ithc->hid_get_feature_buf, msecs_to_jiffies(1000));
+ if (!r)
+ r = -ETIMEDOUT;
+ else if (r < 0)
+ r = -EINTR;
+ else
+ r = 0;
+ }
+
+ // If everything went ok, the buffer has been filled with the response data.
+ // Return the response size.
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ ithc->hid_get_feature_buf = NULL;
+ if (!r)
+ r = ithc->hid_get_feature_size;
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+ return r;
+ }
+
+ // 'Set feature', or 'output report'. These don't have a response.
+ CHECK_RET(ithc_dma_tx, ithc, code, len, buf);
+ return 0;
+}
+
+static struct hid_ll_driver ithc_ll_driver = {
+ .start = ithc_hid_start,
+ .stop = ithc_hid_stop,
+ .open = ithc_hid_open,
+ .close = ithc_hid_close,
+ .parse = ithc_hid_parse,
+ .raw_request = ithc_hid_raw_request,
+};
+
+static void ithc_hid_devres_release(struct device *dev, void *res)
+{
+ struct hid_device **hidm = res;
+ if (*hidm)
+ hid_destroy_device(*hidm);
+}
+
+static int ithc_hid_init(struct ithc *ithc)
+{
+ struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof(*hidm), GFP_KERNEL);
+ if (!hidm)
+ return -ENOMEM;
+ devres_add(&ithc->pci->dev, hidm);
+ struct hid_device *hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+ *hidm = hid;
+
+ strscpy(hid->name, DEVFULLNAME, sizeof(hid->name));
+ strscpy(hid->phys, ithc->phys, sizeof(hid->phys));
+ hid->ll_driver = &ithc_ll_driver;
+ hid->bus = BUS_PCI;
+ hid->vendor = ithc->config.vendor_id;
+ hid->product = ithc->config.product_id;
+ hid->version = 0x100;
+ hid->dev.parent = &ithc->pci->dev;
+ hid->driver_data = ithc;
+
+ ithc->hid = hid;
+ return 0;
+}
+
+// Interrupts/polling
+
+static enum hrtimer_restart ithc_activity_start_timer_callback(struct hrtimer *t)
+{
+ struct ithc *ithc = container_of(t, struct ithc, activity_start_timer);
+ ithc_set_active(ithc, ithc_dma_early_us * 2 + USEC_PER_MSEC);
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart ithc_activity_end_timer_callback(struct hrtimer *t)
+{
+ struct ithc *ithc = container_of(t, struct ithc, activity_end_timer);
+ cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
+ return HRTIMER_NORESTART;
+}
+
+void ithc_set_active(struct ithc *ithc, unsigned int duration_us)
+{
+ if (ithc_dma_latency_us < 0)
+ return;
+ // When CPU usage is very low, the CPU can enter various low power states (C2-C10).
+ // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_RX_TIMEOUT will be
+ // set when this happens. The amount of truncated messages can become very high, resulting
+ // in user-visible effects (laggy/stuttering cursor). To avoid this, we use a CPU latency
+ // QoS request to prevent the CPU from entering low power states during touch interactions.
+ cpu_latency_qos_update_request(&ithc->activity_qos, ithc_dma_latency_us);
+ hrtimer_start_range_ns(&ithc->activity_end_timer,
+ ns_to_ktime(duration_us * NSEC_PER_USEC), duration_us * NSEC_PER_USEC, HRTIMER_MODE_REL);
+}
+
+static int ithc_set_device_enabled(struct ithc *ithc, bool enable)
+{
+ u32 x = ithc->config.touch_cfg =
+ (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2 |
+ (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0);
+ return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE,
+ offsetof(struct ithc_device_config, touch_cfg), sizeof(x), &x);
+}
+
+static void ithc_disable_interrupts(struct ithc *ithc)
+{
+ writel(0, &ithc->regs->error_control);
+ bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0);
+ bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
+ bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
+ bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0);
+}
+
+static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned int channel)
+{
+ writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA,
+ &ithc->regs->dma_rx[channel].status);
+}
+
+static void ithc_clear_interrupts(struct ithc *ithc)
+{
+ writel(0xffffffff, &ithc->regs->error_flags);
+ writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status);
+ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
+ ithc_clear_dma_rx_interrupts(ithc, 0);
+ ithc_clear_dma_rx_interrupts(ithc, 1);
+ writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2,
+ &ithc->regs->dma_tx.status);
+}
+
+static void ithc_process(struct ithc *ithc)
+{
+ ithc_log_regs(ithc);
+
+ bool rx0 = ithc_use_rx0 && (readl(&ithc->regs->dma_rx[0].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
+ bool rx1 = ithc_use_rx1 && (readl(&ithc->regs->dma_rx[1].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
+
+ // Track time between DMA rx transfers, so we can try to predict when we need to enable CPU latency QoS for the next transfer
+ ktime_t t = ktime_get();
+ ktime_t dt = ktime_sub(t, ithc->last_rx_time);
+ if (rx0 || rx1) {
+ ithc->last_rx_time = t;
+ if (dt > ms_to_ktime(100)) {
+ ithc->cur_rx_seq_count = 0;
+ ithc->cur_rx_seq_errors = 0;
+ }
+ ithc->cur_rx_seq_count++;
+ if (!ithc_use_polling && ithc_dma_latency_us >= 0) {
+ // Disable QoS, since the DMA transfer has completed (we re-enable it after a delay below)
+ cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
+ hrtimer_try_to_cancel(&ithc->activity_end_timer);
+ }
+ }
+
+ // Read and clear error bits
+ u32 err = readl(&ithc->regs->error_flags);
+ if (err) {
+ writel(err, &ithc->regs->error_flags);
+ if (err & ~ERROR_FLAG_DMA_RX_TIMEOUT)
+ pci_err(ithc->pci, "error flags: 0x%08x\n", err);
+ if (err & ERROR_FLAG_DMA_RX_TIMEOUT) {
+ // Only log an error if we see a significant number of these errors.
+ ithc->cur_rx_seq_errors++;
+ if (ithc->cur_rx_seq_errors && ithc->cur_rx_seq_errors % 50 == 0 && ithc->cur_rx_seq_errors > ithc->cur_rx_seq_count / 10)
+ pci_err(ithc->pci, "High number of DMA RX timeouts/errors (%u/%u, dt=%lldus). Try adjusting dma_early_us and/or dma_latency_us.\n",
+ ithc->cur_rx_seq_errors, ithc->cur_rx_seq_count, ktime_to_us(dt));
+ }
+ }
+
+ // Process DMA rx
+ if (ithc_use_rx0) {
+ ithc_clear_dma_rx_interrupts(ithc, 0);
+ if (rx0)
+ ithc_dma_rx(ithc, 0);
+ }
+ if (ithc_use_rx1) {
+ ithc_clear_dma_rx_interrupts(ithc, 1);
+ if (rx1)
+ ithc_dma_rx(ithc, 1);
+ }
+
+ // Start timer to re-enable QoS for next rx, but only if we've seen an ERROR_FLAG_DMA_RX_TIMEOUT
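+ // (Worked example: if reports arrive every ~8 ms and dma_early_us is 2000,
+ // expires = now + 8 ms - 2 ms, i.e. the QoS request is re-applied ~2 ms
+ // before the next transfer is expected.)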
  1085. + if ((rx0 || rx1) && !ithc_use_polling && ithc_dma_latency_us >= 0 && ithc->cur_rx_seq_errors > 0) {
  1086. + ktime_t expires = ktime_add(t, ktime_sub_us(dt, ithc_dma_early_us));
  1087. + hrtimer_start_range_ns(&ithc->activity_start_timer, expires, 10 * NSEC_PER_USEC, HRTIMER_MODE_ABS);
  1088. + }
  1089. +
  1090. + ithc_log_regs(ithc);
  1091. +}
  1092. +
  1093. +static irqreturn_t ithc_interrupt_thread(int irq, void *arg)
  1094. +{
  1095. + struct ithc *ithc = arg;
  1096. + pci_dbg(ithc->pci, "IRQ! err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n",
  1097. + readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags),
  1098. + readb(&ithc->regs->spi_cmd.control), readl(&ithc->regs->spi_cmd.status),
  1099. + readb(&ithc->regs->dma_rx[0].control), readl(&ithc->regs->dma_rx[0].status),
  1100. + readb(&ithc->regs->dma_rx[1].control), readl(&ithc->regs->dma_rx[1].status),
  1101. + readb(&ithc->regs->dma_tx.control), readl(&ithc->regs->dma_tx.status));
  1102. + ithc_process(ithc);
  1103. + return IRQ_HANDLED;
  1104. +}
  1105. +
  1106. +static int ithc_poll_thread(void *arg)
  1107. +{
  1108. + struct ithc *ithc = arg;
  1109. + unsigned int sleep = 100;
  1110. + while (!kthread_should_stop()) {
  1111. + u32 n = ithc->dma_rx[1].num_received;
  1112. + ithc_process(ithc);
  1113. + // Decrease polling interval to 20ms if we received data, otherwise slowly
  1114. + // increase it up to 200ms.
  1115. + if (n != ithc->dma_rx[1].num_received) {
  1116. + ithc_set_active(ithc, 100 * USEC_PER_MSEC);
  1117. + sleep = 20;
  1118. + } else {
  1119. + sleep = min(200u, sleep + (sleep >> 4) + 1);
  1120. + }
  1121. + msleep_interruptible(sleep);
  1122. + }
  1123. + return 0;
  1124. +}
  1125. +
  1126. +// Device initialization and shutdown
  1127. +
  1128. +static void ithc_disable(struct ithc *ithc)
  1129. +{
  1130. + bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE);
  1131. + CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED);
  1132. + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
  1133. + bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND, 0);
  1134. + bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
  1135. + bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_ENABLE, 0);
  1136. + bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_ENABLE, 0);
  1137. + ithc_disable_interrupts(ithc);
  1138. + ithc_clear_interrupts(ithc);
  1139. +}
  1140. +
  1141. +static int ithc_init_device(struct ithc *ithc)
  1142. +{
  1143. + ithc_log_regs(ithc);
  1144. + bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0;
  1145. + ithc_disable(ithc);
  1146. + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY);
  1147. +
  1148. + // Since we don't yet know which SPI config the device wants, use default speed and mode
  1149. + // initially for reading config data.
  1150. + ithc_set_spi_config(ithc, 10, 0);
  1151. +
  1152. + // Setting the following bit seems to make reading the config more reliable.
  1153. + bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000);
  1154. +
  1155. + // If the device was previously enabled, wait a bit to make sure it's fully shut down.
  1156. + if (was_enabled)
  1157. + if (msleep_interruptible(100))
  1158. + return -EINTR;
  1159. +
  1160. + // Take the touch device out of reset.
  1161. + bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
  1162. + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0);
  1163. + for (int retries = 0; ; retries++) {
  1164. + ithc_log_regs(ithc);
  1165. + bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET);
  1166. + if (!waitl(ithc, &ithc->regs->state, 0xf, 2))
  1167. + break;
  1168. + if (retries > 5) {
  1169. + pci_err(ithc->pci, "failed to reset device, state = 0x%08x\n", readl(&ithc->regs->state));
  1170. + return -ETIMEDOUT;
  1171. + }
  1172. + pci_warn(ithc->pci, "invalid state, retrying reset\n");
  1173. + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
  1174. + if (msleep_interruptible(1000))
  1175. + return -EINTR;
  1176. + }
  1177. + ithc_log_regs(ithc);
  1178. +
  1179. + // Waiting for the following status bit makes reading config much more reliable,
  1180. + // however the official driver does not seem to do this...
  1181. + CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4);
+
+ // Read configuration data.
+ for (int retries = 0; ; retries++) {
+ ithc_log_regs(ithc);
+ memset(&ithc->config, 0, sizeof(ithc->config));
+ CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof(ithc->config), &ithc->config);
+ u32 *p = (void *)&ithc->config;
+ pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+ if (ithc_is_config_valid(ithc))
+ break;
+ if (retries > 10) {
+ pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n",
+ ithc->config.device_id);
+ return -EIO;
+ }
+ pci_warn(ithc->pci, "failed to read config, retrying\n");
+ if (msleep_interruptible(100))
+ return -EINTR;
+ }
+ ithc_log_regs(ithc);
+
+ // Apply SPI config and enable touch device.
+ CHECK_RET(ithc_set_spi_config, ithc,
+ DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config),
+ DEVCFG_SPI_MODE(ithc->config.spi_config));
+ CHECK_RET(ithc_set_device_enabled, ithc, true);
+ ithc_log_regs(ithc);
+ return 0;
+}
+
+int ithc_reset(struct ithc *ithc)
+{
+ // FIXME This should probably do devres_release_group()+ithc_start().
+ // But because this is called during DMA processing, that would have to be done
+ // asynchronously (schedule_work()?). And with extra locking?
+ pci_err(ithc->pci, "reset\n");
+ CHECK(ithc_init_device, ithc);
+ if (ithc_use_rx0)
+ ithc_dma_rx_enable(ithc, 0);
+ if (ithc_use_rx1)
+ ithc_dma_rx_enable(ithc, 1);
+ ithc_log_regs(ithc);
+ pci_dbg(ithc->pci, "reset completed\n");
+ return 0;
+}
+
+static void ithc_stop(void *res)
+{
+ struct ithc *ithc = res;
+ pci_dbg(ithc->pci, "stopping\n");
+ ithc_log_regs(ithc);
+
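+ // Stop the polling thread / IRQ handler first, so nothing touches the hardware while it is shut down.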
+ if (ithc->poll_thread)
+ CHECK(kthread_stop, ithc->poll_thread);
+ if (ithc->irq >= 0)
+ disable_irq(ithc->irq);
+ CHECK(ithc_set_device_enabled, ithc, false);
+ ithc_disable(ithc);
+ hrtimer_cancel(&ithc->activity_start_timer);
+ hrtimer_cancel(&ithc->activity_end_timer);
+ cpu_latency_qos_remove_request(&ithc->activity_qos);
+
+ // Clear DMA config.
+ for (unsigned int i = 0; i < 2; i++) {
+ CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0);
+ lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr);
+ writeb(0, &ithc->regs->dma_rx[i].num_bufs);
+ writeb(0, &ithc->regs->dma_rx[i].num_prds);
+ }
+ lo_hi_writeq(0, &ithc->regs->dma_tx.addr);
+ writeb(0, &ithc->regs->dma_tx.num_prds);
+
+ ithc_log_regs(ithc);
+ pci_dbg(ithc->pci, "stopped\n");
+}
+
+static void ithc_clear_drvdata(void *res)
+{
+ struct pci_dev *pci = res;
+ pci_set_drvdata(pci, NULL);
+}
+
+static int ithc_start(struct pci_dev *pci)
+{
+ pci_dbg(pci, "starting\n");
+ if (pci_get_drvdata(pci)) {
+ pci_err(pci, "device already initialized\n");
+ return -EINVAL;
+ }
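+ // All resources below are allocated in a devres group keyed on ithc_start(), so that
+ // remove/suspend can tear everything down with a single devres_release_group() call.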
+ if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL))
+ return -ENOMEM;
+
+ // Allocate/init main driver struct.
+ struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof(*ithc), GFP_KERNEL);
+ if (!ithc)
+ return -ENOMEM;
+ ithc->irq = -1;
+ ithc->pci = pci;
+ snprintf(ithc->phys, sizeof(ithc->phys), "pci-%s/" DEVNAME, pci_name(pci));
+ init_waitqueue_head(&ithc->wait_hid_parse);
+ init_waitqueue_head(&ithc->wait_hid_get_feature);
+ mutex_init(&ithc->hid_get_feature_mutex);
+ pci_set_drvdata(pci, ithc);
+ CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci);
+ if (ithc_log_regs_enabled)
+ ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof(*ithc->prev_regs), GFP_KERNEL);
+
+ // PCI initialization.
+ CHECK_RET(pcim_enable_device, pci);
+ pci_set_master(pci);
+ CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs");
+ CHECK_RET(dma_set_mask_and_coherent, &pci->dev, DMA_BIT_MASK(64));
+ CHECK_RET(pci_set_power_state, pci, PCI_D0);
+ ithc->regs = pcim_iomap_table(pci)[0];
+
+ // Allocate IRQ.
+ if (!ithc_use_polling) {
+ CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ ithc->irq = CHECK(pci_irq_vector, pci, 0);
+ if (ithc->irq < 0)
+ return ithc->irq;
+ }
+
+ // Initialize THC and touch device.
+ CHECK_RET(ithc_init_device, ithc);
+ CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups);
+ if (ithc_use_rx0)
+ CHECK_RET(ithc_dma_rx_init, ithc, 0);
+ if (ithc_use_rx1)
+ CHECK_RET(ithc_dma_rx_init, ithc, 1);
+ CHECK_RET(ithc_dma_tx_init, ithc);
+
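+ // The activity timers manage a CPU latency QoS request, which seems intended to keep
+ // wakeup latency low while touch input is being processed.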
+ cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
+ hrtimer_init(&ithc->activity_start_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ithc->activity_start_timer.function = ithc_activity_start_timer_callback;
+ hrtimer_init(&ithc->activity_end_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ithc->activity_end_timer.function = ithc_activity_end_timer_callback;
+
+ // Add ithc_stop() callback AFTER setting up DMA buffers, so that polling/irqs/DMA are
+ // disabled BEFORE the buffers are freed.
+ CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc);
+
+ CHECK_RET(ithc_hid_init, ithc);
+
+ // Start polling/IRQ.
+ if (ithc_use_polling) {
+ pci_info(pci, "using polling instead of irq\n");
+ // Use a thread instead of a simple timer because we want to be able to sleep.
+ ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll");
+ if (IS_ERR(ithc->poll_thread)) {
+ int err = PTR_ERR(ithc->poll_thread);
+ ithc->poll_thread = NULL;
+ return err;
+ }
+ } else {
+ CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL,
+ ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc);
+ }
+
+ if (ithc_use_rx0)
+ ithc_dma_rx_enable(ithc, 0);
+ if (ithc_use_rx1)
+ ithc_dma_rx_enable(ithc, 1);
+
+ // hid_add_device() can only be called after irq/polling is started and DMA is enabled,
+ // because it calls ithc_hid_parse() which reads the report descriptor via DMA.
+ CHECK_RET(hid_add_device, ithc->hid);
+
+ CHECK(ithc_debug_init, ithc);
+
+ pci_dbg(pci, "started\n");
+ return 0;
+}
+
+static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ pci_dbg(pci, "device probe\n");
+ return ithc_start(pci);
+}
+
+static void ithc_remove(struct pci_dev *pci)
+{
+ pci_dbg(pci, "device remove\n");
+ // All cleanup is handled by devres.
+}
+
+// For suspend/resume, we just deinitialize and reinitialize everything.
+// TODO It might be cleaner to keep the HID device around; however, we would then have to
+// signal to userspace that the touch device has lost state and that userspace needs to
+// e.g. resend 'set feature' requests. Hidraw does not seem to have a facility to do that.
+static int ithc_suspend(struct device *dev)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm suspend\n");
+ devres_release_group(dev, ithc_start);
+ return 0;
+}
+
+static int ithc_resume(struct device *dev)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm resume\n");
+ return ithc_start(pci);
+}
+
+static int ithc_freeze(struct device *dev)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm freeze\n");
+ devres_release_group(dev, ithc_start);
+ return 0;
+}
+
+static int ithc_thaw(struct device *dev)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm thaw\n");
+ return ithc_start(pci);
+}
+
+static int ithc_restore(struct device *dev)
+{
+ struct pci_dev *pci = to_pci_dev(dev);
+ pci_dbg(pci, "pm restore\n");
+ return ithc_start(pci);
+}
+
+static struct pci_driver ithc_driver = {
+ .name = DEVNAME,
+ .id_table = ithc_pci_tbl,
+ .probe = ithc_probe,
+ .remove = ithc_remove,
+ .driver.pm = &(const struct dev_pm_ops) {
+ .suspend = ithc_suspend,
+ .resume = ithc_resume,
+ .freeze = ithc_freeze,
+ .thaw = ithc_thaw,
+ .restore = ithc_restore,
+ },
+ //.dev_groups = ithc_attribute_groups, // could use this (since 5.14); however, the attributes won't have valid values until the config has been read anyway
+};
+
+static int __init ithc_init(void)
+{
+ return pci_register_driver(&ithc_driver);
+}
+
+static void __exit ithc_exit(void)
+{
+ pci_unregister_driver(&ithc_driver);
+}
+
+module_init(ithc_init);
+module_exit(ithc_exit);
+
diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c
new file mode 100644
index 000000000000..e058721886e3
--- /dev/null
+++ b/drivers/hid/ithc/ithc-regs.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+
+#include "ithc.h"
+
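+// Offset of a register within the MMIO region (low 13 bits of its address), used in log messages.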
+#define reg_num(r) (0x1fff & (u16)(__force u64)(r))
+
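+// Read-modify-write helpers: set the masked bits of a register to val, leaving other bits unchanged.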
+void bitsl(__iomem u32 *reg, u32 mask, u32 val)
+{
+ if (val & ~mask)
+ pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n",
+ reg_num(reg), val, mask);
+ writel((readl(reg) & ~mask) | (val & mask), reg);
+}
+
+void bitsb(__iomem u8 *reg, u8 mask, u8 val)
+{
+ if (val & ~mask)
+ pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n",
+ reg_num(reg), val, mask);
+ writeb((readb(reg) & ~mask) | (val & mask), reg);
+}
+
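+// Poll a register every 200us until (value & mask) == val, timing out after 1 second.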
+int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val)
+{
+ pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n",
+ reg_num(reg), mask, val);
+ u32 x;
+ if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
+ pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n",
+ reg_num(reg), mask, val);
+ return -ETIMEDOUT;
+ }
+ pci_dbg(ithc->pci, "done waiting\n");
+ return 0;
+}
+
+int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val)
+{
+ pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
+ reg_num(reg), mask, val);
+ u8 x;
+ if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
+ pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
+ reg_num(reg), mask, val);
+ return -ETIMEDOUT;
+ }
+ pci_dbg(ithc->pci, "done waiting\n");
+ return 0;
+}
+
+int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode)
+{
+ pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode);
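+ // SPI mode 3 apparently does not work correctly and is remapped to mode 2.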
+ if (mode == 3)
+ mode = 2;
+ bitsl(&ithc->regs->spi_config,
+ SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff),
+ SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed));
+ return 0;
+}
+
+int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data)
+{
+ pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset);
+ if (size > sizeof(ithc->regs->spi_cmd.data))
+ return -EINVAL;
+
+ // Wait if the device is still busy.
+ CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
+ // Clear result flags.
+ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
+
+ // Init SPI command data.
+ writeb(command, &ithc->regs->spi_cmd.code);
+ writew(size, &ithc->regs->spi_cmd.size);
+ writel(offset, &ithc->regs->spi_cmd.offset);
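+ // Copy the command payload, rounded up to whole 32-bit words.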
+ u32 *p = data, n = (size + 3) / 4;
+ for (u32 i = 0; i < n; i++)
+ writel(p[i], &ithc->regs->spi_cmd.data[i]);
+
+ // Start transmission.
+ bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND);
+ CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
+
+ // Read response.
+ if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE)
+ return -EIO;
+ if (readw(&ithc->regs->spi_cmd.size) != size)
+ return -EMSGSIZE;
+ for (u32 i = 0; i < n; i++)
+ p[i] = readl(&ithc->regs->spi_cmd.data[i]);
+
+ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
+ return 0;
+}
+
diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h
new file mode 100644
index 000000000000..d4007d9e2bac
--- /dev/null
+++ b/drivers/hid/ithc/ithc-regs.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#define CONTROL_QUIESCE BIT(1)
+#define CONTROL_IS_QUIESCED BIT(2)
+#define CONTROL_NRESET BIT(3)
+#define CONTROL_READY BIT(29)
+
+#define SPI_CONFIG_MODE(x) (((x) & 3) << 2)
+#define SPI_CONFIG_SPEED(x) (((x) & 7) << 4)
+#define SPI_CONFIG_UNKNOWN_18(x) (((x) & 3) << 18)
+#define SPI_CONFIG_SPEED2(x) (((x) & 0xf) << 20) // high bit = high speed mode?
+
+#define ERROR_CONTROL_UNKNOWN_0 BIT(0)
+#define ERROR_CONTROL_DISABLE_DMA BIT(1) // clears DMA_RX_CONTROL_ENABLE when a DMA error occurs
+#define ERROR_CONTROL_UNKNOWN_2 BIT(2)
+#define ERROR_CONTROL_UNKNOWN_3 BIT(3)
+#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_9 BIT(9)
+#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_10 BIT(10)
+#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_12 BIT(12)
+#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_13 BIT(13)
+#define ERROR_CONTROL_UNKNOWN_16(x) (((x) & 0xff) << 16) // spi error code irq?
+#define ERROR_CONTROL_SET_DMA_STATUS BIT(29) // sets DMA_RX_STATUS_ERROR when a DMA error occurs
+
+#define ERROR_STATUS_DMA BIT(28)
+#define ERROR_STATUS_SPI BIT(30)
+
+#define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9)
+#define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10)
+#define ERROR_FLAG_DMA_RX_TIMEOUT BIT(12) // set when we receive a truncated DMA message
+#define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13)
+#define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16)
+#define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17)
+#define ERROR_FLAG_SPI_INTRA_PACKET_TIMEOUT BIT(18)
+#define ERROR_FLAG_SPI_INVALID_RESPONSE BIT(19)
+#define ERROR_FLAG_SPI_HS_RX_TIMEOUT BIT(20)
+#define ERROR_FLAG_SPI_TOUCH_IC_INIT BIT(21)
+
+#define SPI_CMD_CONTROL_SEND BIT(0) // cleared by device when sending is complete
+#define SPI_CMD_CONTROL_IRQ BIT(1)
+
+#define SPI_CMD_CODE_READ 4
+#define SPI_CMD_CODE_WRITE 6
+
+#define SPI_CMD_STATUS_DONE BIT(0)
+#define SPI_CMD_STATUS_ERROR BIT(1)
+#define SPI_CMD_STATUS_BUSY BIT(3)
+
+#define DMA_TX_CONTROL_SEND BIT(0) // cleared by device when sending is complete
+#define DMA_TX_CONTROL_IRQ BIT(3)
+
+#define DMA_TX_STATUS_DONE BIT(0)
+#define DMA_TX_STATUS_ERROR BIT(1)
+#define DMA_TX_STATUS_UNKNOWN_2 BIT(2)
+#define DMA_TX_STATUS_UNKNOWN_3 BIT(3) // busy?
+
+#define DMA_RX_CONTROL_ENABLE BIT(0)
+#define DMA_RX_CONTROL_IRQ_UNKNOWN_1 BIT(1) // rx1 only?
+#define DMA_RX_CONTROL_IRQ_ERROR BIT(3) // rx1 only?
+#define DMA_RX_CONTROL_IRQ_UNKNOWN_4 BIT(4) // rx0 only?
+#define DMA_RX_CONTROL_IRQ_DATA BIT(5)
+
+#define DMA_RX_CONTROL2_UNKNOWN_5 BIT(5) // rx0 only?
+#define DMA_RX_CONTROL2_RESET BIT(7) // resets ringbuffer indices
+
+#define DMA_RX_WRAP_FLAG BIT(7)
+
+#define DMA_RX_STATUS_ERROR BIT(3)
+#define DMA_RX_STATUS_UNKNOWN_4 BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms)
+#define DMA_RX_STATUS_HAVE_DATA BIT(5)
+#define DMA_RX_STATUS_ENABLED BIT(8)
+
+// COUNTER_RESET can be written to counter registers to reset them to zero. However, in some cases this can mess up the THC.
+#define COUNTER_RESET BIT(31)
+
+struct ithc_registers {
+ /* 0000 */ u32 _unknown_0000[1024];
+ /* 1000 */ u32 _unknown_1000;
+ /* 1004 */ u32 _unknown_1004;
+ /* 1008 */ u32 control_bits;
+ /* 100c */ u32 _unknown_100c;
+ /* 1010 */ u32 spi_config;
+ /* 1014 */ u32 _unknown_1014[3];
+ /* 1020 */ u32 error_control;
+ /* 1024 */ u32 error_status; // write to clear
+ /* 1028 */ u32 error_flags; // write to clear
+ /* 102c */ u32 _unknown_102c[5];
+ struct {
+ /* 1040 */ u8 control;
+ /* 1041 */ u8 code;
+ /* 1042 */ u16 size;
+ /* 1044 */ u32 status; // write to clear
+ /* 1048 */ u32 offset;
+ /* 104c */ u32 data[16];
+ /* 108c */ u32 _unknown_108c;
+ } spi_cmd;
+ struct {
+ /* 1090 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
+ /* 1098 */ u8 control;
+ /* 1099 */ u8 _unknown_1099;
+ /* 109a */ u8 _unknown_109a;
+ /* 109b */ u8 num_prds;
+ /* 109c */ u32 status; // write to clear
+ } dma_tx;
+ /* 10a0 */ u32 _unknown_10a0[7];
+ /* 10bc */ u32 state; // is 0xe0000402 (dev config val 0) after CONTROL_NRESET, 0xe0000461 after first touch, 0xe0000401 after DMA_RX_CODE_RESET
+ /* 10c0 */ u32 _unknown_10c0[8];
+ /* 10e0 */ u32 _unknown_10e0_counters[3];
+ /* 10ec */ u32 _unknown_10ec[5];
+ struct {
+ /* 1100/1200 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
+ /* 1108/1208 */ u8 num_bufs;
+ /* 1109/1209 */ u8 num_prds;
+ /* 110a/120a */ u16 _unknown_110a;
+ /* 110c/120c */ u8 control;
+ /* 110d/120d */ u8 head;
+ /* 110e/120e */ u8 tail;
+ /* 110f/120f */ u8 control2;
+ /* 1110/1210 */ u32 status; // write to clear
+ /* 1114/1214 */ u32 _unknown_1114;
+ /* 1118/1218 */ u64 _unknown_1118_guc_addr;
+ /* 1120/1220 */ u32 _unknown_1120_guc;
+ /* 1124/1224 */ u32 _unknown_1124_guc;
+ /* 1128/1228 */ u32 unknown_init_bits; // bit 2 = guc related, bit 3 = rx1 related, bit 4 = guc related
+ /* 112c/122c */ u32 _unknown_112c;
+ /* 1130/1230 */ u64 _unknown_1130_guc_addr;
+ /* 1138/1238 */ u32 _unknown_1138_guc;
+ /* 113c/123c */ u32 _unknown_113c;
+ /* 1140/1240 */ u32 _unknown_1140_guc;
+ /* 1144/1244 */ u32 _unknown_1144[23];
+ /* 11a0/12a0 */ u32 _unknown_11a0_counters[6];
+ /* 11b8/12b8 */ u32 _unknown_11b8[18];
+ } dma_rx[2];
+};
+static_assert(sizeof(struct ithc_registers) == 0x1300);
+
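+// DMA buffer sizes are encoded in dma_buf_sizes as the number of 64-byte units minus one.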
+#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6)
+#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6)
+
+#define DEVCFG_TOUCH_MASK 0x3f
+#define DEVCFG_TOUCH_ENABLE BIT(0)
+#define DEVCFG_TOUCH_UNKNOWN_1 BIT(1)
+#define DEVCFG_TOUCH_UNKNOWN_2 BIT(2)
+#define DEVCFG_TOUCH_UNKNOWN_3 BIT(3)
+#define DEVCFG_TOUCH_UNKNOWN_4 BIT(4)
+#define DEVCFG_TOUCH_UNKNOWN_5 BIT(5)
+#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6)
+
+#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC"
+
+#define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode?
+#define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3)
+#define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f)
+#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20) // TODO implement heartbeat
+#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
+#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
+#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
+#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
+#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7) // TODO use this
+#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31) // TODO use this?
+
+struct ithc_device_config { // (Example values are from an SP7+.)
+ u32 _unknown_00; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
+ u32 _unknown_04; // 04 = 0x00000000
+ u32 dma_buf_sizes; // 08 = 0x000a00ff
+ u32 touch_cfg; // 0c = 0x0000001c
+ u32 _unknown_10; // 10 = 0x0000001c
+ u32 device_id; // 14 = 0x43495424 = "$TIC"
+ u32 spi_config; // 18 = 0xfda00a2e
+ u16 vendor_id; // 1c = 0x045e = Microsoft Corp.
+ u16 product_id; // 1e = 0x0c1a
+ u32 revision; // 20 = 0x00000001
+ u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139 (this value looks more random on newer devices)
+ u32 _unknown_28; // 28 = 0x00000000
+ u32 fw_mode; // 2c = 0x00000000 (for fw update?)
+ u32 _unknown_30; // 30 = 0x00000000
+ u32 _unknown_34; // 34 = 0x0404035e (u8,u8,u8,u8 = version?)
+ u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
+ u32 _unknown_3c; // 3c = 0x00000002
+};
+
+void bitsl(__iomem u32 *reg, u32 mask, u32 val);
+void bitsb(__iomem u8 *reg, u8 mask, u8 val);
+#define bitsl_set(reg, x) bitsl(reg, x, x)
+#define bitsb_set(reg, x) bitsb(reg, x, x)
+int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val);
+int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val);
+int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode);
+int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data);
+
diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
new file mode 100644
index 000000000000..028e55a4ec53
--- /dev/null
+++ b/drivers/hid/ithc/ithc.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
+#include <linux/poll.h>
+#include <linux/timer.h>
+#include <linux/pm_qos.h>
+
+#define DEVNAME "ithc"
+#define DEVFULLNAME "Intel Touch Host Controller"
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
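+// CHECK() calls fn(args...), logs an error if the result is negative, and evaluates to the
+// result; CHECK_RET() additionally returns the error from the enclosing function.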
+#define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; })
+#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while (0)
+
+#define NUM_RX_BUF 16
+
+struct ithc;
+
+#include "ithc-regs.h"
+#include "ithc-dma.h"
+
+struct ithc {
+ char phys[32];
+ struct pci_dev *pci;
+ int irq;
+ struct task_struct *poll_thread;
+
+ struct pm_qos_request activity_qos;
+ struct hrtimer activity_start_timer;
+ struct hrtimer activity_end_timer;
+ ktime_t last_rx_time;
+ unsigned int cur_rx_seq_count;
+ unsigned int cur_rx_seq_errors;
+
+ struct hid_device *hid;
+ bool hid_parse_done;
+ wait_queue_head_t wait_hid_parse;
+ wait_queue_head_t wait_hid_get_feature;
+ struct mutex hid_get_feature_mutex;
+ void *hid_get_feature_buf;
+ size_t hid_get_feature_size;
+
+ struct ithc_registers __iomem *regs;
+ struct ithc_registers *prev_regs; // for debugging
+ struct ithc_device_config config;
+ struct ithc_dma_rx dma_rx[2];
+ struct ithc_dma_tx dma_tx;
+};
+
+int ithc_reset(struct ithc *ithc);
+void ithc_set_active(struct ithc *ithc, unsigned int duration_us);
+int ithc_debug_init(struct ithc *ithc);
+void ithc_log_regs(struct ithc *ithc);
+
--
2.43.0