0005-ithc.patch

From 7a359e9084a94ddfbacd67ea99c572bdeebb72f3 Mon Sep 17 00:00:00 2001
From: Dorian Stoll <dorian.stoll@tmsp.io>
Date: Sun, 11 Dec 2022 12:03:38 +0100
Subject: [PATCH] iommu: intel: Disable source id verification for ITHC

Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
Patchset: ithc
---
 drivers/iommu/intel/irq_remapping.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 29b9e55dcf26c..986e91c813ae1 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -386,6 +386,22 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
     data.busmatch_count = 0;
     pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
 
+    /*
+     * The Intel Touch Host Controller is at 00:10.6, but for some reason
+     * the MSI interrupts have request id 01:05.0.
+     * Disable id verification to work around this.
+     * FIXME Find proper fix or turn this into a quirk.
+     */
+    if (dev->vendor == PCI_VENDOR_ID_INTEL && (dev->class >> 8) == PCI_CLASS_INPUT_PEN) {
+        switch(dev->device) {
+        case 0x98d0: case 0x98d1: // LKF
+        case 0xa0d0: case 0xa0d1: // TGL LP
+        case 0x43d0: case 0x43d1: // TGL H
+            set_irte_sid(irte, SVT_NO_VERIFY, SQ_ALL_16, 0);
+            return 0;
+        }
+    }
+
     /*
      * DMA alias provides us with a PCI device and alias. The only case
      * where the it will return an alias on a different bus than the
--
2.43.0
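
The FIXME in the hunk above asks for a proper fix or a quirk. As a sketch of what a quirk-style cleanup could look like, the device match could be factored into a helper along these lines (hypothetical refactoring, not part of the patch; the helper name is made up):

    // Hypothetical helper: returns true for THC ports whose MSI request
    // id is known not to match their PCI address (see comment above).
    static bool ithc_msi_rid_mismatch(struct pci_dev *dev)
    {
        if (dev->vendor != PCI_VENDOR_ID_INTEL ||
            (dev->class >> 8) != PCI_CLASS_INPUT_PEN)
            return false;
        switch (dev->device) {
        case 0x98d0: case 0x98d1: // LKF
        case 0xa0d0: case 0xa0d1: // TGL LP
        case 0x43d0: case 0x43d1: // TGL H
            return true;
        default:
            return false;
        }
    }

set_msi_sid() would then fall back to SVT_NO_VERIFY only when this helper matches, keeping source-id verification enabled for every other device.
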
From f023c8e014f11338ea77f6473152d56b79092e01 Mon Sep 17 00:00:00 2001
From: Dorian Stoll <dorian.stoll@tmsp.io>
Date: Sun, 11 Dec 2022 12:10:54 +0100
Subject: [PATCH] hid: Add support for Intel Touch Host Controller

Based on quo/ithc-linux@55803a2

Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
Patchset: ithc
---
 drivers/hid/Kconfig           |   2 +
 drivers/hid/Makefile          |   1 +
 drivers/hid/ithc/Kbuild       |   6 +
 drivers/hid/ithc/Kconfig      |  12 +
 drivers/hid/ithc/ithc-debug.c |  96 ++++++
 drivers/hid/ithc/ithc-dma.c   | 258 ++++++++++++++++
 drivers/hid/ithc/ithc-dma.h   |  67 +++++
 drivers/hid/ithc/ithc-main.c  | 534 ++++++++++++++++++++++++++++++++++
 drivers/hid/ithc/ithc-regs.c  |  64 ++++
 drivers/hid/ithc/ithc-regs.h  | 186 ++++++++++++
 drivers/hid/ithc/ithc.h       |  60 ++++
 11 files changed, 1286 insertions(+)
 create mode 100644 drivers/hid/ithc/Kbuild
 create mode 100644 drivers/hid/ithc/Kconfig
 create mode 100644 drivers/hid/ithc/ithc-debug.c
 create mode 100644 drivers/hid/ithc/ithc-dma.c
 create mode 100644 drivers/hid/ithc/ithc-dma.h
 create mode 100644 drivers/hid/ithc/ithc-main.c
 create mode 100644 drivers/hid/ithc/ithc-regs.c
 create mode 100644 drivers/hid/ithc/ithc-regs.h
 create mode 100644 drivers/hid/ithc/ithc.h
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 0b9d245d10e54..8ba1c309228be 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1347,4 +1347,6 @@ source "drivers/hid/surface-hid/Kconfig"
 
 source "drivers/hid/ipts/Kconfig"
 
+source "drivers/hid/ithc/Kconfig"
+
 endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 2ef21b257d0b5..e94b79727b489 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -171,3 +171,4 @@ obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
 obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
 obj-$(CONFIG_HID_IPTS) += ipts/
+obj-$(CONFIG_HID_ITHC) += ithc/
diff --git a/drivers/hid/ithc/Kbuild b/drivers/hid/ithc/Kbuild
new file mode 100644
index 0000000000000..aea83f2ac07b4
--- /dev/null
+++ b/drivers/hid/ithc/Kbuild
@@ -0,0 +1,6 @@
+obj-$(CONFIG_HID_ITHC) := ithc.o
+
+ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-debug.o
+
+ccflags-y := -std=gnu11 -Wno-declaration-after-statement
+
diff --git a/drivers/hid/ithc/Kconfig b/drivers/hid/ithc/Kconfig
new file mode 100644
index 0000000000000..ede7130236096
--- /dev/null
+++ b/drivers/hid/ithc/Kconfig
@@ -0,0 +1,12 @@
+config HID_ITHC
+    tristate "Intel Touch Host Controller"
+    depends on PCI
+    depends on HID
+    help
+      Say Y here if your system has a touchscreen using Intel's
+      Touch Host Controller (ITHC / IPTS) technology.
+
+      If unsure, say N.
+
+      To compile this driver as a module, choose M here: the
+      module will be called ithc.
diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c
new file mode 100644
index 0000000000000..57bf125c45bd5
--- /dev/null
+++ b/drivers/hid/ithc/ithc-debug.c
@@ -0,0 +1,96 @@
+#include "ithc.h"
+
+void ithc_log_regs(struct ithc *ithc) {
+    if (!ithc->prev_regs) return;
+    u32 __iomem *cur = (__iomem void*)ithc->regs;
+    u32 *prev = (void*)ithc->prev_regs;
+    for (int i = 1024; i < sizeof *ithc->regs / 4; i++) {
+        u32 x = readl(cur + i);
+        if (x != prev[i]) {
+            pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x);
+            prev[i] = x;
+        }
+    }
+}
+
+static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len, loff_t *offset) {
+    struct ithc *ithc = file_inode(f)->i_private;
+    char cmd[256];
+    if (!ithc || !ithc->pci) return -ENODEV;
+    if (!len) return -EINVAL;
+    if (len >= sizeof cmd) return -EINVAL;
+    if (copy_from_user(cmd, buf, len)) return -EFAULT;
+    cmd[len] = 0;
+    if (cmd[len-1] == '\n') cmd[len-1] = 0;
+    pci_info(ithc->pci, "debug command: %s\n", cmd);
+    u32 n = 0;
+    const char *s = cmd + 1;
+    u32 a[32];
+    while (*s && *s != '\n') {
+        if (n >= ARRAY_SIZE(a)) return -EINVAL;
+        if (*s++ != ' ') return -EINVAL;
+        char *e;
+        a[n++] = simple_strtoul(s, &e, 0);
+        if (e == s) return -EINVAL;
+        s = e;
+    }
+    ithc_log_regs(ithc);
+    switch(cmd[0]) {
+    case 'x': // reset
+        ithc_reset(ithc);
+        break;
+    case 'w': // write register: offset mask value
+        if (n != 3 || (a[0] & 3)) return -EINVAL;
+        pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n", a[0], a[2], a[1]);
+        bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]);
+        break;
+    case 'r': // read register: offset
+        if (n != 1 || (a[0] & 3)) return -EINVAL;
+        pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0], readl(((__iomem u32 *)ithc->regs) + a[0] / 4));
+        break;
+    case 's': // spi command: cmd offset len data...
+        // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+        // set touch cfg: s 6 12 4 XX
+        if (n < 3 || a[2] > (n - 3) * 4) return -EINVAL;
+        pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]);
+        if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3))
+            for (u32 i = 0; i < (a[2] + 3) / 4; i++) pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]);
+        break;
+    case 'd': // dma command: cmd len data...
+        // get report descriptor: d 7 8 0 0
+        // enable multitouch: d 3 2 0x0105
+        if (n < 2 || a[1] > (n - 2) * 4) return -EINVAL;
+        pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]);
+        if (ithc_dma_tx(ithc, a[0], a[1], a + 2)) pci_err(ithc->pci, "dma tx failed\n");
+        break;
+    default:
+        return -EINVAL;
+    }
+    ithc_log_regs(ithc);
+    return len;
+}
+
+static const struct file_operations ithc_debugfops_cmd = {
+    .owner = THIS_MODULE,
+    .write = ithc_debugfs_cmd_write,
+};
+
+static void ithc_debugfs_devres_release(struct device *dev, void *res) {
+    struct dentry **dbgm = res;
+    if (*dbgm) debugfs_remove_recursive(*dbgm);
+}
+
+int ithc_debug_init(struct ithc *ithc) {
+    struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof *dbgm, GFP_KERNEL);
+    if (!dbgm) return -ENOMEM;
+    devres_add(&ithc->pci->dev, dbgm);
+    struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL);
+    if (IS_ERR(dbg)) return PTR_ERR(dbg);
+    *dbgm = dbg;
+
+    struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd);
+    if (IS_ERR(cmd)) return PTR_ERR(cmd);
+
+    return 0;
+}
+
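For reference, the command parser above accepts a single-letter command followed by space-separated numeric arguments. A minimal user-space sketch of driving it (not part of the patch; assumes debugfs is mounted at /sys/kernel/debug, DEVNAME expands to "ithc", and root privileges):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void) {
        // "r 0x1000": ask the driver to read register 0x1000 and log it;
        // "w", "s" and "d" take arguments as documented in the switch above.
        const char cmd[] = "r 0x1000\n";
        int fd = open("/sys/kernel/debug/ithc/cmd", O_WRONLY);
        if (fd < 0) { perror("open"); return 1; }
        if (write(fd, cmd, sizeof cmd - 1) < 0) perror("write");
        close(fd);
        return 0;
    }
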
diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c
new file mode 100644
index 0000000000000..7e89b3496918d
--- /dev/null
+++ b/drivers/hid/ithc/ithc-dma.c
@@ -0,0 +1,258 @@
+#include "ithc.h"
+
+static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p, unsigned num_buffers, unsigned num_pages, enum dma_data_direction dir) {
+    p->num_pages = num_pages;
+    p->dir = dir;
+    p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE);
+    p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL);
+    if (!p->addr) return -ENOMEM;
+    if (p->dma_addr & (PAGE_SIZE - 1)) return -EFAULT;
+    return 0;
+}
+
+struct ithc_sg_table {
+    void *addr;
+    struct sg_table sgt;
+    enum dma_data_direction dir;
+};
+static void ithc_dma_sgtable_free(struct sg_table *sgt) {
+    struct scatterlist *sg;
+    int i;
+    for_each_sgtable_sg(sgt, sg, i) {
+        struct page *p = sg_page(sg);
+        if (p) __free_page(p);
+    }
+    sg_free_table(sgt);
+}
+static void ithc_dma_data_devres_release(struct device *dev, void *res) {
+    struct ithc_sg_table *sgt = res;
+    if (sgt->addr) vunmap(sgt->addr);
+    dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0);
+    ithc_dma_sgtable_free(&sgt->sgt);
+}
+
+static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b) {
+    // We don't use dma_alloc_coherent for data buffers, because they don't have to be contiguous (we can use one PRD per page) or coherent (they are unidirectional).
+    // Instead we use an sg_table of individually allocated pages (5.13 has dma_alloc_noncontiguous for this, but we'd like to support 5.10 for now).
+    struct page *pages[16];
+    if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages)) return -EINVAL;
+    b->active_idx = -1;
+    struct ithc_sg_table *sgt = devres_alloc(ithc_dma_data_devres_release, sizeof *sgt, GFP_KERNEL);
+    if (!sgt) return -ENOMEM;
+    sgt->dir = prds->dir;
+    if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) {
+        struct scatterlist *sg;
+        int i;
+        bool ok = true;
+        for_each_sgtable_sg(&sgt->sgt, sg, i) {
+            struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); // don't need __GFP_DMA for PCI DMA
+            if (!p) { ok = false; break; }
+            sg_set_page(sg, p, PAGE_SIZE, 0);
+        }
+        if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) {
+            devres_add(&ithc->pci->dev, sgt);
+            b->sgt = &sgt->sgt;
+            b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL);
+            if (!b->addr) return -ENOMEM;
+            return 0;
+        }
+        ithc_dma_sgtable_free(&sgt->sgt);
+    }
+    devres_free(sgt);
+    return -ENOMEM;
+}
+
+static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
+    struct ithc_phys_region_desc *prd = prds->addr;
+    prd += idx * prds->num_pages;
+    if (b->active_idx >= 0) { pci_err(ithc->pci, "buffer already active\n"); return -EINVAL; }
+    b->active_idx = idx;
+    if (prds->dir == DMA_TO_DEVICE) {
+        if (b->data_size > PAGE_SIZE) return -EINVAL;
+        prd->addr = sg_dma_address(b->sgt->sgl) >> 10;
+        prd->size = b->data_size | PRD_FLAG_END;
+        flush_kernel_vmap_range(b->addr, b->data_size);
+    } else if (prds->dir == DMA_FROM_DEVICE) {
+        struct scatterlist *sg;
+        int i;
+        for_each_sgtable_dma_sg(b->sgt, sg, i) {
+            prd->addr = sg_dma_address(sg) >> 10;
+            prd->size = sg_dma_len(sg);
+            prd++;
+        }
+        prd[-1].size |= PRD_FLAG_END;
+    }
+    dma_wmb(); // for the prds
+    dma_sync_sgtable_for_device(&ithc->pci->dev, b->sgt, prds->dir);
+    return 0;
+}
+
+static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
+    struct ithc_phys_region_desc *prd = prds->addr;
+    prd += idx * prds->num_pages;
+    if (b->active_idx != idx) { pci_err(ithc->pci, "wrong buffer index\n"); return -EINVAL; }
+    b->active_idx = -1;
+    if (prds->dir == DMA_FROM_DEVICE) {
+        dma_rmb(); // for the prds
+        b->data_size = 0;
+        struct scatterlist *sg;
+        int i;
+        for_each_sgtable_dma_sg(b->sgt, sg, i) {
+            unsigned size = prd->size;
+            b->data_size += size & PRD_SIZE_MASK;
+            if (size & PRD_FLAG_END) break;
+            if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) { pci_err(ithc->pci, "truncated prd\n"); break; }
+            prd++;
+        }
+        invalidate_kernel_vmap_range(b->addr, b->data_size);
+    }
+    dma_sync_sgtable_for_cpu(&ithc->pci->dev, b->sgt, prds->dir);
+    return 0;
+}
+
+int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname) {
+    struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+    mutex_init(&rx->mutex);
+    u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes);
+    unsigned num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
+    pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n", NUM_RX_BUF, buf_size, num_pages);
+    CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE);
+    for (unsigned i = 0; i < NUM_RX_BUF; i++)
+        CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]);
+    writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2);
+    lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr);
+    writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs);
+    writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds);
+    u8 head = readb(&ithc->regs->dma_rx[channel].head);
+    if (head) { pci_err(ithc->pci, "head is nonzero (%u)\n", head); return -EIO; }
+    for (unsigned i = 0; i < NUM_RX_BUF; i++)
+        CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i);
+    writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail);
+    return 0;
+}
+void ithc_dma_rx_enable(struct ithc *ithc, u8 channel) {
+    bitsb_set(&ithc->regs->dma_rx[channel].control, DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA);
+    CHECK(waitl, ithc, &ithc->regs->dma_rx[1].status, DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);
+}
+
+int ithc_dma_tx_init(struct ithc *ithc) {
+    struct ithc_dma_tx *tx = &ithc->dma_tx;
+    mutex_init(&tx->mutex);
+    tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes);
+    unsigned num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
+    pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n", tx->max_size, num_pages);
+    CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE);
+    CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf);
+    lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr);
+    writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds);
+    CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+    return 0;
+}
+
+static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data, u8 channel, u8 buf) {
+    if (buf >= NUM_RX_BUF) {
+        pci_err(ithc->pci, "invalid dma ringbuffer index\n");
+        return -EINVAL;
+    }
+    ithc_set_active(ithc);
+    u32 len = data->data_size;
+    struct ithc_dma_rx_header *hdr = data->addr;
+    u8 *hiddata = (void *)(hdr + 1);
+    if (len >= sizeof *hdr && hdr->code == DMA_RX_CODE_RESET) {
+        CHECK(ithc_reset, ithc);
+    } else if (len < sizeof *hdr || len != sizeof *hdr + hdr->data_size) {
+        if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
+            // When the CPU enters a low power state during DMA, we can get truncated messages.
+            // Typically this will be a single touch HID report that is only 1 byte, or a multitouch report that is 257 bytes.
+            // See also ithc_set_active().
+        } else {
+            pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n", channel, buf, len, hdr->code, hdr->data_size);
+            print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
+        }
+    } else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) {
+        CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8);
+        WRITE_ONCE(ithc->hid_parse_done, true);
+        wake_up(&ithc->wait_hid_parse);
+    } else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
+        CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1);
+    } else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) {
+        bool done = false;
+        mutex_lock(&ithc->hid_get_feature_mutex);
+        if (ithc->hid_get_feature_buf) {
+            if (hdr->data_size < ithc->hid_get_feature_size) ithc->hid_get_feature_size = hdr->data_size;
+            memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size);
+            ithc->hid_get_feature_buf = NULL;
+            done = true;
+        }
+        mutex_unlock(&ithc->hid_get_feature_mutex);
+        if (done) wake_up(&ithc->wait_hid_get_feature);
+        else CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT, hiddata, hdr->data_size, 1);
+    } else {
+        pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n", channel, buf, len, hdr->code);
+        print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
+    }
+    return 0;
+}
+
+static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) {
+    struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+    unsigned n = rx->num_received;
+    u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head);
+    while (1) {
+        u8 tail = n % NUM_RX_BUF;
+        u8 tail_wrap = tail | ((n / NUM_RX_BUF) & 1 ? 0 : DMA_RX_WRAP_FLAG);
+        writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail);
+        // ringbuffer is full if tail_wrap == head_wrap
+        // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG
+        if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG)) return 0;
+
+        // take the buffer that the device just filled
+        struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF];
+        CHECK_RET(ithc_dma_data_buffer_get, ithc, &rx->prds, b, tail);
+        rx->num_received = ++n;
+
+        // process data
+        CHECK(ithc_dma_rx_process_buf, ithc, b, channel, tail);
+
+        // give the buffer back to the device
+        CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail);
+    }
+}
+int ithc_dma_rx(struct ithc *ithc, u8 channel) {
+    struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+    mutex_lock(&rx->mutex);
+    int ret = ithc_dma_rx_unlocked(ithc, channel);
+    mutex_unlock(&rx->mutex);
+    return ret;
+}
+
+static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
+    pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize);
+    struct ithc_dma_tx_header *hdr;
+    u8 padding = datasize & 3 ? 4 - (datasize & 3) : 0;
+    unsigned fullsize = sizeof *hdr + datasize + padding;
+    if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE) return -EINVAL;
+    CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+
+    ithc->dma_tx.buf.data_size = fullsize;
+    hdr = ithc->dma_tx.buf.addr;
+    hdr->code = cmdcode;
+    hdr->data_size = datasize;
+    u8 *dest = (void *)(hdr + 1);
+    memcpy(dest, data, datasize);
+    dest += datasize;
+    for (u8 p = 0; p < padding; p++) *dest++ = 0;
+    CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+
+    bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND);
+    CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
+    writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status);
+    return 0;
+}
+int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
+    mutex_lock(&ithc->dma_tx.mutex);
+    int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data);
+    mutex_unlock(&ithc->dma_tx.mutex);
+    return ret;
+}
+
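The comment in ithc_dma_data_alloc() above mentions dma_alloc_noncontiguous(), which exists since 5.13 and covers exactly this use case. A minimal sketch of the same allocation using that API, for comparison only (untested; the patch keeps the manual sg_table path for 5.10 compatibility, and the devres-based cleanup is omitted here):

    static int ithc_dma_data_alloc_noncontig(struct ithc *ithc,
            struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b) {
        struct device *dev = &ithc->pci->dev;
        size_t size = prds->num_pages * PAGE_SIZE;
        b->active_idx = -1;
        // One call replaces the alloc_page() loop plus dma_map_sgtable().
        b->sgt = dma_alloc_noncontiguous(dev, size, prds->dir, GFP_KERNEL, 0);
        if (!b->sgt) return -ENOMEM;
        // And this replaces the explicit vmap() of the page array.
        b->addr = dma_vmap_noncontiguous(dev, size, b->sgt);
        if (!b->addr) {
            dma_free_noncontiguous(dev, size, b->sgt, prds->dir);
            return -ENOMEM;
        }
        return 0;
    }

Teardown would pair dma_vunmap_noncontiguous() and dma_free_noncontiguous() in the devres release callback.
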
diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h
new file mode 100644
index 0000000000000..d9f2c19a13f3a
--- /dev/null
+++ b/drivers/hid/ithc/ithc-dma.h
@@ -0,0 +1,67 @@
+#define PRD_SIZE_MASK 0xffffff
+#define PRD_FLAG_END 0x1000000
+#define PRD_FLAG_SUCCESS 0x2000000
+#define PRD_FLAG_ERROR 0x4000000
+
+struct ithc_phys_region_desc {
+    u64 addr; // physical addr/1024
+    u32 size; // num bytes, PRD_FLAG_END marks last prd for data split over multiple prds
+    u32 unused;
+};
+
+#define DMA_RX_CODE_INPUT_REPORT 3
+#define DMA_RX_CODE_FEATURE_REPORT 4
+#define DMA_RX_CODE_REPORT_DESCRIPTOR 5
+#define DMA_RX_CODE_RESET 7
+
+struct ithc_dma_rx_header {
+    u32 code;
+    u32 data_size;
+    u32 _unknown[14];
+};
+
+#define DMA_TX_CODE_SET_FEATURE 3
+#define DMA_TX_CODE_GET_FEATURE 4
+#define DMA_TX_CODE_OUTPUT_REPORT 5
+#define DMA_TX_CODE_GET_REPORT_DESCRIPTOR 7
+
+struct ithc_dma_tx_header {
+    u32 code;
+    u32 data_size;
+};
+
+struct ithc_dma_prd_buffer {
+    void *addr;
+    dma_addr_t dma_addr;
+    u32 size;
+    u32 num_pages; // per data buffer
+    enum dma_data_direction dir;
+};
+
+struct ithc_dma_data_buffer {
+    void *addr;
+    struct sg_table *sgt;
+    int active_idx;
+    u32 data_size;
+};
+
+struct ithc_dma_tx {
+    struct mutex mutex;
+    u32 max_size;
+    struct ithc_dma_prd_buffer prds;
+    struct ithc_dma_data_buffer buf;
+};
+
+struct ithc_dma_rx {
+    struct mutex mutex;
+    u32 num_received;
+    struct ithc_dma_prd_buffer prds;
+    struct ithc_dma_data_buffer bufs[NUM_RX_BUF];
+};
+
+int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname);
+void ithc_dma_rx_enable(struct ithc *ithc, u8 channel);
+int ithc_dma_tx_init(struct ithc *ithc);
+int ithc_dma_rx(struct ithc *ithc, u8 channel);
+int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *cmddata);
+
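Worked example of the PRD encoding defined above: an 8 KiB receive buffer backed by two pages at bus addresses 0x1234000 and 0x2000000 would be described as

    prd[0].addr = 0x1234000 >> 10 = 0x48d0;  prd[0].size = 4096;
    prd[1].addr = 0x2000000 >> 10 = 0x8000;  prd[1].size = 4096 | PRD_FLAG_END;

i.e. addresses are stored divided by 1024 (matching the sg_dma_address(sg) >> 10 in ithc-dma.c) and PRD_FLAG_END marks the final region of a buffer split over multiple PRDs. The RX ring index encoding works similarly: with, say, NUM_RX_BUF = 16 and DMA_RX_WRAP_FLAG a flag bit above the index bits, after 17 buffers have been consumed n = 17 gives tail = 1 with the wrap bit clear (the bit is set on the first pass through the ring, clear on the second, and so on), so the ring is empty when head and tail agree on the index but differ in the wrap bit, and full when they agree on both, as the comments in ithc_dma_rx_unlocked() note.
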
  553. diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c
  554. new file mode 100644
  555. index 0000000000000..09512b9cb4d31
  556. --- /dev/null
  557. +++ b/drivers/hid/ithc/ithc-main.c
  558. @@ -0,0 +1,534 @@
  559. +#include "ithc.h"
  560. +
  561. +MODULE_DESCRIPTION("Intel Touch Host Controller driver");
  562. +MODULE_LICENSE("Dual BSD/GPL");
  563. +
  564. +// Lakefield
  565. +#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0
  566. +#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1
  567. +// Tiger Lake
  568. +#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0
  569. +#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1
  570. +#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 0x43d0
  571. +#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1
  572. +// Alder Lake
  573. +#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8
  574. +#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9
  575. +#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0
  576. +#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1
  577. +#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0
  578. +#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1
  579. +// Raptor Lake
  580. +#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58
  581. +#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59
  582. +// Meteor Lake
  583. +#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48
  584. +#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 0x7e4a
  585. +
  586. +static const struct pci_device_id ithc_pci_tbl[] = {
  587. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) },
  588. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) },
  589. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1) },
  590. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2) },
  591. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1) },
  592. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2) },
  593. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1) },
  594. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2) },
  595. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1) },
  596. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2) },
  597. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1) },
  598. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2) },
  599. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1) },
  600. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) },
  601. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) },
  602. + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) },
  603. + {}
  604. +};
  605. +MODULE_DEVICE_TABLE(pci, ithc_pci_tbl);
  606. +
  607. +// Module parameters
  608. +
  609. +static bool ithc_use_polling = false;
  610. +module_param_named(poll, ithc_use_polling, bool, 0);
  611. +MODULE_PARM_DESC(poll, "Use polling instead of interrupts");
  612. +
  613. +static bool ithc_use_rx0 = false;
  614. +module_param_named(rx0, ithc_use_rx0, bool, 0);
  615. +MODULE_PARM_DESC(rx0, "Use DMA RX channel 0");
  616. +
  617. +static bool ithc_use_rx1 = true;
  618. +module_param_named(rx1, ithc_use_rx1, bool, 0);
  619. +MODULE_PARM_DESC(rx1, "Use DMA RX channel 1");
  620. +
  621. +static bool ithc_log_regs_enabled = false;
  622. +module_param_named(logregs, ithc_log_regs_enabled, bool, 0);
  623. +MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)");
  624. +
  625. +// Sysfs attributes
  626. +
  627. +static bool ithc_is_config_valid(struct ithc *ithc) {
  628. + return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC;
  629. +}
  630. +
  631. +static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) {
  632. + struct ithc *ithc = dev_get_drvdata(dev);
  633. + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
  634. + return sprintf(buf, "0x%04x", ithc->config.vendor_id);
  635. +}
  636. +static DEVICE_ATTR_RO(vendor);
  637. +static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf) {
  638. + struct ithc *ithc = dev_get_drvdata(dev);
  639. + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
  640. + return sprintf(buf, "0x%04x", ithc->config.product_id);
  641. +}
  642. +static DEVICE_ATTR_RO(product);
  643. +static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) {
  644. + struct ithc *ithc = dev_get_drvdata(dev);
  645. + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
  646. + return sprintf(buf, "%u", ithc->config.revision);
  647. +}
  648. +static DEVICE_ATTR_RO(revision);
  649. +static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) {
  650. + struct ithc *ithc = dev_get_drvdata(dev);
  651. + if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
  652. + u32 v = ithc->config.fw_version;
  653. + return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff);
  654. +}
  655. +static DEVICE_ATTR_RO(fw_version);
  656. +
  657. +static const struct attribute_group *ithc_attribute_groups[] = {
  658. + &(const struct attribute_group){
  659. + .name = DEVNAME,
  660. + .attrs = (struct attribute *[]){
  661. + &dev_attr_vendor.attr,
  662. + &dev_attr_product.attr,
  663. + &dev_attr_revision.attr,
  664. + &dev_attr_fw_version.attr,
  665. + NULL
  666. + },
  667. + },
  668. + NULL
  669. +};
  670. +
  671. +// HID setup
  672. +
  673. +static int ithc_hid_start(struct hid_device *hdev) { return 0; }
  674. +static void ithc_hid_stop(struct hid_device *hdev) { }
  675. +static int ithc_hid_open(struct hid_device *hdev) { return 0; }
  676. +static void ithc_hid_close(struct hid_device *hdev) { }
  677. +
  678. +static int ithc_hid_parse(struct hid_device *hdev) {
  679. + struct ithc *ithc = hdev->driver_data;
  680. + u64 val = 0;
  681. + WRITE_ONCE(ithc->hid_parse_done, false);
  682. + CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof val, &val);
  683. + if (!wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done), msecs_to_jiffies(1000))) return -ETIMEDOUT;
  684. + return 0;
  685. +}
  686. +
  687. +static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) {
  688. + struct ithc *ithc = hdev->driver_data;
  689. + if (!buf || !len) return -EINVAL;
  690. + u32 code;
  691. + if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_OUTPUT_REPORT;
  692. + else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_SET_FEATURE;
  693. + else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) code = DMA_TX_CODE_GET_FEATURE;
  694. + else {
  695. + pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n", rtype, reqtype, reportnum);
  696. + return -EINVAL;
  697. + }
  698. + buf[0] = reportnum;
  699. + if (reqtype == HID_REQ_GET_REPORT) {
  700. + mutex_lock(&ithc->hid_get_feature_mutex);
  701. + ithc->hid_get_feature_buf = buf;
  702. + ithc->hid_get_feature_size = len;
  703. + mutex_unlock(&ithc->hid_get_feature_mutex);
  704. + int r = CHECK(ithc_dma_tx, ithc, code, 1, buf);
  705. + if (!r) {
  706. + r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature, !ithc->hid_get_feature_buf, msecs_to_jiffies(1000));
  707. + if (!r) r = -ETIMEDOUT;
  708. + else if (r < 0) r = -EINTR;
  709. + else r = 0;
  710. + }
  711. + mutex_lock(&ithc->hid_get_feature_mutex);
  712. + ithc->hid_get_feature_buf = NULL;
  713. + if (!r) r = ithc->hid_get_feature_size;
  714. + mutex_unlock(&ithc->hid_get_feature_mutex);
  715. + return r;
  716. + }
  717. + CHECK_RET(ithc_dma_tx, ithc, code, len, buf);
  718. + return 0;
  719. +}
  720. +
  721. +static struct hid_ll_driver ithc_ll_driver = {
  722. + .start = ithc_hid_start,
  723. + .stop = ithc_hid_stop,
  724. + .open = ithc_hid_open,
  725. + .close = ithc_hid_close,
  726. + .parse = ithc_hid_parse,
  727. + .raw_request = ithc_hid_raw_request,
  728. +};
  729. +
  730. +static void ithc_hid_devres_release(struct device *dev, void *res) {
  731. + struct hid_device **hidm = res;
  732. + if (*hidm) hid_destroy_device(*hidm);
  733. +}
  734. +
  735. +static int ithc_hid_init(struct ithc *ithc) {
  736. + struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof *hidm, GFP_KERNEL);
  737. + if (!hidm) return -ENOMEM;
  738. + devres_add(&ithc->pci->dev, hidm);
  739. + struct hid_device *hid = hid_allocate_device();
  740. + if (IS_ERR(hid)) return PTR_ERR(hid);
  741. + *hidm = hid;
  742. +
  743. + strscpy(hid->name, DEVFULLNAME, sizeof(hid->name));
  744. + strscpy(hid->phys, ithc->phys, sizeof(hid->phys));
  745. + hid->ll_driver = &ithc_ll_driver;
  746. + hid->bus = BUS_PCI;
  747. + hid->vendor = ithc->config.vendor_id;
  748. + hid->product = ithc->config.product_id;
  749. + hid->version = 0x100;
  750. + hid->dev.parent = &ithc->pci->dev;
  751. + hid->driver_data = ithc;
  752. +
  753. + ithc->hid = hid;
  754. + return 0;
  755. +}
  756. +
  757. +// Interrupts/polling
  758. +
  759. +static void ithc_activity_timer_callback(struct timer_list *t) {
  760. + struct ithc *ithc = container_of(t, struct ithc, activity_timer);
  761. + cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
  762. +}
  763. +
  764. +void ithc_set_active(struct ithc *ithc) {
  765. + // When CPU usage is very low, the CPU can enter various low power states (C2-C10).
  766. + // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_UNKNOWN_12 will be set when this happens.
  767. + // The amount of truncated messages can become very high, resulting in user-visible effects (laggy/stuttering cursor).
  768. + // To avoid this, we use a CPU latency QoS request to prevent the CPU from entering low power states during touch interactions.
  769. + cpu_latency_qos_update_request(&ithc->activity_qos, 0);
  770. + mod_timer(&ithc->activity_timer, jiffies + msecs_to_jiffies(1000));
  771. +}
  772. +
  773. +static int ithc_set_device_enabled(struct ithc *ithc, bool enable) {
  774. + u32 x = ithc->config.touch_cfg = (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2
  775. + | (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0);
  776. + return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE, offsetof(struct ithc_device_config, touch_cfg), sizeof x, &x);
  777. +}
  778. +
  779. +static void ithc_disable_interrupts(struct ithc *ithc) {
  780. + writel(0, &ithc->regs->error_control);
  781. + bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0);
  782. + bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
  783. + bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
  784. + bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0);
  785. +}
  786. +
  787. +static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned channel) {
  788. + writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA, &ithc->regs->dma_rx[channel].status);
  789. +}
  790. +
  791. +static void ithc_clear_interrupts(struct ithc *ithc) {
  792. + writel(0xffffffff, &ithc->regs->error_flags);
  793. + writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status);
  794. + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
  795. + ithc_clear_dma_rx_interrupts(ithc, 0);
  796. + ithc_clear_dma_rx_interrupts(ithc, 1);
  797. + writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2, &ithc->regs->dma_tx.status);
  798. +}
  799. +
  800. +static void ithc_process(struct ithc *ithc) {
  801. + ithc_log_regs(ithc);
  802. +
  803. + // read and clear error bits
  804. + u32 err = readl(&ithc->regs->error_flags);
  805. + if (err) {
  806. + if (err & ~ERROR_FLAG_DMA_UNKNOWN_12) pci_err(ithc->pci, "error flags: 0x%08x\n", err);
  807. + writel(err, &ithc->regs->error_flags);
  808. + }
  809. +
  810. + // process DMA rx
  811. + if (ithc_use_rx0) {
  812. + ithc_clear_dma_rx_interrupts(ithc, 0);
  813. + ithc_dma_rx(ithc, 0);
  814. + }
  815. + if (ithc_use_rx1) {
  816. + ithc_clear_dma_rx_interrupts(ithc, 1);
  817. + ithc_dma_rx(ithc, 1);
  818. + }
  819. +
  820. + ithc_log_regs(ithc);
  821. +}
  822. +
  823. +static irqreturn_t ithc_interrupt_thread(int irq, void *arg) {
  824. + struct ithc *ithc = arg;
  825. + pci_dbg(ithc->pci, "IRQ! err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n",
  826. + readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags),
  827. + readb(&ithc->regs->spi_cmd.control), readl(&ithc->regs->spi_cmd.status),
  828. + readb(&ithc->regs->dma_rx[0].control), readl(&ithc->regs->dma_rx[0].status),
  829. + readb(&ithc->regs->dma_rx[1].control), readl(&ithc->regs->dma_rx[1].status),
  830. + readb(&ithc->regs->dma_tx.control), readl(&ithc->regs->dma_tx.status));
  831. + ithc_process(ithc);
  832. + return IRQ_HANDLED;
  833. +}
  834. +
  835. +static int ithc_poll_thread(void *arg) {
  836. + struct ithc *ithc = arg;
  837. + unsigned sleep = 100;
  838. + while (!kthread_should_stop()) {
  839. + u32 n = ithc->dma_rx[1].num_received;
  840. + ithc_process(ithc);
  841. + if (n != ithc->dma_rx[1].num_received) sleep = 20;
  842. + else sleep = min(200u, sleep + (sleep >> 4) + 1);
  843. + msleep_interruptible(sleep);
  844. + }
  845. + return 0;
  846. +}
  847. +
  848. +// Device initialization and shutdown
  849. +
  850. +static void ithc_disable(struct ithc *ithc) {
  851. + bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE);
  852. + CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED);
  853. + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
  854. + bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND, 0);
  855. + bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
  856. + bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_ENABLE, 0);
  857. + bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_ENABLE, 0);
  858. + ithc_disable_interrupts(ithc);
  859. + ithc_clear_interrupts(ithc);
  860. +}
  861. +
  862. +static int ithc_init_device(struct ithc *ithc) {
  863. + ithc_log_regs(ithc);
  864. + bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0;
  865. + ithc_disable(ithc);
  866. + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY);
  867. + ithc_set_spi_config(ithc, 10, 0);
  868. + bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000); // seems to help with reading config
  869. +
  870. + if (was_enabled) if (msleep_interruptible(100)) return -EINTR;
  871. + bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
  872. + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0);
  873. + for (int retries = 0; ; retries++) {
  874. + ithc_log_regs(ithc);
  875. + bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET);
  876. + if (!waitl(ithc, &ithc->regs->state, 0xf, 2)) break;
  877. + if (retries > 5) {
  878. + pci_err(ithc->pci, "too many retries, failed to reset device\n");
  879. + return -ETIMEDOUT;
  880. + }
  881. + pci_err(ithc->pci, "invalid state, retrying reset\n");
  882. + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
  883. + if (msleep_interruptible(1000)) return -EINTR;
  884. + }
  885. + ithc_log_regs(ithc);
  886. +
  887. + CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4);
  888. +
  889. + // read config
  890. + for (int retries = 0; ; retries++) {
  891. + ithc_log_regs(ithc);
  892. + memset(&ithc->config, 0, sizeof ithc->config);
  893. + CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof ithc->config, &ithc->config);
  894. + u32 *p = (void *)&ithc->config;
  895. + pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
  896. + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
  897. + if (ithc_is_config_valid(ithc)) break;
  898. + if (retries > 10) {
  899. + pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n", ithc->config.device_id);
  900. + return -EIO;
  901. + }
  902. + pci_err(ithc->pci, "failed to read config, retrying\n");
  903. + if (msleep_interruptible(100)) return -EINTR;
  904. + }
  905. + ithc_log_regs(ithc);
  906. +
  907. + CHECK_RET(ithc_set_spi_config, ithc, DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config), DEVCFG_SPI_MODE(ithc->config.spi_config));
  908. + CHECK_RET(ithc_set_device_enabled, ithc, true);
  909. + ithc_log_regs(ithc);
  910. + return 0;
  911. +}
  912. +
  913. +int ithc_reset(struct ithc *ithc) {
  914. + // FIXME This should probably do devres_release_group()+ithc_start(). But because this is called during DMA
  915. + // processing, that would have to be done asynchronously (schedule_work()?). And with extra locking?
  916. + pci_err(ithc->pci, "reset\n");
  917. + CHECK(ithc_init_device, ithc);
  918. + if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0);
  919. + if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1);
  920. + ithc_log_regs(ithc);
  921. + pci_dbg(ithc->pci, "reset completed\n");
  922. + return 0;
  923. +}
  924. +
  925. +static void ithc_stop(void *res) {
  926. + struct ithc *ithc = res;
  927. + pci_dbg(ithc->pci, "stopping\n");
  928. + ithc_log_regs(ithc);
  929. + if (ithc->poll_thread) CHECK(kthread_stop, ithc->poll_thread);
  930. + if (ithc->irq >= 0) disable_irq(ithc->irq);
  931. + CHECK(ithc_set_device_enabled, ithc, false);
  932. + ithc_disable(ithc);
  933. + del_timer_sync(&ithc->activity_timer);
  934. + cpu_latency_qos_remove_request(&ithc->activity_qos);
  935. + // clear dma config
  936. + for(unsigned i = 0; i < 2; i++) {
  937. + CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0);
  938. + lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr);
  939. + writeb(0, &ithc->regs->dma_rx[i].num_bufs);
  940. + writeb(0, &ithc->regs->dma_rx[i].num_prds);
  941. + }
  942. + lo_hi_writeq(0, &ithc->regs->dma_tx.addr);
  943. + writeb(0, &ithc->regs->dma_tx.num_prds);
  944. + ithc_log_regs(ithc);
  945. + pci_dbg(ithc->pci, "stopped\n");
  946. +}
  947. +
  948. +static void ithc_clear_drvdata(void *res) {
  949. + struct pci_dev *pci = res;
  950. + pci_set_drvdata(pci, NULL);
  951. +}
  952. +
  953. +static int ithc_start(struct pci_dev *pci) {
  954. + pci_dbg(pci, "starting\n");
  955. + if (pci_get_drvdata(pci)) {
  956. + pci_err(pci, "device already initialized\n");
  957. + return -EINVAL;
  958. + }
  959. + if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL)) return -ENOMEM;
  960. +
  961. + struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof *ithc, GFP_KERNEL);
  962. + if (!ithc) return -ENOMEM;
  963. + ithc->irq = -1;
  964. + ithc->pci = pci;
  965. + snprintf(ithc->phys, sizeof ithc->phys, "pci-%s/" DEVNAME, pci_name(pci));
  966. + init_waitqueue_head(&ithc->wait_hid_parse);
  967. + init_waitqueue_head(&ithc->wait_hid_get_feature);
  968. + mutex_init(&ithc->hid_get_feature_mutex);
  969. + pci_set_drvdata(pci, ithc);
  970. + CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci);
  971. + if (ithc_log_regs_enabled) ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof *ithc->prev_regs, GFP_KERNEL);
  972. +
  973. + CHECK_RET(pcim_enable_device, pci);
  974. + pci_set_master(pci);
  975. + CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs");
  976. + CHECK_RET(dma_set_mask_and_coherent, &pci->dev, DMA_BIT_MASK(64));
  977. + CHECK_RET(pci_set_power_state, pci, PCI_D0);
  978. + ithc->regs = pcim_iomap_table(pci)[0];
  979. +
  980. + if (!ithc_use_polling) {
  981. + CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
  982. + ithc->irq = CHECK(pci_irq_vector, pci, 0);
  983. + if (ithc->irq < 0) return ithc->irq;
  984. + }
  985. +
  986. + CHECK_RET(ithc_init_device, ithc);
  987. + CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups);
  988. + if (ithc_use_rx0) CHECK_RET(ithc_dma_rx_init, ithc, 0, ithc_use_rx1 ? DEVNAME "0" : DEVNAME);
  989. + if (ithc_use_rx1) CHECK_RET(ithc_dma_rx_init, ithc, 1, ithc_use_rx0 ? DEVNAME "1" : DEVNAME);
  990. + CHECK_RET(ithc_dma_tx_init, ithc);
  991. +
  992. + CHECK_RET(ithc_hid_init, ithc);
  993. +
  994. + cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
  995. + timer_setup(&ithc->activity_timer, ithc_activity_timer_callback, 0);
  996. +
  997. + // add ithc_stop callback AFTER setting up DMA buffers, so that polling/irqs/DMA are disabled BEFORE the buffers are freed
  998. + CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc);
  999. +
  1000. + if (ithc_use_polling) {
  1001. + pci_info(pci, "using polling instead of irq\n");
  1002. + // use a thread instead of simple timer because we want to be able to sleep
  1003. + ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll");
  1004. + if (IS_ERR(ithc->poll_thread)) {
  1005. + int err = PTR_ERR(ithc->poll_thread);
  1006. + ithc->poll_thread = NULL;
  1007. + return err;
  1008. + }
  1009. + } else {
  1010. + CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL, ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc);
  1011. + }
  1012. +
  1013. + if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0);
  1014. + if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1);
  1015. +
1016. + // hid_add_device() can only be called after irq/polling is started and DMA is enabled, because it calls ithc_hid_parse(), which reads the report descriptor via DMA
  1017. + CHECK_RET(hid_add_device, ithc->hid);
  1018. +
  1019. + CHECK(ithc_debug_init, ithc);
  1020. +
  1021. + pci_dbg(pci, "started\n");
  1022. + return 0;
  1023. +}
  1024. +
  1025. +static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id) {
  1026. + pci_dbg(pci, "device probe\n");
  1027. + return ithc_start(pci);
  1028. +}
  1029. +
  1030. +static void ithc_remove(struct pci_dev *pci) {
  1031. + pci_dbg(pci, "device remove\n");
  1032. + // all cleanup is handled by devres
  1033. +}
  1034. +
  1035. +static int ithc_suspend(struct device *dev) {
  1036. + struct pci_dev *pci = to_pci_dev(dev);
  1037. + pci_dbg(pci, "pm suspend\n");
  1038. + devres_release_group(dev, ithc_start);
  1039. + return 0;
  1040. +}
  1041. +
  1042. +static int ithc_resume(struct device *dev) {
  1043. + struct pci_dev *pci = to_pci_dev(dev);
  1044. + pci_dbg(pci, "pm resume\n");
  1045. + return ithc_start(pci);
  1046. +}
  1047. +
  1048. +static int ithc_freeze(struct device *dev) {
  1049. + struct pci_dev *pci = to_pci_dev(dev);
  1050. + pci_dbg(pci, "pm freeze\n");
  1051. + devres_release_group(dev, ithc_start);
  1052. + return 0;
  1053. +}
  1054. +
  1055. +static int ithc_thaw(struct device *dev) {
  1056. + struct pci_dev *pci = to_pci_dev(dev);
  1057. + pci_dbg(pci, "pm thaw\n");
  1058. + return ithc_start(pci);
  1059. +}
  1060. +
  1061. +static int ithc_restore(struct device *dev) {
  1062. + struct pci_dev *pci = to_pci_dev(dev);
  1063. + pci_dbg(pci, "pm restore\n");
  1064. + return ithc_start(pci);
  1065. +}
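+
+// All of the PM callbacks follow the same pattern: suspend and freeze tear
+// down the entire driver state by releasing the devres group opened in
+// ithc_start(), while resume, thaw and restore reinitialize it from scratch.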
  1066. +
  1067. +static struct pci_driver ithc_driver = {
  1068. + .name = DEVNAME,
  1069. + .id_table = ithc_pci_tbl,
  1070. + .probe = ithc_probe,
  1071. + .remove = ithc_remove,
  1072. + .driver.pm = &(const struct dev_pm_ops) {
  1073. + .suspend = ithc_suspend,
  1074. + .resume = ithc_resume,
  1075. + .freeze = ithc_freeze,
  1076. + .thaw = ithc_thaw,
  1077. + .restore = ithc_restore,
  1078. + },
1079. + //.dev_groups = ithc_attribute_groups, // could use this (since 5.14); however, the attributes won't have valid values until the config has been read anyway
  1080. +};
  1081. +
  1082. +static int __init ithc_init(void) {
  1083. + return pci_register_driver(&ithc_driver);
  1084. +}
  1085. +
  1086. +static void __exit ithc_exit(void) {
  1087. + pci_unregister_driver(&ithc_driver);
  1088. +}
  1089. +
  1090. +module_init(ithc_init);
  1091. +module_exit(ithc_exit);
  1092. +
  1093. diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c
  1094. new file mode 100644
  1095. index 0000000000000..85d567b05761f
  1096. --- /dev/null
  1097. +++ b/drivers/hid/ithc/ithc-regs.c
  1098. @@ -0,0 +1,64 @@
  1099. +#include "ithc.h"
  1100. +
  1101. +#define reg_num(r) (0x1fff & (u16)(__force u64)(r))
  1102. +
  1103. +void bitsl(__iomem u32 *reg, u32 mask, u32 val) {
  1104. + if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask);
  1105. + writel((readl(reg) & ~mask) | (val & mask), reg);
  1106. +}
  1107. +
  1108. +void bitsb(__iomem u8 *reg, u8 mask, u8 val) {
  1109. + if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask);
  1110. + writeb((readb(reg) & ~mask) | (val & mask), reg);
  1111. +}
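+
+// Example usage (a sketch): change only the SPI mode field while leaving the
+// rest of spi_config untouched; ithc_set_spi_config() below does the same for
+// all of the SPI fields at once.
+//
+//	bitsl(&ithc->regs->spi_config, SPI_CONFIG_MODE(0xff), SPI_CONFIG_MODE(2));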
  1112. +
  1113. +int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val) {
  1114. + pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val);
  1115. + u32 x;
  1116. + if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
  1117. + pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val);
  1118. + return -ETIMEDOUT;
  1119. + }
  1120. + pci_dbg(ithc->pci, "done waiting\n");
  1121. + return 0;
  1122. +}
  1123. +
  1124. +int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val) {
  1125. + pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val);
  1126. + u8 x;
  1127. + if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
  1128. + pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val);
  1129. + return -ETIMEDOUT;
  1130. + }
  1131. + pci_dbg(ithc->pci, "done waiting\n");
  1132. + return 0;
  1133. +}
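+
+// Example usage: ithc_dma_rx_enable() in ithc-dma.c uses waitl() to verify
+// that an RX channel actually reports itself as enabled:
+//
+//	CHECK(waitl, ithc, &ithc->regs->dma_rx[channel].status,
+//		DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);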
  1134. +
  1135. +int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode) {
  1136. + pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode);
  1137. + if (mode == 3) mode = 2;
  1138. + bitsl(&ithc->regs->spi_config,
  1139. + SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff),
  1140. + SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed));
  1141. + return 0;
  1142. +}
  1143. +
  1144. +int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data) {
  1145. + pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset);
  1146. + if (size > sizeof ithc->regs->spi_cmd.data) return -EINVAL;
  1147. + CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
  1148. + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
  1149. + writeb(command, &ithc->regs->spi_cmd.code);
  1150. + writew(size, &ithc->regs->spi_cmd.size);
  1151. + writel(offset, &ithc->regs->spi_cmd.offset);
  1152. + u32 *p = data, n = (size + 3) / 4;
  1153. + for (u32 i = 0; i < n; i++) writel(p[i], &ithc->regs->spi_cmd.data[i]);
  1154. + bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND);
  1155. + CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
  1156. + if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE) return -EIO;
  1157. + if (readw(&ithc->regs->spi_cmd.size) != size) return -EMSGSIZE;
  1158. + for (u32 i = 0; i < n; i++) p[i] = readl(&ithc->regs->spi_cmd.data[i]);
  1159. + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
  1160. + return 0;
  1161. +}
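+
+// Example usage (a sketch): read the 64-byte device config over SPI, which is
+// what the "read config" debug command in ithc-debug.c does:
+//
+//	struct ithc_device_config cfg = { 0 };
+//	CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof(cfg), &cfg);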
  1162. +
  1163. diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h
  1164. new file mode 100644
  1165. index 0000000000000..1a96092ed7eed
  1166. --- /dev/null
  1167. +++ b/drivers/hid/ithc/ithc-regs.h
  1168. @@ -0,0 +1,186 @@
  1169. +#define CONTROL_QUIESCE BIT(1)
  1170. +#define CONTROL_IS_QUIESCED BIT(2)
  1171. +#define CONTROL_NRESET BIT(3)
  1172. +#define CONTROL_READY BIT(29)
  1173. +
  1174. +#define SPI_CONFIG_MODE(x) (((x) & 3) << 2)
  1175. +#define SPI_CONFIG_SPEED(x) (((x) & 7) << 4)
  1176. +#define SPI_CONFIG_UNKNOWN_18(x) (((x) & 3) << 18)
  1177. +#define SPI_CONFIG_SPEED2(x) (((x) & 0xf) << 20) // high bit = high speed mode?
  1178. +
  1179. +#define ERROR_CONTROL_UNKNOWN_0 BIT(0)
  1180. +#define ERROR_CONTROL_DISABLE_DMA BIT(1) // clears DMA_RX_CONTROL_ENABLE when a DMA error occurs
  1181. +#define ERROR_CONTROL_UNKNOWN_2 BIT(2)
  1182. +#define ERROR_CONTROL_UNKNOWN_3 BIT(3)
  1183. +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_9 BIT(9)
  1184. +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_10 BIT(10)
  1185. +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_12 BIT(12)
  1186. +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_13 BIT(13)
  1187. +#define ERROR_CONTROL_UNKNOWN_16(x) (((x) & 0xff) << 16) // spi error code irq?
  1188. +#define ERROR_CONTROL_SET_DMA_STATUS BIT(29) // sets DMA_RX_STATUS_ERROR when a DMA error occurs
  1189. +
  1190. +#define ERROR_STATUS_DMA BIT(28)
  1191. +#define ERROR_STATUS_SPI BIT(30)
  1192. +
  1193. +#define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9)
  1194. +#define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10)
  1195. +#define ERROR_FLAG_DMA_UNKNOWN_12 BIT(12) // set when we receive a truncated DMA message
  1196. +#define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13)
  1197. +#define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16)
  1198. +#define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17)
  1199. +#define ERROR_FLAG_SPI_INTRA_PACKET_TIMEOUT BIT(18)
  1200. +#define ERROR_FLAG_SPI_INVALID_RESPONSE BIT(19)
  1201. +#define ERROR_FLAG_SPI_HS_RX_TIMEOUT BIT(20)
  1202. +#define ERROR_FLAG_SPI_TOUCH_IC_INIT BIT(21)
  1203. +
  1204. +#define SPI_CMD_CONTROL_SEND BIT(0) // cleared by device when sending is complete
  1205. +#define SPI_CMD_CONTROL_IRQ BIT(1)
  1206. +
  1207. +#define SPI_CMD_CODE_READ 4
  1208. +#define SPI_CMD_CODE_WRITE 6
  1209. +
  1210. +#define SPI_CMD_STATUS_DONE BIT(0)
  1211. +#define SPI_CMD_STATUS_ERROR BIT(1)
  1212. +#define SPI_CMD_STATUS_BUSY BIT(3)
  1213. +
  1214. +#define DMA_TX_CONTROL_SEND BIT(0) // cleared by device when sending is complete
  1215. +#define DMA_TX_CONTROL_IRQ BIT(3)
  1216. +
  1217. +#define DMA_TX_STATUS_DONE BIT(0)
  1218. +#define DMA_TX_STATUS_ERROR BIT(1)
  1219. +#define DMA_TX_STATUS_UNKNOWN_2 BIT(2)
  1220. +#define DMA_TX_STATUS_UNKNOWN_3 BIT(3) // busy?
  1221. +
  1222. +#define DMA_RX_CONTROL_ENABLE BIT(0)
  1223. +#define DMA_RX_CONTROL_IRQ_UNKNOWN_1 BIT(1) // rx1 only?
  1224. +#define DMA_RX_CONTROL_IRQ_ERROR BIT(3) // rx1 only?
  1225. +#define DMA_RX_CONTROL_IRQ_UNKNOWN_4 BIT(4) // rx0 only?
  1226. +#define DMA_RX_CONTROL_IRQ_DATA BIT(5)
  1227. +
  1228. +#define DMA_RX_CONTROL2_UNKNOWN_5 BIT(5) // rx0 only?
  1229. +#define DMA_RX_CONTROL2_RESET BIT(7) // resets ringbuffer indices
  1230. +
  1231. +#define DMA_RX_WRAP_FLAG BIT(7)
  1232. +
  1233. +#define DMA_RX_STATUS_ERROR BIT(3)
  1234. +#define DMA_RX_STATUS_UNKNOWN_4 BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms)
  1235. +#define DMA_RX_STATUS_HAVE_DATA BIT(5)
  1236. +#define DMA_RX_STATUS_ENABLED BIT(8)
  1237. +
  1238. +#define COUNTER_RESET BIT(31)
  1239. +
  1240. +struct ithc_registers {
  1241. + /* 0000 */ u32 _unknown_0000[1024];
  1242. + /* 1000 */ u32 _unknown_1000;
  1243. + /* 1004 */ u32 _unknown_1004;
  1244. + /* 1008 */ u32 control_bits;
  1245. + /* 100c */ u32 _unknown_100c;
  1246. + /* 1010 */ u32 spi_config;
  1247. + /* 1014 */ u32 _unknown_1014[3];
  1248. + /* 1020 */ u32 error_control;
  1249. + /* 1024 */ u32 error_status; // write to clear
  1250. + /* 1028 */ u32 error_flags; // write to clear
  1251. + /* 102c */ u32 _unknown_102c[5];
  1252. + struct {
  1253. + /* 1040 */ u8 control;
  1254. + /* 1041 */ u8 code;
  1255. + /* 1042 */ u16 size;
  1256. + /* 1044 */ u32 status; // write to clear
  1257. + /* 1048 */ u32 offset;
  1258. + /* 104c */ u32 data[16];
  1259. + /* 108c */ u32 _unknown_108c;
  1260. + } spi_cmd;
  1261. + struct {
  1262. + /* 1090 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
  1263. + /* 1098 */ u8 control;
  1264. + /* 1099 */ u8 _unknown_1099;
  1265. + /* 109a */ u8 _unknown_109a;
  1266. + /* 109b */ u8 num_prds;
  1267. + /* 109c */ u32 status; // write to clear
  1268. + } dma_tx;
  1269. + /* 10a0 */ u32 _unknown_10a0[7];
  1270. + /* 10bc */ u32 state; // is 0xe0000402 (dev config val 0) after CONTROL_NRESET, 0xe0000461 after first touch, 0xe0000401 after DMA_RX_CODE_RESET
  1271. + /* 10c0 */ u32 _unknown_10c0[8];
  1272. + /* 10e0 */ u32 _unknown_10e0_counters[3];
  1273. + /* 10ec */ u32 _unknown_10ec[5];
  1274. + struct {
  1275. + /* 1100/1200 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
  1276. + /* 1108/1208 */ u8 num_bufs;
  1277. + /* 1109/1209 */ u8 num_prds;
  1278. + /* 110a/120a */ u16 _unknown_110a;
  1279. + /* 110c/120c */ u8 control;
  1280. + /* 110d/120d */ u8 head;
  1281. + /* 110e/120e */ u8 tail;
  1282. + /* 110f/120f */ u8 control2;
  1283. + /* 1110/1210 */ u32 status; // write to clear
  1284. + /* 1114/1214 */ u32 _unknown_1114;
  1285. + /* 1118/1218 */ u64 _unknown_1118_guc_addr;
  1286. + /* 1120/1220 */ u32 _unknown_1120_guc;
  1287. + /* 1124/1224 */ u32 _unknown_1124_guc;
  1288. + /* 1128/1228 */ u32 unknown_init_bits; // bit 2 = guc related, bit 3 = rx1 related, bit 4 = guc related
  1289. + /* 112c/122c */ u32 _unknown_112c;
  1290. + /* 1130/1230 */ u64 _unknown_1130_guc_addr;
  1291. + /* 1138/1238 */ u32 _unknown_1138_guc;
  1292. + /* 113c/123c */ u32 _unknown_113c;
  1293. + /* 1140/1240 */ u32 _unknown_1140_guc;
  1294. + /* 1144/1244 */ u32 _unknown_1144[23];
  1295. + /* 11a0/12a0 */ u32 _unknown_11a0_counters[6];
  1296. + /* 11b8/12b8 */ u32 _unknown_11b8[18];
  1297. + } dma_rx[2];
  1298. +};
  1299. +static_assert(sizeof(struct ithc_registers) == 0x1300);
  1300. +
  1301. +#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6)
  1302. +#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6)
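+// Worked example: with dma_buf_sizes = 0x000a00ff (the config value listed
+// below), DEVCFG_DMA_RX_SIZE yields ((0xff & 0x3fff) + 1) << 6 = 16384 bytes,
+// and DEVCFG_DMA_TX_SIZE yields (((0x000a00ff >> 14) & 0x3ff) + 1) << 6 =
+// (0x28 + 1) << 6 = 2624 bytes.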
  1303. +
  1304. +#define DEVCFG_TOUCH_MASK 0x3f
  1305. +#define DEVCFG_TOUCH_ENABLE BIT(0)
  1306. +#define DEVCFG_TOUCH_UNKNOWN_1 BIT(1)
  1307. +#define DEVCFG_TOUCH_UNKNOWN_2 BIT(2)
  1308. +#define DEVCFG_TOUCH_UNKNOWN_3 BIT(3)
  1309. +#define DEVCFG_TOUCH_UNKNOWN_4 BIT(4)
  1310. +#define DEVCFG_TOUCH_UNKNOWN_5 BIT(5)
  1311. +#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6)
  1312. +
  1313. +#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC"
  1314. +
  1315. +#define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode?
  1316. +#define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3)
  1317. +#define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f)
  1318. +#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20)
1319. +#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
  1320. +#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
  1321. +#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
  1322. +#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
1323. +#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7)
  1324. +#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31)
  1325. +
  1326. +struct ithc_device_config {
  1327. + u32 _unknown_00; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
  1328. + u32 _unknown_04; // 04 = 0x00000000
  1329. + u32 dma_buf_sizes; // 08 = 0x000a00ff
  1330. + u32 touch_cfg; // 0c = 0x0000001c
  1331. + u32 _unknown_10; // 10 = 0x0000001c
  1332. + u32 device_id; // 14 = 0x43495424 = "$TIC"
  1333. + u32 spi_config; // 18 = 0xfda00a2e
  1334. + u16 vendor_id; // 1c = 0x045e = Microsoft Corp.
  1335. + u16 product_id; // 1e = 0x0c1a
  1336. + u32 revision; // 20 = 0x00000001
  1337. + u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139
  1338. + u32 _unknown_28; // 28 = 0x00000000
  1339. + u32 fw_mode; // 2c = 0x00000000
  1340. + u32 _unknown_30; // 30 = 0x00000000
  1341. + u32 _unknown_34; // 34 = 0x0404035e (u8,u8,u8,u8 = version?)
  1342. + u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
  1343. + u32 _unknown_3c; // 3c = 0x00000002
  1344. +};
  1345. +
  1346. +void bitsl(__iomem u32 *reg, u32 mask, u32 val);
  1347. +void bitsb(__iomem u8 *reg, u8 mask, u8 val);
  1348. +#define bitsl_set(reg, x) bitsl(reg, x, x)
  1349. +#define bitsb_set(reg, x) bitsb(reg, x, x)
  1350. +int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val);
  1351. +int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val);
  1352. +int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode);
  1353. +int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data);
  1354. +
  1355. diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
  1356. new file mode 100644
  1357. index 0000000000000..6a9b0d480bc15
  1358. --- /dev/null
  1359. +++ b/drivers/hid/ithc/ithc.h
  1360. @@ -0,0 +1,60 @@
  1361. +#include <linux/module.h>
  1362. +#include <linux/input.h>
  1363. +#include <linux/hid.h>
  1364. +#include <linux/dma-mapping.h>
  1365. +#include <linux/highmem.h>
  1366. +#include <linux/pci.h>
  1367. +#include <linux/io-64-nonatomic-lo-hi.h>
  1368. +#include <linux/iopoll.h>
  1369. +#include <linux/delay.h>
  1370. +#include <linux/kthread.h>
  1371. +#include <linux/miscdevice.h>
  1372. +#include <linux/debugfs.h>
  1373. +#include <linux/poll.h>
  1374. +#include <linux/timer.h>
  1375. +#include <linux/pm_qos.h>
  1376. +
  1377. +#define DEVNAME "ithc"
  1378. +#define DEVFULLNAME "Intel Touch Host Controller"
  1379. +
  1380. +#undef pr_fmt
  1381. +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  1382. +
  1383. +#define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; })
  1384. +#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while(0)
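+// Example: CHECK_RET(pcim_enable_device, pci) calls pcim_enable_device(pci),
+// logs a pci_err() message if it fails, and returns the error code from the
+// enclosing function on failure.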
  1385. +
  1386. +#define NUM_RX_BUF 16
  1387. +
  1388. +struct ithc;
  1389. +
  1390. +#include "ithc-regs.h"
  1391. +#include "ithc-dma.h"
  1392. +
  1393. +struct ithc {
  1394. + char phys[32];
  1395. + struct pci_dev *pci;
  1396. + int irq;
  1397. + struct task_struct *poll_thread;
  1398. + struct pm_qos_request activity_qos;
  1399. + struct timer_list activity_timer;
  1400. +
  1401. + struct hid_device *hid;
  1402. + bool hid_parse_done;
  1403. + wait_queue_head_t wait_hid_parse;
  1404. + wait_queue_head_t wait_hid_get_feature;
  1405. + struct mutex hid_get_feature_mutex;
  1406. + void *hid_get_feature_buf;
  1407. + size_t hid_get_feature_size;
  1408. +
  1409. + struct ithc_registers __iomem *regs;
  1410. + struct ithc_registers *prev_regs; // for debugging
  1411. + struct ithc_device_config config;
  1412. + struct ithc_dma_rx dma_rx[2];
  1413. + struct ithc_dma_tx dma_tx;
  1414. +};
  1415. +
  1416. +int ithc_reset(struct ithc *ithc);
  1417. +void ithc_set_active(struct ithc *ithc);
  1418. +int ithc_debug_init(struct ithc *ithc);
  1419. +void ithc_log_regs(struct ithc *ithc);
  1420. +
  1421. --
  1422. 2.43.0
  1423. From 37dc17be7687c220d8c84f3ed200fa4fedeafb04 Mon Sep 17 00:00:00 2001
  1424. From: quo <tuple@list.ru>
  1425. Date: Mon, 23 Oct 2023 10:15:29 +0200
  1426. Subject: [PATCH] Update ITHC from module repo
  1427. Changes:
  1428. - Added some comments and fixed a few checkpatch warnings
  1429. - Improved CPU latency QoS handling
1430. - Added retries when reading the report descriptor fails or times out
  1431. Based on https://github.com/quo/ithc-linux/commit/0b8b45d9775e756d6bd3a699bfaf9f5bd7b9b10b
  1432. Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
  1433. Patchset: ithc
  1434. ---
  1435. drivers/hid/ithc/ithc-debug.c | 94 +++++---
  1436. drivers/hid/ithc/ithc-dma.c | 231 +++++++++++++-----
  1437. drivers/hid/ithc/ithc-dma.h | 4 +-
  1438. drivers/hid/ithc/ithc-main.c | 430 ++++++++++++++++++++++++----------
  1439. drivers/hid/ithc/ithc-regs.c | 68 ++++--
  1440. drivers/hid/ithc/ithc-regs.h | 19 +-
  1441. drivers/hid/ithc/ithc.h | 13 +-
  1442. 7 files changed, 623 insertions(+), 236 deletions(-)
  1443. diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c
  1444. index 57bf125c45bd5..1f1f1e33f2e5a 100644
  1445. --- a/drivers/hid/ithc/ithc-debug.c
  1446. +++ b/drivers/hid/ithc/ithc-debug.c
  1447. @@ -1,10 +1,14 @@
  1448. +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
  1449. +
  1450. #include "ithc.h"
  1451. -void ithc_log_regs(struct ithc *ithc) {
  1452. - if (!ithc->prev_regs) return;
  1453. - u32 __iomem *cur = (__iomem void*)ithc->regs;
  1454. - u32 *prev = (void*)ithc->prev_regs;
  1455. - for (int i = 1024; i < sizeof *ithc->regs / 4; i++) {
  1456. +void ithc_log_regs(struct ithc *ithc)
  1457. +{
  1458. + if (!ithc->prev_regs)
  1459. + return;
  1460. + u32 __iomem *cur = (__iomem void *)ithc->regs;
  1461. + u32 *prev = (void *)ithc->prev_regs;
  1462. + for (int i = 1024; i < sizeof(*ithc->regs) / 4; i++) {
  1463. u32 x = readl(cur + i);
  1464. if (x != prev[i]) {
  1465. pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x);
  1466. @@ -13,55 +17,79 @@ void ithc_log_regs(struct ithc *ithc) {
  1467. }
  1468. }
  1469. -static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len, loff_t *offset) {
  1470. +static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len,
  1471. + loff_t *offset)
  1472. +{
  1473. + // Debug commands consist of a single letter followed by a list of numbers (decimal or
  1474. + // hexadecimal, space-separated).
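+ // Commands are written to the debugfs file created in ithc_debug_init(),
+ // e.g.: echo "r 0x1008" > /sys/kernel/debug/ithc/cmd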
  1475. struct ithc *ithc = file_inode(f)->i_private;
  1476. char cmd[256];
  1477. - if (!ithc || !ithc->pci) return -ENODEV;
  1478. - if (!len) return -EINVAL;
  1479. - if (len >= sizeof cmd) return -EINVAL;
  1480. - if (copy_from_user(cmd, buf, len)) return -EFAULT;
  1481. + if (!ithc || !ithc->pci)
  1482. + return -ENODEV;
  1483. + if (!len)
  1484. + return -EINVAL;
  1485. + if (len >= sizeof(cmd))
  1486. + return -EINVAL;
  1487. + if (copy_from_user(cmd, buf, len))
  1488. + return -EFAULT;
  1489. cmd[len] = 0;
  1490. - if (cmd[len-1] == '\n') cmd[len-1] = 0;
  1491. + if (cmd[len-1] == '\n')
  1492. + cmd[len-1] = 0;
  1493. pci_info(ithc->pci, "debug command: %s\n", cmd);
  1494. +
  1495. + // Parse the list of arguments into a u32 array.
  1496. u32 n = 0;
  1497. const char *s = cmd + 1;
  1498. u32 a[32];
  1499. while (*s && *s != '\n') {
  1500. - if (n >= ARRAY_SIZE(a)) return -EINVAL;
  1501. - if (*s++ != ' ') return -EINVAL;
  1502. + if (n >= ARRAY_SIZE(a))
  1503. + return -EINVAL;
  1504. + if (*s++ != ' ')
  1505. + return -EINVAL;
  1506. char *e;
  1507. a[n++] = simple_strtoul(s, &e, 0);
  1508. - if (e == s) return -EINVAL;
  1509. + if (e == s)
  1510. + return -EINVAL;
  1511. s = e;
  1512. }
  1513. ithc_log_regs(ithc);
  1514. - switch(cmd[0]) {
  1515. +
  1516. + // Execute the command.
  1517. + switch (cmd[0]) {
  1518. case 'x': // reset
  1519. ithc_reset(ithc);
  1520. break;
  1521. case 'w': // write register: offset mask value
  1522. - if (n != 3 || (a[0] & 3)) return -EINVAL;
  1523. - pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n", a[0], a[2], a[1]);
  1524. + if (n != 3 || (a[0] & 3))
  1525. + return -EINVAL;
  1526. + pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n",
  1527. + a[0], a[2], a[1]);
  1528. bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]);
  1529. break;
  1530. case 'r': // read register: offset
  1531. - if (n != 1 || (a[0] & 3)) return -EINVAL;
  1532. - pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0], readl(((__iomem u32 *)ithc->regs) + a[0] / 4));
  1533. + if (n != 1 || (a[0] & 3))
  1534. + return -EINVAL;
  1535. + pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0],
  1536. + readl(((__iomem u32 *)ithc->regs) + a[0] / 4));
  1537. break;
  1538. case 's': // spi command: cmd offset len data...
  1539. // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
  1540. // set touch cfg: s 6 12 4 XX
  1541. - if (n < 3 || a[2] > (n - 3) * 4) return -EINVAL;
  1542. + if (n < 3 || a[2] > (n - 3) * 4)
  1543. + return -EINVAL;
  1544. pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]);
  1545. if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3))
  1546. - for (u32 i = 0; i < (a[2] + 3) / 4; i++) pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]);
  1547. + for (u32 i = 0; i < (a[2] + 3) / 4; i++)
  1548. + pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]);
  1549. break;
  1550. case 'd': // dma command: cmd len data...
  1551. // get report descriptor: d 7 8 0 0
  1552. // enable multitouch: d 3 2 0x0105
  1553. - if (n < 2 || a[1] > (n - 2) * 4) return -EINVAL;
  1554. + if (n < 2 || a[1] > (n - 2) * 4)
  1555. + return -EINVAL;
  1556. pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]);
  1557. - if (ithc_dma_tx(ithc, a[0], a[1], a + 2)) pci_err(ithc->pci, "dma tx failed\n");
  1558. + if (ithc_dma_tx(ithc, a[0], a[1], a + 2))
  1559. + pci_err(ithc->pci, "dma tx failed\n");
  1560. break;
  1561. default:
  1562. return -EINVAL;
  1563. @@ -75,21 +103,27 @@ static const struct file_operations ithc_debugfops_cmd = {
  1564. .write = ithc_debugfs_cmd_write,
  1565. };
  1566. -static void ithc_debugfs_devres_release(struct device *dev, void *res) {
  1567. +static void ithc_debugfs_devres_release(struct device *dev, void *res)
  1568. +{
  1569. struct dentry **dbgm = res;
  1570. - if (*dbgm) debugfs_remove_recursive(*dbgm);
  1571. + if (*dbgm)
  1572. + debugfs_remove_recursive(*dbgm);
  1573. }
  1574. -int ithc_debug_init(struct ithc *ithc) {
  1575. - struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof *dbgm, GFP_KERNEL);
  1576. - if (!dbgm) return -ENOMEM;
  1577. +int ithc_debug_init(struct ithc *ithc)
  1578. +{
  1579. + struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof(*dbgm), GFP_KERNEL);
  1580. + if (!dbgm)
  1581. + return -ENOMEM;
  1582. devres_add(&ithc->pci->dev, dbgm);
  1583. struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL);
  1584. - if (IS_ERR(dbg)) return PTR_ERR(dbg);
  1585. + if (IS_ERR(dbg))
  1586. + return PTR_ERR(dbg);
  1587. *dbgm = dbg;
  1588. struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd);
  1589. - if (IS_ERR(cmd)) return PTR_ERR(cmd);
  1590. + if (IS_ERR(cmd))
  1591. + return PTR_ERR(cmd);
  1592. return 0;
  1593. }
  1594. diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c
  1595. index 7e89b3496918d..ffb8689b8a780 100644
  1596. --- a/drivers/hid/ithc/ithc-dma.c
  1597. +++ b/drivers/hid/ithc/ithc-dma.c
  1598. @@ -1,59 +1,91 @@
  1599. +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
  1600. +
  1601. #include "ithc.h"
  1602. -static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p, unsigned num_buffers, unsigned num_pages, enum dma_data_direction dir) {
  1603. +// The THC uses tables of PRDs (physical region descriptors) to describe the TX and RX data buffers.
  1604. +// Each PRD contains the DMA address and size of a block of DMA memory, and some status flags.
  1605. +// This allows each data buffer to consist of multiple non-contiguous blocks of memory.
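+// (Each PRD stores the block's physical address in 1 KiB units, hence the
+// ">> 10" below, plus a size word whose low 24 bits (PRD_SIZE_MASK) hold the
+// byte count and whose bit 24 (PRD_FLAG_END) marks the buffer's final block.)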
  1606. +
  1607. +static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p,
  1608. + unsigned int num_buffers, unsigned int num_pages, enum dma_data_direction dir)
  1609. +{
  1610. p->num_pages = num_pages;
  1611. p->dir = dir;
1612. + // We allocate enough space to have one PRD per data buffer page; however, if the data
  1613. + // buffer pages happen to be contiguous, we can describe the buffer using fewer PRDs, so
  1614. + // some will remain unused (which is fine).
  1615. p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE);
  1616. p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL);
  1617. - if (!p->addr) return -ENOMEM;
  1618. - if (p->dma_addr & (PAGE_SIZE - 1)) return -EFAULT;
  1619. + if (!p->addr)
  1620. + return -ENOMEM;
  1621. + if (p->dma_addr & (PAGE_SIZE - 1))
  1622. + return -EFAULT;
  1623. return 0;
  1624. }
  1625. +// Devres managed sg_table wrapper.
  1626. struct ithc_sg_table {
  1627. void *addr;
  1628. struct sg_table sgt;
  1629. enum dma_data_direction dir;
  1630. };
  1631. -static void ithc_dma_sgtable_free(struct sg_table *sgt) {
  1632. +static void ithc_dma_sgtable_free(struct sg_table *sgt)
  1633. +{
  1634. struct scatterlist *sg;
  1635. int i;
  1636. for_each_sgtable_sg(sgt, sg, i) {
  1637. struct page *p = sg_page(sg);
  1638. - if (p) __free_page(p);
  1639. + if (p)
  1640. + __free_page(p);
  1641. }
  1642. sg_free_table(sgt);
  1643. }
  1644. -static void ithc_dma_data_devres_release(struct device *dev, void *res) {
  1645. +static void ithc_dma_data_devres_release(struct device *dev, void *res)
  1646. +{
  1647. struct ithc_sg_table *sgt = res;
  1648. - if (sgt->addr) vunmap(sgt->addr);
  1649. + if (sgt->addr)
  1650. + vunmap(sgt->addr);
  1651. dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0);
  1652. ithc_dma_sgtable_free(&sgt->sgt);
  1653. }
  1654. -static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b) {
  1655. - // We don't use dma_alloc_coherent for data buffers, because they don't have to be contiguous (we can use one PRD per page) or coherent (they are unidirectional).
  1656. - // Instead we use an sg_table of individually allocated pages (5.13 has dma_alloc_noncontiguous for this, but we'd like to support 5.10 for now).
  1657. +static int ithc_dma_data_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
  1658. + struct ithc_dma_data_buffer *b)
  1659. +{
  1660. + // We don't use dma_alloc_coherent() for data buffers, because they don't have to be
  1661. + // coherent (they are unidirectional) or contiguous (we can use one PRD per page).
  1662. + // We could use dma_alloc_noncontiguous(), however this still always allocates a single
  1663. + // DMA mapped segment, which is more restrictive than what we need.
  1664. + // Instead we use an sg_table of individually allocated pages.
  1665. struct page *pages[16];
  1666. - if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages)) return -EINVAL;
  1667. + if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages))
  1668. + return -EINVAL;
  1669. b->active_idx = -1;
  1670. - struct ithc_sg_table *sgt = devres_alloc(ithc_dma_data_devres_release, sizeof *sgt, GFP_KERNEL);
  1671. - if (!sgt) return -ENOMEM;
  1672. + struct ithc_sg_table *sgt = devres_alloc(
  1673. + ithc_dma_data_devres_release, sizeof(*sgt), GFP_KERNEL);
  1674. + if (!sgt)
  1675. + return -ENOMEM;
  1676. sgt->dir = prds->dir;
  1677. +
  1678. if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) {
  1679. struct scatterlist *sg;
  1680. int i;
  1681. bool ok = true;
  1682. for_each_sgtable_sg(&sgt->sgt, sg, i) {
  1683. - struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); // don't need __GFP_DMA for PCI DMA
  1684. - if (!p) { ok = false; break; }
  1685. + // NOTE: don't need __GFP_DMA for PCI DMA
  1686. + struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
  1687. + if (!p) {
  1688. + ok = false;
  1689. + break;
  1690. + }
  1691. sg_set_page(sg, p, PAGE_SIZE, 0);
  1692. }
  1693. if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) {
  1694. devres_add(&ithc->pci->dev, sgt);
  1695. b->sgt = &sgt->sgt;
  1696. b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL);
  1697. - if (!b->addr) return -ENOMEM;
  1698. + if (!b->addr)
  1699. + return -ENOMEM;
  1700. return 0;
  1701. }
  1702. ithc_dma_sgtable_free(&sgt->sgt);
  1703. @@ -62,17 +94,29 @@ static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *pr
  1704. return -ENOMEM;
  1705. }
  1706. -static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
  1707. +static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
  1708. + struct ithc_dma_data_buffer *b, unsigned int idx)
  1709. +{
  1710. + // Give a buffer to the THC.
  1711. struct ithc_phys_region_desc *prd = prds->addr;
  1712. prd += idx * prds->num_pages;
  1713. - if (b->active_idx >= 0) { pci_err(ithc->pci, "buffer already active\n"); return -EINVAL; }
  1714. + if (b->active_idx >= 0) {
  1715. + pci_err(ithc->pci, "buffer already active\n");
  1716. + return -EINVAL;
  1717. + }
  1718. b->active_idx = idx;
  1719. if (prds->dir == DMA_TO_DEVICE) {
  1720. - if (b->data_size > PAGE_SIZE) return -EINVAL;
  1721. + // TX buffer: Caller should have already filled the data buffer, so just fill
  1722. + // the PRD and flush.
  1723. + // (TODO: Support multi-page TX buffers. So far no device seems to use or need
  1724. + // these though.)
  1725. + if (b->data_size > PAGE_SIZE)
  1726. + return -EINVAL;
  1727. prd->addr = sg_dma_address(b->sgt->sgl) >> 10;
  1728. prd->size = b->data_size | PRD_FLAG_END;
  1729. flush_kernel_vmap_range(b->addr, b->data_size);
  1730. } else if (prds->dir == DMA_FROM_DEVICE) {
  1731. + // RX buffer: Reset PRDs.
  1732. struct scatterlist *sg;
  1733. int i;
  1734. for_each_sgtable_dma_sg(b->sgt, sg, i) {
  1735. @@ -87,21 +131,34 @@ static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffe
  1736. return 0;
  1737. }
  1738. -static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) {
  1739. +static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
  1740. + struct ithc_dma_data_buffer *b, unsigned int idx)
  1741. +{
  1742. + // Take a buffer from the THC.
  1743. struct ithc_phys_region_desc *prd = prds->addr;
  1744. prd += idx * prds->num_pages;
  1745. - if (b->active_idx != idx) { pci_err(ithc->pci, "wrong buffer index\n"); return -EINVAL; }
  1746. + // This is purely a sanity check. We don't strictly need the idx parameter for this
  1747. + // function, because it should always be the same as active_idx, unless we have a bug.
  1748. + if (b->active_idx != idx) {
  1749. + pci_err(ithc->pci, "wrong buffer index\n");
  1750. + return -EINVAL;
  1751. + }
  1752. b->active_idx = -1;
  1753. if (prds->dir == DMA_FROM_DEVICE) {
  1754. + // RX buffer: Calculate actual received data size from PRDs.
  1755. dma_rmb(); // for the prds
  1756. b->data_size = 0;
  1757. struct scatterlist *sg;
  1758. int i;
  1759. for_each_sgtable_dma_sg(b->sgt, sg, i) {
  1760. - unsigned size = prd->size;
  1761. + unsigned int size = prd->size;
  1762. b->data_size += size & PRD_SIZE_MASK;
  1763. - if (size & PRD_FLAG_END) break;
  1764. - if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) { pci_err(ithc->pci, "truncated prd\n"); break; }
  1765. + if (size & PRD_FLAG_END)
  1766. + break;
  1767. + if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) {
  1768. + pci_err(ithc->pci, "truncated prd\n");
  1769. + break;
  1770. + }
  1771. prd++;
  1772. }
  1773. invalidate_kernel_vmap_range(b->addr, b->data_size);
  1774. @@ -110,93 +167,139 @@ static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffe
  1775. return 0;
  1776. }
  1777. -int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname) {
  1778. +int ithc_dma_rx_init(struct ithc *ithc, u8 channel)
  1779. +{
  1780. struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
  1781. mutex_init(&rx->mutex);
  1782. +
  1783. + // Allocate buffers.
  1784. u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes);
  1785. - unsigned num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
  1786. - pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n", NUM_RX_BUF, buf_size, num_pages);
  1787. + unsigned int num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
  1788. + pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n",
  1789. + NUM_RX_BUF, buf_size, num_pages);
  1790. CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE);
  1791. - for (unsigned i = 0; i < NUM_RX_BUF; i++)
  1792. + for (unsigned int i = 0; i < NUM_RX_BUF; i++)
  1793. CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]);
  1794. +
  1795. + // Init registers.
  1796. writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2);
  1797. lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr);
  1798. writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs);
  1799. writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds);
  1800. u8 head = readb(&ithc->regs->dma_rx[channel].head);
  1801. - if (head) { pci_err(ithc->pci, "head is nonzero (%u)\n", head); return -EIO; }
  1802. - for (unsigned i = 0; i < NUM_RX_BUF; i++)
  1803. + if (head) {
  1804. + pci_err(ithc->pci, "head is nonzero (%u)\n", head);
  1805. + return -EIO;
  1806. + }
  1807. +
  1808. + // Init buffers.
  1809. + for (unsigned int i = 0; i < NUM_RX_BUF; i++)
  1810. CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i);
  1811. +
  1812. writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail);
  1813. return 0;
  1814. }
  1815. -void ithc_dma_rx_enable(struct ithc *ithc, u8 channel) {
  1816. - bitsb_set(&ithc->regs->dma_rx[channel].control, DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA);
  1817. - CHECK(waitl, ithc, &ithc->regs->dma_rx[1].status, DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);
  1818. +
  1819. +void ithc_dma_rx_enable(struct ithc *ithc, u8 channel)
  1820. +{
  1821. + bitsb_set(&ithc->regs->dma_rx[channel].control,
  1822. + DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA);
  1823. + CHECK(waitl, ithc, &ithc->regs->dma_rx[channel].status,
  1824. + DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);
  1825. }
  1826. -int ithc_dma_tx_init(struct ithc *ithc) {
  1827. +int ithc_dma_tx_init(struct ithc *ithc)
  1828. +{
  1829. struct ithc_dma_tx *tx = &ithc->dma_tx;
  1830. mutex_init(&tx->mutex);
  1831. +
  1832. + // Allocate buffers.
  1833. tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes);
  1834. - unsigned num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
  1835. - pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n", tx->max_size, num_pages);
  1836. + unsigned int num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
  1837. + pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n",
  1838. + tx->max_size, num_pages);
  1839. CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE);
  1840. CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf);
  1841. +
  1842. + // Init registers.
  1843. lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr);
  1844. writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds);
  1845. +
  1846. + // Init buffers.
  1847. CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
  1848. return 0;
  1849. }
  1850. -static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data, u8 channel, u8 buf) {
  1851. +static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data,
  1852. + u8 channel, u8 buf)
  1853. +{
  1854. if (buf >= NUM_RX_BUF) {
  1855. pci_err(ithc->pci, "invalid dma ringbuffer index\n");
  1856. return -EINVAL;
  1857. }
  1858. - ithc_set_active(ithc);
  1859. u32 len = data->data_size;
  1860. struct ithc_dma_rx_header *hdr = data->addr;
  1861. u8 *hiddata = (void *)(hdr + 1);
  1862. - if (len >= sizeof *hdr && hdr->code == DMA_RX_CODE_RESET) {
  1863. + if (len >= sizeof(*hdr) && hdr->code == DMA_RX_CODE_RESET) {
  1864. + // The THC sends a reset request when we need to reinitialize the device.
  1865. + // This usually only happens if we send an invalid command or put the device
  1866. + // in a bad state.
  1867. CHECK(ithc_reset, ithc);
  1868. - } else if (len < sizeof *hdr || len != sizeof *hdr + hdr->data_size) {
  1869. + } else if (len < sizeof(*hdr) || len != sizeof(*hdr) + hdr->data_size) {
  1870. if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
  1871. - // When the CPU enters a low power state during DMA, we can get truncated messages.
  1872. - // Typically this will be a single touch HID report that is only 1 byte, or a multitouch report that is 257 bytes.
  1873. + // When the CPU enters a low power state during DMA, we can get truncated
  1874. + // messages. For Surface devices, this will typically be a single touch
  1875. + // report that is only 1 byte, or a multitouch report that is 257 bytes.
  1876. // See also ithc_set_active().
  1877. } else {
  1878. - pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n", channel, buf, len, hdr->code, hdr->data_size);
  1879. - print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
  1880. + pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n",
  1881. + channel, buf, len, hdr->code, hdr->data_size);
  1882. + print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
  1883. + hdr, min(len, 0x400u), 0);
  1884. }
  1885. } else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) {
  1886. + // Response to a 'get report descriptor' request.
  1887. + // The actual descriptor is preceded by 8 nul bytes.
  1888. CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8);
  1889. WRITE_ONCE(ithc->hid_parse_done, true);
  1890. wake_up(&ithc->wait_hid_parse);
  1891. } else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
  1892. + // Standard HID input report containing touch data.
  1893. CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1);
  1894. } else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) {
  1895. + // Response to a 'get feature' request.
  1896. bool done = false;
  1897. mutex_lock(&ithc->hid_get_feature_mutex);
  1898. if (ithc->hid_get_feature_buf) {
  1899. - if (hdr->data_size < ithc->hid_get_feature_size) ithc->hid_get_feature_size = hdr->data_size;
  1900. + if (hdr->data_size < ithc->hid_get_feature_size)
  1901. + ithc->hid_get_feature_size = hdr->data_size;
  1902. memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size);
  1903. ithc->hid_get_feature_buf = NULL;
  1904. done = true;
  1905. }
  1906. mutex_unlock(&ithc->hid_get_feature_mutex);
  1907. - if (done) wake_up(&ithc->wait_hid_get_feature);
  1908. - else CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT, hiddata, hdr->data_size, 1);
  1909. + if (done) {
  1910. + wake_up(&ithc->wait_hid_get_feature);
  1911. + } else {
  1912. + // Received data without a matching request, or the request already
  1913. + // timed out. (XXX What's the correct thing to do here?)
  1914. + CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT,
  1915. + hiddata, hdr->data_size, 1);
  1916. + }
  1917. } else {
  1918. - pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n", channel, buf, len, hdr->code);
  1919. - print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0);
  1920. + pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n",
  1921. + channel, buf, len, hdr->code);
  1922. + print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
  1923. + hdr, min(len, 0x400u), 0);
  1924. }
  1925. return 0;
  1926. }
  1927. -static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) {
  1928. +static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel)
  1929. +{
  1930. + // Process all filled RX buffers from the ringbuffer.
  1931. struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
  1932. - unsigned n = rx->num_received;
  1933. + unsigned int n = rx->num_received;
  1934. u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head);
  1935. while (1) {
  1936. u8 tail = n % NUM_RX_BUF;
  1937. @@ -204,7 +307,8 @@ static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) {
  1938. writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail);
  1939. // ringbuffer is full if tail_wrap == head_wrap
  1940. // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG
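+ // e.g. with head == 0x03: tail == 0x03 means the ring is full, while
+ // tail == 0x83 (same index, opposite wrap flag) means it is empty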
  1941. - if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG)) return 0;
  1942. + if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG))
  1943. + return 0;
  1944. // take the buffer that the device just filled
  1945. struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF];
  1946. @@ -218,7 +322,8 @@ static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) {
  1947. CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail);
  1948. }
  1949. }
  1950. -int ithc_dma_rx(struct ithc *ithc, u8 channel) {
  1951. +int ithc_dma_rx(struct ithc *ithc, u8 channel)
  1952. +{
  1953. struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
  1954. mutex_lock(&rx->mutex);
  1955. int ret = ithc_dma_rx_unlocked(ithc, channel);
  1956. @@ -226,14 +331,21 @@ int ithc_dma_rx(struct ithc *ithc, u8 channel) {
  1957. return ret;
  1958. }
  1959. -static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
  1960. +static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data)
  1961. +{
  1962. + ithc_set_active(ithc, 100 * USEC_PER_MSEC);
  1963. +
  1964. + // Send a single TX buffer to the THC.
  1965. pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize);
  1966. struct ithc_dma_tx_header *hdr;
  1967. + // Data must be padded to next 4-byte boundary.
  1968. u8 padding = datasize & 3 ? 4 - (datasize & 3) : 0;
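+ // (e.g. datasize == 5 gives padding == 3, so 8 data bytes go on the wire)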
  1969. - unsigned fullsize = sizeof *hdr + datasize + padding;
  1970. - if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE) return -EINVAL;
  1971. + unsigned int fullsize = sizeof(*hdr) + datasize + padding;
  1972. + if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE)
  1973. + return -EINVAL;
  1974. CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
  1975. + // Fill the TX buffer with header and data.
  1976. ithc->dma_tx.buf.data_size = fullsize;
  1977. hdr = ithc->dma_tx.buf.addr;
  1978. hdr->code = cmdcode;
  1979. @@ -241,15 +353,18 @@ static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, vo
  1980. u8 *dest = (void *)(hdr + 1);
  1981. memcpy(dest, data, datasize);
  1982. dest += datasize;
  1983. - for (u8 p = 0; p < padding; p++) *dest++ = 0;
  1984. + for (u8 p = 0; p < padding; p++)
  1985. + *dest++ = 0;
  1986. CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
  1987. + // Let the THC process the buffer.
  1988. bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND);
  1989. CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
  1990. writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status);
  1991. return 0;
  1992. }
  1993. -int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) {
  1994. +int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data)
  1995. +{
  1996. mutex_lock(&ithc->dma_tx.mutex);
  1997. int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data);
  1998. mutex_unlock(&ithc->dma_tx.mutex);
  1999. diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h
  2000. index d9f2c19a13f3a..93652e4476bf8 100644
  2001. --- a/drivers/hid/ithc/ithc-dma.h
  2002. +++ b/drivers/hid/ithc/ithc-dma.h
  2003. @@ -1,3 +1,5 @@
  2004. +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
  2005. +
  2006. #define PRD_SIZE_MASK 0xffffff
  2007. #define PRD_FLAG_END 0x1000000
  2008. #define PRD_FLAG_SUCCESS 0x2000000
  2009. @@ -59,7 +61,7 @@ struct ithc_dma_rx {
  2010. struct ithc_dma_data_buffer bufs[NUM_RX_BUF];
  2011. };
  2012. -int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname);
  2013. +int ithc_dma_rx_init(struct ithc *ithc, u8 channel);
  2014. void ithc_dma_rx_enable(struct ithc *ithc, u8 channel);
  2015. int ithc_dma_tx_init(struct ithc *ithc);
  2016. int ithc_dma_rx(struct ithc *ithc, u8 channel);
  2017. diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c
  2018. index 09512b9cb4d31..87ed4aa70fda0 100644
  2019. --- a/drivers/hid/ithc/ithc-main.c
  2020. +++ b/drivers/hid/ithc/ithc-main.c
  2021. @@ -1,3 +1,5 @@
  2022. +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
  2023. +
  2024. #include "ithc.h"
  2025. MODULE_DESCRIPTION("Intel Touch Host Controller driver");
  2026. @@ -42,6 +44,9 @@ static const struct pci_device_id ithc_pci_tbl[] = {
  2027. { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) },
  2028. { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) },
  2029. { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) },
  2030. + // XXX So far the THC seems to be the only Intel PCI device with PCI_CLASS_INPUT_PEN,
  2031. + // so instead of the device list we could just do:
  2032. + // { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = PCI_CLASS_INPUT_PEN, .class_mask = ~0, },
  2033. {}
  2034. };
  2035. MODULE_DEVICE_TABLE(pci, ithc_pci_tbl);
  2036. @@ -52,6 +57,7 @@ static bool ithc_use_polling = false;
  2037. module_param_named(poll, ithc_use_polling, bool, 0);
  2038. MODULE_PARM_DESC(poll, "Use polling instead of interrupts");
  2039. +// Since all known devices seem to use only channel 1, by default we disable channel 0.
  2040. static bool ithc_use_rx0 = false;
  2041. module_param_named(rx0, ithc_use_rx0, bool, 0);
  2042. MODULE_PARM_DESC(rx0, "Use DMA RX channel 0");
  2043. @@ -60,37 +66,56 @@ static bool ithc_use_rx1 = true;
  2044. module_param_named(rx1, ithc_use_rx1, bool, 0);
  2045. MODULE_PARM_DESC(rx1, "Use DMA RX channel 1");
  2046. +// Values below 250 seem to work well on the SP7+. If this is set too high, you may observe cursor stuttering.
  2047. +static int ithc_dma_latency_us = 200;
  2048. +module_param_named(dma_latency_us, ithc_dma_latency_us, int, 0);
  2049. +MODULE_PARM_DESC(dma_latency_us, "Determines the CPU latency QoS value for DMA transfers (in microseconds), -1 to disable latency QoS");
  2050. +
  2051. +// Values above 1700 seem to work well on the SP7+. If this is set too low, you may observe cursor stuttering.
  2052. +static unsigned int ithc_dma_early_us = 2000;
  2053. +module_param_named(dma_early_us, ithc_dma_early_us, uint, 0);
  2054. +MODULE_PARM_DESC(dma_early_us, "Determines how early the CPU latency QoS value is applied before the next expected IRQ (in microseconds)");
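+// Example (illustrative values, not a recommendation):
+//   modprobe ithc dma_latency_us=150 dma_early_us=1800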
  2055. +
  2056. static bool ithc_log_regs_enabled = false;
  2057. module_param_named(logregs, ithc_log_regs_enabled, bool, 0);
  2058. MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)");
  2059. // Sysfs attributes
  2060. -static bool ithc_is_config_valid(struct ithc *ithc) {
  2061. +static bool ithc_is_config_valid(struct ithc *ithc)
  2062. +{
  2063. return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC;
  2064. }
  2065. -static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) {
  2066. +static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
  2067. +{
  2068. struct ithc *ithc = dev_get_drvdata(dev);
  2069. - if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
  2070. + if (!ithc || !ithc_is_config_valid(ithc))
  2071. + return -ENODEV;
  2072. return sprintf(buf, "0x%04x", ithc->config.vendor_id);
  2073. }
  2074. static DEVICE_ATTR_RO(vendor);
  2075. -static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf) {
  2076. +static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf)
  2077. +{
  2078. struct ithc *ithc = dev_get_drvdata(dev);
  2079. - if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
  2080. + if (!ithc || !ithc_is_config_valid(ithc))
  2081. + return -ENODEV;
  2082. return sprintf(buf, "0x%04x", ithc->config.product_id);
  2083. }
  2084. static DEVICE_ATTR_RO(product);
  2085. -static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) {
  2086. +static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf)
  2087. +{
  2088. struct ithc *ithc = dev_get_drvdata(dev);
  2089. - if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
  2090. + if (!ithc || !ithc_is_config_valid(ithc))
  2091. + return -ENODEV;
  2092. return sprintf(buf, "%u", ithc->config.revision);
  2093. }
  2094. static DEVICE_ATTR_RO(revision);
  2095. -static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) {
  2096. +static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf)
  2097. +{
  2098. struct ithc *ithc = dev_get_drvdata(dev);
  2099. - if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV;
  2100. + if (!ithc || !ithc_is_config_valid(ithc))
  2101. + return -ENODEV;
  2102. u32 v = ithc->config.fw_version;
  2103. return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff);
  2104. }
  2105. @@ -117,45 +142,75 @@ static void ithc_hid_stop(struct hid_device *hdev) { }
  2106. static int ithc_hid_open(struct hid_device *hdev) { return 0; }
  2107. static void ithc_hid_close(struct hid_device *hdev) { }
  2108. -static int ithc_hid_parse(struct hid_device *hdev) {
  2109. +static int ithc_hid_parse(struct hid_device *hdev)
  2110. +{
  2111. struct ithc *ithc = hdev->driver_data;
  2112. u64 val = 0;
  2113. WRITE_ONCE(ithc->hid_parse_done, false);
  2114. - CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof val, &val);
- if (!wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done), msecs_to_jiffies(1000))) return -ETIMEDOUT;
- return 0;
+ for (int retries = 0; ; retries++) {
+ CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof(val), &val);
+ if (wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done),
+ msecs_to_jiffies(200)))
+ return 0;
+ if (retries > 5) {
+ pci_err(ithc->pci, "failed to read report descriptor\n");
+ return -ETIMEDOUT;
+ }
+ pci_warn(ithc->pci, "failed to read report descriptor, retrying\n");
+ }
}
-static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) {
+static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf,
+ size_t len, unsigned char rtype, int reqtype)
+{
struct ithc *ithc = hdev->driver_data;
- if (!buf || !len) return -EINVAL;
+ if (!buf || !len)
+ return -EINVAL;
u32 code;
- if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_OUTPUT_REPORT;
- else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_SET_FEATURE;
- else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) code = DMA_TX_CODE_GET_FEATURE;
- else {
- pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n", rtype, reqtype, reportnum);
+ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) {
+ code = DMA_TX_CODE_OUTPUT_REPORT;
+ } else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) {
+ code = DMA_TX_CODE_SET_FEATURE;
+ } else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) {
+ code = DMA_TX_CODE_GET_FEATURE;
+ } else {
+ pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n",
+ rtype, reqtype, reportnum);
return -EINVAL;
}
buf[0] = reportnum;
+
if (reqtype == HID_REQ_GET_REPORT) {
+ // Prepare for response.
mutex_lock(&ithc->hid_get_feature_mutex);
ithc->hid_get_feature_buf = buf;
ithc->hid_get_feature_size = len;
mutex_unlock(&ithc->hid_get_feature_mutex);
+
+ // Transmit 'get feature' request.
int r = CHECK(ithc_dma_tx, ithc, code, 1, buf);
if (!r) {
- r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature, !ithc->hid_get_feature_buf, msecs_to_jiffies(1000));
- if (!r) r = -ETIMEDOUT;
- else if (r < 0) r = -EINTR;
- else r = 0;
+ r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature,
+ !ithc->hid_get_feature_buf, msecs_to_jiffies(1000));
+ if (!r)
+ r = -ETIMEDOUT;
+ else if (r < 0)
+ r = -EINTR;
+ else
+ r = 0;
}
+
+ // If everything went OK, the buffer has been filled with the response data.
+ // Return the response size.
mutex_lock(&ithc->hid_get_feature_mutex);
ithc->hid_get_feature_buf = NULL;
- if (!r) r = ithc->hid_get_feature_size;
+ if (!r)
+ r = ithc->hid_get_feature_size;
mutex_unlock(&ithc->hid_get_feature_mutex);
return r;
}
+
+ // 'Set feature' or 'output report'; these don't have a response.
CHECK_RET(ithc_dma_tx, ithc, code, len, buf);
return 0;
}
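
The 'get feature' wait above is completed from the DMA RX path, which fills the buffer and clears hid_get_feature_buf under the same mutex. A minimal sketch of that completing side (the real handler lives in the DMA code, not shown here; the helper name and its data/size parameters are assumptions):

static void ithc_hid_get_feature_done(struct ithc *ithc, const void *data, u32 size)
{
        mutex_lock(&ithc->hid_get_feature_mutex);
        if (ithc->hid_get_feature_buf) {
                // Clamp to the caller's buffer size.
                if (size > ithc->hid_get_feature_size)
                        size = ithc->hid_get_feature_size;
                memcpy(ithc->hid_get_feature_buf, data, size);
                ithc->hid_get_feature_size = size;
                // Clearing the pointer is what satisfies the wait condition
                // in ithc_hid_raw_request().
                ithc->hid_get_feature_buf = NULL;
                wake_up(&ithc->wait_hid_get_feature);
        }
        mutex_unlock(&ithc->hid_get_feature_mutex);
}
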
@@ -169,17 +224,22 @@ static struct hid_ll_driver ithc_ll_driver = {
.raw_request = ithc_hid_raw_request,
};
-static void ithc_hid_devres_release(struct device *dev, void *res) {
+static void ithc_hid_devres_release(struct device *dev, void *res)
+{
struct hid_device **hidm = res;
- if (*hidm) hid_destroy_device(*hidm);
+ if (*hidm)
+ hid_destroy_device(*hidm);
}
-static int ithc_hid_init(struct ithc *ithc) {
- struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof *hidm, GFP_KERNEL);
- if (!hidm) return -ENOMEM;
+static int ithc_hid_init(struct ithc *ithc)
+{
+ struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof(*hidm), GFP_KERNEL);
+ if (!hidm)
+ return -ENOMEM;
devres_add(&ithc->pci->dev, hidm);
struct hid_device *hid = hid_allocate_device();
- if (IS_ERR(hid)) return PTR_ERR(hid);
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
*hidm = hid;
strscpy(hid->name, DEVFULLNAME, sizeof(hid->name));
@@ -198,27 +258,45 @@ static int ithc_hid_init(struct ithc *ithc) {
// Interrupts/polling
-static void ithc_activity_timer_callback(struct timer_list *t) {
- struct ithc *ithc = container_of(t, struct ithc, activity_timer);
- cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
+static enum hrtimer_restart ithc_activity_start_timer_callback(struct hrtimer *t)
+{
+ struct ithc *ithc = container_of(t, struct ithc, activity_start_timer);
+ ithc_set_active(ithc, ithc_dma_early_us * 2 + USEC_PER_MSEC);
+ return HRTIMER_NORESTART;
}
-void ithc_set_active(struct ithc *ithc) {
- // When CPU usage is very low, the CPU can enter various low power states (C2-C10).
- // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_UNKNOWN_12 will be set when this happens.
- // The amount of truncated messages can become very high, resulting in user-visible effects (laggy/stuttering cursor).
- // To avoid this, we use a CPU latency QoS request to prevent the CPU from entering low power states during touch interactions.
- cpu_latency_qos_update_request(&ithc->activity_qos, 0);
- mod_timer(&ithc->activity_timer, jiffies + msecs_to_jiffies(1000));
-}
-
-static int ithc_set_device_enabled(struct ithc *ithc, bool enable) {
- u32 x = ithc->config.touch_cfg = (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2
- | (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0);
- return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE, offsetof(struct ithc_device_config, touch_cfg), sizeof x, &x);
+static enum hrtimer_restart ithc_activity_end_timer_callback(struct hrtimer *t)
+{
+ struct ithc *ithc = container_of(t, struct ithc, activity_end_timer);
+ cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
+ return HRTIMER_NORESTART;
}
-static void ithc_disable_interrupts(struct ithc *ithc) {
+void ithc_set_active(struct ithc *ithc, unsigned int duration_us)
+{
+ if (ithc_dma_latency_us < 0)
+ return;
+ // When CPU usage is very low, the CPU can enter various low power states (C2-C10).
+ // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_RX_TIMEOUT will be
+ // set when this happens. The number of truncated messages can become very high, resulting
+ // in user-visible effects (laggy/stuttering cursor). To avoid this, we use a CPU latency
+ // QoS request to prevent the CPU from entering low power states during touch interactions.
+ cpu_latency_qos_update_request(&ithc->activity_qos, ithc_dma_latency_us);
+ hrtimer_start_range_ns(&ithc->activity_end_timer,
+ ns_to_ktime(duration_us * NSEC_PER_USEC), duration_us * NSEC_PER_USEC, HRTIMER_MODE_REL);
+}
+
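
ithc_dma_latency_us and ithc_dma_early_us used above appear to be module parameters (the error message further down refers to them as dma_latency_us and dma_early_us). A sketch of how they are presumably declared; the defaults shown here are assumptions, not values from this patch:

static int ithc_dma_latency_us = 100;
module_param_named(dma_latency_us, ithc_dma_latency_us, int, 0444);
MODULE_PARM_DESC(dma_latency_us,
        "CPU latency QoS value requested during DMA bursts, in microseconds (negative = QoS disabled)");

static unsigned int ithc_dma_early_us = 1000;
module_param_named(dma_early_us, ithc_dma_early_us, uint, 0444);
MODULE_PARM_DESC(dma_early_us,
        "How long before the predicted next DMA transfer to re-raise the QoS request, in microseconds");
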
+static int ithc_set_device_enabled(struct ithc *ithc, bool enable)
+{
+ u32 x = ithc->config.touch_cfg =
+ (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2 |
+ (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0);
+ return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE,
+ offsetof(struct ithc_device_config, touch_cfg), sizeof(x), &x);
+}
+
+static void ithc_disable_interrupts(struct ithc *ithc)
+{
writel(0, &ithc->regs->error_control);
bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0);
bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
@@ -226,43 +304,85 @@ static void ithc_disable_interrupts(struct ithc *ithc) {
bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0);
}
-static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned channel) {
- writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA, &ithc->regs->dma_rx[channel].status);
+static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned int channel)
+{
+ writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA,
+ &ithc->regs->dma_rx[channel].status);
}
-static void ithc_clear_interrupts(struct ithc *ithc) {
+static void ithc_clear_interrupts(struct ithc *ithc)
+{
writel(0xffffffff, &ithc->regs->error_flags);
writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status);
writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
ithc_clear_dma_rx_interrupts(ithc, 0);
ithc_clear_dma_rx_interrupts(ithc, 1);
- writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2, &ithc->regs->dma_tx.status);
+ writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2,
+ &ithc->regs->dma_tx.status);
}
-static void ithc_process(struct ithc *ithc) {
+static void ithc_process(struct ithc *ithc)
+{
ithc_log_regs(ithc);
- // read and clear error bits
+ bool rx0 = ithc_use_rx0 && (readl(&ithc->regs->dma_rx[0].status) &
+ (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
+ bool rx1 = ithc_use_rx1 && (readl(&ithc->regs->dma_rx[1].status) &
+ (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
+
+ // Track time between DMA rx transfers, so we can try to predict when we need to
+ // enable CPU latency QoS for the next transfer.
+ ktime_t t = ktime_get();
+ ktime_t dt = ktime_sub(t, ithc->last_rx_time);
+ if (rx0 || rx1) {
+ ithc->last_rx_time = t;
+ if (dt > ms_to_ktime(100)) {
+ ithc->cur_rx_seq_count = 0;
+ ithc->cur_rx_seq_errors = 0;
+ }
+ ithc->cur_rx_seq_count++;
+ if (!ithc_use_polling && ithc_dma_latency_us >= 0) {
+ // Disable QoS, since the DMA transfer has completed (we re-enable it
+ // after a delay below).
+ cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
+ hrtimer_try_to_cancel(&ithc->activity_end_timer);
+ }
+ }
+
+ // Read and clear error bits.
u32 err = readl(&ithc->regs->error_flags);
if (err) {
- if (err & ~ERROR_FLAG_DMA_UNKNOWN_12) pci_err(ithc->pci, "error flags: 0x%08x\n", err);
writel(err, &ithc->regs->error_flags);
+ if (err & ~ERROR_FLAG_DMA_RX_TIMEOUT)
+ pci_err(ithc->pci, "error flags: 0x%08x\n", err);
+ if (err & ERROR_FLAG_DMA_RX_TIMEOUT) {
+ // Only log an error if we see a significant number of these errors.
+ ithc->cur_rx_seq_errors++;
+ if (ithc->cur_rx_seq_errors && ithc->cur_rx_seq_errors % 50 == 0 &&
+ ithc->cur_rx_seq_errors > ithc->cur_rx_seq_count / 10)
+ pci_err(ithc->pci, "High number of DMA RX timeouts/errors (%u/%u, dt=%lldus). Try adjusting dma_early_us and/or dma_latency_us.\n",
+ ithc->cur_rx_seq_errors, ithc->cur_rx_seq_count, ktime_to_us(dt));
+ }
}
- // process DMA rx
+ // Process DMA rx.
if (ithc_use_rx0) {
ithc_clear_dma_rx_interrupts(ithc, 0);
- ithc_dma_rx(ithc, 0);
+ if (rx0)
+ ithc_dma_rx(ithc, 0);
}
if (ithc_use_rx1) {
ithc_clear_dma_rx_interrupts(ithc, 1);
- ithc_dma_rx(ithc, 1);
+ if (rx1)
+ ithc_dma_rx(ithc, 1);
+ }
+
+ // Start timer to re-enable QoS for the next rx, but only if we've seen an
+ // ERROR_FLAG_DMA_RX_TIMEOUT.
+ if ((rx0 || rx1) && !ithc_use_polling && ithc_dma_latency_us >= 0 && ithc->cur_rx_seq_errors > 0) {
+ ktime_t expires = ktime_add(t, ktime_sub_us(dt, ithc_dma_early_us));
+ hrtimer_start_range_ns(&ithc->activity_start_timer, expires,
+ 10 * NSEC_PER_USEC, HRTIMER_MODE_ABS);
}
ithc_log_regs(ithc);
}
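
The prediction above assumes input reports arrive roughly periodically. As a worked example (the numbers are illustrative, not measured):

// Suppose touch frames arrive every dt = 8 ms and dma_early_us = 1000.
// A transfer completes at time t, QoS is dropped, and the start timer is
// armed for:
//   expires = t + (dt - dma_early_us) = t + 8000us - 1000us = t + 7ms
// so its callback calls ithc_set_active() and re-raises the QoS request
// about 1 ms before the next frame is expected, keeping the CPU out of
// deep C-states while that transfer runs.
ktime_t expires = ktime_add(t, ktime_sub_us(dt, ithc_dma_early_us));
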
-static irqreturn_t ithc_interrupt_thread(int irq, void *arg) {
+static irqreturn_t ithc_interrupt_thread(int irq, void *arg)
+{
struct ithc *ithc = arg;
pci_dbg(ithc->pci, "IRQ! err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n",
readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags),
@@ -274,14 +394,21 @@ static irqreturn_t ithc_interrupt_thread(int irq, void *arg) {
return IRQ_HANDLED;
}
-static int ithc_poll_thread(void *arg) {
+static int ithc_poll_thread(void *arg)
+{
struct ithc *ithc = arg;
- unsigned sleep = 100;
+ unsigned int sleep = 100;
while (!kthread_should_stop()) {
u32 n = ithc->dma_rx[1].num_received;
ithc_process(ithc);
- if (n != ithc->dma_rx[1].num_received) sleep = 20;
- else sleep = min(200u, sleep + (sleep >> 4) + 1);
+ // Decrease polling interval to 20ms if we received data; otherwise slowly
+ // increase it up to 200ms.
+ if (n != ithc->dma_rx[1].num_received) {
+ ithc_set_active(ithc, 100 * USEC_PER_MSEC);
+ sleep = 20;
+ } else {
+ sleep = min(200u, sleep + (sleep >> 4) + 1);
+ }
msleep_interruptible(sleep);
}
return 0;
@@ -289,7 +416,8 @@ static int ithc_poll_thread(void *arg) {
// Device initialization and shutdown
-static void ithc_disable(struct ithc *ithc) {
+static void ithc_disable(struct ithc *ithc)
+{
bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE);
CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED);
bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
@@ -301,81 +429,112 @@ static void ithc_disable(struct ithc *ithc) {
ithc_clear_interrupts(ithc);
}
-static int ithc_init_device(struct ithc *ithc) {
+static int ithc_init_device(struct ithc *ithc)
+{
ithc_log_regs(ithc);
bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0;
ithc_disable(ithc);
CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY);
+
+ // Since we don't yet know which SPI config the device wants, use default speed and mode
+ // initially for reading config data.
ithc_set_spi_config(ithc, 10, 0);
- bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000); // seems to help with reading config
- if (was_enabled) if (msleep_interruptible(100)) return -EINTR;
+ // Setting the following bit seems to make reading the config more reliable.
+ bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000);
+
+ // If the device was previously enabled, wait a bit to make sure it's fully shut down.
+ if (was_enabled)
+ if (msleep_interruptible(100))
+ return -EINTR;
+
+ // Take the touch device out of reset.
bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0);
for (int retries = 0; ; retries++) {
ithc_log_regs(ithc);
bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET);
- if (!waitl(ithc, &ithc->regs->state, 0xf, 2)) break;
+ if (!waitl(ithc, &ithc->regs->state, 0xf, 2))
+ break;
if (retries > 5) {
- pci_err(ithc->pci, "too many retries, failed to reset device\n");
+ pci_err(ithc->pci, "failed to reset device, state = 0x%08x\n",
+ readl(&ithc->regs->state));
return -ETIMEDOUT;
}
- pci_err(ithc->pci, "invalid state, retrying reset\n");
+ pci_warn(ithc->pci, "invalid state, retrying reset\n");
bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
- if (msleep_interruptible(1000)) return -EINTR;
+ if (msleep_interruptible(1000))
+ return -EINTR;
}
ithc_log_regs(ithc);
+ // Waiting for the following status bit makes reading config much more reliable;
+ // however, the official driver does not seem to do this...
CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4);
- // read config
+ // Read configuration data.
for (int retries = 0; ; retries++) {
ithc_log_regs(ithc);
- memset(&ithc->config, 0, sizeof ithc->config);
- CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof ithc->config, &ithc->config);
+ memset(&ithc->config, 0, sizeof(ithc->config));
+ CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof(ithc->config), &ithc->config);
u32 *p = (void *)&ithc->config;
pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
- if (ithc_is_config_valid(ithc)) break;
+ if (ithc_is_config_valid(ithc))
+ break;
if (retries > 10) {
- pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n", ithc->config.device_id);
+ pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n",
+ ithc->config.device_id);
return -EIO;
}
- pci_err(ithc->pci, "failed to read config, retrying\n");
- if (msleep_interruptible(100)) return -EINTR;
+ pci_warn(ithc->pci, "failed to read config, retrying\n");
+ if (msleep_interruptible(100))
+ return -EINTR;
}
ithc_log_regs(ithc);
- CHECK_RET(ithc_set_spi_config, ithc, DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config), DEVCFG_SPI_MODE(ithc->config.spi_config));
+ // Apply SPI config and enable touch device.
+ CHECK_RET(ithc_set_spi_config, ithc,
+ DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config),
+ DEVCFG_SPI_MODE(ithc->config.spi_config));
CHECK_RET(ithc_set_device_enabled, ithc, true);
ithc_log_regs(ithc);
return 0;
}
-int ithc_reset(struct ithc *ithc) {
- // FIXME This should probably do devres_release_group()+ithc_start(). But because this is called during DMA
- // processing, that would have to be done asynchronously (schedule_work()?). And with extra locking?
+int ithc_reset(struct ithc *ithc)
+{
+ // FIXME This should probably do devres_release_group()+ithc_start().
+ // But because this is called during DMA processing, that would have to be done
+ // asynchronously (schedule_work()?). And with extra locking?
pci_err(ithc->pci, "reset\n");
CHECK(ithc_init_device, ithc);
- if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0);
- if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1);
+ if (ithc_use_rx0)
+ ithc_dma_rx_enable(ithc, 0);
+ if (ithc_use_rx1)
+ ithc_dma_rx_enable(ithc, 1);
ithc_log_regs(ithc);
pci_dbg(ithc->pci, "reset completed\n");
return 0;
}
-static void ithc_stop(void *res) {
+static void ithc_stop(void *res)
+{
struct ithc *ithc = res;
pci_dbg(ithc->pci, "stopping\n");
ithc_log_regs(ithc);
- if (ithc->poll_thread) CHECK(kthread_stop, ithc->poll_thread);
- if (ithc->irq >= 0) disable_irq(ithc->irq);
+
+ if (ithc->poll_thread)
+ CHECK(kthread_stop, ithc->poll_thread);
+ if (ithc->irq >= 0)
+ disable_irq(ithc->irq);
CHECK(ithc_set_device_enabled, ithc, false);
ithc_disable(ithc);
- del_timer_sync(&ithc->activity_timer);
+ hrtimer_cancel(&ithc->activity_start_timer);
+ hrtimer_cancel(&ithc->activity_end_timer);
cpu_latency_qos_remove_request(&ithc->activity_qos);
- // clear dma config
- for(unsigned i = 0; i < 2; i++) {
+
+ // Clear DMA config.
+ for (unsigned int i = 0; i < 2; i++) {
CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0);
lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr);
writeb(0, &ithc->regs->dma_rx[i].num_bufs);
@@ -383,35 +542,43 @@ static void ithc_stop(void *res) {
}
lo_hi_writeq(0, &ithc->regs->dma_tx.addr);
writeb(0, &ithc->regs->dma_tx.num_prds);
+
ithc_log_regs(ithc);
pci_dbg(ithc->pci, "stopped\n");
}
-static void ithc_clear_drvdata(void *res) {
+static void ithc_clear_drvdata(void *res)
+{
struct pci_dev *pci = res;
pci_set_drvdata(pci, NULL);
}
-static int ithc_start(struct pci_dev *pci) {
+static int ithc_start(struct pci_dev *pci)
+{
pci_dbg(pci, "starting\n");
if (pci_get_drvdata(pci)) {
pci_err(pci, "device already initialized\n");
return -EINVAL;
}
- if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL)) return -ENOMEM;
+ if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL))
+ return -ENOMEM;
- struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof *ithc, GFP_KERNEL);
- if (!ithc) return -ENOMEM;
+ // Allocate/init main driver struct.
+ struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof(*ithc), GFP_KERNEL);
+ if (!ithc)
+ return -ENOMEM;
ithc->irq = -1;
ithc->pci = pci;
- snprintf(ithc->phys, sizeof ithc->phys, "pci-%s/" DEVNAME, pci_name(pci));
+ snprintf(ithc->phys, sizeof(ithc->phys), "pci-%s/" DEVNAME, pci_name(pci));
init_waitqueue_head(&ithc->wait_hid_parse);
init_waitqueue_head(&ithc->wait_hid_get_feature);
mutex_init(&ithc->hid_get_feature_mutex);
pci_set_drvdata(pci, ithc);
CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci);
- if (ithc_log_regs_enabled) ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof *ithc->prev_regs, GFP_KERNEL);
+ if (ithc_log_regs_enabled)
+ ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof(*ithc->prev_regs), GFP_KERNEL);
+ // PCI initialization.
CHECK_RET(pcim_enable_device, pci);
pci_set_master(pci);
CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs");
@@ -419,29 +586,39 @@ static int ithc_start(struct pci_dev *pci) {
CHECK_RET(pci_set_power_state, pci, PCI_D0);
ithc->regs = pcim_iomap_table(pci)[0];
+ // Allocate IRQ.
if (!ithc_use_polling) {
CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
ithc->irq = CHECK(pci_irq_vector, pci, 0);
- if (ithc->irq < 0) return ithc->irq;
+ if (ithc->irq < 0)
+ return ithc->irq;
}
+ // Initialize THC and touch device.
CHECK_RET(ithc_init_device, ithc);
CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups);
- if (ithc_use_rx0) CHECK_RET(ithc_dma_rx_init, ithc, 0, ithc_use_rx1 ? DEVNAME "0" : DEVNAME);
- if (ithc_use_rx1) CHECK_RET(ithc_dma_rx_init, ithc, 1, ithc_use_rx0 ? DEVNAME "1" : DEVNAME);
+ if (ithc_use_rx0)
+ CHECK_RET(ithc_dma_rx_init, ithc, 0);
+ if (ithc_use_rx1)
+ CHECK_RET(ithc_dma_rx_init, ithc, 1);
CHECK_RET(ithc_dma_tx_init, ithc);
- CHECK_RET(ithc_hid_init, ithc);
-
cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
- timer_setup(&ithc->activity_timer, ithc_activity_timer_callback, 0);
+ hrtimer_init(&ithc->activity_start_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ithc->activity_start_timer.function = ithc_activity_start_timer_callback;
+ hrtimer_init(&ithc->activity_end_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ithc->activity_end_timer.function = ithc_activity_end_timer_callback;
- // add ithc_stop callback AFTER setting up DMA buffers, so that polling/irqs/DMA are disabled BEFORE the buffers are freed
+ // Add ithc_stop() callback AFTER setting up DMA buffers, so that polling/irqs/DMA are
+ // disabled BEFORE the buffers are freed.
CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc);
+ CHECK_RET(ithc_hid_init, ithc);
+
+ // Start polling/IRQ.
if (ithc_use_polling) {
pci_info(pci, "using polling instead of irq\n");
- // use a thread instead of simple timer because we want to be able to sleep
+ // Use a thread instead of a simple timer because we want to be able to sleep.
ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll");
if (IS_ERR(ithc->poll_thread)) {
int err = PTR_ERR(ithc->poll_thread);
@@ -449,13 +626,17 @@ static int ithc_start(struct pci_dev *pci) {
return err;
}
} else {
- CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL, ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc);
+ CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL,
+ ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc);
}
- if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0);
- if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1);
+ if (ithc_use_rx0)
+ ithc_dma_rx_enable(ithc, 0);
+ if (ithc_use_rx1)
+ ithc_dma_rx_enable(ithc, 1);
- // hid_add_device can only be called after irq/polling is started and DMA is enabled, because it calls ithc_hid_parse which reads the report descriptor via DMA
+ // hid_add_device() can only be called after irq/polling is started and DMA is enabled,
+ // because it calls ithc_hid_parse() which reads the report descriptor via DMA.
CHECK_RET(hid_add_device, ithc->hid);
CHECK(ithc_debug_init, ithc);
@@ -464,43 +645,54 @@ static int ithc_start(struct pci_dev *pci) {
return 0;
}
-static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id) {
+static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
pci_dbg(pci, "device probe\n");
return ithc_start(pci);
}
-static void ithc_remove(struct pci_dev *pci) {
+static void ithc_remove(struct pci_dev *pci)
+{
pci_dbg(pci, "device remove\n");
// all cleanup is handled by devres
}
-static int ithc_suspend(struct device *dev) {
+// For suspend/resume, we just deinitialize and reinitialize everything.
+// TODO It might be cleaner to keep the HID device around; however, we would then have to signal
+// to userspace that the touch device has lost state and userspace needs to e.g. resend 'set
+// feature' requests. Hidraw does not seem to have a facility to do that.
+static int ithc_suspend(struct device *dev)
+{
struct pci_dev *pci = to_pci_dev(dev);
pci_dbg(pci, "pm suspend\n");
devres_release_group(dev, ithc_start);
return 0;
}
-static int ithc_resume(struct device *dev) {
+static int ithc_resume(struct device *dev)
+{
struct pci_dev *pci = to_pci_dev(dev);
pci_dbg(pci, "pm resume\n");
return ithc_start(pci);
}
-static int ithc_freeze(struct device *dev) {
+static int ithc_freeze(struct device *dev)
+{
struct pci_dev *pci = to_pci_dev(dev);
pci_dbg(pci, "pm freeze\n");
devres_release_group(dev, ithc_start);
return 0;
}
-static int ithc_thaw(struct device *dev) {
+static int ithc_thaw(struct device *dev)
+{
struct pci_dev *pci = to_pci_dev(dev);
pci_dbg(pci, "pm thaw\n");
return ithc_start(pci);
}
-static int ithc_restore(struct device *dev) {
+static int ithc_restore(struct device *dev)
+{
struct pci_dev *pci = to_pci_dev(dev);
pci_dbg(pci, "pm restore\n");
return ithc_start(pci);
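
These callbacks are presumably wired up through a dev_pm_ops table referenced from ithc_driver below; a sketch of what that table would look like (the table itself is outside this hunk, so its name is an assumption):

static const struct dev_pm_ops ithc_pm_ops = {
        .suspend = ithc_suspend,
        .resume = ithc_resume,
        .freeze = ithc_freeze,
        .thaw = ithc_thaw,
        .restore = ithc_restore,
};
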
@@ -521,11 +713,13 @@ static struct pci_driver ithc_driver = {
//.dev_groups = ithc_attribute_groups, // could use this (since 5.14), however the attributes won't have valid values until config has been read anyway
};
-static int __init ithc_init(void) {
+static int __init ithc_init(void)
+{
return pci_register_driver(&ithc_driver);
}
-static void __exit ithc_exit(void) {
+static void __exit ithc_exit(void)
+{
pci_unregister_driver(&ithc_driver);
}
diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c
index 85d567b05761f..e058721886e37 100644
--- a/drivers/hid/ithc/ithc-regs.c
+++ b/drivers/hid/ithc/ithc-regs.c
@@ -1,63 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+
#include "ithc.h"
#define reg_num(r) (0x1fff & (u16)(__force u64)(r))
-void bitsl(__iomem u32 *reg, u32 mask, u32 val) {
- if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask);
+void bitsl(__iomem u32 *reg, u32 mask, u32 val)
+{
+ if (val & ~mask)
+ pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n",
+ reg_num(reg), val, mask);
writel((readl(reg) & ~mask) | (val & mask), reg);
}
-void bitsb(__iomem u8 *reg, u8 mask, u8 val) {
- if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask);
+void bitsb(__iomem u8 *reg, u8 mask, u8 val)
+{
+ if (val & ~mask)
+ pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n",
+ reg_num(reg), val, mask);
writeb((readb(reg) & ~mask) | (val & mask), reg);
}
-int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val) {
- pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val);
+int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val)
+{
+ pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n",
+ reg_num(reg), mask, val);
u32 x;
if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
- pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val);
+ pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n",
+ reg_num(reg), mask, val);
return -ETIMEDOUT;
}
pci_dbg(ithc->pci, "done waiting\n");
return 0;
}
-int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val) {
- pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val);
+int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val)
+{
+ pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
+ reg_num(reg), mask, val);
u8 x;
if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
- pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val);
+ pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
+ reg_num(reg), mask, val);
return -ETIMEDOUT;
}
pci_dbg(ithc->pci, "done waiting\n");
return 0;
}
-int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode) {
+int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode)
+{
pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode);
- if (mode == 3) mode = 2;
+ if (mode == 3)
+ mode = 2;
bitsl(&ithc->regs->spi_config,
SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff),
SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed));
return 0;
}
-int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data) {
+int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data)
+{
pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset);
- if (size > sizeof ithc->regs->spi_cmd.data) return -EINVAL;
+ if (size > sizeof(ithc->regs->spi_cmd.data))
+ return -EINVAL;
+
+ // Wait if the device is still busy.
CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
+ // Clear result flags.
writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
+
+ // Init SPI command data.
writeb(command, &ithc->regs->spi_cmd.code);
writew(size, &ithc->regs->spi_cmd.size);
writel(offset, &ithc->regs->spi_cmd.offset);
u32 *p = data, n = (size + 3) / 4;
- for (u32 i = 0; i < n; i++) writel(p[i], &ithc->regs->spi_cmd.data[i]);
+ for (u32 i = 0; i < n; i++)
+ writel(p[i], &ithc->regs->spi_cmd.data[i]);
+
+ // Start transmission.
bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND);
CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
- if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE) return -EIO;
- if (readw(&ithc->regs->spi_cmd.size) != size) return -EMSGSIZE;
- for (u32 i = 0; i < n; i++) p[i] = readl(&ithc->regs->spi_cmd.data[i]);
+
+ // Read response.
+ if ((readl(&ithc->regs->spi_cmd.status) &
+ (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE)
+ return -EIO;
+ if (readw(&ithc->regs->spi_cmd.size) != size)
+ return -EMSGSIZE;
+ for (u32 i = 0; i < n; i++)
+ p[i] = readl(&ithc->regs->spi_cmd.data[i]);
+
writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
return 0;
}
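
For callers, reading or writing a config field through ithc_spi_command() is just a matter of passing the right offset and size, as ithc_set_device_enabled() does earlier in this patch. For example, re-reading only the touch configuration dword might look like this (a sketch mirroring existing usage, not code from this patch):

u32 touch_cfg;
CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ,
        offsetof(struct ithc_device_config, touch_cfg), sizeof(touch_cfg), &touch_cfg);
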
diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h
index 1a96092ed7eed..d4007d9e2bacc 100644
--- a/drivers/hid/ithc/ithc-regs.h
+++ b/drivers/hid/ithc/ithc-regs.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
#define CONTROL_QUIESCE BIT(1)
#define CONTROL_IS_QUIESCED BIT(2)
#define CONTROL_NRESET BIT(3)
@@ -24,7 +26,7 @@
#define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9)
#define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10)
-#define ERROR_FLAG_DMA_UNKNOWN_12 BIT(12) // set when we receive a truncated DMA message
+#define ERROR_FLAG_DMA_RX_TIMEOUT BIT(12) // set when we receive a truncated DMA message
#define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13)
#define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16)
#define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17)
@@ -67,6 +69,7 @@
#define DMA_RX_STATUS_HAVE_DATA BIT(5)
#define DMA_RX_STATUS_ENABLED BIT(8)
+// COUNTER_RESET can be written to counter registers to reset them to zero.
+// However, in some cases this can mess up the THC.
#define COUNTER_RESET BIT(31)
struct ithc_registers {
@@ -147,15 +150,15 @@ static_assert(sizeof(struct ithc_registers) == 0x1300);
#define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode?
#define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3)
#define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f)
-#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20)
-#define DEVCFG_SPI_HEARTBEAT_INTERVAL (((x) >> 21) & 7)
+#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20) // TODO implement heartbeat
+#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
-#define DEVCFG_SPI_DELAY (((x) >> 28) & 7)
-#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31)
+#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7) // TODO use this
+#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31) // TODO use this?
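
Note that the old DEVCFG_SPI_HEARTBEAT_INTERVAL and DEVCFG_SPI_DELAY definitions were object-like macros whose bodies referenced an undeclared x, so any attempt to use them would have failed to compile; adding the (x) parameter makes them usable. A usage sketch of the fixed accessors (the local variable names are illustrative):

u32 spi_cfg = ithc->config.spi_config;
u8 max_freq = DEVCFG_SPI_MAX_FREQ(spi_cfg);               // bits 1-4
u8 mode = DEVCFG_SPI_MODE(spi_cfg);                       // bits 6-7
u8 hb_interval = DEVCFG_SPI_HEARTBEAT_INTERVAL(spi_cfg);  // bits 21-23
u8 delay = DEVCFG_SPI_DELAY(spi_cfg);                     // bits 28-30
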
-struct ithc_device_config {
+struct ithc_device_config { // (Example values are from an SP7+.)
u32 _unknown_00; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
u32 _unknown_04; // 04 = 0x00000000
u32 dma_buf_sizes; // 08 = 0x000a00ff
@@ -166,9 +169,9 @@ struct ithc_device_config {
u16 vendor_id; // 1c = 0x045e = Microsoft Corp.
u16 product_id; // 1e = 0x0c1a
u32 revision; // 20 = 0x00000001
- u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139
+ u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139 (this value looks more random on newer devices)
u32 _unknown_28; // 28 = 0x00000000
- u32 fw_mode; // 2c = 0x00000000
+ u32 fw_mode; // 2c = 0x00000000 (for fw update?)
u32 _unknown_30; // 30 = 0x00000000
u32 _unknown_34; // 34 = 0x0404035e (u8,u8,u8,u8 = version?)
u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
index 6a9b0d480bc15..028e55a4ec53e 100644
--- a/drivers/hid/ithc/ithc.h
+++ b/drivers/hid/ithc/ithc.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
#include <linux/module.h>
#include <linux/input.h>
#include <linux/hid.h>
@@ -21,7 +23,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; })
-#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while(0)
+#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while (0)
#define NUM_RX_BUF 16
@@ -35,8 +37,13 @@ struct ithc {
struct pci_dev *pci;
int irq;
struct task_struct *poll_thread;
+
struct pm_qos_request activity_qos;
- struct timer_list activity_timer;
+ struct hrtimer activity_start_timer;
+ struct hrtimer activity_end_timer;
+ ktime_t last_rx_time;
+ unsigned int cur_rx_seq_count;
+ unsigned int cur_rx_seq_errors;
struct hid_device *hid;
bool hid_parse_done;
@@ -54,7 +61,7 @@ struct ithc {
};
int ithc_reset(struct ithc *ithc);
-void ithc_set_active(struct ithc *ithc);
+void ithc_set_active(struct ithc *ithc, unsigned int duration_us);
int ithc_debug_init(struct ithc *ithc);
void ithc_log_regs(struct ithc *ithc);
--
2.43.0