0006-ithc.patch

From 888f8f300f9a547535e251f192759902545208f0 Mon Sep 17 00:00:00 2001
From: Dorian Stoll <dorian.stoll@tmsp.io>
Date: Sun, 11 Dec 2022 12:03:38 +0100
Subject: [PATCH] iommu: intel: Disable source id verification for ITHC

Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
Patchset: ithc
---
 drivers/iommu/intel/irq_remapping.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 566297bc87dd..a8cd8f12d593 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -386,6 +386,22 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 data.busmatch_count = 0;
 pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
+ /*
+ * The Intel Touch Host Controller is at 00:10.6, but for some reason
+ * the MSI interrupts have request id 01:05.0.
+ * Disable id verification to work around this.
+ * FIXME Find proper fix or turn this into a quirk.
+ */
+ if (dev->vendor == PCI_VENDOR_ID_INTEL && (dev->class >> 8) == PCI_CLASS_INPUT_PEN) {
+ switch(dev->device) {
+ case 0x98d0: case 0x98d1: // LKF
+ case 0xa0d0: case 0xa0d1: // TGL LP
+ case 0x43d0: case 0x43d1: // TGL H
+ set_irte_sid(irte, SVT_NO_VERIFY, SQ_ALL_16, 0);
+ return 0;
+ }
+ }
+
 /*
 * DMA alias provides us with a PCI device and alias. The only case
 * where the it will return an alias on a different bus than the
--
2.45.2

From e5bbe336297f8d6fbaac16f8b091522bb394e30a Mon Sep 17 00:00:00 2001
From: quo <tuple@list.ru>
Date: Sun, 11 Dec 2022 12:10:54 +0100
Subject: [PATCH] hid: Add support for Intel Touch Host Controller

Based on quo/ithc-linux@0b8b45d

Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
Patchset: ithc
---
 drivers/hid/Kconfig | 2 +
 drivers/hid/Makefile | 1 +
 drivers/hid/ithc/Kbuild | 6 +
 drivers/hid/ithc/Kconfig | 12 +
 drivers/hid/ithc/ithc-debug.c | 130 ++++++
 drivers/hid/ithc/ithc-dma.c | 373 +++++++++++++++++
 drivers/hid/ithc/ithc-dma.h | 69 ++++
 drivers/hid/ithc/ithc-main.c | 728 ++++++++++++++++++++++++++++++++++
 drivers/hid/ithc/ithc-regs.c | 96 +++++
 drivers/hid/ithc/ithc-regs.h | 189 +++++++++
 drivers/hid/ithc/ithc.h | 67 ++++
 11 files changed, 1673 insertions(+)
 create mode 100644 drivers/hid/ithc/Kbuild
 create mode 100644 drivers/hid/ithc/Kconfig
 create mode 100644 drivers/hid/ithc/ithc-debug.c
 create mode 100644 drivers/hid/ithc/ithc-dma.c
 create mode 100644 drivers/hid/ithc/ithc-dma.h
 create mode 100644 drivers/hid/ithc/ithc-main.c
 create mode 100644 drivers/hid/ithc/ithc-regs.c
 create mode 100644 drivers/hid/ithc/ithc-regs.h
 create mode 100644 drivers/hid/ithc/ithc.h

diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a263e49b2ae2..03f0f5af289a 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1353,4 +1353,6 @@ source "drivers/hid/surface-hid/Kconfig"
 source "drivers/hid/ipts/Kconfig"
+source "drivers/hid/ithc/Kconfig"
+
 endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index f4bad1b8d813..d32c194400ae 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -172,3 +172,4 @@ obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
 obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
 obj-$(CONFIG_HID_IPTS) += ipts/
+obj-$(CONFIG_HID_ITHC) += ithc/
diff --git a/drivers/hid/ithc/Kbuild b/drivers/hid/ithc/Kbuild
new file mode 100644
index 000000000000..aea83f2ac07b
--- /dev/null
+++ b/drivers/hid/ithc/Kbuild
@@ -0,0 +1,6 @@
+obj-$(CONFIG_HID_ITHC) := ithc.o
+
+ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-debug.o
+
+ccflags-y := -std=gnu11 -Wno-declaration-after-statement
+
diff --git a/drivers/hid/ithc/Kconfig b/drivers/hid/ithc/Kconfig
new file mode 100644
index 000000000000..ede713023609
--- /dev/null
+++ b/drivers/hid/ithc/Kconfig
@@ -0,0 +1,12 @@
+config HID_ITHC
+ tristate "Intel Touch Host Controller"
+ depends on PCI
+ depends on HID
+ help
+ Say Y here if your system has a touchscreen using Intel's
+ Touch Host Controller (ITHC / IPTS) technology.
+
+ If unsure say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ithc.
diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c
new file mode 100644
index 000000000000..1f1f1e33f2e5
--- /dev/null
+++ b/drivers/hid/ithc/ithc-debug.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+
+#include "ithc.h"
+
+void ithc_log_regs(struct ithc *ithc)
+{
+ if (!ithc->prev_regs)
+ return;
+ u32 __iomem *cur = (__iomem void *)ithc->regs;
+ u32 *prev = (void *)ithc->prev_regs;
+ for (int i = 1024; i < sizeof(*ithc->regs) / 4; i++) {
+ u32 x = readl(cur + i);
+ if (x != prev[i]) {
+ pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x);
+ prev[i] = x;
+ }
+ }
+}
+
+static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len,
+ loff_t *offset)
+{
+ // Debug commands consist of a single letter followed by a list of numbers (decimal or
+ // hexadecimal, space-separated).
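+ // Illustrative usage from userspace (assuming DEVNAME expands to "ithc" and
+ // debugfs is mounted at /sys/kernel/debug):
+ //   echo "r 0x1000" > /sys/kernel/debug/ithc/cmd    (read register 0x1000)
+ //   echo "x" > /sys/kernel/debug/ithc/cmd           (reset the device)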
+ struct ithc *ithc = file_inode(f)->i_private;
+ char cmd[256];
+ if (!ithc || !ithc->pci)
+ return -ENODEV;
+ if (!len)
+ return -EINVAL;
+ if (len >= sizeof(cmd))
+ return -EINVAL;
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+ cmd[len] = 0;
+ if (cmd[len-1] == '\n')
+ cmd[len-1] = 0;
+ pci_info(ithc->pci, "debug command: %s\n", cmd);
+
+ // Parse the list of arguments into a u32 array.
+ u32 n = 0;
+ const char *s = cmd + 1;
+ u32 a[32];
+ while (*s && *s != '\n') {
+ if (n >= ARRAY_SIZE(a))
+ return -EINVAL;
+ if (*s++ != ' ')
+ return -EINVAL;
+ char *e;
+ a[n++] = simple_strtoul(s, &e, 0);
+ if (e == s)
+ return -EINVAL;
+ s = e;
+ }
+ ithc_log_regs(ithc);
+
+ // Execute the command.
+ switch (cmd[0]) {
+ case 'x': // reset
+ ithc_reset(ithc);
+ break;
+ case 'w': // write register: offset mask value
+ if (n != 3 || (a[0] & 3))
+ return -EINVAL;
+ pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n",
+ a[0], a[2], a[1]);
+ bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]);
+ break;
+ case 'r': // read register: offset
+ if (n != 1 || (a[0] & 3))
+ return -EINVAL;
+ pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0],
+ readl(((__iomem u32 *)ithc->regs) + a[0] / 4));
+ break;
+ case 's': // spi command: cmd offset len data...
+ // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ // set touch cfg: s 6 12 4 XX
+ if (n < 3 || a[2] > (n - 3) * 4)
+ return -EINVAL;
+ pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]);
+ if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3))
+ for (u32 i = 0; i < (a[2] + 3) / 4; i++)
+ pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]);
+ break;
+ case 'd': // dma command: cmd len data...
+ // get report descriptor: d 7 8 0 0
+ // enable multitouch: d 3 2 0x0105
+ if (n < 2 || a[1] > (n - 2) * 4)
+ return -EINVAL;
+ pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]);
+ if (ithc_dma_tx(ithc, a[0], a[1], a + 2))
+ pci_err(ithc->pci, "dma tx failed\n");
+ break;
+ default:
+ return -EINVAL;
+ }
+ ithc_log_regs(ithc);
+ return len;
+}
+
+static const struct file_operations ithc_debugfops_cmd = {
+ .owner = THIS_MODULE,
+ .write = ithc_debugfs_cmd_write,
+};
+
+static void ithc_debugfs_devres_release(struct device *dev, void *res)
+{
+ struct dentry **dbgm = res;
+ if (*dbgm)
+ debugfs_remove_recursive(*dbgm);
+}
+
+int ithc_debug_init(struct ithc *ithc)
+{
+ struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof(*dbgm), GFP_KERNEL);
+ if (!dbgm)
+ return -ENOMEM;
+ devres_add(&ithc->pci->dev, dbgm);
+ struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL);
+ if (IS_ERR(dbg))
+ return PTR_ERR(dbg);
+ *dbgm = dbg;
+
+ struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd);
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
+
+ return 0;
+}
+
diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c
new file mode 100644
index 000000000000..ffb8689b8a78
--- /dev/null
+++ b/drivers/hid/ithc/ithc-dma.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+
+#include "ithc.h"
+
+// The THC uses tables of PRDs (physical region descriptors) to describe the TX and RX data buffers.
+// Each PRD contains the DMA address and size of a block of DMA memory, and some status flags.
+// This allows each data buffer to consist of multiple non-contiguous blocks of memory.
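+ // (Illustrative layout note: each PRD is a 16-byte entry as defined by struct
+ // ithc_phys_region_desc in ithc-dma.h: a 64-bit address field holding the physical
+ // address divided by 1024, a 32-bit size field whose upper bits carry the PRD_FLAG_*
+ // status bits, and 4 unused bytes.)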
+
+static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p,
+ unsigned int num_buffers, unsigned int num_pages, enum dma_data_direction dir)
+{
+ p->num_pages = num_pages;
+ p->dir = dir;
+ // We allocate enough space to have one PRD per data buffer page, however if the data
+ // buffer pages happen to be contiguous, we can describe the buffer using fewer PRDs, so
+ // some will remain unused (which is fine).
+ p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE);
+ p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL);
+ if (!p->addr)
+ return -ENOMEM;
+ if (p->dma_addr & (PAGE_SIZE - 1))
+ return -EFAULT;
+ return 0;
+}
+
+// Devres managed sg_table wrapper.
+struct ithc_sg_table {
+ void *addr;
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+static void ithc_dma_sgtable_free(struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_sg(sgt, sg, i) {
+ struct page *p = sg_page(sg);
+ if (p)
+ __free_page(p);
+ }
+ sg_free_table(sgt);
+}
+static void ithc_dma_data_devres_release(struct device *dev, void *res)
+{
+ struct ithc_sg_table *sgt = res;
+ if (sgt->addr)
+ vunmap(sgt->addr);
+ dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0);
+ ithc_dma_sgtable_free(&sgt->sgt);
+}
+
+static int ithc_dma_data_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
+ struct ithc_dma_data_buffer *b)
+{
+ // We don't use dma_alloc_coherent() for data buffers, because they don't have to be
+ // coherent (they are unidirectional) or contiguous (we can use one PRD per page).
+ // We could use dma_alloc_noncontiguous(), however this still always allocates a single
+ // DMA mapped segment, which is more restrictive than what we need.
+ // Instead we use an sg_table of individually allocated pages.
+ struct page *pages[16];
+ if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages))
+ return -EINVAL;
+ b->active_idx = -1;
+ struct ithc_sg_table *sgt = devres_alloc(
+ ithc_dma_data_devres_release, sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return -ENOMEM;
+ sgt->dir = prds->dir;
+
+ if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) {
+ struct scatterlist *sg;
+ int i;
+ bool ok = true;
+ for_each_sgtable_sg(&sgt->sgt, sg, i) {
+ // NOTE: don't need __GFP_DMA for PCI DMA
+ struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!p) {
+ ok = false;
+ break;
+ }
+ sg_set_page(sg, p, PAGE_SIZE, 0);
+ }
+ if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) {
+ devres_add(&ithc->pci->dev, sgt);
+ b->sgt = &sgt->sgt;
+ b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL);
+ if (!b->addr)
+ return -ENOMEM;
+ return 0;
+ }
+ ithc_dma_sgtable_free(&sgt->sgt);
+ }
+ devres_free(sgt);
+ return -ENOMEM;
+}
+
+static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
+ struct ithc_dma_data_buffer *b, unsigned int idx)
+{
+ // Give a buffer to the THC.
+ struct ithc_phys_region_desc *prd = prds->addr;
+ prd += idx * prds->num_pages;
+ if (b->active_idx >= 0) {
+ pci_err(ithc->pci, "buffer already active\n");
+ return -EINVAL;
+ }
+ b->active_idx = idx;
+ if (prds->dir == DMA_TO_DEVICE) {
+ // TX buffer: Caller should have already filled the data buffer, so just fill
+ // the PRD and flush.
+ // (TODO: Support multi-page TX buffers. So far no device seems to use or need
+ // these though.)
+ if (b->data_size > PAGE_SIZE)
+ return -EINVAL;
+ prd->addr = sg_dma_address(b->sgt->sgl) >> 10;
+ prd->size = b->data_size | PRD_FLAG_END;
+ flush_kernel_vmap_range(b->addr, b->data_size);
+ } else if (prds->dir == DMA_FROM_DEVICE) {
+ // RX buffer: Reset PRDs.
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_dma_sg(b->sgt, sg, i) {
+ prd->addr = sg_dma_address(sg) >> 10;
+ prd->size = sg_dma_len(sg);
+ prd++;
+ }
+ prd[-1].size |= PRD_FLAG_END;
+ }
+ dma_wmb(); // for the prds
+ dma_sync_sgtable_for_device(&ithc->pci->dev, b->sgt, prds->dir);
+ return 0;
+}
+
+static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds,
+ struct ithc_dma_data_buffer *b, unsigned int idx)
+{
+ // Take a buffer from the THC.
+ struct ithc_phys_region_desc *prd = prds->addr;
+ prd += idx * prds->num_pages;
+ // This is purely a sanity check. We don't strictly need the idx parameter for this
+ // function, because it should always be the same as active_idx, unless we have a bug.
+ if (b->active_idx != idx) {
+ pci_err(ithc->pci, "wrong buffer index\n");
+ return -EINVAL;
+ }
+ b->active_idx = -1;
+ if (prds->dir == DMA_FROM_DEVICE) {
+ // RX buffer: Calculate actual received data size from PRDs.
+ dma_rmb(); // for the prds
+ b->data_size = 0;
+ struct scatterlist *sg;
+ int i;
+ for_each_sgtable_dma_sg(b->sgt, sg, i) {
+ unsigned int size = prd->size;
+ b->data_size += size & PRD_SIZE_MASK;
+ if (size & PRD_FLAG_END)
+ break;
+ if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) {
+ pci_err(ithc->pci, "truncated prd\n");
+ break;
+ }
+ prd++;
+ }
+ invalidate_kernel_vmap_range(b->addr, b->data_size);
+ }
+ dma_sync_sgtable_for_cpu(&ithc->pci->dev, b->sgt, prds->dir);
+ return 0;
+}
+
+int ithc_dma_rx_init(struct ithc *ithc, u8 channel)
+{
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ mutex_init(&rx->mutex);
+
+ // Allocate buffers.
+ u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes);
+ unsigned int num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n",
+ NUM_RX_BUF, buf_size, num_pages);
+ CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE);
+ for (unsigned int i = 0; i < NUM_RX_BUF; i++)
+ CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]);
+
+ // Init registers.
+ writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2);
+ lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr);
+ writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs);
+ writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds);
+ u8 head = readb(&ithc->regs->dma_rx[channel].head);
+ if (head) {
+ pci_err(ithc->pci, "head is nonzero (%u)\n", head);
+ return -EIO;
+ }
+
+ // Init buffers.
+ for (unsigned int i = 0; i < NUM_RX_BUF; i++)
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i);
+
+ writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail);
+ return 0;
+}
+
+void ithc_dma_rx_enable(struct ithc *ithc, u8 channel)
+{
+ bitsb_set(&ithc->regs->dma_rx[channel].control,
+ DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA);
+ CHECK(waitl, ithc, &ithc->regs->dma_rx[channel].status,
+ DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED);
+}
+
+int ithc_dma_tx_init(struct ithc *ithc)
+{
+ struct ithc_dma_tx *tx = &ithc->dma_tx;
+ mutex_init(&tx->mutex);
+
+ // Allocate buffers.
+ tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes);
+ unsigned int num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n",
+ tx->max_size, num_pages);
+ CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE);
+ CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf);
+
+ // Init registers.
+ lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr);
+ writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds);
+
+ // Init buffers.
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+ return 0;
+}
+
+static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data,
+ u8 channel, u8 buf)
+{
+ if (buf >= NUM_RX_BUF) {
+ pci_err(ithc->pci, "invalid dma ringbuffer index\n");
+ return -EINVAL;
+ }
+ u32 len = data->data_size;
+ struct ithc_dma_rx_header *hdr = data->addr;
+ u8 *hiddata = (void *)(hdr + 1);
+ if (len >= sizeof(*hdr) && hdr->code == DMA_RX_CODE_RESET) {
+ // The THC sends a reset request when we need to reinitialize the device.
+ // This usually only happens if we send an invalid command or put the device
+ // in a bad state.
+ CHECK(ithc_reset, ithc);
+ } else if (len < sizeof(*hdr) || len != sizeof(*hdr) + hdr->data_size) {
+ if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
+ // When the CPU enters a low power state during DMA, we can get truncated
+ // messages. For Surface devices, this will typically be a single touch
+ // report that is only 1 byte, or a multitouch report that is 257 bytes.
+ // See also ithc_set_active().
+ } else {
+ pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n",
+ channel, buf, len, hdr->code, hdr->data_size);
+ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
+ hdr, min(len, 0x400u), 0);
+ }
+ } else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) {
+ // Response to a 'get report descriptor' request.
+ // The actual descriptor is preceded by 8 nul bytes.
+ CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8);
+ WRITE_ONCE(ithc->hid_parse_done, true);
+ wake_up(&ithc->wait_hid_parse);
+ } else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
+ // Standard HID input report containing touch data.
+ CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1);
+ } else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) {
+ // Response to a 'get feature' request.
+ bool done = false;
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ if (ithc->hid_get_feature_buf) {
+ if (hdr->data_size < ithc->hid_get_feature_size)
+ ithc->hid_get_feature_size = hdr->data_size;
+ memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size);
+ ithc->hid_get_feature_buf = NULL;
+ done = true;
+ }
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+ if (done) {
+ wake_up(&ithc->wait_hid_get_feature);
+ } else {
+ // Received data without a matching request, or the request already
+ // timed out. (XXX What's the correct thing to do here?)
+ CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT,
+ hiddata, hdr->data_size, 1);
+ }
+ } else {
+ pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n",
+ channel, buf, len, hdr->code);
+ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
+ hdr, min(len, 0x400u), 0);
+ }
+ return 0;
+}
+
+static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel)
+{
+ // Process all filled RX buffers from the ringbuffer.
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ unsigned int n = rx->num_received;
+ u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head);
+ while (1) {
+ u8 tail = n % NUM_RX_BUF;
+ u8 tail_wrap = tail | ((n / NUM_RX_BUF) & 1 ? 0 : DMA_RX_WRAP_FLAG);
+ writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail);
+ // ringbuffer is full if tail_wrap == head_wrap
+ // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG
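+ // (Illustrative example: right after init n == 0, so tail_wrap carries the wrap
+ // flag while head_wrap does not, matching the "empty" condition. If the device
+ // filled every buffer before the CPU consumed any, its head would wrap around
+ // with the flag toggled, making head_wrap equal tail_wrap, i.e. "full".)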
+ if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG))
+ return 0;
+
+ // take the buffer that the device just filled
+ struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF];
+ CHECK_RET(ithc_dma_data_buffer_get, ithc, &rx->prds, b, tail);
+ rx->num_received = ++n;
+
+ // process data
+ CHECK(ithc_dma_rx_process_buf, ithc, b, channel, tail);
+
+ // give the buffer back to the device
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail);
+ }
+}
+int ithc_dma_rx(struct ithc *ithc, u8 channel)
+{
+ struct ithc_dma_rx *rx = &ithc->dma_rx[channel];
+ mutex_lock(&rx->mutex);
+ int ret = ithc_dma_rx_unlocked(ithc, channel);
+ mutex_unlock(&rx->mutex);
+ return ret;
+}
+
+static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data)
+{
+ ithc_set_active(ithc, 100 * USEC_PER_MSEC);
+
+ // Send a single TX buffer to the THC.
+ pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize);
+ struct ithc_dma_tx_header *hdr;
+ // Data must be padded to next 4-byte boundary.
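+ // (For example, a 5-byte payload gets 3 bytes of padding, so together with the
+ // 8-byte header fullsize becomes 16.)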
+ u8 padding = datasize & 3 ? 4 - (datasize & 3) : 0;
+ unsigned int fullsize = sizeof(*hdr) + datasize + padding;
+ if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE)
+ return -EINVAL;
+ CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+
+ // Fill the TX buffer with header and data.
+ ithc->dma_tx.buf.data_size = fullsize;
+ hdr = ithc->dma_tx.buf.addr;
+ hdr->code = cmdcode;
+ hdr->data_size = datasize;
+ u8 *dest = (void *)(hdr + 1);
+ memcpy(dest, data, datasize);
+ dest += datasize;
+ for (u8 p = 0; p < padding; p++)
+ *dest++ = 0;
+ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
+
+ // Let the THC process the buffer.
+ bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND);
+ CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
+ writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status);
+ return 0;
+}
+int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data)
+{
+ mutex_lock(&ithc->dma_tx.mutex);
+ int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data);
+ mutex_unlock(&ithc->dma_tx.mutex);
+ return ret;
+}
+
diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h
new file mode 100644
index 000000000000..93652e4476bf
--- /dev/null
+++ b/drivers/hid/ithc/ithc-dma.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#define PRD_SIZE_MASK 0xffffff
+#define PRD_FLAG_END 0x1000000
+#define PRD_FLAG_SUCCESS 0x2000000
+#define PRD_FLAG_ERROR 0x4000000
+
+struct ithc_phys_region_desc {
+ u64 addr; // physical addr/1024
+ u32 size; // num bytes, PRD_FLAG_END marks last prd for data split over multiple prds
+ u32 unused;
+};
+
+#define DMA_RX_CODE_INPUT_REPORT 3
+#define DMA_RX_CODE_FEATURE_REPORT 4
+#define DMA_RX_CODE_REPORT_DESCRIPTOR 5
+#define DMA_RX_CODE_RESET 7
+
+struct ithc_dma_rx_header {
+ u32 code;
+ u32 data_size;
+ u32 _unknown[14];
+};
+
+#define DMA_TX_CODE_SET_FEATURE 3
+#define DMA_TX_CODE_GET_FEATURE 4
+#define DMA_TX_CODE_OUTPUT_REPORT 5
+#define DMA_TX_CODE_GET_REPORT_DESCRIPTOR 7
+
+struct ithc_dma_tx_header {
+ u32 code;
+ u32 data_size;
+};
+
+struct ithc_dma_prd_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ u32 size;
+ u32 num_pages; // per data buffer
+ enum dma_data_direction dir;
+};
+
+struct ithc_dma_data_buffer {
+ void *addr;
+ struct sg_table *sgt;
+ int active_idx;
+ u32 data_size;
+};
+
+struct ithc_dma_tx {
+ struct mutex mutex;
+ u32 max_size;
+ struct ithc_dma_prd_buffer prds;
+ struct ithc_dma_data_buffer buf;
+};
+
+struct ithc_dma_rx {
+ struct mutex mutex;
+ u32 num_received;
+ struct ithc_dma_prd_buffer prds;
+ struct ithc_dma_data_buffer bufs[NUM_RX_BUF];
+};
+
+int ithc_dma_rx_init(struct ithc *ithc, u8 channel);
+void ithc_dma_rx_enable(struct ithc *ithc, u8 channel);
+int ithc_dma_tx_init(struct ithc *ithc);
+int ithc_dma_rx(struct ithc *ithc, u8 channel);
+int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *cmddata);
+
diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c
new file mode 100644
index 000000000000..87ed4aa70fda
--- /dev/null
+++ b/drivers/hid/ithc/ithc-main.c
@@ -0,0 +1,728 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+
+#include "ithc.h"
+
+MODULE_DESCRIPTION("Intel Touch Host Controller driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+// Lakefield
+#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0
+#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1
+// Tiger Lake
+#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0
+#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1
+#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 0x43d0
+#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1
+// Alder Lake
+#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8
+#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9
+#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0
+#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1
+#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0
+#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1
+// Raptor Lake
+#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58
+#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59
+// Meteor Lake
+#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48
+#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 0x7e4a
+
+static const struct pci_device_id ithc_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) },
+ // XXX So far the THC seems to be the only Intel PCI device with PCI_CLASS_INPUT_PEN,
+ // so instead of the device list we could just do:
+ // { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = PCI_CLASS_INPUT_PEN, .class_mask = ~0, },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, ithc_pci_tbl);
+
+// Module parameters
+
+static bool ithc_use_polling = false;
+module_param_named(poll, ithc_use_polling, bool, 0);
+MODULE_PARM_DESC(poll, "Use polling instead of interrupts");
+
+// Since all known devices seem to use only channel 1, by default we disable channel 0.
+static bool ithc_use_rx0 = false;
+module_param_named(rx0, ithc_use_rx0, bool, 0);
+MODULE_PARM_DESC(rx0, "Use DMA RX channel 0");
+
+static bool ithc_use_rx1 = true;
+module_param_named(rx1, ithc_use_rx1, bool, 0);
+MODULE_PARM_DESC(rx1, "Use DMA RX channel 1");
+
+// Values below 250 seem to work well on the SP7+. If this is set too high, you may observe cursor stuttering.
+static int ithc_dma_latency_us = 200;
+module_param_named(dma_latency_us, ithc_dma_latency_us, int, 0);
+MODULE_PARM_DESC(dma_latency_us, "Determines the CPU latency QoS value for DMA transfers (in microseconds), -1 to disable latency QoS");
+
+// Values above 1700 seem to work well on the SP7+. If this is set too low, you may observe cursor stuttering.
+static unsigned int ithc_dma_early_us = 2000;
+module_param_named(dma_early_us, ithc_dma_early_us, uint, 0);
+MODULE_PARM_DESC(dma_early_us, "Determines how early the CPU latency QoS value is applied before the next expected IRQ (in microseconds)");
+
+static bool ithc_log_regs_enabled = false;
+module_param_named(logregs, ithc_log_regs_enabled, bool, 0);
+MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)");
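+ // (Illustrative example: the parameters can be set at load time, e.g.
+ // "modprobe ithc poll=1 logregs=1" to use polling and enable register logging.)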
+
+// Sysfs attributes
+
+static bool ithc_is_config_valid(struct ithc *ithc)
+{
+ return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC;
+}
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc))
+ return -ENODEV;
+ return sprintf(buf, "0x%04x", ithc->config.vendor_id);
+}
+static DEVICE_ATTR_RO(vendor);
+static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc))
+ return -ENODEV;
+ return sprintf(buf, "0x%04x", ithc->config.product_id);
+}
+static DEVICE_ATTR_RO(product);
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc))
+ return -ENODEV;
+ return sprintf(buf, "%u", ithc->config.revision);
+}
+static DEVICE_ATTR_RO(revision);
+static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ithc *ithc = dev_get_drvdata(dev);
+ if (!ithc || !ithc_is_config_valid(ithc))
+ return -ENODEV;
+ u32 v = ithc->config.fw_version;
+ return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static const struct attribute_group *ithc_attribute_groups[] = {
+ &(const struct attribute_group){
+ .name = DEVNAME,
+ .attrs = (struct attribute *[]){
+ &dev_attr_vendor.attr,
+ &dev_attr_product.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_fw_version.attr,
+ NULL
+ },
+ },
+ NULL
+};
+
+// HID setup
+
+static int ithc_hid_start(struct hid_device *hdev) { return 0; }
+static void ithc_hid_stop(struct hid_device *hdev) { }
+static int ithc_hid_open(struct hid_device *hdev) { return 0; }
+static void ithc_hid_close(struct hid_device *hdev) { }
+
+static int ithc_hid_parse(struct hid_device *hdev)
+{
+ struct ithc *ithc = hdev->driver_data;
+ u64 val = 0;
+ WRITE_ONCE(ithc->hid_parse_done, false);
+ for (int retries = 0; ; retries++) {
+ CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof(val), &val);
+ if (wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done),
+ msecs_to_jiffies(200)))
+ return 0;
+ if (retries > 5) {
+ pci_err(ithc->pci, "failed to read report descriptor\n");
+ return -ETIMEDOUT;
+ }
+ pci_warn(ithc->pci, "failed to read report descriptor, retrying\n");
+ }
+}
+
+static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf,
+ size_t len, unsigned char rtype, int reqtype)
+{
+ struct ithc *ithc = hdev->driver_data;
+ if (!buf || !len)
+ return -EINVAL;
+ u32 code;
+ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) {
+ code = DMA_TX_CODE_OUTPUT_REPORT;
+ } else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) {
+ code = DMA_TX_CODE_SET_FEATURE;
+ } else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) {
+ code = DMA_TX_CODE_GET_FEATURE;
+ } else {
+ pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n",
+ rtype, reqtype, reportnum);
+ return -EINVAL;
+ }
+ buf[0] = reportnum;
+
+ if (reqtype == HID_REQ_GET_REPORT) {
+ // Prepare for response.
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ ithc->hid_get_feature_buf = buf;
+ ithc->hid_get_feature_size = len;
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+
+ // Transmit 'get feature' request.
+ int r = CHECK(ithc_dma_tx, ithc, code, 1, buf);
+ if (!r) {
+ r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature,
+ !ithc->hid_get_feature_buf, msecs_to_jiffies(1000));
+ if (!r)
+ r = -ETIMEDOUT;
+ else if (r < 0)
+ r = -EINTR;
+ else
+ r = 0;
+ }
+
+ // If everything went ok, the buffer has been filled with the response data.
+ // Return the response size.
+ mutex_lock(&ithc->hid_get_feature_mutex);
+ ithc->hid_get_feature_buf = NULL;
+ if (!r)
+ r = ithc->hid_get_feature_size;
+ mutex_unlock(&ithc->hid_get_feature_mutex);
+ return r;
+ }
+
+ // 'Set feature', or 'output report'. These don't have a response.
+ CHECK_RET(ithc_dma_tx, ithc, code, len, buf);
+ return 0;
+}
+
+static struct hid_ll_driver ithc_ll_driver = {
+ .start = ithc_hid_start,
+ .stop = ithc_hid_stop,
+ .open = ithc_hid_open,
+ .close = ithc_hid_close,
+ .parse = ithc_hid_parse,
+ .raw_request = ithc_hid_raw_request,
+};
+
+static void ithc_hid_devres_release(struct device *dev, void *res)
+{
+ struct hid_device **hidm = res;
+ if (*hidm)
+ hid_destroy_device(*hidm);
+}
+
+static int ithc_hid_init(struct ithc *ithc)
+{
+ struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof(*hidm), GFP_KERNEL);
+ if (!hidm)
+ return -ENOMEM;
+ devres_add(&ithc->pci->dev, hidm);
+ struct hid_device *hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+ *hidm = hid;
+
+ strscpy(hid->name, DEVFULLNAME, sizeof(hid->name));
+ strscpy(hid->phys, ithc->phys, sizeof(hid->phys));
+ hid->ll_driver = &ithc_ll_driver;
+ hid->bus = BUS_PCI;
+ hid->vendor = ithc->config.vendor_id;
+ hid->product = ithc->config.product_id;
+ hid->version = 0x100;
+ hid->dev.parent = &ithc->pci->dev;
+ hid->driver_data = ithc;
+
+ ithc->hid = hid;
+ return 0;
+}
+
+// Interrupts/polling
+
+static enum hrtimer_restart ithc_activity_start_timer_callback(struct hrtimer *t)
+{
+ struct ithc *ithc = container_of(t, struct ithc, activity_start_timer);
+ ithc_set_active(ithc, ithc_dma_early_us * 2 + USEC_PER_MSEC);
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart ithc_activity_end_timer_callback(struct hrtimer *t)
+{
+ struct ithc *ithc = container_of(t, struct ithc, activity_end_timer);
+ cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
+ return HRTIMER_NORESTART;
+}
+
+void ithc_set_active(struct ithc *ithc, unsigned int duration_us)
+{
+ if (ithc_dma_latency_us < 0)
+ return;
+ // When CPU usage is very low, the CPU can enter various low power states (C2-C10).
+ // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_RX_TIMEOUT will be
+ // set when this happens. The amount of truncated messages can become very high, resulting
+ // in user-visible effects (laggy/stuttering cursor). To avoid this, we use a CPU latency
+ // QoS request to prevent the CPU from entering low power states during touch interactions.
+ cpu_latency_qos_update_request(&ithc->activity_qos, ithc_dma_latency_us);
+ hrtimer_start_range_ns(&ithc->activity_end_timer,
+ ns_to_ktime(duration_us * NSEC_PER_USEC), duration_us * NSEC_PER_USEC, HRTIMER_MODE_REL);
+}
+
+static int ithc_set_device_enabled(struct ithc *ithc, bool enable)
+{
+ u32 x = ithc->config.touch_cfg =
+ (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2 |
+ (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0);
+ return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE,
+ offsetof(struct ithc_device_config, touch_cfg), sizeof(x), &x);
+}
+
+static void ithc_disable_interrupts(struct ithc *ithc)
+{
+ writel(0, &ithc->regs->error_control);
+ bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0);
+ bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
+ bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
+ bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0);
+}
+
+static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned int channel)
+{
+ writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA,
+ &ithc->regs->dma_rx[channel].status);
+}
+
+static void ithc_clear_interrupts(struct ithc *ithc)
+{
+ writel(0xffffffff, &ithc->regs->error_flags);
+ writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status);
+ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
+ ithc_clear_dma_rx_interrupts(ithc, 0);
+ ithc_clear_dma_rx_interrupts(ithc, 1);
+ writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2,
  1030. + &ithc->regs->dma_tx.status);
  1031. +}
  1032. +
  1033. +static void ithc_process(struct ithc *ithc)
  1034. +{
  1035. + ithc_log_regs(ithc);
  1036. +
  1037. + bool rx0 = ithc_use_rx0 && (readl(&ithc->regs->dma_rx[0].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
  1038. + bool rx1 = ithc_use_rx1 && (readl(&ithc->regs->dma_rx[1].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
  1039. +
  1040. + // Track time between DMA rx transfers, so we can try to predict when we need to enable CPU latency QoS for the next transfer
  1041. + ktime_t t = ktime_get();
  1042. + ktime_t dt = ktime_sub(t, ithc->last_rx_time);
  1043. + if (rx0 || rx1) {
  1044. + ithc->last_rx_time = t;
  1045. + if (dt > ms_to_ktime(100)) {
  1046. + ithc->cur_rx_seq_count = 0;
  1047. + ithc->cur_rx_seq_errors = 0;
  1048. + }
  1049. + ithc->cur_rx_seq_count++;
  1050. + if (!ithc_use_polling && ithc_dma_latency_us >= 0) {
  1051. + // Disable QoS, since the DMA transfer has completed (we re-enable it after a delay below)
  1052. + cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
  1053. + hrtimer_try_to_cancel(&ithc->activity_end_timer);
  1054. + }
  1055. + }
  1056. +
  1057. + // Read and clear error bits
  1058. + u32 err = readl(&ithc->regs->error_flags);
  1059. + if (err) {
  1060. + writel(err, &ithc->regs->error_flags);
  1061. + if (err & ~ERROR_FLAG_DMA_RX_TIMEOUT)
  1062. + pci_err(ithc->pci, "error flags: 0x%08x\n", err);
  1063. + if (err & ERROR_FLAG_DMA_RX_TIMEOUT) {
  1064. + // Only log an error if we see a significant number of these errors.
  1065. + ithc->cur_rx_seq_errors++;
  1066. + if (ithc->cur_rx_seq_errors && ithc->cur_rx_seq_errors % 50 == 0 && ithc->cur_rx_seq_errors > ithc->cur_rx_seq_count / 10)
  1067. + pci_err(ithc->pci, "High number of DMA RX timeouts/errors (%u/%u, dt=%lldus). Try adjusting dma_early_us and/or dma_latency_us.\n",
  1068. + ithc->cur_rx_seq_errors, ithc->cur_rx_seq_count, ktime_to_us(dt));
  1069. + }
  1070. + }
  1071. +
  1072. + // Process DMA rx
  1073. + if (ithc_use_rx0) {
  1074. + ithc_clear_dma_rx_interrupts(ithc, 0);
  1075. + if (rx0)
  1076. + ithc_dma_rx(ithc, 0);
  1077. + }
  1078. + if (ithc_use_rx1) {
  1079. + ithc_clear_dma_rx_interrupts(ithc, 1);
  1080. + if (rx1)
  1081. + ithc_dma_rx(ithc, 1);
  1082. + }
  1083. +
  1084. + // Start timer to re-enable QoS for next rx, but only if we've seen an ERROR_FLAG_DMA_RX_TIMEOUT
  1085. + if ((rx0 || rx1) && !ithc_use_polling && ithc_dma_latency_us >= 0 && ithc->cur_rx_seq_errors > 0) {
  1086. + ktime_t expires = ktime_add(t, ktime_sub_us(dt, ithc_dma_early_us));
  1087. + hrtimer_start_range_ns(&ithc->activity_start_timer, expires, 10 * NSEC_PER_USEC, HRTIMER_MODE_ABS);
  1088. + }
  1089. +
  1090. + ithc_log_regs(ithc);
  1091. +}
  1092. +
  1093. +static irqreturn_t ithc_interrupt_thread(int irq, void *arg)
  1094. +{
  1095. + struct ithc *ithc = arg;
  1096. + pci_dbg(ithc->pci, "IRQ! err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n",
  1097. + readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags),
  1098. + readb(&ithc->regs->spi_cmd.control), readl(&ithc->regs->spi_cmd.status),
  1099. + readb(&ithc->regs->dma_rx[0].control), readl(&ithc->regs->dma_rx[0].status),
  1100. + readb(&ithc->regs->dma_rx[1].control), readl(&ithc->regs->dma_rx[1].status),
  1101. + readb(&ithc->regs->dma_tx.control), readl(&ithc->regs->dma_tx.status));
  1102. + ithc_process(ithc);
  1103. + return IRQ_HANDLED;
  1104. +}
  1105. +
  1106. +static int ithc_poll_thread(void *arg)
  1107. +{
  1108. + struct ithc *ithc = arg;
  1109. + unsigned int sleep = 100;
  1110. + while (!kthread_should_stop()) {
  1111. + u32 n = ithc->dma_rx[1].num_received;
  1112. + ithc_process(ithc);
  1113. + // Decrease polling interval to 20ms if we received data, otherwise slowly
  1114. + // increase it up to 200ms.
  1115. + if (n != ithc->dma_rx[1].num_received) {
  1116. + ithc_set_active(ithc, 100 * USEC_PER_MSEC);
  1117. + sleep = 20;
  1118. + } else {
  1119. + sleep = min(200u, sleep + (sleep >> 4) + 1);
  1120. + }
  1121. + msleep_interruptible(sleep);
  1122. + }
  1123. + return 0;
  1124. +}
  1125. +
  1126. +// Device initialization and shutdown
  1127. +
  1128. +static void ithc_disable(struct ithc *ithc)
  1129. +{
  1130. + bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE);
  1131. + CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED);
  1132. + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
  1133. + bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND, 0);
  1134. + bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0);
  1135. + bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_ENABLE, 0);
  1136. + bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_ENABLE, 0);
  1137. + ithc_disable_interrupts(ithc);
  1138. + ithc_clear_interrupts(ithc);
  1139. +}
  1140. +
  1141. +static int ithc_init_device(struct ithc *ithc)
  1142. +{
  1143. + ithc_log_regs(ithc);
  1144. + bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0;
  1145. + ithc_disable(ithc);
  1146. + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY);
  1147. +
  1148. + // Since we don't yet know which SPI config the device wants, use default speed and mode
  1149. + // initially for reading config data.
  1150. + ithc_set_spi_config(ithc, 10, 0);
  1151. +
  1152. + // Setting the following bit seems to make reading the config more reliable.
  1153. + bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000);
  1154. +
  1155. + // If the device was previously enabled, wait a bit to make sure it's fully shut down.
  1156. + if (was_enabled)
  1157. + if (msleep_interruptible(100))
  1158. + return -EINTR;
  1159. +
  1160. + // Take the touch device out of reset.
  1161. + bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
  1162. + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0);
  1163. + for (int retries = 0; ; retries++) {
  1164. + ithc_log_regs(ithc);
  1165. + bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET);
  1166. + if (!waitl(ithc, &ithc->regs->state, 0xf, 2))
  1167. + break;
  1168. + if (retries > 5) {
  1169. + pci_err(ithc->pci, "failed to reset device, state = 0x%08x\n", readl(&ithc->regs->state));
  1170. + return -ETIMEDOUT;
  1171. + }
  1172. + pci_warn(ithc->pci, "invalid state, retrying reset\n");
  1173. + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
  1174. + if (msleep_interruptible(1000))
  1175. + return -EINTR;
  1176. + }
  1177. + ithc_log_regs(ithc);
  1178. +
  1179. + // Waiting for the following status bit makes reading config much more reliable,
  1180. + // however the official driver does not seem to do this...
  1181. + CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4);
  1182. +
  1183. + // Read configuration data.
  1184. + for (int retries = 0; ; retries++) {
  1185. + ithc_log_regs(ithc);
  1186. + memset(&ithc->config, 0, sizeof(ithc->config));
  1187. + CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof(ithc->config), &ithc->config);
  1188. + u32 *p = (void *)&ithc->config;
  1189. + pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
  1190. + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
  1191. + if (ithc_is_config_valid(ithc))
  1192. + break;
  1193. + if (retries > 10) {
  1194. + pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n",
  1195. + ithc->config.device_id);
  1196. + return -EIO;
  1197. + }
  1198. + pci_warn(ithc->pci, "failed to read config, retrying\n");
  1199. + if (msleep_interruptible(100))
  1200. + return -EINTR;
  1201. + }
  1202. + ithc_log_regs(ithc);
  1203. +
  1204. + // Apply SPI config and enable touch device.
  1205. + CHECK_RET(ithc_set_spi_config, ithc,
  1206. + DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config),
  1207. + DEVCFG_SPI_MODE(ithc->config.spi_config));
  1208. + CHECK_RET(ithc_set_device_enabled, ithc, true);
  1209. + ithc_log_regs(ithc);
  1210. + return 0;
  1211. +}
  1212. +
  1213. +int ithc_reset(struct ithc *ithc)
  1214. +{
  1215. + // FIXME This should probably do devres_release_group()+ithc_start().
  1216. + // But because this is called during DMA processing, that would have to be done
  1217. + // asynchronously (schedule_work()?). And with extra locking?
  1218. + pci_err(ithc->pci, "reset\n");
  1219. + CHECK(ithc_init_device, ithc);
  1220. + if (ithc_use_rx0)
  1221. + ithc_dma_rx_enable(ithc, 0);
  1222. + if (ithc_use_rx1)
  1223. + ithc_dma_rx_enable(ithc, 1);
  1224. + ithc_log_regs(ithc);
  1225. + pci_dbg(ithc->pci, "reset completed\n");
  1226. + return 0;
  1227. +}
  1228. +
  1229. +static void ithc_stop(void *res)
  1230. +{
  1231. + struct ithc *ithc = res;
  1232. + pci_dbg(ithc->pci, "stopping\n");
  1233. + ithc_log_regs(ithc);
  1234. +
  1235. + if (ithc->poll_thread)
  1236. + CHECK(kthread_stop, ithc->poll_thread);
  1237. + if (ithc->irq >= 0)
  1238. + disable_irq(ithc->irq);
  1239. + CHECK(ithc_set_device_enabled, ithc, false);
  1240. + ithc_disable(ithc);
  1241. + hrtimer_cancel(&ithc->activity_start_timer);
  1242. + hrtimer_cancel(&ithc->activity_end_timer);
  1243. + cpu_latency_qos_remove_request(&ithc->activity_qos);
  1244. +
  1245. + // Clear DMA config.
  1246. + for (unsigned int i = 0; i < 2; i++) {
  1247. + CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0);
  1248. + lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr);
  1249. + writeb(0, &ithc->regs->dma_rx[i].num_bufs);
  1250. + writeb(0, &ithc->regs->dma_rx[i].num_prds);
  1251. + }
  1252. + lo_hi_writeq(0, &ithc->regs->dma_tx.addr);
  1253. + writeb(0, &ithc->regs->dma_tx.num_prds);
  1254. +
  1255. + ithc_log_regs(ithc);
  1256. + pci_dbg(ithc->pci, "stopped\n");
  1257. +}
  1258. +
  1259. +static void ithc_clear_drvdata(void *res)
  1260. +{
  1261. + struct pci_dev *pci = res;
  1262. + pci_set_drvdata(pci, NULL);
  1263. +}
  1264. +
  1265. +static int ithc_start(struct pci_dev *pci)
  1266. +{
  1267. + pci_dbg(pci, "starting\n");
  1268. + if (pci_get_drvdata(pci)) {
  1269. + pci_err(pci, "device already initialized\n");
  1270. + return -EINVAL;
  1271. + }
  1272. + if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL))
  1273. + return -ENOMEM;
  1274. +
  1275. + // Allocate/init main driver struct.
  1276. + struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof(*ithc), GFP_KERNEL);
  1277. + if (!ithc)
  1278. + return -ENOMEM;
  1279. + ithc->irq = -1;
  1280. + ithc->pci = pci;
  1281. + snprintf(ithc->phys, sizeof(ithc->phys), "pci-%s/" DEVNAME, pci_name(pci));
  1282. + init_waitqueue_head(&ithc->wait_hid_parse);
  1283. + init_waitqueue_head(&ithc->wait_hid_get_feature);
  1284. + mutex_init(&ithc->hid_get_feature_mutex);
  1285. + pci_set_drvdata(pci, ithc);
  1286. + CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci);
  1287. + if (ithc_log_regs_enabled)
  1288. + ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof(*ithc->prev_regs), GFP_KERNEL);
  1289. +
  1290. + // PCI initialization.
  1291. + CHECK_RET(pcim_enable_device, pci);
  1292. + pci_set_master(pci);
  1293. + CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs");
  1294. + CHECK_RET(dma_set_mask_and_coherent, &pci->dev, DMA_BIT_MASK(64));
  1295. + CHECK_RET(pci_set_power_state, pci, PCI_D0);
  1296. + ithc->regs = pcim_iomap_table(pci)[0];
  1297. +
  1298. + // Allocate IRQ.
  1299. + if (!ithc_use_polling) {
  1300. + CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
  1301. + ithc->irq = CHECK(pci_irq_vector, pci, 0);
  1302. + if (ithc->irq < 0)
  1303. + return ithc->irq;
  1304. + }
  1305. +
  1306. + // Initialize THC and touch device.
  1307. + CHECK_RET(ithc_init_device, ithc);
  1308. + CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups);
  1309. + if (ithc_use_rx0)
  1310. + CHECK_RET(ithc_dma_rx_init, ithc, 0);
  1311. + if (ithc_use_rx1)
  1312. + CHECK_RET(ithc_dma_rx_init, ithc, 1);
  1313. + CHECK_RET(ithc_dma_tx_init, ithc);
  1314. +
  1315. + cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
  1316. + hrtimer_init(&ithc->activity_start_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
  1317. + ithc->activity_start_timer.function = ithc_activity_start_timer_callback;
  1318. + hrtimer_init(&ithc->activity_end_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  1319. + ithc->activity_end_timer.function = ithc_activity_end_timer_callback;
  1320. +
  1321. + // Add ithc_stop() callback AFTER setting up DMA buffers, so that polling/irqs/DMA are
  1322. + // disabled BEFORE the buffers are freed.
  1323. + CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc);
  1324. +
  1325. + CHECK_RET(ithc_hid_init, ithc);
  1326. +
  1327. + // Start polling/IRQ.
  1328. + if (ithc_use_polling) {
  1329. + pci_info(pci, "using polling instead of irq\n");
  1330. + // Use a thread instead of simple timer because we want to be able to sleep.
  1331. + ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll");
  1332. + if (IS_ERR(ithc->poll_thread)) {
  1333. + int err = PTR_ERR(ithc->poll_thread);
  1334. + ithc->poll_thread = NULL;
  1335. + return err;
  1336. + }
  1337. + } else {
  1338. + CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL,
  1339. + ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc);
  1340. + }
  1341. +
  1342. + if (ithc_use_rx0)
  1343. + ithc_dma_rx_enable(ithc, 0);
  1344. + if (ithc_use_rx1)
  1345. + ithc_dma_rx_enable(ithc, 1);
  1346. +
  1347. + // hid_add_device() can only be called after irq/polling is started and DMA is enabled,
  1348. + // because it calls ithc_hid_parse() which reads the report descriptor via DMA.
  1349. + CHECK_RET(hid_add_device, ithc->hid);
  1350. +
  1351. + CHECK(ithc_debug_init, ithc);
  1352. +
  1353. + pci_dbg(pci, "started\n");
  1354. + return 0;
  1355. +}
  1356. +
  1357. +static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id)
  1358. +{
  1359. + pci_dbg(pci, "device probe\n");
  1360. + return ithc_start(pci);
  1361. +}
  1362. +
  1363. +static void ithc_remove(struct pci_dev *pci)
  1364. +{
  1365. + pci_dbg(pci, "device remove\n");
  1366. + // all cleanup is handled by devres
  1367. +}
  1368. +
  1369. +// For suspend/resume, we just deinitialize and reinitialize everything.
  1370. +// TODO It might be cleaner to keep the HID device around, however we would then have to signal
  1371. +// to userspace that the touch device has lost state and userspace needs to e.g. resend 'set
  1372. +// feature' requests. Hidraw does not seem to have a facility to do that.
  1373. +static int ithc_suspend(struct device *dev)
  1374. +{
  1375. + struct pci_dev *pci = to_pci_dev(dev);
  1376. + pci_dbg(pci, "pm suspend\n");
  1377. + devres_release_group(dev, ithc_start);
  1378. + return 0;
  1379. +}
  1380. +
  1381. +static int ithc_resume(struct device *dev)
  1382. +{
  1383. + struct pci_dev *pci = to_pci_dev(dev);
  1384. + pci_dbg(pci, "pm resume\n");
  1385. + return ithc_start(pci);
  1386. +}
  1387. +
  1388. +static int ithc_freeze(struct device *dev)
  1389. +{
  1390. + struct pci_dev *pci = to_pci_dev(dev);
  1391. + pci_dbg(pci, "pm freeze\n");
  1392. + devres_release_group(dev, ithc_start);
  1393. + return 0;
  1394. +}
  1395. +
  1396. +static int ithc_thaw(struct device *dev)
  1397. +{
  1398. + struct pci_dev *pci = to_pci_dev(dev);
  1399. + pci_dbg(pci, "pm thaw\n");
  1400. + return ithc_start(pci);
  1401. +}
  1402. +
  1403. +static int ithc_restore(struct device *dev)
  1404. +{
  1405. + struct pci_dev *pci = to_pci_dev(dev);
  1406. + pci_dbg(pci, "pm restore\n");
  1407. + return ithc_start(pci);
  1408. +}
  1409. +
  1410. +static struct pci_driver ithc_driver = {
  1411. + .name = DEVNAME,
  1412. + .id_table = ithc_pci_tbl,
  1413. + .probe = ithc_probe,
  1414. + .remove = ithc_remove,
  1415. + .driver.pm = &(const struct dev_pm_ops) {
  1416. + .suspend = ithc_suspend,
  1417. + .resume = ithc_resume,
  1418. + .freeze = ithc_freeze,
  1419. + .thaw = ithc_thaw,
  1420. + .restore = ithc_restore,
  1421. + },
  1422. + //.dev_groups = ithc_attribute_groups, // could use this (since 5.14), however the attributes won't have valid values until config has been read anyway
  1423. +};
  1424. +
  1425. +static int __init ithc_init(void)
  1426. +{
  1427. + return pci_register_driver(&ithc_driver);
  1428. +}
  1429. +
  1430. +static void __exit ithc_exit(void)
  1431. +{
  1432. + pci_unregister_driver(&ithc_driver);
  1433. +}
  1434. +
  1435. +module_init(ithc_init);
  1436. +module_exit(ithc_exit);
  1437. +
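Note (not part of the patch): the suspend/freeze handlers above simply release the devres group that ithc_start() opened, and resume/thaw/restore run ithc_start() again, so power management reuses the normal probe path. A minimal sketch of that pattern, using an invented "acme" device (all names hypothetical), is:

    #include <linux/pci.h>

    struct acme { void __iomem *regs; };

    static int acme_start(struct pci_dev *pci)
    {
    	// Everything allocated from here on belongs to the devres group keyed by
    	// acme_start. The group is deliberately left open so it can later be
    	// released by id.
    	if (!devres_open_group(&pci->dev, acme_start, GFP_KERNEL))
    		return -ENOMEM;
    	struct acme *a = devm_kzalloc(&pci->dev, sizeof(*a), GFP_KERNEL);
    	if (!a)
    		return -ENOMEM;
    	// ... enable the device, map registers, request the IRQ, add devm cleanup actions ...
    	return 0;
    }

    static int acme_suspend(struct device *dev)
    {
    	// Runs every cleanup action registered inside the group, newest first.
    	devres_release_group(dev, acme_start);
    	return 0;
    }

    static int acme_resume(struct device *dev)
    {
    	// Reinitialize from scratch, exactly like a fresh probe.
    	return acme_start(to_pci_dev(dev));
    }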
  1438. diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c
  1439. new file mode 100644
  1440. index 000000000000..e058721886e3
  1441. --- /dev/null
  1442. +++ b/drivers/hid/ithc/ithc-regs.c
  1443. @@ -0,0 +1,96 @@
  1444. +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
  1445. +
  1446. +#include "ithc.h"
  1447. +
  1448. +#define reg_num(r) (0x1fff & (u16)(__force u64)(r))
  1449. +
  1450. +void bitsl(__iomem u32 *reg, u32 mask, u32 val)
  1451. +{
  1452. + if (val & ~mask)
  1453. + pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n",
  1454. + reg_num(reg), val, mask);
  1455. + writel((readl(reg) & ~mask) | (val & mask), reg);
  1456. +}
  1457. +
  1458. +void bitsb(__iomem u8 *reg, u8 mask, u8 val)
  1459. +{
  1460. + if (val & ~mask)
  1461. + pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n",
  1462. + reg_num(reg), val, mask);
  1463. + writeb((readb(reg) & ~mask) | (val & mask), reg);
  1464. +}
  1465. +
  1466. +int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val)
  1467. +{
  1468. + pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n",
  1469. + reg_num(reg), mask, val);
  1470. + u32 x;
  1471. + if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
  1472. + pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n",
  1473. + reg_num(reg), mask, val);
  1474. + return -ETIMEDOUT;
  1475. + }
  1476. + pci_dbg(ithc->pci, "done waiting\n");
  1477. + return 0;
  1478. +}
  1479. +
  1480. +int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val)
  1481. +{
  1482. + pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
  1483. + reg_num(reg), mask, val);
  1484. + u8 x;
  1485. + if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
  1486. + pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
  1487. + reg_num(reg), mask, val);
  1488. + return -ETIMEDOUT;
  1489. + }
  1490. + pci_dbg(ithc->pci, "done waiting\n");
  1491. + return 0;
  1492. +}
  1493. +
  1494. +int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode)
  1495. +{
  1496. + pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode);
  1497. + if (mode == 3)
  1498. + mode = 2;
  1499. + bitsl(&ithc->regs->spi_config,
  1500. + SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff),
  1501. + SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed));
  1502. + return 0;
  1503. +}
  1504. +
  1505. +int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data)
  1506. +{
  1507. + pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset);
  1508. + if (size > sizeof(ithc->regs->spi_cmd.data))
  1509. + return -EINVAL;
  1510. +
  1511. + // Wait if the device is still busy.
  1512. + CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
  1513. + // Clear result flags.
  1514. + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
  1515. +
  1516. + // Init SPI command data.
  1517. + writeb(command, &ithc->regs->spi_cmd.code);
  1518. + writew(size, &ithc->regs->spi_cmd.size);
  1519. + writel(offset, &ithc->regs->spi_cmd.offset);
  1520. + u32 *p = data, n = (size + 3) / 4;
  1521. + for (u32 i = 0; i < n; i++)
  1522. + writel(p[i], &ithc->regs->spi_cmd.data[i]);
  1523. +
  1524. + // Start transmission.
  1525. + bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND);
  1526. + CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0);
  1527. +
  1528. + // Read response.
  1529. + if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE)
  1530. + return -EIO;
  1531. + if (readw(&ithc->regs->spi_cmd.size) != size)
  1532. + return -EMSGSIZE;
  1533. + for (u32 i = 0; i < n; i++)
  1534. + p[i] = readl(&ithc->regs->spi_cmd.data[i]);
  1535. +
  1536. + writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status);
  1537. + return 0;
  1538. +}
  1539. +
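Note (not part of the patch): a usage sketch for ithc_spi_command() above. It mirrors the existing callers — ithc_init_device() reads the whole config block at offset 0, and ithc_set_device_enabled() writes touch_cfg with SPI_CMD_CODE_WRITE — only the helper name below is invented for illustration:

    // Read back the 32-bit touch_cfg word from the device configuration space.
    static int example_read_touch_cfg(struct ithc *ithc, u32 *out)
    {
    	return ithc_spi_command(ithc, SPI_CMD_CODE_READ,
    			offsetof(struct ithc_device_config, touch_cfg),
    			sizeof(*out), out);
    }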
  1540. diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h
  1541. new file mode 100644
  1542. index 000000000000..d4007d9e2bac
  1543. --- /dev/null
  1544. +++ b/drivers/hid/ithc/ithc-regs.h
  1545. @@ -0,0 +1,189 @@
  1546. +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
  1547. +
  1548. +#define CONTROL_QUIESCE BIT(1)
  1549. +#define CONTROL_IS_QUIESCED BIT(2)
  1550. +#define CONTROL_NRESET BIT(3)
  1551. +#define CONTROL_READY BIT(29)
  1552. +
  1553. +#define SPI_CONFIG_MODE(x) (((x) & 3) << 2)
  1554. +#define SPI_CONFIG_SPEED(x) (((x) & 7) << 4)
  1555. +#define SPI_CONFIG_UNKNOWN_18(x) (((x) & 3) << 18)
  1556. +#define SPI_CONFIG_SPEED2(x) (((x) & 0xf) << 20) // high bit = high speed mode?
  1557. +
  1558. +#define ERROR_CONTROL_UNKNOWN_0 BIT(0)
  1559. +#define ERROR_CONTROL_DISABLE_DMA BIT(1) // clears DMA_RX_CONTROL_ENABLE when a DMA error occurs
  1560. +#define ERROR_CONTROL_UNKNOWN_2 BIT(2)
  1561. +#define ERROR_CONTROL_UNKNOWN_3 BIT(3)
  1562. +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_9 BIT(9)
  1563. +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_10 BIT(10)
  1564. +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_12 BIT(12)
  1565. +#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_13 BIT(13)
  1566. +#define ERROR_CONTROL_UNKNOWN_16(x) (((x) & 0xff) << 16) // spi error code irq?
  1567. +#define ERROR_CONTROL_SET_DMA_STATUS BIT(29) // sets DMA_RX_STATUS_ERROR when a DMA error occurs
  1568. +
  1569. +#define ERROR_STATUS_DMA BIT(28)
  1570. +#define ERROR_STATUS_SPI BIT(30)
  1571. +
  1572. +#define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9)
  1573. +#define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10)
  1574. +#define ERROR_FLAG_DMA_RX_TIMEOUT BIT(12) // set when we receive a truncated DMA message
  1575. +#define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13)
  1576. +#define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16)
  1577. +#define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17)
  1578. +#define ERROR_FLAG_SPI_INTRA_PACKET_TIMEOUT BIT(18)
  1579. +#define ERROR_FLAG_SPI_INVALID_RESPONSE BIT(19)
  1580. +#define ERROR_FLAG_SPI_HS_RX_TIMEOUT BIT(20)
  1581. +#define ERROR_FLAG_SPI_TOUCH_IC_INIT BIT(21)
  1582. +
  1583. +#define SPI_CMD_CONTROL_SEND BIT(0) // cleared by device when sending is complete
  1584. +#define SPI_CMD_CONTROL_IRQ BIT(1)
  1585. +
  1586. +#define SPI_CMD_CODE_READ 4
  1587. +#define SPI_CMD_CODE_WRITE 6
  1588. +
  1589. +#define SPI_CMD_STATUS_DONE BIT(0)
  1590. +#define SPI_CMD_STATUS_ERROR BIT(1)
  1591. +#define SPI_CMD_STATUS_BUSY BIT(3)
  1592. +
  1593. +#define DMA_TX_CONTROL_SEND BIT(0) // cleared by device when sending is complete
  1594. +#define DMA_TX_CONTROL_IRQ BIT(3)
  1595. +
  1596. +#define DMA_TX_STATUS_DONE BIT(0)
  1597. +#define DMA_TX_STATUS_ERROR BIT(1)
  1598. +#define DMA_TX_STATUS_UNKNOWN_2 BIT(2)
  1599. +#define DMA_TX_STATUS_UNKNOWN_3 BIT(3) // busy?
  1600. +
  1601. +#define DMA_RX_CONTROL_ENABLE BIT(0)
  1602. +#define DMA_RX_CONTROL_IRQ_UNKNOWN_1 BIT(1) // rx1 only?
  1603. +#define DMA_RX_CONTROL_IRQ_ERROR BIT(3) // rx1 only?
  1604. +#define DMA_RX_CONTROL_IRQ_UNKNOWN_4 BIT(4) // rx0 only?
  1605. +#define DMA_RX_CONTROL_IRQ_DATA BIT(5)
  1606. +
  1607. +#define DMA_RX_CONTROL2_UNKNOWN_5 BIT(5) // rx0 only?
  1608. +#define DMA_RX_CONTROL2_RESET BIT(7) // resets ringbuffer indices
  1609. +
  1610. +#define DMA_RX_WRAP_FLAG BIT(7)
  1611. +
  1612. +#define DMA_RX_STATUS_ERROR BIT(3)
  1613. +#define DMA_RX_STATUS_UNKNOWN_4 BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms)
  1614. +#define DMA_RX_STATUS_HAVE_DATA BIT(5)
  1615. +#define DMA_RX_STATUS_ENABLED BIT(8)
  1616. +
  1617. +// COUNTER_RESET can be written to counter registers to reset them to zero. However, in some cases this can mess up the THC.
  1618. +#define COUNTER_RESET BIT(31)
  1619. +
  1620. +struct ithc_registers {
  1621. + /* 0000 */ u32 _unknown_0000[1024];
  1622. + /* 1000 */ u32 _unknown_1000;
  1623. + /* 1004 */ u32 _unknown_1004;
  1624. + /* 1008 */ u32 control_bits;
  1625. + /* 100c */ u32 _unknown_100c;
  1626. + /* 1010 */ u32 spi_config;
  1627. + /* 1014 */ u32 _unknown_1014[3];
  1628. + /* 1020 */ u32 error_control;
  1629. + /* 1024 */ u32 error_status; // write to clear
  1630. + /* 1028 */ u32 error_flags; // write to clear
  1631. + /* 102c */ u32 _unknown_102c[5];
  1632. + struct {
  1633. + /* 1040 */ u8 control;
  1634. + /* 1041 */ u8 code;
  1635. + /* 1042 */ u16 size;
  1636. + /* 1044 */ u32 status; // write to clear
  1637. + /* 1048 */ u32 offset;
  1638. + /* 104c */ u32 data[16];
  1639. + /* 108c */ u32 _unknown_108c;
  1640. + } spi_cmd;
  1641. + struct {
  1642. + /* 1090 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
  1643. + /* 1098 */ u8 control;
  1644. + /* 1099 */ u8 _unknown_1099;
  1645. + /* 109a */ u8 _unknown_109a;
  1646. + /* 109b */ u8 num_prds;
  1647. + /* 109c */ u32 status; // write to clear
  1648. + } dma_tx;
  1649. + /* 10a0 */ u32 _unknown_10a0[7];
  1650. + /* 10bc */ u32 state; // is 0xe0000402 (dev config val 0) after CONTROL_NRESET, 0xe0000461 after first touch, 0xe0000401 after DMA_RX_CODE_RESET
  1651. + /* 10c0 */ u32 _unknown_10c0[8];
  1652. + /* 10e0 */ u32 _unknown_10e0_counters[3];
  1653. + /* 10ec */ u32 _unknown_10ec[5];
  1654. + struct {
  1655. + /* 1100/1200 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
  1656. + /* 1108/1208 */ u8 num_bufs;
  1657. + /* 1109/1209 */ u8 num_prds;
  1658. + /* 110a/120a */ u16 _unknown_110a;
  1659. + /* 110c/120c */ u8 control;
  1660. + /* 110d/120d */ u8 head;
  1661. + /* 110e/120e */ u8 tail;
  1662. + /* 110f/120f */ u8 control2;
  1663. + /* 1110/1210 */ u32 status; // write to clear
  1664. + /* 1114/1214 */ u32 _unknown_1114;
  1665. + /* 1118/1218 */ u64 _unknown_1118_guc_addr;
  1666. + /* 1120/1220 */ u32 _unknown_1120_guc;
  1667. + /* 1124/1224 */ u32 _unknown_1124_guc;
  1668. + /* 1128/1228 */ u32 unknown_init_bits; // bit 2 = guc related, bit 3 = rx1 related, bit 4 = guc related
  1669. + /* 112c/122c */ u32 _unknown_112c;
  1670. + /* 1130/1230 */ u64 _unknown_1130_guc_addr;
  1671. + /* 1138/1238 */ u32 _unknown_1138_guc;
  1672. + /* 113c/123c */ u32 _unknown_113c;
  1673. + /* 1140/1240 */ u32 _unknown_1140_guc;
  1674. + /* 1144/1244 */ u32 _unknown_1144[23];
  1675. + /* 11a0/12a0 */ u32 _unknown_11a0_counters[6];
  1676. + /* 11b8/12b8 */ u32 _unknown_11b8[18];
  1677. + } dma_rx[2];
  1678. +};
  1679. +static_assert(sizeof(struct ithc_registers) == 0x1300);
  1680. +
  1681. +#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6)
  1682. +#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6)
  1683. +
  1684. +#define DEVCFG_TOUCH_MASK 0x3f
  1685. +#define DEVCFG_TOUCH_ENABLE BIT(0)
  1686. +#define DEVCFG_TOUCH_UNKNOWN_1 BIT(1)
  1687. +#define DEVCFG_TOUCH_UNKNOWN_2 BIT(2)
  1688. +#define DEVCFG_TOUCH_UNKNOWN_3 BIT(3)
  1689. +#define DEVCFG_TOUCH_UNKNOWN_4 BIT(4)
  1690. +#define DEVCFG_TOUCH_UNKNOWN_5 BIT(5)
  1691. +#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6)
  1692. +
  1693. +#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC"
  1694. +
  1695. +#define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode?
  1696. +#define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3)
  1697. +#define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f)
  1698. +#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20) // TODO implement heartbeat
  1699. +#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
  1700. +#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
  1701. +#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
  1702. +#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
  1703. +#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7) // TODO use this
  1704. +#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31) // TODO use this?
  1705. +
  1706. +struct ithc_device_config { // (Example values are from an SP7+.)
  1707. + u32 _unknown_00; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
  1708. + u32 _unknown_04; // 04 = 0x00000000
  1709. + u32 dma_buf_sizes; // 08 = 0x000a00ff
  1710. + u32 touch_cfg; // 0c = 0x0000001c
  1711. + u32 _unknown_10; // 10 = 0x0000001c
  1712. + u32 device_id; // 14 = 0x43495424 = "$TIC"
  1713. + u32 spi_config; // 18 = 0xfda00a2e
  1714. + u16 vendor_id; // 1c = 0x045e = Microsoft Corp.
  1715. + u16 product_id; // 1e = 0x0c1a
  1716. + u32 revision; // 20 = 0x00000001
  1717. + u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139 (this value looks more random on newer devices)
  1718. + u32 _unknown_28; // 28 = 0x00000000
  1719. + u32 fw_mode; // 2c = 0x00000000 (for fw update?)
  1720. + u32 _unknown_30; // 30 = 0x00000000
  1721. + u32 _unknown_34; // 34 = 0x0404035e (u8,u8,u8,u8 = version?)
  1722. + u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
  1723. + u32 _unknown_3c; // 3c = 0x00000002
  1724. +};
  1725. +
  1726. +void bitsl(__iomem u32 *reg, u32 mask, u32 val);
  1727. +void bitsb(__iomem u8 *reg, u8 mask, u8 val);
  1728. +#define bitsl_set(reg, x) bitsl(reg, x, x)
  1729. +#define bitsb_set(reg, x) bitsb(reg, x, x)
  1730. +int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val);
  1731. +int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val);
  1732. +int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode);
  1733. +int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data);
  1734. +
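Note (not part of the patch): a worked decode of the DEVCFG_SPI_* accessors, using the SP7+ sample value quoted in the struct ithc_device_config comment above (the arithmetic below is the editor's, not a claim about any particular device):

    // spi_config = 0xfda00a2e  (SP7+ sample value from the comment above)
    // DEVCFG_SPI_MAX_FREQ(0xfda00a2e) = (0xfda00a2e >> 1) & 0xf = 7
    // DEVCFG_SPI_MODE(0xfda00a2e)     = (0xfda00a2e >> 6) & 0x3 = 0
    // ithc_init_device() feeds exactly these two decoded values into ithc_set_spi_config().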
  1735. diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
  1736. new file mode 100644
  1737. index 000000000000..028e55a4ec53
  1738. --- /dev/null
  1739. +++ b/drivers/hid/ithc/ithc.h
  1740. @@ -0,0 +1,67 @@
  1741. +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
  1742. +
  1743. +#include <linux/module.h>
  1744. +#include <linux/input.h>
  1745. +#include <linux/hid.h>
  1746. +#include <linux/dma-mapping.h>
  1747. +#include <linux/highmem.h>
  1748. +#include <linux/pci.h>
  1749. +#include <linux/io-64-nonatomic-lo-hi.h>
  1750. +#include <linux/iopoll.h>
  1751. +#include <linux/delay.h>
  1752. +#include <linux/kthread.h>
  1753. +#include <linux/miscdevice.h>
  1754. +#include <linux/debugfs.h>
  1755. +#include <linux/poll.h>
  1756. +#include <linux/timer.h>
  1757. +#include <linux/pm_qos.h>
  1758. +
  1759. +#define DEVNAME "ithc"
  1760. +#define DEVFULLNAME "Intel Touch Host Controller"
  1761. +
  1762. +#undef pr_fmt
  1763. +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  1764. +
  1765. +#define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; })
  1766. +#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while (0)
  1767. +
  1768. +#define NUM_RX_BUF 16
  1769. +
  1770. +struct ithc;
  1771. +
  1772. +#include "ithc-regs.h"
  1773. +#include "ithc-dma.h"
  1774. +
  1775. +struct ithc {
  1776. + char phys[32];
  1777. + struct pci_dev *pci;
  1778. + int irq;
  1779. + struct task_struct *poll_thread;
  1780. +
  1781. + struct pm_qos_request activity_qos;
  1782. + struct hrtimer activity_start_timer;
  1783. + struct hrtimer activity_end_timer;
  1784. + ktime_t last_rx_time;
  1785. + unsigned int cur_rx_seq_count;
  1786. + unsigned int cur_rx_seq_errors;
  1787. +
  1788. + struct hid_device *hid;
  1789. + bool hid_parse_done;
  1790. + wait_queue_head_t wait_hid_parse;
  1791. + wait_queue_head_t wait_hid_get_feature;
  1792. + struct mutex hid_get_feature_mutex;
  1793. + void *hid_get_feature_buf;
  1794. + size_t hid_get_feature_size;
  1795. +
  1796. + struct ithc_registers __iomem *regs;
  1797. + struct ithc_registers *prev_regs; // for debugging
  1798. + struct ithc_device_config config;
  1799. + struct ithc_dma_rx dma_rx[2];
  1800. + struct ithc_dma_tx dma_tx;
  1801. +};
  1802. +
  1803. +int ithc_reset(struct ithc *ithc);
  1804. +void ithc_set_active(struct ithc *ithc, unsigned int duration_us);
  1805. +int ithc_debug_init(struct ithc *ithc);
  1806. +void ithc_log_regs(struct ithc *ithc);
  1807. +
  1808. --
  1809. 2.45.2
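Note (not part of the patch): the CHECK()/CHECK_RET() macros defined in ithc.h are used throughout both patches; the illustrative function below (name invented) shows what a CHECK_RET() call amounts to:

    // CHECK(fn, ...)     calls fn(...), logs with pci_err() if the result is negative,
    //                    and evaluates to that result.
    // CHECK_RET(fn, ...) does the same and additionally returns the negative result
    //                    from the enclosing function.
    static int example_wait_ready(struct ithc *ithc)
    {
    	CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY);
    	return 0; // reached only if waitl() succeeded
    }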
  1810. From 299f645a4bc247c2f5adae925f56978870b133f8 Mon Sep 17 00:00:00 2001
  1811. From: quo <tuple@list.ru>
  1812. Date: Fri, 19 Apr 2024 22:11:09 +0200
  1813. Subject: [PATCH] hid: ithc: Update from quo/ithc-linux
  1814. - Added QuickSPI support for Surface Laptop Studio 2
  1815. - Use Latency Tolerance Reporting instead of manual CPU latency adjustments
  1816. Based on: https://github.com/quo/ithc-linux/commit/18afc6ffacd70b49fdee2eb1ab0a8acd159edb31
  1817. Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
  1818. Patchset: ithc
  1819. ---
  1820. drivers/hid/ithc/Kbuild | 2 +-
  1821. drivers/hid/ithc/ithc-debug.c | 33 +-
  1822. drivers/hid/ithc/ithc-debug.h | 7 +
  1823. drivers/hid/ithc/ithc-dma.c | 125 ++-----
  1824. drivers/hid/ithc/ithc-dma.h | 24 +-
  1825. drivers/hid/ithc/ithc-hid.c | 207 +++++++++++
  1826. drivers/hid/ithc/ithc-hid.h | 32 ++
  1827. drivers/hid/ithc/ithc-legacy.c | 252 ++++++++++++++
  1828. drivers/hid/ithc/ithc-legacy.h | 8 +
  1829. drivers/hid/ithc/ithc-main.c | 386 ++++-----------------
  1830. drivers/hid/ithc/ithc-quickspi.c | 578 +++++++++++++++++++++++++++++++
  1831. drivers/hid/ithc/ithc-quickspi.h | 39 +++
  1832. drivers/hid/ithc/ithc-regs.c | 72 +++-
  1833. drivers/hid/ithc/ithc-regs.h | 143 ++++----
  1834. drivers/hid/ithc/ithc.h | 71 ++--
  1835. 15 files changed, 1441 insertions(+), 538 deletions(-)
  1836. create mode 100644 drivers/hid/ithc/ithc-debug.h
  1837. create mode 100644 drivers/hid/ithc/ithc-hid.c
  1838. create mode 100644 drivers/hid/ithc/ithc-hid.h
  1839. create mode 100644 drivers/hid/ithc/ithc-legacy.c
  1840. create mode 100644 drivers/hid/ithc/ithc-legacy.h
  1841. create mode 100644 drivers/hid/ithc/ithc-quickspi.c
  1842. create mode 100644 drivers/hid/ithc/ithc-quickspi.h
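Note (not part of the patch): the update below routes all DMA traffic through a generic struct ithc_data descriptor defined in the new ithc-hid.h, which this excerpt does not reproduce. Judging only from its use in the diffs (e.g. { .type = ITHC_DATA_RAW, .size = n * 4, .data = a }), a shape consistent with the code would be roughly:

    // Editor's sketch; field order and enum values are guesses, the real definition
    // lives in drivers/hid/ithc/ithc-hid.h.
    enum ithc_data_type {
    	ITHC_DATA_IGNORE,
    	ITHC_DATA_RAW,
    	ITHC_DATA_ERROR,
    	ITHC_DATA_REPORT_DESCRIPTOR,
    	ITHC_DATA_INPUT_REPORT,
    	ITHC_DATA_OUTPUT_REPORT,
    	ITHC_DATA_GET_FEATURE,
    	ITHC_DATA_SET_FEATURE,
    };
    struct ithc_data {
    	enum ithc_data_type type;
    	u32 size;
    	const void *data;
    };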
  1843. diff --git a/drivers/hid/ithc/Kbuild b/drivers/hid/ithc/Kbuild
  1844. index aea83f2ac07b..4937ba131297 100644
  1845. --- a/drivers/hid/ithc/Kbuild
  1846. +++ b/drivers/hid/ithc/Kbuild
  1847. @@ -1,6 +1,6 @@
  1848. obj-$(CONFIG_HID_ITHC) := ithc.o
  1849. -ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-debug.o
  1850. +ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-hid.o ithc-legacy.o ithc-quickspi.o ithc-debug.o
  1851. ccflags-y := -std=gnu11 -Wno-declaration-after-statement
  1852. diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c
  1853. index 1f1f1e33f2e5..2d8c6afe9966 100644
  1854. --- a/drivers/hid/ithc/ithc-debug.c
  1855. +++ b/drivers/hid/ithc/ithc-debug.c
  1856. @@ -85,10 +85,11 @@ static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, si
  1857. case 'd': // dma command: cmd len data...
  1858. // get report descriptor: d 7 8 0 0
  1859. // enable multitouch: d 3 2 0x0105
  1860. - if (n < 2 || a[1] > (n - 2) * 4)
  1861. + if (n < 1)
  1862. return -EINVAL;
  1863. - pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]);
  1864. - if (ithc_dma_tx(ithc, a[0], a[1], a + 2))
  1865. + pci_info(ithc->pci, "debug dma command with %u bytes of data\n", n * 4);
  1866. + struct ithc_data data = { .type = ITHC_DATA_RAW, .size = n * 4, .data = a };
  1867. + if (ithc_dma_tx(ithc, &data))
  1868. pci_err(ithc->pci, "dma tx failed\n");
  1869. break;
  1870. default:
  1871. @@ -98,6 +99,23 @@ static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, si
  1872. return len;
  1873. }
  1874. +static struct dentry *dbg_dir;
  1875. +
  1876. +void __init ithc_debug_init_module(void)
  1877. +{
  1878. + struct dentry *d = debugfs_create_dir(DEVNAME, NULL);
  1879. + if (IS_ERR(d))
  1880. + pr_warn("failed to create debugfs dir (%li)\n", PTR_ERR(d));
  1881. + else
  1882. + dbg_dir = d;
  1883. +}
  1884. +
  1885. +void __exit ithc_debug_exit_module(void)
  1886. +{
  1887. + debugfs_remove_recursive(dbg_dir);
  1888. + dbg_dir = NULL;
  1889. +}
  1890. +
  1891. static const struct file_operations ithc_debugfops_cmd = {
  1892. .owner = THIS_MODULE,
  1893. .write = ithc_debugfs_cmd_write,
  1894. @@ -106,17 +124,18 @@ static const struct file_operations ithc_debugfops_cmd = {
  1895. static void ithc_debugfs_devres_release(struct device *dev, void *res)
  1896. {
  1897. struct dentry **dbgm = res;
  1898. - if (*dbgm)
  1899. - debugfs_remove_recursive(*dbgm);
  1900. + debugfs_remove_recursive(*dbgm);
  1901. }
  1902. -int ithc_debug_init(struct ithc *ithc)
  1903. +int ithc_debug_init_device(struct ithc *ithc)
  1904. {
  1905. + if (!dbg_dir)
  1906. + return -ENOENT;
  1907. struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof(*dbgm), GFP_KERNEL);
  1908. if (!dbgm)
  1909. return -ENOMEM;
  1910. devres_add(&ithc->pci->dev, dbgm);
  1911. - struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL);
  1912. + struct dentry *dbg = debugfs_create_dir(pci_name(ithc->pci), dbg_dir);
  1913. if (IS_ERR(dbg))
  1914. return PTR_ERR(dbg);
  1915. *dbgm = dbg;
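Note (not part of the patch): after this change the debugfs entries live in a per-device subdirectory. ithc_debug_init_module() creates the module-level directory (normally /sys/kernel/debug/ithc, assuming debugfs is mounted in its usual place), and ithc_debug_init_device() adds a child named after the PCI device, e.g. /sys/kernel/debug/ithc/0000:00:10.6/ — the PCI address here is only an example.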
  1916. diff --git a/drivers/hid/ithc/ithc-debug.h b/drivers/hid/ithc/ithc-debug.h
  1917. new file mode 100644
  1918. index 000000000000..38c53d916bdb
  1919. --- /dev/null
  1920. +++ b/drivers/hid/ithc/ithc-debug.h
  1921. @@ -0,0 +1,7 @@
  1922. +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
  1923. +
  1924. +void ithc_debug_init_module(void);
  1925. +void ithc_debug_exit_module(void);
  1926. +int ithc_debug_init_device(struct ithc *ithc);
  1927. +void ithc_log_regs(struct ithc *ithc);
  1928. +
  1929. diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c
  1930. index ffb8689b8a78..bf4eab33062b 100644
  1931. --- a/drivers/hid/ithc/ithc-dma.c
  1932. +++ b/drivers/hid/ithc/ithc-dma.c
  1933. @@ -173,10 +173,9 @@ int ithc_dma_rx_init(struct ithc *ithc, u8 channel)
  1934. mutex_init(&rx->mutex);
  1935. // Allocate buffers.
  1936. - u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes);
  1937. - unsigned int num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE;
  1938. + unsigned int num_pages = (ithc->max_rx_size + PAGE_SIZE - 1) / PAGE_SIZE;
  1939. pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n",
  1940. - NUM_RX_BUF, buf_size, num_pages);
  1941. + NUM_RX_BUF, ithc->max_rx_size, num_pages);
  1942. CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE);
  1943. for (unsigned int i = 0; i < NUM_RX_BUF; i++)
  1944. CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]);
  1945. @@ -214,10 +213,9 @@ int ithc_dma_tx_init(struct ithc *ithc)
  1946. mutex_init(&tx->mutex);
  1947. // Allocate buffers.
  1948. - tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes);
  1949. - unsigned int num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE;
  1950. + unsigned int num_pages = (ithc->max_tx_size + PAGE_SIZE - 1) / PAGE_SIZE;
  1951. pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n",
  1952. - tx->max_size, num_pages);
  1953. + ithc->max_tx_size, num_pages);
  1954. CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE);
  1955. CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf);
  1956. @@ -230,71 +228,6 @@ int ithc_dma_tx_init(struct ithc *ithc)
  1957. return 0;
  1958. }
  1959. -static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data,
  1960. - u8 channel, u8 buf)
  1961. -{
  1962. - if (buf >= NUM_RX_BUF) {
  1963. - pci_err(ithc->pci, "invalid dma ringbuffer index\n");
  1964. - return -EINVAL;
  1965. - }
  1966. - u32 len = data->data_size;
  1967. - struct ithc_dma_rx_header *hdr = data->addr;
  1968. - u8 *hiddata = (void *)(hdr + 1);
  1969. - if (len >= sizeof(*hdr) && hdr->code == DMA_RX_CODE_RESET) {
  1970. - // The THC sends a reset request when we need to reinitialize the device.
  1971. - // This usually only happens if we send an invalid command or put the device
  1972. - // in a bad state.
  1973. - CHECK(ithc_reset, ithc);
  1974. - } else if (len < sizeof(*hdr) || len != sizeof(*hdr) + hdr->data_size) {
  1975. - if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
  1976. - // When the CPU enters a low power state during DMA, we can get truncated
  1977. - // messages. For Surface devices, this will typically be a single touch
  1978. - // report that is only 1 byte, or a multitouch report that is 257 bytes.
  1979. - // See also ithc_set_active().
  1980. - } else {
  1981. - pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u, code %u, data size %u\n",
  1982. - channel, buf, len, hdr->code, hdr->data_size);
  1983. - print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
  1984. - hdr, min(len, 0x400u), 0);
  1985. - }
  1986. - } else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) {
  1987. - // Response to a 'get report descriptor' request.
  1988. - // The actual descriptor is preceded by 8 nul bytes.
  1989. - CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8);
  1990. - WRITE_ONCE(ithc->hid_parse_done, true);
  1991. - wake_up(&ithc->wait_hid_parse);
  1992. - } else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) {
  1993. - // Standard HID input report containing touch data.
  1994. - CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1);
  1995. - } else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) {
  1996. - // Response to a 'get feature' request.
  1997. - bool done = false;
  1998. - mutex_lock(&ithc->hid_get_feature_mutex);
  1999. - if (ithc->hid_get_feature_buf) {
  2000. - if (hdr->data_size < ithc->hid_get_feature_size)
  2001. - ithc->hid_get_feature_size = hdr->data_size;
  2002. - memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size);
  2003. - ithc->hid_get_feature_buf = NULL;
  2004. - done = true;
  2005. - }
  2006. - mutex_unlock(&ithc->hid_get_feature_mutex);
  2007. - if (done) {
  2008. - wake_up(&ithc->wait_hid_get_feature);
  2009. - } else {
  2010. - // Received data without a matching request, or the request already
  2011. - // timed out. (XXX What's the correct thing to do here?)
  2012. - CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT,
  2013. - hiddata, hdr->data_size, 1);
  2014. - }
  2015. - } else {
  2016. - pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n",
  2017. - channel, buf, len, hdr->code);
  2018. - print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
  2019. - hdr, min(len, 0x400u), 0);
  2020. - }
  2021. - return 0;
  2022. -}
  2023. -
  2024. static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel)
  2025. {
  2026. // Process all filled RX buffers from the ringbuffer.
  2027. @@ -316,7 +249,16 @@ static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel)
  2028. rx->num_received = ++n;
  2029. // process data
  2030. - CHECK(ithc_dma_rx_process_buf, ithc, b, channel, tail);
  2031. + struct ithc_data d;
  2032. + if ((ithc->use_quickspi ? ithc_quickspi_decode_rx : ithc_legacy_decode_rx)
  2033. + (ithc, b->addr, b->data_size, &d) < 0) {
  2034. + pci_err(ithc->pci, "invalid dma rx data! channel %u, buffer %u, size %u: %*ph\n",
  2035. + channel, tail, b->data_size, min((int)b->data_size, 64), b->addr);
  2036. + print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1,
  2037. + b->addr, min(b->data_size, 0x400u), 0);
  2038. + } else {
  2039. + ithc_hid_process_data(ithc, &d);
  2040. + }
  2041. // give the buffer back to the device
  2042. CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail);
  2043. @@ -331,31 +273,28 @@ int ithc_dma_rx(struct ithc *ithc, u8 channel)
  2044. return ret;
  2045. }
  2046. -static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data)
  2047. +static int ithc_dma_tx_unlocked(struct ithc *ithc, const struct ithc_data *data)
  2048. {
  2049. - ithc_set_active(ithc, 100 * USEC_PER_MSEC);
  2050. -
  2051. // Send a single TX buffer to the THC.
  2052. - pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize);
  2053. - struct ithc_dma_tx_header *hdr;
  2054. - // Data must be padded to next 4-byte boundary.
  2055. - u8 padding = datasize & 3 ? 4 - (datasize & 3) : 0;
  2056. - unsigned int fullsize = sizeof(*hdr) + datasize + padding;
  2057. - if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE)
  2058. - return -EINVAL;
  2059. + pci_dbg(ithc->pci, "dma tx data type %u, size %u\n", data->type, data->size);
  2060. CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
  2061. // Fill the TX buffer with header and data.
  2062. - ithc->dma_tx.buf.data_size = fullsize;
  2063. - hdr = ithc->dma_tx.buf.addr;
  2064. - hdr->code = cmdcode;
  2065. - hdr->data_size = datasize;
  2066. - u8 *dest = (void *)(hdr + 1);
  2067. - memcpy(dest, data, datasize);
  2068. - dest += datasize;
  2069. - for (u8 p = 0; p < padding; p++)
  2070. - *dest++ = 0;
  2071. + ssize_t sz;
  2072. + if (data->type == ITHC_DATA_RAW) {
  2073. + sz = min(data->size, ithc->max_tx_size);
  2074. + memcpy(ithc->dma_tx.buf.addr, data->data, sz);
  2075. + } else {
  2076. + sz = (ithc->use_quickspi ? ithc_quickspi_encode_tx : ithc_legacy_encode_tx)
  2077. + (ithc, data, ithc->dma_tx.buf.addr, ithc->max_tx_size);
  2078. + }
  2079. + ithc->dma_tx.buf.data_size = sz < 0 ? 0 : sz;
  2080. CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0);
  2081. + if (sz < 0) {
  2082. + pci_err(ithc->pci, "failed to encode tx data type %i, size %u, error %i\n",
  2083. + data->type, data->size, (int)sz);
  2084. + return -EINVAL;
  2085. + }
  2086. // Let the THC process the buffer.
  2087. bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND);
  2088. @@ -363,10 +302,10 @@ static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, vo
  2089. writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status);
  2090. return 0;
  2091. }
  2092. -int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data)
  2093. +int ithc_dma_tx(struct ithc *ithc, const struct ithc_data *data)
  2094. {
  2095. mutex_lock(&ithc->dma_tx.mutex);
  2096. - int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data);
  2097. + int ret = ithc_dma_tx_unlocked(ithc, data);
  2098. mutex_unlock(&ithc->dma_tx.mutex);
  2099. return ret;
  2100. }
  2101. diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h
  2102. index 93652e4476bf..1749a5819b3e 100644
  2103. --- a/drivers/hid/ithc/ithc-dma.h
  2104. +++ b/drivers/hid/ithc/ithc-dma.h
  2105. @@ -11,27 +11,6 @@ struct ithc_phys_region_desc {
  2106. u32 unused;
  2107. };
  2108. -#define DMA_RX_CODE_INPUT_REPORT 3
  2109. -#define DMA_RX_CODE_FEATURE_REPORT 4
  2110. -#define DMA_RX_CODE_REPORT_DESCRIPTOR 5
  2111. -#define DMA_RX_CODE_RESET 7
  2112. -
  2113. -struct ithc_dma_rx_header {
  2114. - u32 code;
  2115. - u32 data_size;
  2116. - u32 _unknown[14];
  2117. -};
  2118. -
  2119. -#define DMA_TX_CODE_SET_FEATURE 3
  2120. -#define DMA_TX_CODE_GET_FEATURE 4
  2121. -#define DMA_TX_CODE_OUTPUT_REPORT 5
  2122. -#define DMA_TX_CODE_GET_REPORT_DESCRIPTOR 7
  2123. -
  2124. -struct ithc_dma_tx_header {
  2125. - u32 code;
  2126. - u32 data_size;
  2127. -};
  2128. -
  2129. struct ithc_dma_prd_buffer {
  2130. void *addr;
  2131. dma_addr_t dma_addr;
  2132. @@ -49,7 +28,6 @@ struct ithc_dma_data_buffer {
  2133. struct ithc_dma_tx {
  2134. struct mutex mutex;
  2135. - u32 max_size;
  2136. struct ithc_dma_prd_buffer prds;
  2137. struct ithc_dma_data_buffer buf;
  2138. };
  2139. @@ -65,5 +43,5 @@ int ithc_dma_rx_init(struct ithc *ithc, u8 channel);
  2140. void ithc_dma_rx_enable(struct ithc *ithc, u8 channel);
  2141. int ithc_dma_tx_init(struct ithc *ithc);
  2142. int ithc_dma_rx(struct ithc *ithc, u8 channel);
  2143. -int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *cmddata);
  2144. +int ithc_dma_tx(struct ithc *ithc, const struct ithc_data *data);
  2145. diff --git a/drivers/hid/ithc/ithc-hid.c b/drivers/hid/ithc/ithc-hid.c
  2146. new file mode 100644
  2147. index 000000000000..065646ab499e
  2148. --- /dev/null
  2149. +++ b/drivers/hid/ithc/ithc-hid.c
  2150. @@ -0,0 +1,207 @@
  2151. +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
  2152. +
  2153. +#include "ithc.h"
  2154. +
  2155. +static int ithc_hid_start(struct hid_device *hdev) { return 0; }
  2156. +static void ithc_hid_stop(struct hid_device *hdev) { }
  2157. +static int ithc_hid_open(struct hid_device *hdev) { return 0; }
  2158. +static void ithc_hid_close(struct hid_device *hdev) { }
  2159. +
  2160. +static int ithc_hid_parse(struct hid_device *hdev)
  2161. +{
  2162. + struct ithc *ithc = hdev->driver_data;
  2163. + const struct ithc_data get_report_desc = { .type = ITHC_DATA_REPORT_DESCRIPTOR };
  2164. + WRITE_ONCE(ithc->hid.parse_done, false);
  2165. + for (int retries = 0; ; retries++) {
  2166. + ithc_log_regs(ithc);
  2167. + CHECK_RET(ithc_dma_tx, ithc, &get_report_desc);
  2168. + if (wait_event_timeout(ithc->hid.wait_parse, READ_ONCE(ithc->hid.parse_done),
  2169. + msecs_to_jiffies(200))) {
  2170. + ithc_log_regs(ithc);
  2171. + return 0;
  2172. + }
  2173. + if (retries > 5) {
  2174. + ithc_log_regs(ithc);
  2175. + pci_err(ithc->pci, "failed to read report descriptor\n");
  2176. + return -ETIMEDOUT;
  2177. + }
  2178. + pci_warn(ithc->pci, "failed to read report descriptor, retrying\n");
  2179. + }
  2180. +}
  2181. +
  2182. +static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf,
  2183. + size_t len, unsigned char rtype, int reqtype)
  2184. +{
  2185. + struct ithc *ithc = hdev->driver_data;
  2186. + if (!buf || !len)
  2187. + return -EINVAL;
  2188. +
  2189. + struct ithc_data d = { .size = len, .data = buf };
  2190. + buf[0] = reportnum;
  2191. +
  2192. + if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) {
  2193. + d.type = ITHC_DATA_OUTPUT_REPORT;
  2194. + CHECK_RET(ithc_dma_tx, ithc, &d);
  2195. + return 0;
  2196. + }
  2197. +
  2198. + if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) {
  2199. + d.type = ITHC_DATA_SET_FEATURE;
  2200. + CHECK_RET(ithc_dma_tx, ithc, &d);
  2201. + return 0;
  2202. + }
  2203. +
  2204. + if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) {
  2205. + d.type = ITHC_DATA_GET_FEATURE;
  2206. + d.data = &reportnum;
  2207. + d.size = 1;
  2208. +
  2209. + // Prepare for response.
  2210. + mutex_lock(&ithc->hid.get_feature_mutex);
  2211. + ithc->hid.get_feature_buf = buf;
  2212. + ithc->hid.get_feature_size = len;
  2213. + mutex_unlock(&ithc->hid.get_feature_mutex);
  2214. +
  2215. + // Transmit 'get feature' request.
  2216. + int r = CHECK(ithc_dma_tx, ithc, &d);
  2217. + if (!r) {
  2218. + r = wait_event_interruptible_timeout(ithc->hid.wait_get_feature,
  2219. + !ithc->hid.get_feature_buf, msecs_to_jiffies(1000));
  2220. + if (!r)
  2221. + r = -ETIMEDOUT;
  2222. + else if (r < 0)
  2223. + r = -EINTR;
  2224. + else
  2225. + r = 0;
  2226. + }
  2227. +
  2228. + // If everything went ok, the buffer has been filled with the response data.
  2229. + // Return the response size.
  2230. + mutex_lock(&ithc->hid.get_feature_mutex);
  2231. + ithc->hid.get_feature_buf = NULL;
  2232. + if (!r)
  2233. + r = ithc->hid.get_feature_size;
  2234. + mutex_unlock(&ithc->hid.get_feature_mutex);
  2235. + return r;
  2236. + }
  2237. +
  2238. + pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n",
  2239. + rtype, reqtype, reportnum);
  2240. + return -EINVAL;
  2241. +}
  2242. +
  2243. +// FIXME hid_input_report()/hid_parse_report() currently don't take const buffers, so we have to
  2244. +// cast away the const to avoid a compiler warning...
  2245. +#define NOCONST(x) ((void *)x)
  2246. +
  2247. +void ithc_hid_process_data(struct ithc *ithc, struct ithc_data *d)
  2248. +{
  2249. + WARN_ON(!ithc->hid.dev);
  2250. + if (!ithc->hid.dev)
  2251. + return;
  2252. +
  2253. + switch (d->type) {
  2254. +
  2255. + case ITHC_DATA_IGNORE:
  2256. + return;
  2257. +
  2258. + case ITHC_DATA_ERROR:
  2259. + CHECK(ithc_reset, ithc);
  2260. + return;
  2261. +
  2262. + case ITHC_DATA_REPORT_DESCRIPTOR:
  2263. + // Response to the report descriptor request sent by ithc_hid_parse().
  2264. + CHECK(hid_parse_report, ithc->hid.dev, NOCONST(d->data), d->size);
  2265. + WRITE_ONCE(ithc->hid.parse_done, true);
  2266. + wake_up(&ithc->hid.wait_parse);
  2267. + return;
  2268. +
  2269. + case ITHC_DATA_INPUT_REPORT:
  2270. + {
  2271. + // Standard HID input report.
  2272. + int r = hid_input_report(ithc->hid.dev, HID_INPUT_REPORT, NOCONST(d->data), d->size, 1);
  2273. + if (r < 0) {
  2274. + pci_warn(ithc->pci, "hid_input_report failed with %i (size %u, report ID 0x%02x)\n",
  2275. + r, d->size, d->size ? *(u8 *)d->data : 0);
  2276. + print_hex_dump_debug(DEVNAME " report: ", DUMP_PREFIX_OFFSET, 32, 1,
  2277. + d->data, min(d->size, 0x400u), 0);
  2278. + }
  2279. + return;
  2280. + }
  2281. +
  2282. + case ITHC_DATA_GET_FEATURE:
  2283. + {
  2284. + // Response to a 'get feature' request sent by ithc_hid_raw_request().
  2285. + bool done = false;
  2286. + mutex_lock(&ithc->hid.get_feature_mutex);
  2287. + if (ithc->hid.get_feature_buf) {
  2288. + if (d->size < ithc->hid.get_feature_size)
  2289. + ithc->hid.get_feature_size = d->size;
  2290. + memcpy(ithc->hid.get_feature_buf, d->data, ithc->hid.get_feature_size);
  2291. + ithc->hid.get_feature_buf = NULL;
  2292. + done = true;
  2293. + }
  2294. + mutex_unlock(&ithc->hid.get_feature_mutex);
  2295. + if (done) {
  2296. + wake_up(&ithc->hid.wait_get_feature);
  2297. + } else {
  2298. + // Received data without a matching request, or the request already
  2299. + // timed out. (XXX What's the correct thing to do here?)
  2300. + CHECK(hid_input_report, ithc->hid.dev, HID_FEATURE_REPORT,
  2301. + NOCONST(d->data), d->size, 1);
  2302. + }
  2303. + return;
  2304. + }
  2305. +
  2306. + default:
  2307. + pci_err(ithc->pci, "unhandled data type %i\n", d->type);
  2308. + return;
  2309. + }
  2310. +}
  2311. +
  2312. +static struct hid_ll_driver ithc_ll_driver = {
  2313. + .start = ithc_hid_start,
  2314. + .stop = ithc_hid_stop,
  2315. + .open = ithc_hid_open,
  2316. + .close = ithc_hid_close,
  2317. + .parse = ithc_hid_parse,
  2318. + .raw_request = ithc_hid_raw_request,
  2319. +};
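+// ithc_hid_init() below installs this as the hid_device's low-level transport
+// driver, so the HID core calls ithc_hid_parse() and ithc_hid_raw_request()
+// directly instead of going through a bus transport like USB or I2C.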
  2320. +
  2321. +static void ithc_hid_devres_release(struct device *dev, void *res)
  2322. +{
  2323. + struct hid_device **hidm = res;
  2324. + if (*hidm)
  2325. + hid_destroy_device(*hidm);
  2326. +}
  2327. +
  2328. +int ithc_hid_init(struct ithc *ithc)
  2329. +{
  2330. + struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof(*hidm), GFP_KERNEL);
  2331. + if (!hidm)
  2332. + return -ENOMEM;
  2333. + devres_add(&ithc->pci->dev, hidm);
  2334. + struct hid_device *hid = hid_allocate_device();
  2335. + if (IS_ERR(hid))
  2336. + return PTR_ERR(hid);
  2337. + *hidm = hid;
  2338. +
  2339. + strscpy(hid->name, DEVFULLNAME, sizeof(hid->name));
  2340. + strscpy(hid->phys, ithc->phys, sizeof(hid->phys));
  2341. + hid->ll_driver = &ithc_ll_driver;
  2342. + hid->bus = BUS_PCI;
  2343. + hid->vendor = ithc->vendor_id;
  2344. + hid->product = ithc->product_id;
  2345. + hid->version = 0x100;
  2346. + hid->dev.parent = &ithc->pci->dev;
  2347. + hid->driver_data = ithc;
  2348. +
  2349. + ithc->hid.dev = hid;
  2350. +
  2351. + init_waitqueue_head(&ithc->hid.wait_parse);
  2352. + init_waitqueue_head(&ithc->hid.wait_get_feature);
  2353. + mutex_init(&ithc->hid.get_feature_mutex);
  2354. +
  2355. + return 0;
  2356. +}
  2357. +
  2358. diff --git a/drivers/hid/ithc/ithc-hid.h b/drivers/hid/ithc/ithc-hid.h
  2359. new file mode 100644
  2360. index 000000000000..599eb912c8c8
  2361. --- /dev/null
  2362. +++ b/drivers/hid/ithc/ithc-hid.h
  2363. @@ -0,0 +1,32 @@
  2364. +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
  2365. +
  2366. +enum ithc_data_type {
  2367. + ITHC_DATA_IGNORE,
  2368. + ITHC_DATA_RAW,
  2369. + ITHC_DATA_ERROR,
  2370. + ITHC_DATA_REPORT_DESCRIPTOR,
  2371. + ITHC_DATA_INPUT_REPORT,
  2372. + ITHC_DATA_OUTPUT_REPORT,
  2373. + ITHC_DATA_GET_FEATURE,
  2374. + ITHC_DATA_SET_FEATURE,
  2375. +};
  2376. +
  2377. +struct ithc_data {
  2378. + enum ithc_data_type type;
  2379. + u32 size;
  2380. + const void *data;
  2381. +};
  2382. +
  2383. +struct ithc_hid {
  2384. + struct hid_device *dev;
  2385. + bool parse_done;
  2386. + wait_queue_head_t wait_parse;
  2387. + wait_queue_head_t wait_get_feature;
  2388. + struct mutex get_feature_mutex;
  2389. + void *get_feature_buf;
  2390. + size_t get_feature_size;
  2391. +};
  2392. +
  2393. +int ithc_hid_init(struct ithc *ithc);
  2394. +void ithc_hid_process_data(struct ithc *ithc, struct ithc_data *d);
  2395. +
  2396. diff --git a/drivers/hid/ithc/ithc-legacy.c b/drivers/hid/ithc/ithc-legacy.c
  2397. new file mode 100644
  2398. index 000000000000..5c1da11e3f1d
  2399. --- /dev/null
  2400. +++ b/drivers/hid/ithc/ithc-legacy.c
  2401. @@ -0,0 +1,252 @@
  2402. +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
  2403. +
  2404. +#include "ithc.h"
  2405. +
  2406. +#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6)
  2407. +#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6)
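+// Worked example: with the SP7+ value dma_buf_sizes = 0x000a00ff (see the struct
+// comment below), DEVCFG_DMA_RX_SIZE = ((0xff & 0x3fff) + 1) << 6 = 16384 bytes
+// and DEVCFG_DMA_TX_SIZE = (((0x000a00ff >> 14) & 0x3ff) + 1) << 6 = 41 << 6 = 2624 bytes.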
  2408. +
  2409. +#define DEVCFG_TOUCH_MASK 0x3f
  2410. +#define DEVCFG_TOUCH_ENABLE BIT(0)
  2411. +#define DEVCFG_TOUCH_PROP_DATA_ENABLE BIT(1)
  2412. +#define DEVCFG_TOUCH_HID_REPORT_ENABLE BIT(2)
  2413. +#define DEVCFG_TOUCH_POWER_STATE(x) (((x) & 7) << 3)
  2414. +#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6)
  2415. +
  2416. +#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC"
  2417. +
  2418. +#define DEVCFG_SPI_CLKDIV(x) (((x) >> 1) & 7)
  2419. +#define DEVCFG_SPI_CLKDIV_8 BIT(4)
  2420. +#define DEVCFG_SPI_SUPPORTS_SINGLE BIT(5)
  2421. +#define DEVCFG_SPI_SUPPORTS_DUAL BIT(6)
  2422. +#define DEVCFG_SPI_SUPPORTS_QUAD BIT(7)
  2423. +#define DEVCFG_SPI_MAX_TOUCH_POINTS(x) (((x) >> 8) & 0x3f)
  2424. +#define DEVCFG_SPI_MIN_RESET_TIME(x) (((x) >> 16) & 0xf)
  2425. +#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20) // TODO implement heartbeat
  2426. +#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
  2427. +#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
  2428. +#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
  2429. +#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
  2430. +#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7) // TODO use this
  2431. +#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31) // TODO use this?
  2432. +
  2433. +struct ithc_device_config { // (Example values are from an SP7+.)
  2434. + u32 irq_cause; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
  2435. + u32 error; // 04 = 0x00000000
  2436. + u32 dma_buf_sizes; // 08 = 0x000a00ff
  2437. + u32 touch_cfg; // 0c = 0x0000001c
  2438. + u32 touch_state; // 10 = 0x0000001c
  2439. + u32 device_id; // 14 = 0x43495424 = "$TIC"
  2440. + u32 spi_config; // 18 = 0xfda00a2e
  2441. + u16 vendor_id; // 1c = 0x045e = Microsoft Corp.
  2442. + u16 product_id; // 1e = 0x0c1a
  2443. + u32 revision; // 20 = 0x00000001
  2444. + u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139 (this value looks more random on newer devices)
  2445. + u32 command; // 28 = 0x00000000
  2446. + u32 fw_mode; // 2c = 0x00000000 (for fw update?)
  2447. + u32 _unknown_30; // 30 = 0x00000000
  2448. + u8 eds_minor_ver; // 34 = 0x5e
  2449. + u8 eds_major_ver; // 35 = 0x03
  2450. + u8 interface_rev; // 36 = 0x04
  2451. + u8 eu_kernel_ver; // 37 = 0x04
  2452. + u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
  2453. + u32 _unknown_3c; // 3c = 0x00000002
  2454. +};
  2455. +static_assert(sizeof(struct ithc_device_config) == 64);
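+// This struct mirrors the 64-byte configuration block that ithc_legacy_init()
+// reads from the device over SPI (SPI_CMD_CODE_READ at offset 0); the hex
+// values in the comments above are the byte offsets of each field.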
  2456. +
  2457. +#define RX_CODE_INPUT_REPORT 3
  2458. +#define RX_CODE_FEATURE_REPORT 4
  2459. +#define RX_CODE_REPORT_DESCRIPTOR 5
  2460. +#define RX_CODE_RESET 7
  2461. +
  2462. +#define TX_CODE_SET_FEATURE 3
  2463. +#define TX_CODE_GET_FEATURE 4
  2464. +#define TX_CODE_OUTPUT_REPORT 5
  2465. +#define TX_CODE_GET_REPORT_DESCRIPTOR 7
  2466. +
  2467. +static int ithc_set_device_enabled(struct ithc *ithc, bool enable)
  2468. +{
  2469. + u32 x = ithc->legacy_touch_cfg =
  2470. + (ithc->legacy_touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) |
  2471. + DEVCFG_TOUCH_HID_REPORT_ENABLE |
  2472. + (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_POWER_STATE(3) : 0);
  2473. + return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE,
  2474. + offsetof(struct ithc_device_config, touch_cfg), sizeof(x), &x);
  2475. +}
  2476. +
  2477. +int ithc_legacy_init(struct ithc *ithc)
  2478. +{
  2479. + // Since we don't yet know which SPI config the device wants, use default speed and mode
  2480. + // initially for reading config data.
  2481. + CHECK(ithc_set_spi_config, ithc, 2, true, SPI_MODE_SINGLE, SPI_MODE_SINGLE);
  2482. +
  2483. + // Setting the following bit seems to make reading the config more reliable.
  2484. + bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_31);
  2485. +
  2486. + // Setting this bit may be necessary on some ADL devices.
  2487. + switch (ithc->pci->device) {
  2488. + case PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1:
  2489. + case PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2:
  2490. + case PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1:
  2491. + case PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2:
  2492. + bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_5);
  2493. + break;
  2494. + }
  2495. +
  2496. + // Take the touch device out of reset.
  2497. + bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
  2498. + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0);
  2499. + for (int retries = 0; ; retries++) {
  2500. + ithc_log_regs(ithc);
  2501. + bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET);
  2502. + if (!waitl(ithc, &ithc->regs->irq_cause, 0xf, 2))
  2503. + break;
  2504. + if (retries > 5) {
  2505. + pci_err(ithc->pci, "failed to reset device, irq_cause = 0x%08x\n",
  2506. + readl(&ithc->regs->irq_cause));
  2507. + return -ETIMEDOUT;
  2508. + }
  2509. + pci_warn(ithc->pci, "invalid irq_cause, retrying reset\n");
  2510. + bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
  2511. + if (msleep_interruptible(1000))
  2512. + return -EINTR;
  2513. + }
  2514. + ithc_log_regs(ithc);
  2515. +
  2516. + CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_READY, DMA_RX_STATUS_READY);
  2517. +
  2518. + // Read configuration data.
  2519. + u32 spi_cfg;
  2520. + for (int retries = 0; ; retries++) {
  2521. + ithc_log_regs(ithc);
  2522. + struct ithc_device_config config = { 0 };
  2523. + CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof(config), &config);
  2524. + u32 *p = (void *)&config;
  2525. + pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
  2526. + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
  2527. + if (config.device_id == DEVCFG_DEVICE_ID_TIC) {
  2528. + spi_cfg = config.spi_config;
  2529. + ithc->vendor_id = config.vendor_id;
  2530. + ithc->product_id = config.product_id;
  2531. + ithc->product_rev = config.revision;
  2532. + ithc->max_rx_size = DEVCFG_DMA_RX_SIZE(config.dma_buf_sizes);
  2533. + ithc->max_tx_size = DEVCFG_DMA_TX_SIZE(config.dma_buf_sizes);
  2534. + ithc->legacy_touch_cfg = config.touch_cfg;
  2535. + ithc->have_config = true;
  2536. + break;
  2537. + }
  2538. + if (retries > 10) {
  2539. + pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n",
  2540. + config.device_id);
  2541. + return -EIO;
  2542. + }
  2543. + pci_warn(ithc->pci, "failed to read config, retrying\n");
  2544. + if (msleep_interruptible(100))
  2545. + return -EINTR;
  2546. + }
  2547. + ithc_log_regs(ithc);
  2548. +
  2549. + // Apply SPI config and enable touch device.
  2550. + CHECK_RET(ithc_set_spi_config, ithc,
  2551. + DEVCFG_SPI_CLKDIV(spi_cfg), (spi_cfg & DEVCFG_SPI_CLKDIV_8) != 0,
  2552. + spi_cfg & DEVCFG_SPI_SUPPORTS_QUAD ? SPI_MODE_QUAD :
  2553. + spi_cfg & DEVCFG_SPI_SUPPORTS_DUAL ? SPI_MODE_DUAL :
  2554. + SPI_MODE_SINGLE,
  2555. + SPI_MODE_SINGLE);
  2556. + CHECK_RET(ithc_set_device_enabled, ithc, true);
  2557. + ithc_log_regs(ithc);
  2558. + return 0;
  2559. +}
  2560. +
  2561. +void ithc_legacy_exit(struct ithc *ithc)
  2562. +{
  2563. + CHECK(ithc_set_device_enabled, ithc, false);
  2564. +}
  2565. +
  2566. +int ithc_legacy_decode_rx(struct ithc *ithc, const void *src, size_t len, struct ithc_data *dest)
  2567. +{
  2568. + const struct {
  2569. + u32 code;
  2570. + u32 data_size;
  2571. + u32 _unknown[14];
  2572. + } *hdr = src;
  2573. +
  2574. + if (len < sizeof(*hdr))
  2575. + return -ENODATA;
  2576. + // Note: RX data is not padded, even though TX data must be padded.
  2577. + if (len != sizeof(*hdr) + hdr->data_size)
  2578. + return -EMSGSIZE;
  2579. +
  2580. + dest->data = hdr + 1;
  2581. + dest->size = hdr->data_size;
  2582. +
  2583. + switch (hdr->code) {
  2584. + case RX_CODE_RESET:
  2585. + // The THC sends a reset request when we need to reinitialize the device.
  2586. + // This usually only happens if we send an invalid command or put the device
  2587. + // in a bad state.
  2588. + dest->type = ITHC_DATA_ERROR;
  2589. + return 0;
  2590. + case RX_CODE_REPORT_DESCRIPTOR:
  2591. + // The descriptor is preceded by 8 nul bytes.
  2592. + if (hdr->data_size < 8)
  2593. + return -ENODATA;
  2594. + dest->type = ITHC_DATA_REPORT_DESCRIPTOR;
  2595. + dest->data = (char *)(hdr + 1) + 8;
  2596. + dest->size = hdr->data_size - 8;
  2597. + return 0;
  2598. + case RX_CODE_INPUT_REPORT:
  2599. + dest->type = ITHC_DATA_INPUT_REPORT;
  2600. + return 0;
  2601. + case RX_CODE_FEATURE_REPORT:
  2602. + dest->type = ITHC_DATA_GET_FEATURE;
  2603. + return 0;
  2604. + default:
  2605. + return -EINVAL;
  2606. + }
  2607. +}
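+// Layout sketch (derived from the code above): every legacy RX message starts
+// with a 64-byte header (code, data_size, 14 unknown words). A
+// RX_CODE_REPORT_DESCRIPTOR message with data_size N therefore carries the
+// descriptor itself at byte offset 64 + 8 = 72, with length N - 8.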
  2608. +
  2609. +ssize_t ithc_legacy_encode_tx(struct ithc *ithc, const struct ithc_data *src, void *dest,
  2610. + size_t maxlen)
  2611. +{
  2612. + struct {
  2613. + u32 code;
  2614. + u32 data_size;
  2615. + } *hdr = dest;
  2616. +
  2617. + size_t src_size = src->size;
  2618. + const void *src_data = src->data;
  2619. + const u64 get_report_desc_data = 0;
  2620. + u32 code;
  2621. +
  2622. + switch (src->type) {
  2623. + case ITHC_DATA_SET_FEATURE:
  2624. + code = TX_CODE_SET_FEATURE;
  2625. + break;
  2626. + case ITHC_DATA_GET_FEATURE:
  2627. + code = TX_CODE_GET_FEATURE;
  2628. + break;
  2629. + case ITHC_DATA_OUTPUT_REPORT:
  2630. + code = TX_CODE_OUTPUT_REPORT;
  2631. + break;
  2632. + case ITHC_DATA_REPORT_DESCRIPTOR:
  2633. + code = TX_CODE_GET_REPORT_DESCRIPTOR;
  2634. + src_size = sizeof(get_report_desc_data);
  2635. + src_data = &get_report_desc_data;
  2636. + break;
  2637. + default:
  2638. + return -EINVAL;
  2639. + }
  2640. +
  2641. + // Data must be padded to next 4-byte boundary.
  2642. + size_t padded = round_up(src_size, 4);
  2643. + if (sizeof(*hdr) + padded > maxlen)
  2644. + return -EOVERFLOW;
  2645. +
  2646. + // Fill the TX buffer with header and data.
  2647. + hdr->code = code;
  2648. + hdr->data_size = src_size;
  2649. + memcpy_and_pad(hdr + 1, padded, src_data, src_size, 0);
  2650. +
  2651. + return sizeof(*hdr) + padded;
  2652. +}
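+// Worked example: an ITHC_DATA_SET_FEATURE transfer with a 5-byte report is
+// encoded as the 8-byte header { code = TX_CODE_SET_FEATURE, data_size = 5 }
+// followed by the report zero-padded to 8 bytes, so the function returns 16.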
  2653. +
  2654. diff --git a/drivers/hid/ithc/ithc-legacy.h b/drivers/hid/ithc/ithc-legacy.h
  2655. new file mode 100644
  2656. index 000000000000..28d692462072
  2657. --- /dev/null
  2658. +++ b/drivers/hid/ithc/ithc-legacy.h
  2659. @@ -0,0 +1,8 @@
  2660. +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
  2661. +
  2662. +int ithc_legacy_init(struct ithc *ithc);
  2663. +void ithc_legacy_exit(struct ithc *ithc);
  2664. +int ithc_legacy_decode_rx(struct ithc *ithc, const void *src, size_t len, struct ithc_data *dest);
  2665. +ssize_t ithc_legacy_encode_tx(struct ithc *ithc, const struct ithc_data *src, void *dest,
  2666. + size_t maxlen);
  2667. +
  2668. diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c
  2669. index 87ed4aa70fda..2acf02e41d40 100644
  2670. --- a/drivers/hid/ithc/ithc-main.c
  2671. +++ b/drivers/hid/ithc/ithc-main.c
  2672. @@ -5,28 +5,6 @@
  2673. MODULE_DESCRIPTION("Intel Touch Host Controller driver");
  2674. MODULE_LICENSE("Dual BSD/GPL");
  2675. -// Lakefield
  2676. -#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0
  2677. -#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1
  2678. -// Tiger Lake
  2679. -#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0
  2680. -#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1
  2681. -#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 0x43d0
  2682. -#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1
  2683. -// Alder Lake
  2684. -#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8
  2685. -#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9
  2686. -#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0
  2687. -#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1
  2688. -#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0
  2689. -#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1
  2690. -// Raptor Lake
  2691. -#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58
  2692. -#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59
  2693. -// Meteor Lake
  2694. -#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48
  2695. -#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 0x7e4a
  2696. -
  2697. static const struct pci_device_id ithc_pci_tbl[] = {
  2698. { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) },
  2699. { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) },
  2700. @@ -66,15 +44,13 @@ static bool ithc_use_rx1 = true;
  2701. module_param_named(rx1, ithc_use_rx1, bool, 0);
  2702. MODULE_PARM_DESC(rx1, "Use DMA RX channel 1");
  2703. -// Values below 250 seem to work well on the SP7+. If this is set too high, you may observe cursor stuttering.
  2704. -static int ithc_dma_latency_us = 200;
  2705. -module_param_named(dma_latency_us, ithc_dma_latency_us, int, 0);
  2706. -MODULE_PARM_DESC(dma_latency_us, "Determines the CPU latency QoS value for DMA transfers (in microseconds), -1 to disable latency QoS");
  2707. +static int ithc_active_ltr_us = -1;
  2708. +module_param_named(activeltr, ithc_active_ltr_us, int, 0);
  2709. +MODULE_PARM_DESC(activeltr, "Active LTR value override (in microseconds)");
  2710. -// Values above 1700 seem to work well on the SP7+. If this is set too low, you may observe cursor stuttering.
  2711. -static unsigned int ithc_dma_early_us = 2000;
  2712. -module_param_named(dma_early_us, ithc_dma_early_us, uint, 0);
  2713. -MODULE_PARM_DESC(dma_early_us, "Determines how early the CPU latency QoS value is applied before the next expected IRQ (in microseconds)");
  2714. +static int ithc_idle_ltr_us = -1;
  2715. +module_param_named(idleltr, ithc_idle_ltr_us, int, 0);
  2716. +MODULE_PARM_DESC(idleltr, "Idle LTR value override (in microseconds)");
  2717. static bool ithc_log_regs_enabled = false;
  2718. module_param_named(logregs, ithc_log_regs_enabled, bool, 0);
  2719. @@ -82,44 +58,30 @@ MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)");
  2720. // Sysfs attributes
  2721. -static bool ithc_is_config_valid(struct ithc *ithc)
  2722. -{
  2723. - return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC;
  2724. -}
  2725. -
  2726. static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
  2727. {
  2728. struct ithc *ithc = dev_get_drvdata(dev);
  2729. - if (!ithc || !ithc_is_config_valid(ithc))
  2730. + if (!ithc || !ithc->have_config)
  2731. return -ENODEV;
  2732. - return sprintf(buf, "0x%04x", ithc->config.vendor_id);
  2733. + return sprintf(buf, "0x%04x", ithc->vendor_id);
  2734. }
  2735. static DEVICE_ATTR_RO(vendor);
  2736. static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf)
  2737. {
  2738. struct ithc *ithc = dev_get_drvdata(dev);
  2739. - if (!ithc || !ithc_is_config_valid(ithc))
  2740. + if (!ithc || !ithc->have_config)
  2741. return -ENODEV;
  2742. - return sprintf(buf, "0x%04x", ithc->config.product_id);
  2743. + return sprintf(buf, "0x%04x", ithc->product_id);
  2744. }
  2745. static DEVICE_ATTR_RO(product);
  2746. static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf)
  2747. {
  2748. struct ithc *ithc = dev_get_drvdata(dev);
  2749. - if (!ithc || !ithc_is_config_valid(ithc))
  2750. + if (!ithc || !ithc->have_config)
  2751. return -ENODEV;
  2752. - return sprintf(buf, "%u", ithc->config.revision);
  2753. + return sprintf(buf, "%u", ithc->product_rev);
  2754. }
  2755. static DEVICE_ATTR_RO(revision);
  2756. -static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf)
  2757. -{
  2758. - struct ithc *ithc = dev_get_drvdata(dev);
  2759. - if (!ithc || !ithc_is_config_valid(ithc))
  2760. - return -ENODEV;
  2761. - u32 v = ithc->config.fw_version;
  2762. - return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff);
  2763. -}
  2764. -static DEVICE_ATTR_RO(fw_version);
  2765. static const struct attribute_group *ithc_attribute_groups[] = {
  2766. &(const struct attribute_group){
  2767. @@ -128,185 +90,26 @@ static const struct attribute_group *ithc_attribute_groups[] = {
  2768. &dev_attr_vendor.attr,
  2769. &dev_attr_product.attr,
  2770. &dev_attr_revision.attr,
  2771. - &dev_attr_fw_version.attr,
  2772. NULL
  2773. },
  2774. },
  2775. NULL
  2776. };
  2777. -// HID setup
  2778. -
  2779. -static int ithc_hid_start(struct hid_device *hdev) { return 0; }
  2780. -static void ithc_hid_stop(struct hid_device *hdev) { }
  2781. -static int ithc_hid_open(struct hid_device *hdev) { return 0; }
  2782. -static void ithc_hid_close(struct hid_device *hdev) { }
  2783. -
  2784. -static int ithc_hid_parse(struct hid_device *hdev)
  2785. -{
  2786. - struct ithc *ithc = hdev->driver_data;
  2787. - u64 val = 0;
  2788. - WRITE_ONCE(ithc->hid_parse_done, false);
  2789. - for (int retries = 0; ; retries++) {
  2790. - CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof(val), &val);
  2791. - if (wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done),
  2792. - msecs_to_jiffies(200)))
  2793. - return 0;
  2794. - if (retries > 5) {
  2795. - pci_err(ithc->pci, "failed to read report descriptor\n");
  2796. - return -ETIMEDOUT;
  2797. - }
  2798. - pci_warn(ithc->pci, "failed to read report descriptor, retrying\n");
  2799. - }
  2800. -}
  2801. -
  2802. -static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf,
  2803. - size_t len, unsigned char rtype, int reqtype)
  2804. -{
  2805. - struct ithc *ithc = hdev->driver_data;
  2806. - if (!buf || !len)
  2807. - return -EINVAL;
  2808. - u32 code;
  2809. - if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) {
  2810. - code = DMA_TX_CODE_OUTPUT_REPORT;
  2811. - } else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) {
  2812. - code = DMA_TX_CODE_SET_FEATURE;
  2813. - } else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) {
  2814. - code = DMA_TX_CODE_GET_FEATURE;
  2815. - } else {
  2816. - pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n",
  2817. - rtype, reqtype, reportnum);
  2818. - return -EINVAL;
  2819. - }
  2820. - buf[0] = reportnum;
  2821. -
  2822. - if (reqtype == HID_REQ_GET_REPORT) {
  2823. - // Prepare for response.
  2824. - mutex_lock(&ithc->hid_get_feature_mutex);
  2825. - ithc->hid_get_feature_buf = buf;
  2826. - ithc->hid_get_feature_size = len;
  2827. - mutex_unlock(&ithc->hid_get_feature_mutex);
  2828. -
  2829. - // Transmit 'get feature' request.
  2830. - int r = CHECK(ithc_dma_tx, ithc, code, 1, buf);
  2831. - if (!r) {
  2832. - r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature,
  2833. - !ithc->hid_get_feature_buf, msecs_to_jiffies(1000));
  2834. - if (!r)
  2835. - r = -ETIMEDOUT;
  2836. - else if (r < 0)
  2837. - r = -EINTR;
  2838. - else
  2839. - r = 0;
  2840. - }
  2841. -
  2842. - // If everything went ok, the buffer has been filled with the response data.
  2843. - // Return the response size.
  2844. - mutex_lock(&ithc->hid_get_feature_mutex);
  2845. - ithc->hid_get_feature_buf = NULL;
  2846. - if (!r)
  2847. - r = ithc->hid_get_feature_size;
  2848. - mutex_unlock(&ithc->hid_get_feature_mutex);
  2849. - return r;
  2850. - }
  2851. -
  2852. - // 'Set feature', or 'output report'. These don't have a response.
  2853. - CHECK_RET(ithc_dma_tx, ithc, code, len, buf);
  2854. - return 0;
  2855. -}
  2856. -
  2857. -static struct hid_ll_driver ithc_ll_driver = {
  2858. - .start = ithc_hid_start,
  2859. - .stop = ithc_hid_stop,
  2860. - .open = ithc_hid_open,
  2861. - .close = ithc_hid_close,
  2862. - .parse = ithc_hid_parse,
  2863. - .raw_request = ithc_hid_raw_request,
  2864. -};
  2865. -
  2866. -static void ithc_hid_devres_release(struct device *dev, void *res)
  2867. -{
  2868. - struct hid_device **hidm = res;
  2869. - if (*hidm)
  2870. - hid_destroy_device(*hidm);
  2871. -}
  2872. -
  2873. -static int ithc_hid_init(struct ithc *ithc)
  2874. -{
  2875. - struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof(*hidm), GFP_KERNEL);
  2876. - if (!hidm)
  2877. - return -ENOMEM;
  2878. - devres_add(&ithc->pci->dev, hidm);
  2879. - struct hid_device *hid = hid_allocate_device();
  2880. - if (IS_ERR(hid))
  2881. - return PTR_ERR(hid);
  2882. - *hidm = hid;
  2883. -
  2884. - strscpy(hid->name, DEVFULLNAME, sizeof(hid->name));
  2885. - strscpy(hid->phys, ithc->phys, sizeof(hid->phys));
  2886. - hid->ll_driver = &ithc_ll_driver;
  2887. - hid->bus = BUS_PCI;
  2888. - hid->vendor = ithc->config.vendor_id;
  2889. - hid->product = ithc->config.product_id;
  2890. - hid->version = 0x100;
  2891. - hid->dev.parent = &ithc->pci->dev;
  2892. - hid->driver_data = ithc;
  2893. -
  2894. - ithc->hid = hid;
  2895. - return 0;
  2896. -}
  2897. -
  2898. // Interrupts/polling
  2899. -static enum hrtimer_restart ithc_activity_start_timer_callback(struct hrtimer *t)
  2900. -{
  2901. - struct ithc *ithc = container_of(t, struct ithc, activity_start_timer);
  2902. - ithc_set_active(ithc, ithc_dma_early_us * 2 + USEC_PER_MSEC);
  2903. - return HRTIMER_NORESTART;
  2904. -}
  2905. -
  2906. -static enum hrtimer_restart ithc_activity_end_timer_callback(struct hrtimer *t)
  2907. -{
  2908. - struct ithc *ithc = container_of(t, struct ithc, activity_end_timer);
  2909. - cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
  2910. - return HRTIMER_NORESTART;
  2911. -}
  2912. -
  2913. -void ithc_set_active(struct ithc *ithc, unsigned int duration_us)
  2914. -{
  2915. - if (ithc_dma_latency_us < 0)
  2916. - return;
  2917. - // When CPU usage is very low, the CPU can enter various low power states (C2-C10).
  2918. - // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_RX_TIMEOUT will be
  2919. - // set when this happens. The amount of truncated messages can become very high, resulting
  2920. - // in user-visible effects (laggy/stuttering cursor). To avoid this, we use a CPU latency
  2921. - // QoS request to prevent the CPU from entering low power states during touch interactions.
  2922. - cpu_latency_qos_update_request(&ithc->activity_qos, ithc_dma_latency_us);
  2923. - hrtimer_start_range_ns(&ithc->activity_end_timer,
  2924. - ns_to_ktime(duration_us * NSEC_PER_USEC), duration_us * NSEC_PER_USEC, HRTIMER_MODE_REL);
  2925. -}
  2926. -
  2927. -static int ithc_set_device_enabled(struct ithc *ithc, bool enable)
  2928. -{
  2929. - u32 x = ithc->config.touch_cfg =
  2930. - (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2 |
  2931. - (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0);
  2932. - return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE,
  2933. - offsetof(struct ithc_device_config, touch_cfg), sizeof(x), &x);
  2934. -}
  2935. -
  2936. static void ithc_disable_interrupts(struct ithc *ithc)
  2937. {
  2938. writel(0, &ithc->regs->error_control);
  2939. bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0);
  2940. - bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
  2941. - bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0);
  2942. + bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_READY | DMA_RX_CONTROL_IRQ_DATA, 0);
  2943. + bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_READY | DMA_RX_CONTROL_IRQ_DATA, 0);
  2944. bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0);
  2945. }
  2946. static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned int channel)
  2947. {
  2948. - writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA,
  2949. + writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_READY | DMA_RX_STATUS_HAVE_DATA,
  2950. &ithc->regs->dma_rx[channel].status);
  2951. }
  2952. @@ -325,39 +128,22 @@ static void ithc_process(struct ithc *ithc)
  2953. {
  2954. ithc_log_regs(ithc);
  2955. + // The THC automatically transitions from LTR idle to active at the start of a DMA transfer.
  2956. + // It does not appear to automatically go back to idle, so we switch it back here, since
  2957. + // the DMA transfer should be complete.
  2958. + ithc_set_ltr_idle(ithc);
  2959. +
  2960. bool rx0 = ithc_use_rx0 && (readl(&ithc->regs->dma_rx[0].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
  2961. bool rx1 = ithc_use_rx1 && (readl(&ithc->regs->dma_rx[1].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
  2962. - // Track time between DMA rx transfers, so we can try to predict when we need to enable CPU latency QoS for the next transfer
  2963. - ktime_t t = ktime_get();
  2964. - ktime_t dt = ktime_sub(t, ithc->last_rx_time);
  2965. - if (rx0 || rx1) {
  2966. - ithc->last_rx_time = t;
  2967. - if (dt > ms_to_ktime(100)) {
  2968. - ithc->cur_rx_seq_count = 0;
  2969. - ithc->cur_rx_seq_errors = 0;
  2970. - }
  2971. - ithc->cur_rx_seq_count++;
  2972. - if (!ithc_use_polling && ithc_dma_latency_us >= 0) {
  2973. - // Disable QoS, since the DMA transfer has completed (we re-enable it after a delay below)
  2974. - cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
  2975. - hrtimer_try_to_cancel(&ithc->activity_end_timer);
  2976. - }
  2977. - }
  2978. -
  2979. // Read and clear error bits
  2980. u32 err = readl(&ithc->regs->error_flags);
  2981. if (err) {
  2982. writel(err, &ithc->regs->error_flags);
  2983. if (err & ~ERROR_FLAG_DMA_RX_TIMEOUT)
  2984. pci_err(ithc->pci, "error flags: 0x%08x\n", err);
  2985. - if (err & ERROR_FLAG_DMA_RX_TIMEOUT) {
  2986. - // Only log an error if we see a significant number of these errors.
  2987. - ithc->cur_rx_seq_errors++;
  2988. - if (ithc->cur_rx_seq_errors && ithc->cur_rx_seq_errors % 50 == 0 && ithc->cur_rx_seq_errors > ithc->cur_rx_seq_count / 10)
  2989. - pci_err(ithc->pci, "High number of DMA RX timeouts/errors (%u/%u, dt=%lldus). Try adjusting dma_early_us and/or dma_latency_us.\n",
  2990. - ithc->cur_rx_seq_errors, ithc->cur_rx_seq_count, ktime_to_us(dt));
  2991. - }
  2992. + if (err & ERROR_FLAG_DMA_RX_TIMEOUT)
  2993. + pci_err(ithc->pci, "DMA RX timeout/error (try decreasing activeltr/idleltr if this happens frequently)\n");
  2994. }
  2995. // Process DMA rx
  2996. @@ -372,12 +158,6 @@ static void ithc_process(struct ithc *ithc)
  2997. ithc_dma_rx(ithc, 1);
  2998. }
  2999. - // Start timer to re-enable QoS for next rx, but only if we've seen an ERROR_FLAG_DMA_RX_TIMEOUT
  3000. - if ((rx0 || rx1) && !ithc_use_polling && ithc_dma_latency_us >= 0 && ithc->cur_rx_seq_errors > 0) {
  3001. - ktime_t expires = ktime_add(t, ktime_sub_us(dt, ithc_dma_early_us));
  3002. - hrtimer_start_range_ns(&ithc->activity_start_timer, expires, 10 * NSEC_PER_USEC, HRTIMER_MODE_ABS);
  3003. - }
  3004. -
  3005. ithc_log_regs(ithc);
  3006. }
  3007. @@ -403,12 +183,8 @@ static int ithc_poll_thread(void *arg)
  3008. ithc_process(ithc);
  3009. // Decrease polling interval to 20ms if we received data, otherwise slowly
  3010. // increase it up to 200ms.
  3011. - if (n != ithc->dma_rx[1].num_received) {
  3012. - ithc_set_active(ithc, 100 * USEC_PER_MSEC);
  3013. - sleep = 20;
  3014. - } else {
  3015. - sleep = min(200u, sleep + (sleep >> 4) + 1);
  3016. - }
  3017. + sleep = n != ithc->dma_rx[1].num_received ? 20
  3018. + : min(200u, sleep + (sleep >> 4) + 1);
  3019. msleep_interruptible(sleep);
  3020. }
  3021. return 0;
  3022. @@ -431,73 +207,44 @@ static void ithc_disable(struct ithc *ithc)
  3023. static int ithc_init_device(struct ithc *ithc)
  3024. {
  3025. + // Read ACPI config for QuickSPI mode
  3026. + struct ithc_acpi_config cfg = { 0 };
  3027. + CHECK_RET(ithc_read_acpi_config, ithc, &cfg);
  3028. + if (!cfg.has_config)
  3029. + pci_info(ithc->pci, "no ACPI config, using legacy mode\n");
  3030. + else
  3031. + ithc_print_acpi_config(ithc, &cfg);
  3032. + ithc->use_quickspi = cfg.has_config;
  3033. +
  3034. + // Shut down device
  3035. ithc_log_regs(ithc);
  3036. bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0;
  3037. ithc_disable(ithc);
  3038. CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY);
  3039. -
  3040. - // Since we don't yet know which SPI config the device wants, use default speed and mode
  3041. - // initially for reading config data.
  3042. - ithc_set_spi_config(ithc, 10, 0);
  3043. -
  3044. - // Setting the following bit seems to make reading the config more reliable.
  3045. - bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000);
  3046. + ithc_log_regs(ithc);
  3047. // If the device was previously enabled, wait a bit to make sure it's fully shut down.
  3048. if (was_enabled)
  3049. if (msleep_interruptible(100))
  3050. return -EINTR;
  3051. - // Take the touch device out of reset.
  3052. - bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
  3053. - CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0);
  3054. - for (int retries = 0; ; retries++) {
  3055. - ithc_log_regs(ithc);
  3056. - bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET);
  3057. - if (!waitl(ithc, &ithc->regs->state, 0xf, 2))
  3058. - break;
  3059. - if (retries > 5) {
  3060. - pci_err(ithc->pci, "failed to reset device, state = 0x%08x\n", readl(&ithc->regs->state));
  3061. - return -ETIMEDOUT;
  3062. - }
  3063. - pci_warn(ithc->pci, "invalid state, retrying reset\n");
  3064. - bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0);
  3065. - if (msleep_interruptible(1000))
  3066. - return -EINTR;
  3067. - }
  3068. - ithc_log_regs(ithc);
  3069. + // Set Latency Tolerance Reporting config. The device will automatically
  3070. + // apply these values depending on whether it is active or idle.
  3071. + // If the active value is too high, DMA buffer data can become truncated.
  3072. + // By default, we set the active LTR value to 100 us and the idle value to 100 ms.
  3073. + u64 active_ltr_ns = ithc_active_ltr_us >= 0 ? (u64)ithc_active_ltr_us * 1000
  3074. + : cfg.has_config && cfg.has_active_ltr ? (u64)cfg.active_ltr << 10
  3075. + : 100 * 1000;
  3076. + u64 idle_ltr_ns = ithc_idle_ltr_us >= 0 ? (u64)ithc_idle_ltr_us * 1000
  3077. + : cfg.has_config && cfg.has_idle_ltr ? (u64)cfg.idle_ltr << 10
  3078. + : 100 * 1000 * 1000;
  3079. + ithc_set_ltr_config(ithc, active_ltr_ns, idle_ltr_ns);
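+ // (The << 10 above treats the ACPI-provided LTR values as multiples of 1024 ns,
+ // so e.g. an ACPI value of 500 becomes 512000 ns ~= 512 us, whereas the
+ // activeltr/idleltr module parameters are given directly in microseconds.)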
  3080. +
  3081. + if (ithc->use_quickspi)
  3082. + CHECK_RET(ithc_quickspi_init, ithc, &cfg);
  3083. + else
  3084. + CHECK_RET(ithc_legacy_init, ithc);
  3085. - // Waiting for the following status bit makes reading config much more reliable,
  3086. - // however the official driver does not seem to do this...
  3087. - CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4);
  3088. -
  3089. - // Read configuration data.
  3090. - for (int retries = 0; ; retries++) {
  3091. - ithc_log_regs(ithc);
  3092. - memset(&ithc->config, 0, sizeof(ithc->config));
  3093. - CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof(ithc->config), &ithc->config);
  3094. - u32 *p = (void *)&ithc->config;
  3095. - pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
  3096. - p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
  3097. - if (ithc_is_config_valid(ithc))
  3098. - break;
  3099. - if (retries > 10) {
  3100. - pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n",
  3101. - ithc->config.device_id);
  3102. - return -EIO;
  3103. - }
  3104. - pci_warn(ithc->pci, "failed to read config, retrying\n");
  3105. - if (msleep_interruptible(100))
  3106. - return -EINTR;
  3107. - }
  3108. - ithc_log_regs(ithc);
  3109. -
  3110. - // Apply SPI config and enable touch device.
  3111. - CHECK_RET(ithc_set_spi_config, ithc,
  3112. - DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config),
  3113. - DEVCFG_SPI_MODE(ithc->config.spi_config));
  3114. - CHECK_RET(ithc_set_device_enabled, ithc, true);
  3115. - ithc_log_regs(ithc);
  3116. return 0;
  3117. }
  3118. @@ -527,11 +274,11 @@ static void ithc_stop(void *res)
  3119. CHECK(kthread_stop, ithc->poll_thread);
  3120. if (ithc->irq >= 0)
  3121. disable_irq(ithc->irq);
  3122. - CHECK(ithc_set_device_enabled, ithc, false);
  3123. + if (ithc->use_quickspi)
  3124. + ithc_quickspi_exit(ithc);
  3125. + else
  3126. + ithc_legacy_exit(ithc);
  3127. ithc_disable(ithc);
  3128. - hrtimer_cancel(&ithc->activity_start_timer);
  3129. - hrtimer_cancel(&ithc->activity_end_timer);
  3130. - cpu_latency_qos_remove_request(&ithc->activity_qos);
  3131. // Clear DMA config.
  3132. for (unsigned int i = 0; i < 2; i++) {
  3133. @@ -570,9 +317,6 @@ static int ithc_start(struct pci_dev *pci)
  3134. ithc->irq = -1;
  3135. ithc->pci = pci;
  3136. snprintf(ithc->phys, sizeof(ithc->phys), "pci-%s/" DEVNAME, pci_name(pci));
  3137. - init_waitqueue_head(&ithc->wait_hid_parse);
  3138. - init_waitqueue_head(&ithc->wait_hid_get_feature);
  3139. - mutex_init(&ithc->hid_get_feature_mutex);
  3140. pci_set_drvdata(pci, ithc);
  3141. CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci);
  3142. if (ithc_log_regs_enabled)
  3143. @@ -596,6 +340,9 @@ static int ithc_start(struct pci_dev *pci)
  3144. // Initialize THC and touch device.
  3145. CHECK_RET(ithc_init_device, ithc);
  3146. +
  3147. + // Initialize HID and DMA.
  3148. + CHECK_RET(ithc_hid_init, ithc);
  3149. CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups);
  3150. if (ithc_use_rx0)
  3151. CHECK_RET(ithc_dma_rx_init, ithc, 0);
  3152. @@ -603,18 +350,10 @@ static int ithc_start(struct pci_dev *pci)
  3153. CHECK_RET(ithc_dma_rx_init, ithc, 1);
  3154. CHECK_RET(ithc_dma_tx_init, ithc);
  3155. - cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE);
  3156. - hrtimer_init(&ithc->activity_start_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
  3157. - ithc->activity_start_timer.function = ithc_activity_start_timer_callback;
  3158. - hrtimer_init(&ithc->activity_end_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  3159. - ithc->activity_end_timer.function = ithc_activity_end_timer_callback;
  3160. -
  3161. // Add ithc_stop() callback AFTER setting up DMA buffers, so that polling/irqs/DMA are
  3162. // disabled BEFORE the buffers are freed.
  3163. CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc);
  3164. - CHECK_RET(ithc_hid_init, ithc);
  3165. -
  3166. // Start polling/IRQ.
  3167. if (ithc_use_polling) {
  3168. pci_info(pci, "using polling instead of irq\n");
  3169. @@ -637,9 +376,11 @@ static int ithc_start(struct pci_dev *pci)
  3170. // hid_add_device() can only be called after irq/polling is started and DMA is enabled,
  3171. // because it calls ithc_hid_parse() which reads the report descriptor via DMA.
  3172. - CHECK_RET(hid_add_device, ithc->hid);
  3173. + CHECK_RET(hid_add_device, ithc->hid.dev);
  3174. +
  3175. + CHECK(ithc_debug_init_device, ithc);
  3176. - CHECK(ithc_debug_init, ithc);
  3177. + ithc_set_ltr_idle(ithc);
  3178. pci_dbg(pci, "started\n");
  3179. return 0;
  3180. @@ -710,17 +451,20 @@ static struct pci_driver ithc_driver = {
  3181. .thaw = ithc_thaw,
  3182. .restore = ithc_restore,
  3183. },
  3184. + .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
  3185. //.dev_groups = ithc_attribute_groups, // could use this (since 5.14), however the attributes won't have valid values until config has been read anyway
  3186. };
  3187. static int __init ithc_init(void)
  3188. {
  3189. + ithc_debug_init_module();
  3190. return pci_register_driver(&ithc_driver);
  3191. }
  3192. static void __exit ithc_exit(void)
  3193. {
  3194. pci_unregister_driver(&ithc_driver);
  3195. + ithc_debug_exit_module();
  3196. }
  3197. module_init(ithc_init);
  3198. diff --git a/drivers/hid/ithc/ithc-quickspi.c b/drivers/hid/ithc/ithc-quickspi.c
  3199. new file mode 100644
  3200. index 000000000000..760e55ead078
  3201. --- /dev/null
  3202. +++ b/drivers/hid/ithc/ithc-quickspi.c
  3203. @@ -0,0 +1,578 @@
  3204. +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
  3205. +
  3206. +// Some public THC/QuickSPI documentation can be found in:
  3207. +// - Intel Firmware Support Package repo: https://github.com/intel/FSP
  3208. +// - HID over SPI (HIDSPI) spec: https://www.microsoft.com/en-us/download/details.aspx?id=103325
  3209. +
  3210. +#include "ithc.h"
  3211. +
  3212. +static const guid_t guid_hidspi =
  3213. + GUID_INIT(0x6e2ac436, 0x0fcf, 0x41af, 0xa2, 0x65, 0xb3, 0x2a, 0x22, 0x0d, 0xcf, 0xab);
  3214. +static const guid_t guid_thc_quickspi =
  3215. + GUID_INIT(0x300d35b7, 0xac20, 0x413e, 0x8e, 0x9c, 0x92, 0xe4, 0xda, 0xfd, 0x0a, 0xfe);
  3216. +static const guid_t guid_thc_ltr =
  3217. + GUID_INIT(0x84005682, 0x5b71, 0x41a4, 0x8d, 0x66, 0x81, 0x30, 0xf7, 0x87, 0xa1, 0x38);
  3218. +
  3219. +// TODO The HIDSPI spec says revision should be 3. Should we try both?
  3220. +#define DSM_REV 2
  3221. +
  3222. +struct hidspi_header {
  3223. + u8 type;
  3224. + u16 len;
  3225. + u8 id;
  3226. +} __packed;
  3227. +static_assert(sizeof(struct hidspi_header) == 4);
  3228. +
  3229. +#define HIDSPI_INPUT_TYPE_DATA 1
  3230. +#define HIDSPI_INPUT_TYPE_RESET_RESPONSE 3
  3231. +#define HIDSPI_INPUT_TYPE_COMMAND_RESPONSE 4
  3232. +#define HIDSPI_INPUT_TYPE_GET_FEATURE_RESPONSE 5
  3233. +#define HIDSPI_INPUT_TYPE_DEVICE_DESCRIPTOR 7
  3234. +#define HIDSPI_INPUT_TYPE_REPORT_DESCRIPTOR 8
  3235. +#define HIDSPI_INPUT_TYPE_SET_FEATURE_RESPONSE 9
  3236. +#define HIDSPI_INPUT_TYPE_OUTPUT_REPORT_RESPONSE 10
  3237. +#define HIDSPI_INPUT_TYPE_GET_INPUT_REPORT_RESPONSE 11
  3238. +
  3239. +#define HIDSPI_OUTPUT_TYPE_DEVICE_DESCRIPTOR_REQUEST 1
  3240. +#define HIDSPI_OUTPUT_TYPE_REPORT_DESCRIPTOR_REQUEST 2
  3241. +#define HIDSPI_OUTPUT_TYPE_SET_FEATURE 3
  3242. +#define HIDSPI_OUTPUT_TYPE_GET_FEATURE 4
  3243. +#define HIDSPI_OUTPUT_TYPE_OUTPUT_REPORT 5
  3244. +#define HIDSPI_OUTPUT_TYPE_INPUT_REPORT_REQUEST 6
  3245. +#define HIDSPI_OUTPUT_TYPE_COMMAND 7
  3246. +
  3247. +struct hidspi_device_descriptor {
  3248. + u16 wDeviceDescLength;
  3249. + u16 bcdVersion;
  3250. + u16 wReportDescLength;
  3251. + u16 wMaxInputLength;
  3252. + u16 wMaxOutputLength;
  3253. + u16 wMaxFragmentLength;
  3254. + u16 wVendorID;
  3255. + u16 wProductID;
  3256. + u16 wVersionID;
  3257. + u16 wFlags;
  3258. + u32 dwReserved;
  3259. +};
  3260. +static_assert(sizeof(struct hidspi_device_descriptor) == 24);
  3261. +
  3262. +static int read_acpi_u32(struct ithc *ithc, const guid_t *guid, u32 func, u32 *dest)
  3263. +{
  3264. + acpi_handle handle = ACPI_HANDLE(&ithc->pci->dev);
  3265. + union acpi_object *o = acpi_evaluate_dsm(handle, guid, DSM_REV, func, NULL);
  3266. + if (!o)
  3267. + return 0;
  3268. + if (o->type != ACPI_TYPE_INTEGER) {
  3269. + pci_err(ithc->pci, "DSM %pUl %u returned type %i instead of integer\n",
  3270. + guid, func, o->type);
  3271. + ACPI_FREE(o);
  3272. + return -1;
  3273. + }
  3274. + pci_dbg(ithc->pci, "DSM %pUl %u = 0x%08x\n", guid, func, (u32)o->integer.value);
  3275. + *dest = (u32)o->integer.value;
  3276. + ACPI_FREE(o);
  3277. + return 1;
  3278. +}
  3279. +
  3280. +static int read_acpi_buf(struct ithc *ithc, const guid_t *guid, u32 func, size_t len, u8 *dest)
  3281. +{
  3282. + acpi_handle handle = ACPI_HANDLE(&ithc->pci->dev);
  3283. + union acpi_object *o = acpi_evaluate_dsm(handle, guid, DSM_REV, func, NULL);
  3284. + if (!o)
  3285. + return 0;
  3286. + if (o->type != ACPI_TYPE_BUFFER) {
  3287. + pci_err(ithc->pci, "DSM %pUl %u returned type %i instead of buffer\n",
  3288. + guid, func, o->type);
  3289. + ACPI_FREE(o);
  3290. + return -1;
  3291. + }
  3292. + if (o->buffer.length != len) {
  3293. + pci_err(ithc->pci, "DSM %pUl %u returned len %u instead of %zu\n",
  3294. + guid, func, o->buffer.length, len);
  3295. + ACPI_FREE(o);
  3296. + return -1;
  3297. + }
  3298. + memcpy(dest, o->buffer.pointer, len);
  3299. + pci_dbg(ithc->pci, "DSM %pUl %u = 0x%02x\n", guid, func, dest[0]);
  3300. + ACPI_FREE(o);
  3301. + return 1;
  3302. +}
  3303. +
  3304. +int ithc_read_acpi_config(struct ithc *ithc, struct ithc_acpi_config *cfg)
  3305. +{
  3306. + int r;
  3307. + acpi_handle handle = ACPI_HANDLE(&ithc->pci->dev);
  3308. +
  3309. + cfg->has_config = acpi_check_dsm(handle, &guid_hidspi, DSM_REV, BIT(0));
  3310. + if (!cfg->has_config)
  3311. + return 0;
  3312. +
  3313. + // HIDSPI settings
  3314. +
  3315. + r = read_acpi_u32(ithc, &guid_hidspi, 1, &cfg->input_report_header_address);
  3316. + if (r < 0)
  3317. + return r;
  3318. + cfg->has_input_report_header_address = r > 0;
  3319. + if (r > 0 && cfg->input_report_header_address > 0xffffff) {
  3320. + pci_err(ithc->pci, "Invalid input report header address 0x%x\n",
  3321. + cfg->input_report_header_address);
  3322. + return -1;
  3323. + }
  3324. +
  3325. + r = read_acpi_u32(ithc, &guid_hidspi, 2, &cfg->input_report_body_address);
  3326. + if (r < 0)
  3327. + return r;
  3328. + cfg->has_input_report_body_address = r > 0;
  3329. + if (r > 0 && cfg->input_report_body_address > 0xffffff) {
  3330. + pci_err(ithc->pci, "Invalid input report body address 0x%x\n",
  3331. + cfg->input_report_body_address);
  3332. + return -1;
  3333. + }
  3334. +
  3335. + r = read_acpi_u32(ithc, &guid_hidspi, 3, &cfg->output_report_body_address);
  3336. + if (r < 0)
  3337. + return r;
  3338. + cfg->has_output_report_body_address = r > 0;
  3339. + if (r > 0 && cfg->output_report_body_address > 0xffffff) {
  3340. + pci_err(ithc->pci, "Invalid output report body address 0x%x\n",
  3341. + cfg->output_report_body_address);
  3342. + return -1;
  3343. + }
  3344. +
  3345. + r = read_acpi_buf(ithc, &guid_hidspi, 4, sizeof(cfg->read_opcode), &cfg->read_opcode);
  3346. + if (r < 0)
  3347. + return r;
  3348. + cfg->has_read_opcode = r > 0;
  3349. +
  3350. + r = read_acpi_buf(ithc, &guid_hidspi, 5, sizeof(cfg->write_opcode), &cfg->write_opcode);
  3351. + if (r < 0)
  3352. + return r;
  3353. + cfg->has_write_opcode = r > 0;
  3354. +
  3355. + u32 flags;
  3356. + r = read_acpi_u32(ithc, &guid_hidspi, 6, &flags);
  3357. + if (r < 0)
  3358. + return r;
  3359. + cfg->has_read_mode = cfg->has_write_mode = r > 0;
  3360. + if (r > 0) {
  3361. + cfg->read_mode = (flags >> 14) & 3;
  3362. + cfg->write_mode = flags & BIT(13) ? cfg->read_mode : SPI_MODE_SINGLE;
  3363. + }
  3364. +
  3365. + // Quick SPI settings
  3366. +
  3367. + r = read_acpi_u32(ithc, &guid_thc_quickspi, 1, &cfg->spi_frequency);
  3368. + if (r < 0)
  3369. + return r;
  3370. + cfg->has_spi_frequency = r > 0;
  3371. +
  3372. + r = read_acpi_u32(ithc, &guid_thc_quickspi, 2, &cfg->limit_packet_size);
  3373. + if (r < 0)
  3374. + return r;
  3375. + cfg->has_limit_packet_size = r > 0;
  3376. +
  3377. + r = read_acpi_u32(ithc, &guid_thc_quickspi, 3, &cfg->tx_delay);
  3378. + if (r < 0)
  3379. + return r;
  3380. + cfg->has_tx_delay = r > 0;
  3381. + if (r > 0)
  3382. + cfg->tx_delay &= 0xffff;
  3383. +
  3384. + // LTR settings
  3385. +
  3386. + r = read_acpi_u32(ithc, &guid_thc_ltr, 1, &cfg->active_ltr);
  3387. + if (r < 0)
  3388. + return r;
  3389. + cfg->has_active_ltr = r > 0;
  3390. + if (r > 0 && (!cfg->active_ltr || cfg->active_ltr > 0x3ff)) {
  3391. + if (cfg->active_ltr != 0xffffffff)
  3392. + pci_warn(ithc->pci, "Ignoring invalid active LTR value 0x%x\n",
  3393. + cfg->active_ltr);
  3394. + cfg->active_ltr = 500;
  3395. + }
  3396. +
  3397. + r = read_acpi_u32(ithc, &guid_thc_ltr, 2, &cfg->idle_ltr);
  3398. + if (r < 0)
  3399. + return r;
  3400. + cfg->has_idle_ltr = r > 0;
  3401. + if (r > 0 && (!cfg->idle_ltr || cfg->idle_ltr > 0x3ff)) {
  3402. + if (cfg->idle_ltr != 0xffffffff)
  3403. + pci_warn(ithc->pci, "Ignoring invalid idle LTR value 0x%x\n",
  3404. + cfg->idle_ltr);
  3405. + cfg->idle_ltr = 500;
  3406. + if (cfg->has_active_ltr && cfg->active_ltr > cfg->idle_ltr)
  3407. + cfg->idle_ltr = cfg->active_ltr;
  3408. + }
  3409. +
  3410. + return 0;
  3411. +}
  3412. +
  3413. +void ithc_print_acpi_config(struct ithc *ithc, const struct ithc_acpi_config *cfg)
  3414. +{
  3415. + if (!cfg->has_config) {
  3416. + pci_info(ithc->pci, "No ACPI config\n");
  3417. + return;
  3418. + }
  3419. +
  3420. + char input_report_header_address[16] = "-";
  3421. + if (cfg->has_input_report_header_address)
  3422. + sprintf(input_report_header_address, "0x%x", cfg->input_report_header_address);
  3423. + char input_report_body_address[16] = "-";
  3424. + if (cfg->has_input_report_body_address)
  3425. + sprintf(input_report_body_address, "0x%x", cfg->input_report_body_address);
  3426. + char output_report_body_address[16] = "-";
  3427. + if (cfg->has_output_report_body_address)
  3428. + sprintf(output_report_body_address, "0x%x", cfg->output_report_body_address);
  3429. + char read_opcode[16] = "-";
  3430. + if (cfg->has_read_opcode)
  3431. + sprintf(read_opcode, "0x%02x", cfg->read_opcode);
  3432. + char write_opcode[16] = "-";
  3433. + if (cfg->has_write_opcode)
  3434. + sprintf(write_opcode, "0x%02x", cfg->write_opcode);
  3435. + char read_mode[16] = "-";
  3436. + if (cfg->has_read_mode)
  3437. + sprintf(read_mode, "%i", cfg->read_mode);
  3438. + char write_mode[16] = "-";
  3439. + if (cfg->has_write_mode)
  3440. + sprintf(write_mode, "%i", cfg->write_mode);
  3441. + char spi_frequency[16] = "-";
  3442. + if (cfg->has_spi_frequency)
  3443. + sprintf(spi_frequency, "%u", cfg->spi_frequency);
  3444. + char limit_packet_size[16] = "-";
  3445. + if (cfg->has_limit_packet_size)
  3446. + sprintf(limit_packet_size, "%u", cfg->limit_packet_size);
  3447. + char tx_delay[16] = "-";
  3448. + if (cfg->has_tx_delay)
  3449. + sprintf(tx_delay, "%u", cfg->tx_delay);
  3450. + char active_ltr[16] = "-";
  3451. + if (cfg->has_active_ltr)
  3452. + sprintf(active_ltr, "%u", cfg->active_ltr);
  3453. + char idle_ltr[16] = "-";
  3454. + if (cfg->has_idle_ltr)
  3455. + sprintf(idle_ltr, "%u", cfg->idle_ltr);
  3456. +
  3457. + pci_info(ithc->pci, "ACPI config: InputHeaderAddr=%s InputBodyAddr=%s OutputBodyAddr=%s ReadOpcode=%s WriteOpcode=%s ReadMode=%s WriteMode=%s Frequency=%s LimitPacketSize=%s TxDelay=%s ActiveLTR=%s IdleLTR=%s\n",
  3458. + input_report_header_address, input_report_body_address, output_report_body_address,
  3459. + read_opcode, write_opcode, read_mode, write_mode,
  3460. + spi_frequency, limit_packet_size, tx_delay, active_ltr, idle_ltr);
  3461. +}
  3462. +
  3463. +static int ithc_quickspi_init_regs(struct ithc *ithc, const struct ithc_acpi_config *cfg)
  3464. +{
  3465. + pci_dbg(ithc->pci, "initializing QuickSPI registers\n");
  3466. +
  3467. + // SPI frequency and mode
  3468. + if (!cfg->has_spi_frequency || !cfg->spi_frequency) {
  3469. + pci_err(ithc->pci, "Missing SPI frequency in configuration\n");
  3470. + return -EINVAL;
  3471. + }
  3472. + unsigned int clkdiv = DIV_ROUND_UP(SPI_CLK_FREQ_BASE, cfg->spi_frequency);
  3473. + bool clkdiv8 = clkdiv > 7;
  3474. + if (clkdiv8)
  3475. + clkdiv = min(7u, DIV_ROUND_UP(clkdiv, 8u));
  3476. + if (!clkdiv)
  3477. + clkdiv = 1;
  3478. + CHECK_RET(ithc_set_spi_config, ithc, clkdiv, clkdiv8,
  3479. + cfg->has_read_mode ? cfg->read_mode : SPI_MODE_SINGLE,
  3480. + cfg->has_write_mode ? cfg->write_mode : SPI_MODE_SINGLE);
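+ // Example under an assumed base clock: if SPI_CLK_FREQ_BASE were 125 MHz, a
+ // requested frequency of 25 MHz would yield clkdiv = 5 with clkdiv8 = false,
+ // while 1 MHz would exceed the 3-bit divider and fall back to clkdiv8 = true
+ // with clkdiv clamped to 7.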
  3481. +
  3482. + // SPI addresses and opcodes
  3483. + if (cfg->has_input_report_header_address)
  3484. + writel(cfg->input_report_header_address, &ithc->regs->spi_header_addr);
  3485. + if (cfg->has_input_report_body_address)
  3486. + writel(cfg->input_report_body_address, &ithc->regs->dma_rx[0].spi_addr);
  3487. + if (cfg->has_output_report_body_address)
  3488. + writel(cfg->output_report_body_address, &ithc->regs->dma_tx.spi_addr);
  3489. +
  3490. + if (cfg->has_read_opcode) {
  3491. + writeb(cfg->read_opcode, &ithc->regs->read_opcode);
  3492. + writeb(cfg->read_opcode, &ithc->regs->read_opcode_single);
  3493. + writeb(cfg->read_opcode, &ithc->regs->read_opcode_dual);
  3494. + writeb(cfg->read_opcode, &ithc->regs->read_opcode_quad);
  3495. + }
  3496. + if (cfg->has_write_opcode) {
  3497. + writeb(cfg->write_opcode, &ithc->regs->write_opcode);
  3498. + writeb(cfg->write_opcode, &ithc->regs->write_opcode_single);
  3499. + writeb(cfg->write_opcode, &ithc->regs->write_opcode_dual);
  3500. + writeb(cfg->write_opcode, &ithc->regs->write_opcode_quad);
  3501. + }
  3502. + ithc_log_regs(ithc);
  3503. +
  3504. + // The rest...
  3505. + bitsl(&ithc->regs->quickspi_config1,
  3506. + QUICKSPI_CONFIG1_UNKNOWN_0(0xff) | QUICKSPI_CONFIG1_UNKNOWN_5(0xff) |
  3507. + QUICKSPI_CONFIG1_UNKNOWN_10(0xff) | QUICKSPI_CONFIG1_UNKNOWN_16(0xffff),
  3508. + QUICKSPI_CONFIG1_UNKNOWN_0(4) | QUICKSPI_CONFIG1_UNKNOWN_5(4) |
  3509. + QUICKSPI_CONFIG1_UNKNOWN_10(22) | QUICKSPI_CONFIG1_UNKNOWN_16(2));
  3510. +
  3511. + bitsl(&ithc->regs->quickspi_config2,
  3512. + QUICKSPI_CONFIG2_UNKNOWN_0(0xff) | QUICKSPI_CONFIG2_UNKNOWN_5(0xff) |
  3513. + QUICKSPI_CONFIG2_UNKNOWN_12(0xff),
  3514. + QUICKSPI_CONFIG2_UNKNOWN_0(8) | QUICKSPI_CONFIG2_UNKNOWN_5(14) |
  3515. + QUICKSPI_CONFIG2_UNKNOWN_12(2));
  3516. +
  3517. + u32 pktsize = cfg->has_limit_packet_size && cfg->limit_packet_size == 1 ? 4 : 0x80;
  3518. + bitsl(&ithc->regs->spi_config,
  3519. + SPI_CONFIG_READ_PACKET_SIZE(0xfff) | SPI_CONFIG_WRITE_PACKET_SIZE(0xfff),
  3520. + SPI_CONFIG_READ_PACKET_SIZE(pktsize) | SPI_CONFIG_WRITE_PACKET_SIZE(pktsize));
  3521. +
  3522. + bitsl_set(&ithc->regs->quickspi_config2,
  3523. + QUICKSPI_CONFIG2_UNKNOWN_16 | QUICKSPI_CONFIG2_UNKNOWN_17);
  3524. + bitsl(&ithc->regs->quickspi_config2,
  3525. + QUICKSPI_CONFIG2_DISABLE_READ_ADDRESS_INCREMENT |
  3526. + QUICKSPI_CONFIG2_DISABLE_WRITE_ADDRESS_INCREMENT |
  3527. + QUICKSPI_CONFIG2_ENABLE_WRITE_STREAMING_MODE, 0);
  3528. +
  3529. + return 0;
  3530. +}
  3531. +
  3532. +static int wait_for_report(struct ithc *ithc)
  3533. +{
  3534. + CHECK_RET(waitl, ithc, &ithc->regs->dma_rx[0].status,
  3535. + DMA_RX_STATUS_READY, DMA_RX_STATUS_READY);
  3536. + writel(DMA_RX_STATUS_READY, &ithc->regs->dma_rx[0].status);
  3537. +
  3538. + u32 h = readl(&ithc->regs->input_header);
  3539. + ithc_log_regs(ithc);
  3540. + if (INPUT_HEADER_SYNC(h) != INPUT_HEADER_SYNC_VALUE
  3541. + || INPUT_HEADER_VERSION(h) != INPUT_HEADER_VERSION_VALUE) {
  3542. + pci_err(ithc->pci, "invalid input report frame header 0x%08x\n", h);
  3543. + return -ENODATA;
  3544. + }
  3545. + return INPUT_HEADER_REPORT_LENGTH(h) * 4;
  3546. +}
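+// (The report length field in the input header counts 4-byte words, hence the
+// multiplication by 4 above.)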
  3547. +
  3548. +static int ithc_quickspi_init_hidspi(struct ithc *ithc, const struct ithc_acpi_config *cfg)
  3549. +{
  3550. + pci_dbg(ithc->pci, "initializing HIDSPI\n");
  3551. +
  3552. + // HIDSPI initialization sequence:
  3553. + // "1. The host shall invoke the ACPI reset method to clear the device state."
  3554. + acpi_status s = acpi_evaluate_object(ACPI_HANDLE(&ithc->pci->dev), "_RST", NULL, NULL);
  3555. + if (ACPI_FAILURE(s)) {
  3556. + pci_err(ithc->pci, "ACPI reset failed\n");
  3557. + return -EIO;
  3558. + }
  3559. +
  3560. + bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0);
  3561. +
  3562. + // "2. Within 1 second, the device shall signal an interrupt and make available to the host
  3563. + // an input report containing a device reset response."
  3564. + int size = wait_for_report(ithc);
  3565. + if (size < 0)
  3566. + return size;
  3567. + if (size < sizeof(struct hidspi_header)) {
  3568. + pci_err(ithc->pci, "SPI data size too small for reset response (%u)\n", size);
  3569. + return -EMSGSIZE;
  3570. + }
  3571. +
  3572. + // "3. The host shall read the reset response from the device at the Input Report addresses
  3573. + // specified in ACPI."
  3574. + u32 in_addr = cfg->has_input_report_body_address ? cfg->input_report_body_address : 0x1000;
  3575. + struct {
  3576. + struct hidspi_header header;
  3577. + union {
  3578. + struct hidspi_device_descriptor device_desc;
  3579. + u32 data[16];
  3580. + };
  3581. + } resp = { 0 };
  3582. + if (size > sizeof(resp)) {
  3583. + pci_err(ithc->pci, "SPI data size for reset response too big (%u)\n", size);
  3584. + return -EMSGSIZE;
  3585. + }
  3586. + CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, in_addr, size, &resp);
  3587. + if (resp.header.type != HIDSPI_INPUT_TYPE_RESET_RESPONSE) {
  3588. + pci_err(ithc->pci, "received type %i instead of reset response\n", resp.header.type);
  3589. + return -ENOMSG;
  3590. + }
  3591. +
  3592. + // "4. The host shall then write an Output Report to the device at the Output Report Address
  3593. + // specified in ACPI, requesting the Device Descriptor from the device."
  3594. + u32 out_addr = cfg->has_output_report_body_address ? cfg->output_report_body_address : 0x1000;
  3595. + struct hidspi_header req = { .type = HIDSPI_OUTPUT_TYPE_DEVICE_DESCRIPTOR_REQUEST };
  3596. + CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_WRITE, out_addr, sizeof(req), &req);
  3597. +
  3598. + // "5. Within 1 second, the device shall signal an interrupt and make available to the host
  3599. + // an input report containing the Device Descriptor."
  3600. + size = wait_for_report(ithc);
  3601. + if (size < 0)
  3602. + return size;
  3603. + if (size < sizeof(resp.header) + sizeof(resp.device_desc)) {
  3604. + pci_err(ithc->pci, "SPI data size too small for device descriptor (%u)\n", size);
  3605. + return -EMSGSIZE;
  3606. + }
  3607. +
  3608. + // "6. The host shall read the Device Descriptor from the Input Report addresses specified
  3609. + // in ACPI."
  3610. + if (size > sizeof(resp)) {
  3611. + pci_err(ithc->pci, "SPI data size for device descriptor too big (%u)\n", size);
  3612. + return -EMSGSIZE;
  3613. + }
  3614. + memset(&resp, 0, sizeof(resp));
  3615. + CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, in_addr, size, &resp);
  3616. + if (resp.header.type != HIDSPI_INPUT_TYPE_DEVICE_DESCRIPTOR) {
  3617. + pci_err(ithc->pci, "received type %i instead of device descriptor\n",
  3618. + resp.header.type);
  3619. + return -ENOMSG;
  3620. + }
  3621. + struct hidspi_device_descriptor *d = &resp.device_desc;
  3622. + if (resp.header.len < sizeof(*d)) {
  3623. + pci_err(ithc->pci, "response too small for device descriptor (%u)\n",
  3624. + resp.header.len);
  3625. + return -EMSGSIZE;
  3626. + }
  3627. + if (d->wDeviceDescLength != sizeof(*d)) {
  3628. + pci_err(ithc->pci, "invalid device descriptor length (%u)\n",
  3629. + d->wDeviceDescLength);
  3630. + return -EMSGSIZE;
  3631. + }
  3632. +
  3633. + pci_info(ithc->pci, "Device descriptor: bcdVersion=0x%04x wReportDescLength=%u wMaxInputLength=%u wMaxOutputLength=%u wMaxFragmentLength=%u wVendorID=0x%04x wProductID=0x%04x wVersionID=0x%04x wFlags=0x%04x dwReserved=0x%08x\n",
  3634. + d->bcdVersion, d->wReportDescLength,
  3635. + d->wMaxInputLength, d->wMaxOutputLength, d->wMaxFragmentLength,
  3636. + d->wVendorID, d->wProductID, d->wVersionID,
  3637. + d->wFlags, d->dwReserved);
  3638. +
  3639. + ithc->vendor_id = d->wVendorID;
  3640. + ithc->product_id = d->wProductID;
  3641. + ithc->product_rev = d->wVersionID;
  3642. + ithc->max_rx_size = max_t(u32, d->wMaxInputLength,
  3643. + d->wReportDescLength + sizeof(struct hidspi_header));
  3644. + ithc->max_tx_size = d->wMaxOutputLength;
  3645. + ithc->have_config = true;
  3646. +
  3647. + // "7. The device and host shall then enter their "Ready" states - where the device may
  3648. + // begin sending Input Reports, and the device shall be prepared for Output Reports from
  3649. + // the host."
  3650. +
  3651. + return 0;
  3652. +}
  3653. +
  3654. +int ithc_quickspi_init(struct ithc *ithc, const struct ithc_acpi_config *cfg)
  3655. +{
  3656. + bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE);
  3657. + CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED);
  3658. +
  3659. + ithc_log_regs(ithc);
  3660. + CHECK_RET(ithc_quickspi_init_regs, ithc, cfg);
  3661. + ithc_log_regs(ithc);
  3662. + CHECK_RET(ithc_quickspi_init_hidspi, ithc, cfg);
  3663. + ithc_log_regs(ithc);
  3664. +
  3665. + // This value is set to 2 in ithc_quickspi_init_regs(). It needs to be set to 1 here,
  3666. + // otherwise DMA will not work. Maybe selects between DMA and PIO mode?
  3667. + bitsl(&ithc->regs->quickspi_config1,
  3668. + QUICKSPI_CONFIG1_UNKNOWN_16(0xffff), QUICKSPI_CONFIG1_UNKNOWN_16(1));
  3669. +
  3670. + // TODO Do we need to set any of the following bits here?
  3671. + //bitsb_set(&ithc->regs->dma_rx[1].control2, DMA_RX_CONTROL2_UNKNOWN_4);
  3672. + //bitsb_set(&ithc->regs->dma_rx[0].control2, DMA_RX_CONTROL2_UNKNOWN_5);
  3673. + //bitsb_set(&ithc->regs->dma_rx[1].control2, DMA_RX_CONTROL2_UNKNOWN_5);
  3674. + //bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_3);
  3675. + //bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_31);
  3676. +
  3677. + ithc_log_regs(ithc);
  3678. +
  3679. + return 0;
  3680. +}
  3681. +
  3682. +void ithc_quickspi_exit(struct ithc *ithc)
  3683. +{
  3684. + // TODO Should we send HIDSPI 'power off' command?
  3685. + //struct hidspi_header h = { .type = HIDSPI_OUTPUT_TYPE_COMMAND, .id = 3, };
  3686. + //struct ithc_data d = { .type = ITHC_DATA_RAW, .data = &h, .size = sizeof(h) };
  3687. + //CHECK(ithc_dma_tx, ithc, &d); // or ithc_spi_command()
  3688. +}
  3689. +
  3690. +int ithc_quickspi_decode_rx(struct ithc *ithc, const void *src, size_t len, struct ithc_data *dest)
  3691. +{
  3692. + const struct hidspi_header *hdr = src;
  3693. +
  3694. + if (len < sizeof(*hdr))
  3695. + return -ENODATA;
  3696. + // TODO Do we need to handle HIDSPI packet fragmentation?
  3697. + if (len < sizeof(*hdr) + hdr->len)
  3698. + return -EMSGSIZE;
  3699. + if (len > round_up(sizeof(*hdr) + hdr->len, 4))
  3700. + return -EMSGSIZE;
  3701. +
  3702. + switch (hdr->type) {
  3703. + case HIDSPI_INPUT_TYPE_RESET_RESPONSE:
  3704. + // TODO "When the device detects an error condition, it may interrupt and make
  3705. + // available to the host an Input Report containing an unsolicited Reset Response.
  3706. + // After receiving an unsolicited Reset Response, the host shall initiate the
  3707. + // request procedure from step (4) in the [HIDSPI initialization] process."
  3708. + dest->type = ITHC_DATA_ERROR;
  3709. + return 0;
  3710. + case HIDSPI_INPUT_TYPE_REPORT_DESCRIPTOR:
  3711. + dest->type = ITHC_DATA_REPORT_DESCRIPTOR;
  3712. + dest->data = hdr + 1;
  3713. + dest->size = hdr->len;
  3714. + return 0;
  3715. + case HIDSPI_INPUT_TYPE_DATA:
  3716. + case HIDSPI_INPUT_TYPE_GET_INPUT_REPORT_RESPONSE:
  3717. + dest->type = ITHC_DATA_INPUT_REPORT;
  3718. + dest->data = &hdr->id;
  3719. + dest->size = hdr->len + 1;
  3720. + return 0;
  3721. + case HIDSPI_INPUT_TYPE_GET_FEATURE_RESPONSE:
  3722. + dest->type = ITHC_DATA_GET_FEATURE;
  3723. + dest->data = &hdr->id;
  3724. + dest->size = hdr->len + 1;
  3725. + return 0;
  3726. + case HIDSPI_INPUT_TYPE_SET_FEATURE_RESPONSE:
  3727. + case HIDSPI_INPUT_TYPE_OUTPUT_REPORT_RESPONSE:
  3728. + dest->type = ITHC_DATA_IGNORE;
  3729. + return 0;
  3730. + default:
  3731. + return -EINVAL;
  3732. + }
  3733. +}
  3734. +
  3735. +ssize_t ithc_quickspi_encode_tx(struct ithc *ithc, const struct ithc_data *src, void *dest,
  3736. + size_t maxlen)
  3737. +{
  3738. + struct hidspi_header *hdr = dest;
  3739. +
  3740. + size_t src_size = src->size;
  3741. + const u8 *src_data = src->data;
  3742. + u8 type;
  3743. +
  3744. + switch (src->type) {
  3745. + case ITHC_DATA_SET_FEATURE:
  3746. + type = HIDSPI_OUTPUT_TYPE_SET_FEATURE;
  3747. + break;
  3748. + case ITHC_DATA_GET_FEATURE:
  3749. + type = HIDSPI_OUTPUT_TYPE_GET_FEATURE;
  3750. + break;
  3751. + case ITHC_DATA_OUTPUT_REPORT:
  3752. + type = HIDSPI_OUTPUT_TYPE_OUTPUT_REPORT;
  3753. + break;
  3754. + case ITHC_DATA_REPORT_DESCRIPTOR:
  3755. + type = HIDSPI_OUTPUT_TYPE_REPORT_DESCRIPTOR_REQUEST;
  3756. + src_size = 0;
  3757. + break;
  3758. + default:
  3759. + return -EINVAL;
  3760. + }
  3761. +
  3762. + u8 id = 0;
  3763. + if (src_size) {
  3764. + id = *src_data++;
  3765. + src_size--;
  3766. + }
  3767. +
  3768. + // Data must be padded to next 4-byte boundary.
  3769. + size_t padded = round_up(src_size, 4);
  3770. + if (sizeof(*hdr) + padded > maxlen)
  3771. + return -EOVERFLOW;
  3772. +
  3773. + // Fill the TX buffer with header and data.
  3774. + hdr->type = type;
  3775. + hdr->len = (u16)src_size;
  3776. + hdr->id = id;
  3777. + memcpy_and_pad(hdr + 1, padded, src_data, src_size, 0);
  3778. +
  3779. + return sizeof(*hdr) + padded;
  3780. +}
  3781. +
  3782. diff --git a/drivers/hid/ithc/ithc-quickspi.h b/drivers/hid/ithc/ithc-quickspi.h
  3783. new file mode 100644
  3784. index 000000000000..74d882f6b2f0
  3785. --- /dev/null
  3786. +++ b/drivers/hid/ithc/ithc-quickspi.h
  3787. @@ -0,0 +1,39 @@
  3788. +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
  3789. +
  3790. +struct ithc_acpi_config {
  3791. + bool has_config: 1;
  3792. + bool has_input_report_header_address: 1;
  3793. + bool has_input_report_body_address: 1;
  3794. + bool has_output_report_body_address: 1;
  3795. + bool has_read_opcode: 1;
  3796. + bool has_write_opcode: 1;
  3797. + bool has_read_mode: 1;
  3798. + bool has_write_mode: 1;
  3799. + bool has_spi_frequency: 1;
  3800. + bool has_limit_packet_size: 1;
  3801. + bool has_tx_delay: 1;
  3802. + bool has_active_ltr: 1;
  3803. + bool has_idle_ltr: 1;
  3804. + u32 input_report_header_address;
  3805. + u32 input_report_body_address;
  3806. + u32 output_report_body_address;
  3807. + u8 read_opcode;
  3808. + u8 write_opcode;
  3809. + u8 read_mode;
  3810. + u8 write_mode;
  3811. + u32 spi_frequency;
  3812. + u32 limit_packet_size;
  3813. + u32 tx_delay; // us/10 // TODO use?
  3814. + u32 active_ltr; // ns/1024
  3815. + u32 idle_ltr; // ns/1024
  3816. +};
  3817. +
  3818. +int ithc_read_acpi_config(struct ithc *ithc, struct ithc_acpi_config *cfg);
  3819. +void ithc_print_acpi_config(struct ithc *ithc, const struct ithc_acpi_config *cfg);
  3820. +
  3821. +int ithc_quickspi_init(struct ithc *ithc, const struct ithc_acpi_config *cfg);
  3822. +void ithc_quickspi_exit(struct ithc *ithc);
  3823. +int ithc_quickspi_decode_rx(struct ithc *ithc, const void *src, size_t len, struct ithc_data *dest);
  3824. +ssize_t ithc_quickspi_encode_tx(struct ithc *ithc, const struct ithc_data *src, void *dest,
  3825. + size_t maxlen);
  3826. +
  3827. diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c
  3828. index e058721886e3..c0f13506af20 100644
  3829. --- a/drivers/hid/ithc/ithc-regs.c
  3830. +++ b/drivers/hid/ithc/ithc-regs.c
  3831. @@ -22,46 +22,104 @@ void bitsb(__iomem u8 *reg, u8 mask, u8 val)
  3832. int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val)
  3833. {
  3834. + ithc_log_regs(ithc);
  3835. pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n",
  3836. reg_num(reg), mask, val);
  3837. u32 x;
  3838. if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
  3839. + ithc_log_regs(ithc);
  3840. pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n",
  3841. reg_num(reg), mask, val);
  3842. return -ETIMEDOUT;
  3843. }
  3844. + ithc_log_regs(ithc);
  3845. pci_dbg(ithc->pci, "done waiting\n");
  3846. return 0;
  3847. }
  3848. int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val)
  3849. {
  3850. + ithc_log_regs(ithc);
  3851. pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
  3852. reg_num(reg), mask, val);
  3853. u8 x;
  3854. if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) {
  3855. + ithc_log_regs(ithc);
  3856. pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n",
  3857. reg_num(reg), mask, val);
  3858. return -ETIMEDOUT;
  3859. }
  3860. + ithc_log_regs(ithc);
  3861. pci_dbg(ithc->pci, "done waiting\n");
  3862. return 0;
  3863. }
  3864. -int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode)
  3865. +static void calc_ltr(u64 *ns, unsigned int *val, unsigned int *scale)
  3866. {
  3867. - pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode);
  3868. - if (mode == 3)
  3869. - mode = 2;
  3870. + unsigned int s = 0;
  3871. + u64 v = *ns;
  3872. + while (v > 0x3ff) {
  3873. + s++;
  3874. + v >>= 5;
  3875. + }
  3876. + if (s > 5) {
  3877. + s = 5;
  3878. + v = 0x3ff;
  3879. + }
  3880. + *val = v;
  3881. + *scale = s;
  3882. + *ns = v << (5 * s);
  3883. +}
  3884. +
  3885. +void ithc_set_ltr_config(struct ithc *ithc, u64 active_ltr_ns, u64 idle_ltr_ns)
  3886. +{
  3887. + unsigned int active_val, active_scale, idle_val, idle_scale;
  3888. + calc_ltr(&active_ltr_ns, &active_val, &active_scale);
  3889. + calc_ltr(&idle_ltr_ns, &idle_val, &idle_scale);
  3890. + pci_dbg(ithc->pci, "setting active LTR value to %llu ns, idle LTR value to %llu ns\n",
  3891. + active_ltr_ns, idle_ltr_ns);
  3892. + writel(LTR_CONFIG_ENABLE_ACTIVE | LTR_CONFIG_ENABLE_IDLE | LTR_CONFIG_APPLY |
  3893. + LTR_CONFIG_ACTIVE_LTR_SCALE(active_scale) | LTR_CONFIG_ACTIVE_LTR_VALUE(active_val) |
  3894. + LTR_CONFIG_IDLE_LTR_SCALE(idle_scale) | LTR_CONFIG_IDLE_LTR_VALUE(idle_val),
  3895. + &ithc->regs->ltr_config);
  3896. +}
  3897. +
  3898. +void ithc_set_ltr_idle(struct ithc *ithc)
  3899. +{
  3900. + u32 ltr = readl(&ithc->regs->ltr_config);
  3901. + switch (ltr & (LTR_CONFIG_STATUS_ACTIVE | LTR_CONFIG_STATUS_IDLE)) {
  3902. + case LTR_CONFIG_STATUS_IDLE:
  3903. + break;
  3904. + case LTR_CONFIG_STATUS_ACTIVE:
  3905. + writel(ltr | LTR_CONFIG_TOGGLE | LTR_CONFIG_APPLY, &ithc->regs->ltr_config);
  3906. + break;
  3907. + default:
  3908. + pci_err(ithc->pci, "invalid LTR state 0x%08x\n", ltr);
  3909. + break;
  3910. + }
  3911. +}
  3912. +
  3913. +int ithc_set_spi_config(struct ithc *ithc, u8 clkdiv, bool clkdiv8, u8 read_mode, u8 write_mode)
  3914. +{
  3915. + if (clkdiv == 0 || clkdiv > 7 || read_mode > SPI_MODE_QUAD || write_mode > SPI_MODE_QUAD)
  3916. + return -EINVAL;
  3917. + static const char * const modes[] = { "single", "dual", "quad" };
  3918. + pci_dbg(ithc->pci, "setting SPI frequency to %i Hz, %s read, %s write\n",
  3919. + SPI_CLK_FREQ_BASE / (clkdiv * (clkdiv8 ? 8 : 1)),
  3920. + modes[read_mode], modes[write_mode]);
  3921. bitsl(&ithc->regs->spi_config,
  3922. - SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff),
  3923. - SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed));
  3924. + SPI_CONFIG_READ_MODE(0xff) | SPI_CONFIG_READ_CLKDIV(0xff) |
  3925. + SPI_CONFIG_WRITE_MODE(0xff) | SPI_CONFIG_WRITE_CLKDIV(0xff) |
  3926. + SPI_CONFIG_CLKDIV_8,
  3927. + SPI_CONFIG_READ_MODE(read_mode) | SPI_CONFIG_READ_CLKDIV(clkdiv) |
  3928. + SPI_CONFIG_WRITE_MODE(write_mode) | SPI_CONFIG_WRITE_CLKDIV(clkdiv) |
  3929. + (clkdiv8 ? SPI_CONFIG_CLKDIV_8 : 0));
  3930. return 0;
  3931. }
  3932. int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data)
  3933. {
  3934. - pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset);
  3935. + pci_dbg(ithc->pci, "SPI command %u, size %u, offset 0x%x\n", command, size, offset);
  3936. if (size > sizeof(ithc->regs->spi_cmd.data))
  3937. return -EINVAL;
  3938. diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h
  3939. index d4007d9e2bac..a9d236454644 100644
  3940. --- a/drivers/hid/ithc/ithc-regs.h
  3941. +++ b/drivers/hid/ithc/ithc-regs.h
  3942. @@ -1,14 +1,34 @@
  3943. /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
  3944. +#define LTR_CONFIG_ENABLE_ACTIVE BIT(0)
  3945. +#define LTR_CONFIG_TOGGLE BIT(1)
  3946. +#define LTR_CONFIG_ENABLE_IDLE BIT(2)
  3947. +#define LTR_CONFIG_APPLY BIT(3)
  3948. +#define LTR_CONFIG_IDLE_LTR_SCALE(x) (((x) & 7) << 4)
  3949. +#define LTR_CONFIG_IDLE_LTR_VALUE(x) (((x) & 0x3ff) << 7)
  3950. +#define LTR_CONFIG_ACTIVE_LTR_SCALE(x) (((x) & 7) << 17)
  3951. +#define LTR_CONFIG_ACTIVE_LTR_VALUE(x) (((x) & 0x3ff) << 20)
  3952. +#define LTR_CONFIG_STATUS_ACTIVE BIT(30)
  3953. +#define LTR_CONFIG_STATUS_IDLE BIT(31)
  3954. +
  3955. #define CONTROL_QUIESCE BIT(1)
  3956. #define CONTROL_IS_QUIESCED BIT(2)
  3957. #define CONTROL_NRESET BIT(3)
  3958. +#define CONTROL_UNKNOWN_24(x) (((x) & 3) << 24)
  3959. #define CONTROL_READY BIT(29)
  3960. -#define SPI_CONFIG_MODE(x) (((x) & 3) << 2)
  3961. -#define SPI_CONFIG_SPEED(x) (((x) & 7) << 4)
  3962. -#define SPI_CONFIG_UNKNOWN_18(x) (((x) & 3) << 18)
  3963. -#define SPI_CONFIG_SPEED2(x) (((x) & 0xf) << 20) // high bit = high speed mode?
  3964. +#define SPI_CONFIG_READ_MODE(x) (((x) & 3) << 2)
  3965. +#define SPI_CONFIG_READ_CLKDIV(x) (((x) & 7) << 4)
  3966. +#define SPI_CONFIG_READ_PACKET_SIZE(x) (((x) & 0x1ff) << 7)
  3967. +#define SPI_CONFIG_WRITE_MODE(x) (((x) & 3) << 18)
  3968. +#define SPI_CONFIG_WRITE_CLKDIV(x) (((x) & 7) << 20)
  3969. +#define SPI_CONFIG_CLKDIV_8 BIT(23) // additionally divide clk by 8, for both read and write
  3970. +#define SPI_CONFIG_WRITE_PACKET_SIZE(x) (((x) & 0xff) << 24)
  3971. +
  3972. +#define SPI_CLK_FREQ_BASE 125000000
  3973. +#define SPI_MODE_SINGLE 0
  3974. +#define SPI_MODE_DUAL 1
  3975. +#define SPI_MODE_QUAD 2
  3976. #define ERROR_CONTROL_UNKNOWN_0 BIT(0)
  3977. #define ERROR_CONTROL_DISABLE_DMA BIT(1) // clears DMA_RX_CONTROL_ENABLE when a DMA error occurs
  3978. @@ -53,33 +73,71 @@
  3979. #define DMA_TX_STATUS_UNKNOWN_2 BIT(2)
  3980. #define DMA_TX_STATUS_UNKNOWN_3 BIT(3) // busy?
  3981. +#define INPUT_HEADER_VERSION(x) ((x) & 0xf)
  3982. +#define INPUT_HEADER_REPORT_LENGTH(x) (((x) >> 8) & 0x3fff)
  3983. +#define INPUT_HEADER_SYNC(x) ((x) >> 24)
  3984. +#define INPUT_HEADER_VERSION_VALUE 3
  3985. +#define INPUT_HEADER_SYNC_VALUE 0x5a
  3986. +
  3987. +#define QUICKSPI_CONFIG1_UNKNOWN_0(x) (((x) & 0x1f) << 0)
  3988. +#define QUICKSPI_CONFIG1_UNKNOWN_5(x) (((x) & 0x1f) << 5)
  3989. +#define QUICKSPI_CONFIG1_UNKNOWN_10(x) (((x) & 0x1f) << 10)
  3990. +#define QUICKSPI_CONFIG1_UNKNOWN_16(x) (((x) & 0xffff) << 16)
  3991. +
  3992. +#define QUICKSPI_CONFIG2_UNKNOWN_0(x) (((x) & 0x1f) << 0)
  3993. +#define QUICKSPI_CONFIG2_UNKNOWN_5(x) (((x) & 0x1f) << 5)
  3994. +#define QUICKSPI_CONFIG2_UNKNOWN_12(x) (((x) & 0xf) << 12)
  3995. +#define QUICKSPI_CONFIG2_UNKNOWN_16 BIT(16)
  3996. +#define QUICKSPI_CONFIG2_UNKNOWN_17 BIT(17)
  3997. +#define QUICKSPI_CONFIG2_DISABLE_READ_ADDRESS_INCREMENT BIT(24)
  3998. +#define QUICKSPI_CONFIG2_DISABLE_WRITE_ADDRESS_INCREMENT BIT(25)
  3999. +#define QUICKSPI_CONFIG2_ENABLE_WRITE_STREAMING_MODE BIT(27)
  4000. +#define QUICKSPI_CONFIG2_IRQ_POLARITY BIT(28)
  4001. +
  4002. #define DMA_RX_CONTROL_ENABLE BIT(0)
  4003. #define DMA_RX_CONTROL_IRQ_UNKNOWN_1 BIT(1) // rx1 only?
  4004. #define DMA_RX_CONTROL_IRQ_ERROR BIT(3) // rx1 only?
  4005. -#define DMA_RX_CONTROL_IRQ_UNKNOWN_4 BIT(4) // rx0 only?
  4006. +#define DMA_RX_CONTROL_IRQ_READY BIT(4) // rx0 only
  4007. #define DMA_RX_CONTROL_IRQ_DATA BIT(5)
  4008. +#define DMA_RX_CONTROL2_UNKNOWN_4 BIT(4) // rx1 only?
  4009. #define DMA_RX_CONTROL2_UNKNOWN_5 BIT(5) // rx0 only?
  4010. #define DMA_RX_CONTROL2_RESET BIT(7) // resets ringbuffer indices
  4011. #define DMA_RX_WRAP_FLAG BIT(7)
  4012. #define DMA_RX_STATUS_ERROR BIT(3)
  4013. -#define DMA_RX_STATUS_UNKNOWN_4 BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms)
  4014. +#define DMA_RX_STATUS_READY BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms)
  4015. #define DMA_RX_STATUS_HAVE_DATA BIT(5)
  4016. #define DMA_RX_STATUS_ENABLED BIT(8)
  4017. +#define INIT_UNKNOWN_GUC_2 BIT(2)
  4018. +#define INIT_UNKNOWN_3 BIT(3)
  4019. +#define INIT_UNKNOWN_GUC_4 BIT(4)
  4020. +#define INIT_UNKNOWN_5 BIT(5)
  4021. +#define INIT_UNKNOWN_31 BIT(31)
  4022. +
  4023. // COUNTER_RESET can be written to counter registers to reset them to zero. However, in some cases this can mess up the THC.
  4024. #define COUNTER_RESET BIT(31)
  4025. struct ithc_registers {
  4026. - /* 0000 */ u32 _unknown_0000[1024];
  4027. + /* 0000 */ u32 _unknown_0000[5];
  4028. + /* 0014 */ u32 ltr_config;
  4029. + /* 0018 */ u32 _unknown_0018[1018];
  4030. /* 1000 */ u32 _unknown_1000;
  4031. /* 1004 */ u32 _unknown_1004;
  4032. /* 1008 */ u32 control_bits;
  4033. /* 100c */ u32 _unknown_100c;
  4034. /* 1010 */ u32 spi_config;
  4035. - /* 1014 */ u32 _unknown_1014[3];
  4036. + /* 1014 */ u8 read_opcode; // maybe for header?
  4037. + /* 1015 */ u8 read_opcode_quad;
  4038. + /* 1016 */ u8 read_opcode_dual;
  4039. + /* 1017 */ u8 read_opcode_single;
  4040. + /* 1018 */ u8 write_opcode; // not used?
  4041. + /* 1019 */ u8 write_opcode_quad;
  4042. + /* 101a */ u8 write_opcode_dual;
  4043. + /* 101b */ u8 write_opcode_single;
  4044. + /* 101c */ u32 _unknown_101c;
  4045. /* 1020 */ u32 error_control;
  4046. /* 1024 */ u32 error_status; // write to clear
  4047. /* 1028 */ u32 error_flags; // write to clear
  4048. @@ -100,12 +158,19 @@ struct ithc_registers {
  4049. /* 109a */ u8 _unknown_109a;
  4050. /* 109b */ u8 num_prds;
  4051. /* 109c */ u32 status; // write to clear
  4052. + /* 10a0 */ u32 _unknown_10a0[5];
  4053. + /* 10b4 */ u32 spi_addr;
  4054. } dma_tx;
  4055. - /* 10a0 */ u32 _unknown_10a0[7];
  4056. - /* 10bc */ u32 state; // is 0xe0000402 (dev config val 0) after CONTROL_NRESET, 0xe0000461 after first touch, 0xe0000401 after DMA_RX_CODE_RESET
  4057. + /* 10b8 */ u32 spi_header_addr;
  4058. + union {
  4059. + /* 10bc */ u32 irq_cause; // in legacy THC mode
  4060. + /* 10bc */ u32 input_header; // in QuickSPI mode (see HIDSPI spec)
  4061. + };
  4062. /* 10c0 */ u32 _unknown_10c0[8];
  4063. /* 10e0 */ u32 _unknown_10e0_counters[3];
  4064. - /* 10ec */ u32 _unknown_10ec[5];
  4065. + /* 10ec */ u32 quickspi_config1;
  4066. + /* 10f0 */ u32 quickspi_config2;
  4067. + /* 10f4 */ u32 _unknown_10f4[3];
  4068. struct {
  4069. /* 1100/1200 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq()
  4070. /* 1108/1208 */ u8 num_bufs;
  4071. @@ -120,70 +185,30 @@ struct ithc_registers {
  4072. /* 1118/1218 */ u64 _unknown_1118_guc_addr;
  4073. /* 1120/1220 */ u32 _unknown_1120_guc;
  4074. /* 1124/1224 */ u32 _unknown_1124_guc;
  4075. - /* 1128/1228 */ u32 unknown_init_bits; // bit 2 = guc related, bit 3 = rx1 related, bit 4 = guc related
  4076. + /* 1128/1228 */ u32 init_unknown;
  4077. /* 112c/122c */ u32 _unknown_112c;
  4078. /* 1130/1230 */ u64 _unknown_1130_guc_addr;
  4079. /* 1138/1238 */ u32 _unknown_1138_guc;
  4080. /* 113c/123c */ u32 _unknown_113c;
  4081. /* 1140/1240 */ u32 _unknown_1140_guc;
  4082. - /* 1144/1244 */ u32 _unknown_1144[23];
  4083. + /* 1144/1244 */ u32 _unknown_1144[11];
  4084. + /* 1170/1270 */ u32 spi_addr;
  4085. + /* 1174/1274 */ u32 _unknown_1174[11];
  4086. /* 11a0/12a0 */ u32 _unknown_11a0_counters[6];
  4087. /* 11b8/12b8 */ u32 _unknown_11b8[18];
  4088. } dma_rx[2];
  4089. };
  4090. static_assert(sizeof(struct ithc_registers) == 0x1300);
  4091. -#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6)
  4092. -#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6)
  4093. -
  4094. -#define DEVCFG_TOUCH_MASK 0x3f
  4095. -#define DEVCFG_TOUCH_ENABLE BIT(0)
  4096. -#define DEVCFG_TOUCH_UNKNOWN_1 BIT(1)
  4097. -#define DEVCFG_TOUCH_UNKNOWN_2 BIT(2)
  4098. -#define DEVCFG_TOUCH_UNKNOWN_3 BIT(3)
  4099. -#define DEVCFG_TOUCH_UNKNOWN_4 BIT(4)
  4100. -#define DEVCFG_TOUCH_UNKNOWN_5 BIT(5)
  4101. -#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6)
  4102. -
  4103. -#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC"
  4104. -
  4105. -#define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode?
  4106. -#define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3)
  4107. -#define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f)
  4108. -#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20) // TODO implement heartbeat
  4109. -#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
  4110. -#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
  4111. -#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
  4112. -#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
  4113. -#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7) // TODO use this
  4114. -#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31) // TODO use this?
  4115. -
  4116. -struct ithc_device_config { // (Example values are from an SP7+.)
  4117. - u32 _unknown_00; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
  4118. - u32 _unknown_04; // 04 = 0x00000000
  4119. - u32 dma_buf_sizes; // 08 = 0x000a00ff
  4120. - u32 touch_cfg; // 0c = 0x0000001c
  4121. - u32 _unknown_10; // 10 = 0x0000001c
  4122. - u32 device_id; // 14 = 0x43495424 = "$TIC"
  4123. - u32 spi_config; // 18 = 0xfda00a2e
  4124. - u16 vendor_id; // 1c = 0x045e = Microsoft Corp.
  4125. - u16 product_id; // 1e = 0x0c1a
  4126. - u32 revision; // 20 = 0x00000001
  4127. - u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139 (this value looks more random on newer devices)
  4128. - u32 _unknown_28; // 28 = 0x00000000
  4129. - u32 fw_mode; // 2c = 0x00000000 (for fw update?)
  4130. - u32 _unknown_30; // 30 = 0x00000000
  4131. - u32 _unknown_34; // 34 = 0x0404035e (u8,u8,u8,u8 = version?)
  4132. - u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
  4133. - u32 _unknown_3c; // 3c = 0x00000002
  4134. -};
  4135. -
  4136. void bitsl(__iomem u32 *reg, u32 mask, u32 val);
  4137. void bitsb(__iomem u8 *reg, u8 mask, u8 val);
  4138. #define bitsl_set(reg, x) bitsl(reg, x, x)
  4139. #define bitsb_set(reg, x) bitsb(reg, x, x)
  4140. int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val);
  4141. int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val);
  4142. -int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode);
  4143. +
  4144. +void ithc_set_ltr_config(struct ithc *ithc, u64 active_ltr_ns, u64 idle_ltr_ns);
  4145. +void ithc_set_ltr_idle(struct ithc *ithc);
  4146. +int ithc_set_spi_config(struct ithc *ithc, u8 clkdiv, bool clkdiv8, u8 read_mode, u8 write_mode);
  4147. int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data);
  4148. diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
  4149. index 028e55a4ec53..e90c38044432 100644
  4150. --- a/drivers/hid/ithc/ithc.h
  4151. +++ b/drivers/hid/ithc/ithc.h
  4152. @@ -1,20 +1,19 @@
  4153. /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
  4154. -#include <linux/module.h>
  4155. -#include <linux/input.h>
  4156. -#include <linux/hid.h>
  4157. +#include <linux/acpi.h>
  4158. +#include <linux/debugfs.h>
  4159. +#include <linux/delay.h>
  4160. #include <linux/dma-mapping.h>
  4161. +#include <linux/hid.h>
  4162. #include <linux/highmem.h>
  4163. -#include <linux/pci.h>
  4164. +#include <linux/input.h>
  4165. #include <linux/io-64-nonatomic-lo-hi.h>
  4166. #include <linux/iopoll.h>
  4167. -#include <linux/delay.h>
  4168. #include <linux/kthread.h>
  4169. #include <linux/miscdevice.h>
  4170. -#include <linux/debugfs.h>
  4171. +#include <linux/module.h>
  4172. +#include <linux/pci.h>
  4173. #include <linux/poll.h>
  4174. -#include <linux/timer.h>
  4175. -#include <linux/pm_qos.h>
  4176. #define DEVNAME "ithc"
  4177. #define DEVFULLNAME "Intel Touch Host Controller"
  4178. @@ -27,10 +26,37 @@
  4179. #define NUM_RX_BUF 16
  4180. +// PCI device IDs:
  4181. +// Lakefield
  4182. +#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0
  4183. +#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1
  4184. +// Tiger Lake
  4185. +#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0
  4186. +#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1
  4187. +#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 0x43d0
  4188. +#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1
  4189. +// Alder Lake
  4190. +#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8
  4191. +#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9
  4192. +#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0
  4193. +#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1
  4194. +#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0
  4195. +#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1
  4196. +// Raptor Lake
  4197. +#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58
  4198. +#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59
  4199. +// Meteor Lake
  4200. +#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48
  4201. +#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 0x7e4a
  4202. +
  4203. struct ithc;
  4204. #include "ithc-regs.h"
  4205. +#include "ithc-hid.h"
  4206. #include "ithc-dma.h"
  4207. +#include "ithc-legacy.h"
  4208. +#include "ithc-quickspi.h"
  4209. +#include "ithc-debug.h"
  4210. struct ithc {
  4211. char phys[32];
  4212. @@ -38,30 +64,21 @@ struct ithc {
  4213. int irq;
  4214. struct task_struct *poll_thread;
  4215. - struct pm_qos_request activity_qos;
  4216. - struct hrtimer activity_start_timer;
  4217. - struct hrtimer activity_end_timer;
  4218. - ktime_t last_rx_time;
  4219. - unsigned int cur_rx_seq_count;
  4220. - unsigned int cur_rx_seq_errors;
  4221. -
  4222. - struct hid_device *hid;
  4223. - bool hid_parse_done;
  4224. - wait_queue_head_t wait_hid_parse;
  4225. - wait_queue_head_t wait_hid_get_feature;
  4226. - struct mutex hid_get_feature_mutex;
  4227. - void *hid_get_feature_buf;
  4228. - size_t hid_get_feature_size;
  4229. -
  4230. struct ithc_registers __iomem *regs;
  4231. struct ithc_registers *prev_regs; // for debugging
  4232. - struct ithc_device_config config;
  4233. struct ithc_dma_rx dma_rx[2];
  4234. struct ithc_dma_tx dma_tx;
  4235. + struct ithc_hid hid;
  4236. +
  4237. + bool use_quickspi;
  4238. + bool have_config;
  4239. + u16 vendor_id;
  4240. + u16 product_id;
  4241. + u32 product_rev;
  4242. + u32 max_rx_size;
  4243. + u32 max_tx_size;
  4244. + u32 legacy_touch_cfg;
  4245. };
  4246. int ithc_reset(struct ithc *ithc);
  4247. -void ithc_set_active(struct ithc *ithc, unsigned int duration_us);
  4248. -int ithc_debug_init(struct ithc *ithc);
  4249. -void ithc_log_regs(struct ithc *ithc);
  4250. --
  4251. 2.45.2
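Note on the LTR configuration added above: ithc_set_ltr_config() encodes each latency as a 10-bit value plus a 3-bit scale, so the latency reported to the platform is value << (5 * scale) ns (i.e. value * 32^scale), clamped at scale 5 / value 0x3ff. A minimal stand-alone sketch of that encoding, mirroring calc_ltr(); the helper name and the demo values are illustrative assumptions, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: split a nanosecond latency into a 10-bit value and a
 * 3-bit scale (0..5), rounding down, and return the latency the encoding
 * actually represents (value << (5 * scale) ns). */
static uint64_t ltr_encode(uint64_t ns, unsigned int *val, unsigned int *scale)
{
	unsigned int s = 0;
	uint64_t v = ns;

	while (v > 0x3ff) {	/* value field is 10 bits wide */
		s++;
		v >>= 5;	/* each scale step multiplies the unit by 32 */
	}
	if (s > 5) {		/* clamp to the largest encodable latency */
		s = 5;
		v = 0x3ff;
	}
	*val = v;
	*scale = s;
	return v << (5 * s);
}

int main(void)
{
	unsigned int val, scale;

	/* 100 ms idle LTR -> value 95, scale 4, ~99.6 ms actually encoded */
	uint64_t enc = ltr_encode(100ULL * 1000 * 1000, &val, &scale);
	printf("val=%u scale=%u encoded=%llu ns\n", val, scale,
	       (unsigned long long)enc);
	return 0;
}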
  4252. From 79abe7fc9d3cd1eda0d9904695a98e98eab037aa Mon Sep 17 00:00:00 2001
  4253. From: Maximilian Luz <luzmaximilian@gmail.com>
  4254. Date: Sun, 4 Aug 2024 16:04:53 +0200
  4255. Subject: [PATCH] hid: ithc: Update from quo/ithc-linux
  4256. - Enable support for SL6/SP10
  4257. - Fixes for SP8
  4258. Based on: https://github.com/quo/ithc-linux/commit/34539af4726d970f9765363bb78b5fd920611a0b
  4259. Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
  4260. Patchset: ithc
  4261. ---
  4262. drivers/hid/ithc/ithc-legacy.c | 4 +-
  4263. drivers/hid/ithc/ithc-main.c | 91 +++++++++-----------------------
  4264. drivers/hid/ithc/ithc-quickspi.c | 53 ++++++++++++++-----
  4265. drivers/hid/ithc/ithc-regs.h | 15 +++---
  4266. drivers/hid/ithc/ithc.h | 9 +++-
  4267. 5 files changed, 82 insertions(+), 90 deletions(-)
  4268. diff --git a/drivers/hid/ithc/ithc-legacy.c b/drivers/hid/ithc/ithc-legacy.c
  4269. index 5c1da11e3f1d..8883987fb352 100644
  4270. --- a/drivers/hid/ithc/ithc-legacy.c
  4271. +++ b/drivers/hid/ithc/ithc-legacy.c
  4272. @@ -82,8 +82,10 @@ int ithc_legacy_init(struct ithc *ithc)
  4273. // Setting the following bit seems to make reading the config more reliable.
  4274. bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_31);
  4275. - // Setting this bit may be necessary on some ADL devices.
  4276. + // Setting this bit may be necessary on ADL devices.
  4277. switch (ithc->pci->device) {
  4278. + case PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1:
  4279. + case PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2:
  4280. case PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1:
  4281. case PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2:
  4282. case PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1:
  4283. diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c
  4284. index 2acf02e41d40..ac56c253674b 100644
  4285. --- a/drivers/hid/ithc/ithc-main.c
  4286. +++ b/drivers/hid/ithc/ithc-main.c
  4287. @@ -6,25 +6,14 @@ MODULE_DESCRIPTION("Intel Touch Host Controller driver");
  4288. MODULE_LICENSE("Dual BSD/GPL");
  4289. static const struct pci_device_id ithc_pci_tbl[] = {
  4290. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) },
  4291. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) },
  4292. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1) },
  4293. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2) },
  4294. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1) },
  4295. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2) },
  4296. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1) },
  4297. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2) },
  4298. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1) },
  4299. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2) },
  4300. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1) },
  4301. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2) },
  4302. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1) },
  4303. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) },
  4304. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) },
  4305. - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) },
  4306. - // XXX So far the THC seems to be the only Intel PCI device with PCI_CLASS_INPUT_PEN,
  4307. - // so instead of the device list we could just do:
  4308. - // { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = PCI_CLASS_INPUT_PEN, .class_mask = ~0, },
  4309. + {
  4310. + .vendor = PCI_VENDOR_ID_INTEL,
  4311. + .device = PCI_ANY_ID,
  4312. + .subvendor = PCI_ANY_ID,
  4313. + .subdevice = PCI_ANY_ID,
  4314. + .class = PCI_CLASS_INPUT_PEN << 8,
  4315. + .class_mask = ~0,
  4316. + },
  4317. {}
  4318. };
  4319. MODULE_DEVICE_TABLE(pci, ithc_pci_tbl);
  4320. @@ -52,50 +41,14 @@ static int ithc_idle_ltr_us = -1;
  4321. module_param_named(idleltr, ithc_idle_ltr_us, int, 0);
  4322. MODULE_PARM_DESC(idleltr, "Idle LTR value override (in microseconds)");
  4323. +static unsigned int ithc_idle_delay_ms = 1000;
  4324. +module_param_named(idledelay, ithc_idle_delay_ms, uint, 0);
  4325. +MODULE_PARM_DESC(idledelay, "Minimum idle time before applying idle LTR value (in milliseconds)");
  4326. +
  4327. static bool ithc_log_regs_enabled = false;
  4328. module_param_named(logregs, ithc_log_regs_enabled, bool, 0);
  4329. MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)");
  4330. -// Sysfs attributes
  4331. -
  4332. -static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
  4333. -{
  4334. - struct ithc *ithc = dev_get_drvdata(dev);
  4335. - if (!ithc || !ithc->have_config)
  4336. - return -ENODEV;
  4337. - return sprintf(buf, "0x%04x", ithc->vendor_id);
  4338. -}
  4339. -static DEVICE_ATTR_RO(vendor);
  4340. -static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf)
  4341. -{
  4342. - struct ithc *ithc = dev_get_drvdata(dev);
  4343. - if (!ithc || !ithc->have_config)
  4344. - return -ENODEV;
  4345. - return sprintf(buf, "0x%04x", ithc->product_id);
  4346. -}
  4347. -static DEVICE_ATTR_RO(product);
  4348. -static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf)
  4349. -{
  4350. - struct ithc *ithc = dev_get_drvdata(dev);
  4351. - if (!ithc || !ithc->have_config)
  4352. - return -ENODEV;
  4353. - return sprintf(buf, "%u", ithc->product_rev);
  4354. -}
  4355. -static DEVICE_ATTR_RO(revision);
  4356. -
  4357. -static const struct attribute_group *ithc_attribute_groups[] = {
  4358. - &(const struct attribute_group){
  4359. - .name = DEVNAME,
  4360. - .attrs = (struct attribute *[]){
  4361. - &dev_attr_vendor.attr,
  4362. - &dev_attr_product.attr,
  4363. - &dev_attr_revision.attr,
  4364. - NULL
  4365. - },
  4366. - },
  4367. - NULL
  4368. -};
  4369. -
  4370. // Interrupts/polling
  4371. static void ithc_disable_interrupts(struct ithc *ithc)
  4372. @@ -124,14 +77,19 @@ static void ithc_clear_interrupts(struct ithc *ithc)
  4373. &ithc->regs->dma_tx.status);
  4374. }
  4375. +static void ithc_idle_timer_callback(struct timer_list *t)
  4376. +{
  4377. + struct ithc *ithc = container_of(t, struct ithc, idle_timer);
  4378. + ithc_set_ltr_idle(ithc);
  4379. +}
  4380. +
  4381. static void ithc_process(struct ithc *ithc)
  4382. {
  4383. ithc_log_regs(ithc);
  4384. // The THC automatically transitions from LTR idle to active at the start of a DMA transfer.
  4385. - // It does not appear to automatically go back to idle, so we switch it back here, since
  4386. - // the DMA transfer should be complete.
  4387. - ithc_set_ltr_idle(ithc);
  4388. + // It does not appear to automatically go back to idle, so we switch it back after a delay.
  4389. + mod_timer(&ithc->idle_timer, jiffies + msecs_to_jiffies(ithc_idle_delay_ms));
  4390. bool rx0 = ithc_use_rx0 && (readl(&ithc->regs->dma_rx[0].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
  4391. bool rx1 = ithc_use_rx1 && (readl(&ithc->regs->dma_rx[1].status) & (DMA_RX_STATUS_ERROR | DMA_RX_STATUS_HAVE_DATA)) != 0;
  4392. @@ -231,10 +189,10 @@ static int ithc_init_device(struct ithc *ithc)
  4393. // Set Latency Tolerance Reporting config. The device will automatically
  4394. // apply these values depending on whether it is active or idle.
  4395. // If active value is too high, DMA buffer data can become truncated.
  4396. - // By default, we set the active LTR value to 100us, and idle to 100ms.
  4397. + // By default, we set the active LTR value to 50us, and idle to 100ms.
  4398. u64 active_ltr_ns = ithc_active_ltr_us >= 0 ? (u64)ithc_active_ltr_us * 1000
  4399. : cfg.has_config && cfg.has_active_ltr ? (u64)cfg.active_ltr << 10
  4400. - : 100 * 1000;
  4401. + : 50 * 1000;
  4402. u64 idle_ltr_ns = ithc_idle_ltr_us >= 0 ? (u64)ithc_idle_ltr_us * 1000
  4403. : cfg.has_config && cfg.has_idle_ltr ? (u64)cfg.idle_ltr << 10
  4404. : 100 * 1000 * 1000;
  4405. @@ -279,6 +237,7 @@ static void ithc_stop(void *res)
  4406. else
  4407. ithc_legacy_exit(ithc);
  4408. ithc_disable(ithc);
  4409. + del_timer_sync(&ithc->idle_timer);
  4410. // Clear DMA config.
  4411. for (unsigned int i = 0; i < 2; i++) {
  4412. @@ -343,13 +302,14 @@ static int ithc_start(struct pci_dev *pci)
  4413. // Initialize HID and DMA.
  4414. CHECK_RET(ithc_hid_init, ithc);
  4415. - CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups);
  4416. if (ithc_use_rx0)
  4417. CHECK_RET(ithc_dma_rx_init, ithc, 0);
  4418. if (ithc_use_rx1)
  4419. CHECK_RET(ithc_dma_rx_init, ithc, 1);
  4420. CHECK_RET(ithc_dma_tx_init, ithc);
  4421. + timer_setup(&ithc->idle_timer, ithc_idle_timer_callback, 0);
  4422. +
  4423. // Add ithc_stop() callback AFTER setting up DMA buffers, so that polling/irqs/DMA are
  4424. // disabled BEFORE the buffers are freed.
  4425. CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc);
  4426. @@ -452,7 +412,6 @@ static struct pci_driver ithc_driver = {
  4427. .restore = ithc_restore,
  4428. },
  4429. .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
  4430. - //.dev_groups = ithc_attribute_groups, // could use this (since 5.14), however the attributes won't have valid values until config has been read anyway
  4431. };
  4432. static int __init ithc_init(void)
  4433. diff --git a/drivers/hid/ithc/ithc-quickspi.c b/drivers/hid/ithc/ithc-quickspi.c
  4434. index 760e55ead078..e2d1690b8cf8 100644
  4435. --- a/drivers/hid/ithc/ithc-quickspi.c
  4436. +++ b/drivers/hid/ithc/ithc-quickspi.c
  4437. @@ -257,6 +257,14 @@ void ithc_print_acpi_config(struct ithc *ithc, const struct ithc_acpi_config *cf
  4438. spi_frequency, limit_packet_size, tx_delay, active_ltr, idle_ltr);
  4439. }
  4440. +static void set_opcode(struct ithc *ithc, size_t i, u8 opcode)
  4441. +{
  4442. + writeb(opcode, &ithc->regs->opcode[i].header);
  4443. + writeb(opcode, &ithc->regs->opcode[i].single);
  4444. + writeb(opcode, &ithc->regs->opcode[i].dual);
  4445. + writeb(opcode, &ithc->regs->opcode[i].quad);
  4446. +}
  4447. +
  4448. static int ithc_quickspi_init_regs(struct ithc *ithc, const struct ithc_acpi_config *cfg)
  4449. {
  4450. pci_dbg(ithc->pci, "initializing QuickSPI registers\n");
  4451. @@ -279,26 +287,47 @@ static int ithc_quickspi_init_regs(struct ithc *ithc, const struct ithc_acpi_con
  4452. // SPI addresses and opcodes
  4453. if (cfg->has_input_report_header_address)
  4454. writel(cfg->input_report_header_address, &ithc->regs->spi_header_addr);
  4455. - if (cfg->has_input_report_body_address)
  4456. + if (cfg->has_input_report_body_address) {
  4457. writel(cfg->input_report_body_address, &ithc->regs->dma_rx[0].spi_addr);
  4458. + writel(cfg->input_report_body_address, &ithc->regs->dma_rx[1].spi_addr);
  4459. + }
  4460. if (cfg->has_output_report_body_address)
  4461. writel(cfg->output_report_body_address, &ithc->regs->dma_tx.spi_addr);
  4462. - if (cfg->has_read_opcode) {
  4463. - writeb(cfg->read_opcode, &ithc->regs->read_opcode);
  4464. - writeb(cfg->read_opcode, &ithc->regs->read_opcode_single);
  4465. - writeb(cfg->read_opcode, &ithc->regs->read_opcode_dual);
  4466. - writeb(cfg->read_opcode, &ithc->regs->read_opcode_quad);
  4467. - }
  4468. - if (cfg->has_write_opcode) {
  4469. - writeb(cfg->write_opcode, &ithc->regs->write_opcode);
  4470. - writeb(cfg->write_opcode, &ithc->regs->write_opcode_single);
  4471. - writeb(cfg->write_opcode, &ithc->regs->write_opcode_dual);
  4472. - writeb(cfg->write_opcode, &ithc->regs->write_opcode_quad);
  4473. + switch (ithc->pci->device) {
  4474. + // LKF/TGL don't support QuickSPI.
  4475. + // For ADL, opcode layout is RX/TX/unused.
  4476. + case PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1:
  4477. + case PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2:
  4478. + case PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1:
  4479. + case PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2:
  4480. + case PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1:
  4481. + case PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2:
  4482. + if (cfg->has_read_opcode) {
  4483. + set_opcode(ithc, 0, cfg->read_opcode);
  4484. + }
  4485. + if (cfg->has_write_opcode) {
  4486. + set_opcode(ithc, 1, cfg->write_opcode);
  4487. + }
  4488. + break;
  4489. + // For MTL, opcode layout was changed to RX/RX/TX.
  4490. + // (RPL layout is unknown.)
  4491. + default:
  4492. + if (cfg->has_read_opcode) {
  4493. + set_opcode(ithc, 0, cfg->read_opcode);
  4494. + set_opcode(ithc, 1, cfg->read_opcode);
  4495. + }
  4496. + if (cfg->has_write_opcode) {
  4497. + set_opcode(ithc, 2, cfg->write_opcode);
  4498. + }
  4499. + break;
  4500. }
  4501. +
  4502. ithc_log_regs(ithc);
  4503. // The rest...
  4504. + bitsl_set(&ithc->regs->dma_rx[0].init_unknown, INIT_UNKNOWN_31);
  4505. +
  4506. bitsl(&ithc->regs->quickspi_config1,
  4507. QUICKSPI_CONFIG1_UNKNOWN_0(0xff) | QUICKSPI_CONFIG1_UNKNOWN_5(0xff) |
  4508. QUICKSPI_CONFIG1_UNKNOWN_10(0xff) | QUICKSPI_CONFIG1_UNKNOWN_16(0xffff),
  4509. diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h
  4510. index a9d236454644..4f541fe533fa 100644
  4511. --- a/drivers/hid/ithc/ithc-regs.h
  4512. +++ b/drivers/hid/ithc/ithc-regs.h
  4513. @@ -129,15 +129,12 @@ struct ithc_registers {
  4514. /* 1008 */ u32 control_bits;
  4515. /* 100c */ u32 _unknown_100c;
  4516. /* 1010 */ u32 spi_config;
  4517. - /* 1014 */ u8 read_opcode; // maybe for header?
  4518. - /* 1015 */ u8 read_opcode_quad;
  4519. - /* 1016 */ u8 read_opcode_dual;
  4520. - /* 1017 */ u8 read_opcode_single;
  4521. - /* 1018 */ u8 write_opcode; // not used?
  4522. - /* 1019 */ u8 write_opcode_quad;
  4523. - /* 101a */ u8 write_opcode_dual;
  4524. - /* 101b */ u8 write_opcode_single;
  4525. - /* 101c */ u32 _unknown_101c;
  4526. + struct {
  4527. + /* 1014/1018/101c */ u8 header;
  4528. + /* 1015/1019/101d */ u8 quad;
  4529. + /* 1016/101a/101e */ u8 dual;
  4530. + /* 1017/101b/101f */ u8 single;
  4531. + } opcode[3];
  4532. /* 1020 */ u32 error_control;
  4533. /* 1024 */ u32 error_status; // write to clear
  4534. /* 1028 */ u32 error_flags; // write to clear
  4535. diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
  4536. index e90c38044432..aec320d4e945 100644
  4537. --- a/drivers/hid/ithc/ithc.h
  4538. +++ b/drivers/hid/ithc/ithc.h
  4539. @@ -14,6 +14,8 @@
  4540. #include <linux/module.h>
  4541. #include <linux/pci.h>
  4542. #include <linux/poll.h>
  4543. +#include <linux/timer.h>
  4544. +#include <linux/vmalloc.h>
  4545. #define DEVNAME "ithc"
  4546. #define DEVFULLNAME "Intel Touch Host Controller"
  4547. @@ -46,8 +48,10 @@
  4548. #define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58
  4549. #define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59
  4550. // Meteor Lake
  4551. -#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48
  4552. -#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 0x7e4a
  4553. +#define PCI_DEVICE_ID_INTEL_THC_MTL_S_PORT1 0x7f59
  4554. +#define PCI_DEVICE_ID_INTEL_THC_MTL_S_PORT2 0x7f5b
  4555. +#define PCI_DEVICE_ID_INTEL_THC_MTL_MP_PORT1 0x7e49
  4556. +#define PCI_DEVICE_ID_INTEL_THC_MTL_MP_PORT2 0x7e4b
  4557. struct ithc;
  4558. @@ -63,6 +67,7 @@ struct ithc {
  4559. struct pci_dev *pci;
  4560. int irq;
  4561. struct task_struct *poll_thread;
  4562. + struct timer_list idle_timer;
  4563. struct ithc_registers __iomem *regs;
  4564. struct ithc_registers *prev_regs; // for debugging
  4565. --
  4566. 2.45.2
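Note on the SPI clock fields reworked in the first patch: ithc_set_spi_config() programs read/write clock dividers of 1..7 against a 125 MHz base (SPI_CLK_FREQ_BASE), optionally divided by a further 8 via SPI_CONFIG_CLKDIV_8, giving freq = SPI_CLK_FREQ_BASE / (clkdiv * (clkdiv8 ? 8 : 1)). A small illustrative helper that picks the fastest divider pair not exceeding a requested frequency; the selection policy and all names here are assumptions for illustration, not taken from the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPI_CLK_FREQ_BASE 125000000u

/* Illustrative only: choose clkdiv (1..7) and the extra /8 prescaler so the
 * resulting frequency does not exceed the requested one; fall back to the
 * slowest setting (clkdiv=7 with /8) if the request is lower than that. */
static bool pick_spi_clk(uint32_t req_hz, uint8_t *clkdiv, bool *clkdiv8)
{
	for (int div8 = 0; div8 <= 1; div8++) {
		for (uint8_t d = 1; d <= 7; d++) {
			uint32_t hz = SPI_CLK_FREQ_BASE / (d * (div8 ? 8 : 1));
			if (hz <= req_hz) {
				*clkdiv = d;
				*clkdiv8 = div8;
				return true;
			}
		}
	}
	*clkdiv = 7;
	*clkdiv8 = true;
	return false;
}

int main(void)
{
	uint8_t d;
	bool d8;

	pick_spi_clk(30000000, &d, &d8);	/* 30 MHz request -> 125/5 = 25 MHz */
	printf("clkdiv=%u clkdiv8=%d -> %u Hz\n", d, d8,
	       SPI_CLK_FREQ_BASE / (d * (d8 ? 8 : 1)));
	return 0;
}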