0003-surface-sam.patch

From cebebc5233f7fc9406332e77ada2950a4e1896ab Mon Sep 17 00:00:00 2001
From: qzed <qzed@users.noreply.github.com>
Date: Mon, 26 Aug 2019 01:11:08 +0200
Subject: [PATCH 3/6] surface-sam
---
 drivers/acpi/acpica/dsopcode.c | 2 +-
 drivers/acpi/acpica/exfield.c | 12 +-
 drivers/platform/x86/Kconfig | 1 +
 drivers/platform/x86/Makefile | 1 +
 drivers/platform/x86/surface_sam/Kconfig | 176 +
 drivers/platform/x86/surface_sam/Makefile | 16 +
 .../x86/surface_sam/surface_sam_debugfs.c | 270 +
 .../x86/surface_sam/surface_sam_dtx.c | 582 ++
 .../x86/surface_sam/surface_sam_hps.c | 1287 ++++
 .../x86/surface_sam/surface_sam_san.c | 930 +++
 .../x86/surface_sam/surface_sam_san.h | 30 +
 .../x86/surface_sam/surface_sam_sid.c | 283 +
 .../x86/surface_sam/surface_sam_sid_gpelid.c | 232 +
 .../surface_sam/surface_sam_sid_perfmode.c | 214 +
 .../x86/surface_sam/surface_sam_sid_power.c | 1054 ++++
 .../x86/surface_sam/surface_sam_sid_power.h | 16 +
 .../x86/surface_sam/surface_sam_sid_vhf.c | 429 ++
 .../x86/surface_sam/surface_sam_sid_vhf.h | 14 +
 .../x86/surface_sam/surface_sam_ssh.c | 5329 +++++++++++++++++
 .../x86/surface_sam/surface_sam_ssh.h | 717 +++
 .../x86/surface_sam/surface_sam_ssh_trace.h | 587 ++
 .../x86/surface_sam/surface_sam_vhf.c | 266 +
 drivers/tty/serdev/core.c | 111 +-
 23 files changed, 12543 insertions(+), 16 deletions(-)
 create mode 100644 drivers/platform/x86/surface_sam/Kconfig
 create mode 100644 drivers/platform/x86/surface_sam/Makefile
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_debugfs.c
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_dtx.c
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_hps.c
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_san.c
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_san.h
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid.c
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_power.c
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_power.h
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh.c
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh.h
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h
 create mode 100644 drivers/platform/x86/surface_sam/surface_sam_vhf.c
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 10f32b62608ee..7b2a4987f0507 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -123,7 +123,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Offset is in bits, count is in bits */
- field_flags = AML_FIELD_ACCESS_BYTE;
+ field_flags = AML_FIELD_ACCESS_BUFFER;
bit_offset = offset;
bit_count = (u32) length_desc->integer.value;
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index d3d2dbfba680c..0b7f617a6e9b1 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -109,6 +109,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
union acpi_operand_object *buffer_desc;
void *buffer;
u32 buffer_length;
+ u8 field_flags;
ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
@@ -157,11 +158,16 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
* Note: Field.length is in bits.
*/
buffer_length =
- (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
+ (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
+ field_flags = obj_desc->common_field.field_flags;
- if (buffer_length > acpi_gbl_integer_byte_width) {
+ if (buffer_length > acpi_gbl_integer_byte_width ||
+ (field_flags & AML_FIELD_ACCESS_TYPE_MASK) == AML_FIELD_ACCESS_BUFFER) {
- /* Field is too large for an Integer, create a Buffer instead */
+ /*
+ * Field is either too large for an Integer, or is actually of type
+ * Buffer, so create a Buffer.
  82. + */
  83. buffer_desc = acpi_ut_create_buffer_object(buffer_length);
  84. if (!buffer_desc) {
  85. diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
  86. index 348c795019fa4..d25067a838a33 100644
  87. --- a/drivers/platform/x86/Kconfig
  88. +++ b/drivers/platform/x86/Kconfig
  89. @@ -1342,6 +1342,7 @@ config PCENGINES_APU2
  90. will be called pcengines-apuv2.
  91. source "drivers/platform/x86/intel_speed_select_if/Kconfig"
  92. +source "drivers/platform/x86/surface_sam/Kconfig"
  93. endif # X86_PLATFORM_DEVICES
  94. diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
  95. index 6dd955ad9bf18..19b56f2181eb9 100644
  96. --- a/drivers/platform/x86/Makefile
  97. +++ b/drivers/platform/x86/Makefile
  98. @@ -101,3 +101,4 @@ obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
  99. obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
  100. obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
  101. obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
  102. +obj-$(CONFIG_SURFACE_SAM) += surface_sam/
  103. diff --git a/drivers/platform/x86/surface_sam/Kconfig b/drivers/platform/x86/surface_sam/Kconfig
  104. new file mode 100644
  105. index 0000000000000..b5bb55248a5d5
  106. --- /dev/null
  107. +++ b/drivers/platform/x86/surface_sam/Kconfig
  108. @@ -0,0 +1,176 @@
  109. +menuconfig SURFACE_SAM
  110. + depends on ACPI
  111. + tristate "Microsoft Surface/System Aggregator Module and Platform Drivers"
  112. + help
  113. + Drivers for the Surface/System Aggregator Module (SAM) of Microsoft
  114. + Surface devices.
  115. +
  116. + SAM is an embedded controller that provides access to various
  117. + functionalities on these devices, including battery status, keyboard
  118. + events (on the Laptops) and many more.
  119. +
  120. + Say M/Y here if you have a Microsoft Surface device with a SAM device
  121. + (i.e. 5th generation or later).
  122. +
  123. +config SURFACE_SAM_SSH
  124. + tristate "Surface Serial Hub Driver"
  125. + depends on SURFACE_SAM
  126. + depends on SERIAL_DEV_CTRL_TTYPORT
  127. + select CRC_CCITT
  128. + default m
  129. + help
  130. + Surface Serial Hub driver for 5th generation (or later) Microsoft
  131. + Surface devices.
  132. +
  133. + This is the base driver for the embedded serial controller found on
  134. + 5th generation (and later) Microsoft Surface devices (e.g. Book 2,
  135. + Laptop, Laptop 2, Pro 2017, Pro 6, ...). This driver itself only
  136. + provides access to the embedded controller (SAM) and subsequent
  137. + drivers are required for the respective functionalities.
  138. +
  139. + If you have a 5th generation (or later) Microsoft Surface device, say
  140. + Y or M here.
  141. +
  142. +config SURFACE_SAM_SSH_ERROR_INJECTION
  143. + bool "Surface Serial Hub Error Injection Capabilities"
  144. + depends on SURFACE_SAM_SSH
  145. + depends on FUNCTION_ERROR_INJECTION
  146. + default n
  147. + help
  148. + Enable error injection capabilities for the Surface Serial Hub.
  149. + This is used to debug the driver, specifically the communication
  150. + interface. It is not required for normal use.
  151. +
  152. + If you are not sure, say N here.
  153. +
  154. +config SURFACE_SAM_DEBUGFS
  155. + tristate "Surface Serial Hub Debug Device"
  156. + depends on SURFACE_SAM_SSH
  157. + depends on DEBUG_FS
  158. + default n
  159. + help
  160. + Debug device for direct communication with the embedded controller
  161. + found on 5th generation (and later) Microsoft Surface devices (e.g.
  162. + Book 2, Laptop, Laptop 2, Pro 2017, Pro 6, ...) via debugfs.
  163. +
  164. + If you are not sure, say N here.
  165. +
  166. +config SURFACE_SAM_SAN
  167. + tristate "Surface ACPI Notify Driver"
  168. + depends on SURFACE_SAM_SSH
  169. + default m
  170. + help
  171. + Surface ACPI Notify driver for 5th generation (or later) Microsoft
  172. + Surface devices.
  173. +
  174. + This driver enables basic ACPI events and requests, such as battery
  175. + status requests/events, thermal events, lid status, and possibly more,
  176. + which would otherwise not work on these devices.
  177. +
  178. + If you are not sure, say M here.
  179. +
  180. +config SURFACE_SAM_VHF
  181. + tristate "Surface Virtual HID Framework Driver"
  182. + depends on SURFACE_SAM_SSH
  183. + depends on HID
  184. + default m
  185. + help
  186. + Surface Virtual HID Framework driver for 5th generation (or later)
  187. + Microsoft Surface devices.
  188. +
  189. + This driver provides support for the Microsoft Virtual HID framework,
  190. + which is required for keyboard support on the Surface Laptop 1 and 2.
  191. +
  192. + If you are not sure, say M here.
  193. +
  194. +config SURFACE_SAM_DTX
  195. + tristate "Surface Detachment System (DTX) Driver"
  196. + depends on SURFACE_SAM_SSH
  197. + depends on INPUT
  198. + default m
  199. + help
+ Surface Detachment System (DTX) driver for the Microsoft Surface Book
+ 2. Via the special /dev/surface_dtx device, this driver provides
+ support for proper detachment handling in user-space, status events
+ relating to the base, and a safe-guard that keeps the base attached
+ while the discrete GPU contained in it is in use.
+
+ Also provides a standard input device that reports SW_TABLET_MODE
+ events upon device-mode changes.
  208. +
  209. + If you are not sure, say M here.
  210. +
  211. +config SURFACE_SAM_HPS
  212. + tristate "Surface dGPU Hot-Plug System (dGPU-HPS) Driver"
  213. + depends on SURFACE_SAM_SSH
  214. + depends on SURFACE_SAM_SAN
  215. + depends on GPIO_SYSFS
  216. + default m
  217. + help
  218. + Driver to properly handle hot-plugging and explicit power-on/power-off
  219. + of the discrete GPU (dGPU) on the Surface Book 2 and 3.
  220. +
  221. + If you are not sure, say M here.
  222. +
  223. +config SURFACE_SAM_SID
  224. + tristate "Surface Platform Integration Driver"
  225. + depends on SURFACE_SAM_SSH
  226. + default m
  227. + help
  228. + Surface Platform Integration Driver for the Microsoft Surface Devices.
  229. + This driver loads various model-specific sub-drivers, including
  230. + battery and keyboard support on 7th generation Surface devices, proper
  231. + lid setup to enable device wakeup when the lid is opened on multiple
  232. + models, as well as performance mode setting support on the Surface
  233. + Book 2.
  234. +
  235. + If you are not sure, say M here.
  236. +
  237. +config SURFACE_SAM_SID_GPELID
  238. + tristate "Surface Lid Wakeup Driver"
  239. + depends on SURFACE_SAM_SID
  240. + default m
  241. + help
+ Driver to set up device wake-up via the lid on Intel-based Microsoft
+ Surface devices. These devices do not wake from sleep when the lid is
+ opened because the corresponding GPE interrupt is not configured
+ automatically; this driver sets it up.
  246. +
  247. + If you are not sure, say M here.
  248. +
  249. +config SURFACE_SAM_SID_PERFMODE
  250. + tristate "Surface Performance Mode Driver"
  251. + depends on SURFACE_SAM_SID
  252. + depends on SYSFS
  253. + default m
  254. + help
  255. + This driver provides support for setting performance-modes on Surface
  256. + devices via the perf_mode sysfs attribute. Currently only supports the
  257. + Surface Book 2. Performance-modes directly influence the fan-profile
+ of the device, allowing you to choose between higher performance and
+ quieter operation.
  260. +
  261. + If you are not sure, say M here.
  262. +
  263. +config SURFACE_SAM_SID_VHF
  264. + tristate "Surface SAM HID Driver"
  265. + depends on SURFACE_SAM_SID
  266. + depends on HID
  267. + default m
  268. + help
  269. + This driver provides support for HID devices connected via the Surface
  270. + SAM embedded controller. It provides support for keyboard and touchpad
  271. + on the Surface Laptop 3 models.
  272. +
  273. + If you are not sure, say M here.
  274. +
  275. +config SURFACE_SAM_SID_POWER
  276. + tristate "Surface SAM Battery/AC Driver"
  277. + depends on SURFACE_SAM_SID
  278. + select POWER_SUPPLY
  279. + default m
  280. + help
  281. + This driver provides support for the battery and AC on 7th generation
  282. + Surface devices.
  283. +
  284. + If you are not sure, say M here.
  285. diff --git a/drivers/platform/x86/surface_sam/Makefile b/drivers/platform/x86/surface_sam/Makefile
  286. new file mode 100644
  287. index 0000000000000..89bced46ebcdd
  288. --- /dev/null
  289. +++ b/drivers/platform/x86/surface_sam/Makefile
  290. @@ -0,0 +1,16 @@
  291. +# SPDX-License-Identifier: GPL-2.0-or-later
  292. +
  293. +# For include/trace/define_trace.h to include surface_sam_ssh_trace.h
  294. +CFLAGS_surface_sam_ssh.o = -I$(src)
  295. +
  296. +obj-$(CONFIG_SURFACE_SAM_SSH) += surface_sam_ssh.o
  297. +obj-$(CONFIG_SURFACE_SAM_SAN) += surface_sam_san.o
  298. +obj-$(CONFIG_SURFACE_SAM_DTX) += surface_sam_dtx.o
  299. +obj-$(CONFIG_SURFACE_SAM_HPS) += surface_sam_hps.o
  300. +obj-$(CONFIG_SURFACE_SAM_VHF) += surface_sam_vhf.o
  301. +obj-$(CONFIG_SURFACE_SAM_SID) += surface_sam_sid.o
  302. +obj-$(CONFIG_SURFACE_SAM_SID_GPELID) += surface_sam_sid_gpelid.o
  303. +obj-$(CONFIG_SURFACE_SAM_SID_PERFMODE) += surface_sam_sid_perfmode.o
  304. +obj-$(CONFIG_SURFACE_SAM_SID_POWER) += surface_sam_sid_power.o
  305. +obj-$(CONFIG_SURFACE_SAM_SID_VHF) += surface_sam_sid_vhf.o
  306. +obj-$(CONFIG_SURFACE_SAM_DEBUGFS) += surface_sam_debugfs.o
  307. diff --git a/drivers/platform/x86/surface_sam/surface_sam_debugfs.c b/drivers/platform/x86/surface_sam/surface_sam_debugfs.c
  308. new file mode 100644
  309. index 0000000000000..13e93404775c5
  310. --- /dev/null
  311. +++ b/drivers/platform/x86/surface_sam/surface_sam_debugfs.c
  312. @@ -0,0 +1,270 @@
  313. +// SPDX-License-Identifier: GPL-2.0-or-later
  314. +
  315. +#include <linux/debugfs.h>
  316. +#include <linux/fs.h>
  317. +#include <linux/kernel.h>
  318. +#include <linux/module.h>
  319. +#include <linux/platform_device.h>
  320. +#include <linux/slab.h>
  321. +#include <linux/uaccess.h>
  322. +
  323. +#include "surface_sam_ssh.h"
  324. +
  325. +#define SSAM_DBGDEV_NAME "surface_sam_dbgdev"
  326. +#define SSAM_DBGDEV_VERS 0x0100
  327. +
  328. +
  329. +struct ssam_dbgdev_request {
  330. + __u8 target_category;
  331. + __u8 command_id;
  332. + __u8 instance_id;
  333. + __u8 channel;
  334. + __u16 flags;
  335. + __s16 status;
  336. +
  337. + struct {
  338. + __u8 __pad[6];
  339. + __u16 length;
  340. + const __u8 __user *data;
  341. + } payload;
  342. +
  343. + struct {
  344. + __u8 __pad[6];
  345. + __u16 length;
  346. + __u8 __user *data;
  347. + } response;
  348. +};
  349. +
  350. +#define SSAM_DBGDEV_IOCTL_GETVERSION _IOR(0xA5, 0, __u32)
  351. +#define SSAM_DBGDEV_IOCTL_REQUEST _IOWR(0xA5, 1, struct ssam_dbgdev_request)
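+/*
+ * Illustrative user-space usage of the request ioctl (a sketch only, not part
+ * of the interface; the target category/command values and buffer size below
+ * are hypothetical):
+ *
+ *   __u8 buf[64];
+ *   struct ssam_dbgdev_request rqst = {
+ *           .target_category = 0x01,
+ *           .command_id      = 0x13,
+ *           .instance_id     = 0x00,
+ *           .channel         = 0x01,
+ *           .flags           = 0x01,   // "expects response" flag (assumed)
+ *           .payload         = { .length = 0, .data = NULL },
+ *           .response        = { .length = sizeof(buf), .data = buf },
+ *   };
+ *
+ *   int fd = open("/sys/kernel/debug/surface_sam/controller", O_RDWR);
+ *   int ret = ioctl(fd, SSAM_DBGDEV_IOCTL_REQUEST, &rqst);
+ *
+ * On success, rqst.status holds the status reported for the EC request and
+ * rqst.response.length the number of bytes written to buf.
+ */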
  352. +
  353. +
  354. +struct ssam_dbgdev {
  355. + struct ssam_controller *ctrl;
  356. + struct dentry *dentry_dir;
  357. + struct dentry *dentry_dev;
  358. +};
  359. +
  360. +
  361. +static int ssam_dbgdev_open(struct inode *inode, struct file *filp)
  362. +{
  363. + filp->private_data = inode->i_private;
  364. + return nonseekable_open(inode, filp);
  365. +}
  366. +
  367. +static long ssam_dbgdev_request(struct file *file, unsigned long arg)
  368. +{
  369. + struct ssam_dbgdev *ddev = file->private_data;
  370. + struct ssam_dbgdev_request __user *r;
  371. + struct ssam_dbgdev_request rqst;
  372. + struct ssam_request spec;
  373. + struct ssam_response rsp;
  374. + u8 *pldbuf = NULL;
  375. + u8 *rspbuf = NULL;
  376. + int status = 0, ret = 0, tmp;
  377. +
  378. + r = (struct ssam_dbgdev_request __user *)arg;
  379. + ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
  380. + if (ret)
  381. + goto out;
  382. +
  383. + // setup basic request fields
  384. + spec.target_category = rqst.target_category;
  385. + spec.command_id = rqst.command_id;
  386. + spec.instance_id = rqst.instance_id;
  387. + spec.channel = rqst.channel;
  388. + spec.flags = rqst.flags;
  389. + spec.length = rqst.payload.length;
  390. +
  391. + rsp.capacity = rqst.response.length;
  392. + rsp.length = 0;
  393. +
  394. + // get request payload from user-space
  395. + if (spec.length) {
  396. + if (!rqst.payload.data) {
  397. + ret = -EINVAL;
  398. + goto out;
  399. + }
  400. +
  401. + pldbuf = kzalloc(spec.length, GFP_KERNEL);
  402. + if (!pldbuf) {
  403. + status = -ENOMEM;
  404. + ret = -EFAULT;
  405. + goto out;
  406. + }
  407. +
  408. + if (copy_from_user(pldbuf, rqst.payload.data, spec.length)) {
  409. + ret = -EFAULT;
  410. + goto out;
  411. + }
  412. + }
  413. + spec.payload = pldbuf;
  414. +
  415. + // allocate response buffer
  416. + if (rsp.capacity) {
  417. + if (!rqst.response.data) {
  418. + ret = -EINVAL;
  419. + goto out;
  420. + }
  421. +
  422. + rspbuf = kzalloc(rsp.capacity, GFP_KERNEL);
  423. + if (!rspbuf) {
  424. + status = -ENOMEM;
  425. + ret = -EFAULT;
  426. + goto out;
  427. + }
  428. + }
  429. + rsp.pointer = rspbuf;
  430. +
  431. + // perform request
  432. + status = ssam_request_sync(ddev->ctrl, &spec, &rsp);
  433. + if (status)
  434. + goto out;
  435. +
  436. + // copy response to user-space
  437. + if (rsp.length) {
  438. + if (copy_to_user(rqst.response.data, rsp.pointer, rsp.length)) {
  439. + ret = -EFAULT;
  440. + goto out;
  441. + }
  442. + }
  443. +
  444. +out:
  445. + // always try to set response-length and status
  446. + tmp = put_user(rsp.length, &r->response.length);
  447. + if (!ret)
  448. + ret = tmp;
  449. +
  450. + tmp = put_user(status, &r->status);
  451. + if (!ret)
  452. + ret = tmp;
  453. +
+ // cleanup; kfree() tolerates NULL pointers
+ kfree(pldbuf);
+ kfree(rspbuf);
+
  461. + return ret;
  462. +}
  463. +
  464. +static long ssam_dbgdev_getversion(struct file *file, unsigned long arg)
  465. +{
+ if (put_user(SSAM_DBGDEV_VERS, (u32 __user *)arg))
+ return -EFAULT;
+
+ return 0;
  468. +}
  469. +
  470. +static long ssam_dbgdev_ioctl(struct file *file, unsigned int cmd,
  471. + unsigned long arg)
  472. +{
  473. + switch (cmd) {
  474. + case SSAM_DBGDEV_IOCTL_GETVERSION:
  475. + return ssam_dbgdev_getversion(file, arg);
  476. +
  477. + case SSAM_DBGDEV_IOCTL_REQUEST:
  478. + return ssam_dbgdev_request(file, arg);
  479. +
  480. + default:
  481. + return -EINVAL;
  482. + }
  483. +}
  484. +
+static const struct file_operations ssam_dbgdev_fops = {
  486. + .owner = THIS_MODULE,
  487. + .open = ssam_dbgdev_open,
  488. + .unlocked_ioctl = ssam_dbgdev_ioctl,
  489. + .compat_ioctl = ssam_dbgdev_ioctl,
  490. + .llseek = noop_llseek,
  491. +};
  492. +
  493. +static int ssam_dbgdev_probe(struct platform_device *pdev)
  494. +{
  495. + struct ssam_dbgdev *ddev;
  496. + struct ssam_controller *ctrl;
  497. + int status;
  498. +
  499. + status = ssam_client_bind(&pdev->dev, &ctrl);
  500. + if (status)
  501. + return status == -ENXIO ? -EPROBE_DEFER : status;
  502. +
  503. + ddev = devm_kzalloc(&pdev->dev, sizeof(struct ssam_dbgdev), GFP_KERNEL);
  504. + if (!ddev)
  505. + return -ENOMEM;
  506. +
  507. + ddev->ctrl = ctrl;
  508. +
  509. + ddev->dentry_dir = debugfs_create_dir("surface_sam", NULL);
  510. + if (IS_ERR(ddev->dentry_dir))
  511. + return PTR_ERR(ddev->dentry_dir);
  512. +
  513. + ddev->dentry_dev = debugfs_create_file("controller", 0600,
  514. + ddev->dentry_dir, ddev,
  515. + &ssam_dbgdev_fops);
  516. + if (IS_ERR(ddev->dentry_dev)) {
  517. + debugfs_remove(ddev->dentry_dir);
  518. + return PTR_ERR(ddev->dentry_dev);
  519. + }
  520. +
  521. + platform_set_drvdata(pdev, ddev);
  522. + return 0;
  523. +}
  524. +
  525. +static int ssam_dbgdev_remove(struct platform_device *pdev)
  526. +{
  527. + struct ssam_dbgdev *ddev = platform_get_drvdata(pdev);
  528. +
  529. + debugfs_remove(ddev->dentry_dev);
  530. + debugfs_remove(ddev->dentry_dir);
  531. +
  532. + platform_set_drvdata(pdev, NULL);
  533. + return 0;
  534. +}
  535. +
  536. +static void ssam_dbgdev_release(struct device *dev)
  537. +{
  538. + // nothing to do
  539. +}
  540. +
  541. +
  542. +static struct platform_device ssam_dbgdev_device = {
  543. + .name = SSAM_DBGDEV_NAME,
  544. + .id = PLATFORM_DEVID_NONE,
  545. + .dev.release = ssam_dbgdev_release,
  546. +};
  547. +
  548. +static struct platform_driver ssam_dbgdev_driver = {
  549. + .probe = ssam_dbgdev_probe,
  550. + .remove = ssam_dbgdev_remove,
  551. + .driver = {
  552. + .name = SSAM_DBGDEV_NAME,
  553. + },
  554. +};
  555. +
  556. +static int __init surface_sam_debugfs_init(void)
  557. +{
  558. + int status;
  559. +
  560. + status = platform_device_register(&ssam_dbgdev_device);
  561. + if (status)
  562. + return status;
  563. +
  564. + status = platform_driver_register(&ssam_dbgdev_driver);
  565. + if (status)
  566. + platform_device_unregister(&ssam_dbgdev_device);
  567. +
  568. + return status;
  569. +}
  570. +
  571. +static void __exit surface_sam_debugfs_exit(void)
  572. +{
  573. + platform_driver_unregister(&ssam_dbgdev_driver);
  574. + platform_device_unregister(&ssam_dbgdev_device);
  575. +}
  576. +
  577. +module_init(surface_sam_debugfs_init);
  578. +module_exit(surface_sam_debugfs_exit);
  579. +
  580. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  581. +MODULE_DESCRIPTION("DebugFS entries for Surface Aggregator Module");
  582. +MODULE_LICENSE("GPL");
  583. diff --git a/drivers/platform/x86/surface_sam/surface_sam_dtx.c b/drivers/platform/x86/surface_sam/surface_sam_dtx.c
  584. new file mode 100644
  585. index 0000000000000..9c844bb0f7739
  586. --- /dev/null
  587. +++ b/drivers/platform/x86/surface_sam/surface_sam_dtx.c
  588. @@ -0,0 +1,582 @@
  589. +// SPDX-License-Identifier: GPL-2.0-or-later
  590. +/*
  591. + * Detachment system (DTX) driver for Microsoft Surface Book 2.
  592. + */
  593. +
  594. +#include <linux/acpi.h>
  595. +#include <linux/delay.h>
  596. +#include <linux/fs.h>
  597. +#include <linux/input.h>
  598. +#include <linux/ioctl.h>
  599. +#include <linux/kernel.h>
  600. +#include <linux/miscdevice.h>
  601. +#include <linux/module.h>
  602. +#include <linux/poll.h>
  603. +#include <linux/rculist.h>
  604. +#include <linux/slab.h>
  605. +#include <linux/spinlock.h>
  606. +#include <linux/platform_device.h>
  607. +
  608. +#include "surface_sam_ssh.h"
  609. +
  610. +
  611. +#define USB_VENDOR_ID_MICROSOFT 0x045e
  612. +#define USB_DEVICE_ID_MS_SURFACE_BASE_2_INTEGRATION 0x0922
  613. +
  614. +// name copied from MS device manager
  615. +#define DTX_INPUT_NAME "Microsoft Surface Base 2 Integration Device"
  616. +
  617. +
  618. +#define DTX_CMD_LATCH_LOCK _IO(0x11, 0x01)
  619. +#define DTX_CMD_LATCH_UNLOCK _IO(0x11, 0x02)
  620. +#define DTX_CMD_LATCH_REQUEST _IO(0x11, 0x03)
  621. +#define DTX_CMD_LATCH_OPEN _IO(0x11, 0x04)
  622. +#define DTX_CMD_GET_OPMODE _IOR(0x11, 0x05, int)
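+
+/*
+ * Illustrative user-space usage (a sketch only; handle_event() is a
+ * hypothetical helper):
+ *
+ *   int fd = open("/dev/surface_dtx", O_RDWR);
+ *   int opmode;
+ *
+ *   if (ioctl(fd, DTX_CMD_GET_OPMODE, &opmode) == 0)
+ *           printf("opmode: %d\n", opmode);   // 0: tablet, 1: laptop, 2: studio
+ *
+ *   // events are read as a stream of struct surface_dtx_event records
+ *   struct surface_dtx_event ev;
+ *
+ *   while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
+ *           handle_event(ev.type, ev.code, ev.arg0, ev.arg1);
+ */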
  623. +
  624. +#define SAM_EVENT_DTX_CID_CONNECTION 0x0c
  625. +#define SAM_EVENT_DTX_CID_BUTTON 0x0e
  626. +#define SAM_EVENT_DTX_CID_ERROR 0x0f
  627. +#define SAM_EVENT_DTX_CID_LATCH_STATUS 0x11
  628. +
  629. +#define DTX_OPMODE_TABLET 0x00
  630. +#define DTX_OPMODE_LAPTOP 0x01
  631. +#define DTX_OPMODE_STUDIO 0x02
  632. +
  633. +#define DTX_LATCH_CLOSED 0x00
  634. +#define DTX_LATCH_OPENED 0x01
  635. +
  636. +
  637. +// Warning: This must always be a power of 2!
  638. +#define DTX_CLIENT_BUF_SIZE 16
  639. +
  640. +#define DTX_CONNECT_OPMODE_DELAY 1000
  641. +
  642. +#define DTX_ERR KERN_ERR "surface_sam_dtx: "
  643. +#define DTX_WARN KERN_WARNING "surface_sam_dtx: "
  644. +
  645. +
  646. +struct surface_dtx_event {
  647. + u8 type;
  648. + u8 code;
  649. + u8 arg0;
  650. + u8 arg1;
  651. +} __packed;
  652. +
  653. +struct surface_dtx_dev {
  654. + struct ssam_controller *ctrl;
  655. +
  656. + struct ssam_event_notifier notif;
  657. + struct delayed_work opmode_work;
  658. + wait_queue_head_t waitq;
  659. + struct miscdevice mdev;
  660. + spinlock_t client_lock;
  661. + struct list_head client_list;
  662. + struct mutex mutex;
  663. + bool active;
  664. + spinlock_t input_lock;
  665. + struct input_dev *input_dev;
  666. +};
  667. +
  668. +struct surface_dtx_client {
  669. + struct list_head node;
  670. + struct surface_dtx_dev *ddev;
  671. + struct fasync_struct *fasync;
  672. + spinlock_t buffer_lock;
  673. + unsigned int buffer_head;
  674. + unsigned int buffer_tail;
  675. + struct surface_dtx_event buffer[DTX_CLIENT_BUF_SIZE];
  676. +};
  677. +
  678. +
  679. +static struct surface_dtx_dev surface_dtx_dev;
  680. +
  681. +
  682. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
  683. + .target_category = SSAM_SSH_TC_BAS,
  684. + .command_id = 0x06,
  685. + .instance_id = 0x00,
  686. + .channel = 0x01,
  687. +});
  688. +
  689. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
  690. + .target_category = SSAM_SSH_TC_BAS,
  691. + .command_id = 0x07,
  692. + .instance_id = 0x00,
  693. + .channel = 0x01,
  694. +});
  695. +
  696. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
  697. + .target_category = SSAM_SSH_TC_BAS,
  698. + .command_id = 0x08,
  699. + .instance_id = 0x00,
  700. + .channel = 0x01,
  701. +});
  702. +
  703. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_open, {
  704. + .target_category = SSAM_SSH_TC_BAS,
  705. + .command_id = 0x09,
  706. + .instance_id = 0x00,
  707. + .channel = 0x01,
  708. +});
  709. +
  710. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
  711. + .target_category = SSAM_SSH_TC_BAS,
  712. + .command_id = 0x0d,
  713. + .instance_id = 0x00,
  714. + .channel = 0x01,
  715. +});
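+
+/*
+ * Note: the SSAM_DEFINE_SYNC_REQUEST_N() definitions above expand to small
+ * synchronous request wrappers, e.g. ssam_bas_latch_lock(ctrl), while the _R
+ * variant additionally returns the response payload, here the opmode byte via
+ * ssam_bas_query_opmode(ctrl, &opmode), as used by the code below.
+ */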
  716. +
  717. +
  718. +static int dtx_bas_get_opmode(struct ssam_controller *ctrl, int __user *buf)
  719. +{
  720. + u8 opmode;
  721. + int status;
  722. +
  723. + status = ssam_bas_query_opmode(ctrl, &opmode);
  724. + if (status < 0)
  725. + return status;
  726. +
  727. + if (put_user(opmode, buf))
+ return -EFAULT;
  729. +
  730. + return 0;
  731. +}
  732. +
  733. +
  734. +static int surface_dtx_open(struct inode *inode, struct file *file)
  735. +{
  736. + struct surface_dtx_dev *ddev = container_of(file->private_data, struct surface_dtx_dev, mdev);
  737. + struct surface_dtx_client *client;
  738. +
  739. + // initialize client
  740. + client = kzalloc(sizeof(struct surface_dtx_client), GFP_KERNEL);
  741. + if (!client)
  742. + return -ENOMEM;
  743. +
  744. + spin_lock_init(&client->buffer_lock);
  745. + client->buffer_head = 0;
  746. + client->buffer_tail = 0;
  747. + client->ddev = ddev;
  748. +
  749. + // attach client
  750. + spin_lock(&ddev->client_lock);
  751. + list_add_tail_rcu(&client->node, &ddev->client_list);
  752. + spin_unlock(&ddev->client_lock);
  753. +
  754. + file->private_data = client;
  755. + nonseekable_open(inode, file);
  756. +
  757. + return 0;
  758. +}
  759. +
  760. +static int surface_dtx_release(struct inode *inode, struct file *file)
  761. +{
  762. + struct surface_dtx_client *client = file->private_data;
  763. +
  764. + // detach client
  765. + spin_lock(&client->ddev->client_lock);
  766. + list_del_rcu(&client->node);
  767. + spin_unlock(&client->ddev->client_lock);
  768. + synchronize_rcu();
  769. +
  770. + kfree(client);
  771. + file->private_data = NULL;
  772. +
  773. + return 0;
  774. +}
  775. +
  776. +static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
  777. +{
  778. + struct surface_dtx_client *client = file->private_data;
  779. + struct surface_dtx_dev *ddev = client->ddev;
  780. + struct surface_dtx_event event;
  781. + size_t read = 0;
  782. + int status = 0;
  783. +
  784. + if (count != 0 && count < sizeof(struct surface_dtx_event))
  785. + return -EINVAL;
  786. +
  787. + if (!ddev->active)
  788. + return -ENODEV;
  789. +
  790. + // check availability
  791. + if (client->buffer_head == client->buffer_tail) {
  792. + if (file->f_flags & O_NONBLOCK)
  793. + return -EAGAIN;
  794. +
  795. + status = wait_event_interruptible(ddev->waitq,
  796. + client->buffer_head != client->buffer_tail ||
  797. + !ddev->active);
  798. + if (status)
  799. + return status;
  800. +
  801. + if (!ddev->active)
  802. + return -ENODEV;
  803. + }
  804. +
  805. + // copy events one by one
  806. + while (read + sizeof(struct surface_dtx_event) <= count) {
  807. + spin_lock_irq(&client->buffer_lock);
  808. +
  809. + if (client->buffer_head == client->buffer_tail) {
  810. + spin_unlock_irq(&client->buffer_lock);
  811. + break;
  812. + }
  813. +
  814. + // get one event
  815. + event = client->buffer[client->buffer_tail];
  816. + client->buffer_tail = (client->buffer_tail + 1) & (DTX_CLIENT_BUF_SIZE - 1);
  817. + spin_unlock_irq(&client->buffer_lock);
  818. +
  819. + // copy to userspace
  820. + if (copy_to_user(buf, &event, sizeof(struct surface_dtx_event)))
  821. + return -EFAULT;
  822. +
  823. + read += sizeof(struct surface_dtx_event);
  824. + }
  825. +
  826. + return read;
  827. +}
  828. +
  829. +static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
  830. +{
  831. + struct surface_dtx_client *client = file->private_data;
  832. + int mask;
  833. +
  834. + poll_wait(file, &client->ddev->waitq, pt);
  835. +
  836. + if (client->ddev->active)
  837. + mask = EPOLLOUT | EPOLLWRNORM;
  838. + else
  839. + mask = EPOLLHUP | EPOLLERR;
  840. +
  841. + if (client->buffer_head != client->buffer_tail)
  842. + mask |= EPOLLIN | EPOLLRDNORM;
  843. +
  844. + return mask;
  845. +}
  846. +
  847. +static int surface_dtx_fasync(int fd, struct file *file, int on)
  848. +{
  849. + struct surface_dtx_client *client = file->private_data;
  850. +
  851. + return fasync_helper(fd, file, on, &client->fasync);
  852. +}
  853. +
  854. +static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  855. +{
  856. + struct surface_dtx_client *client = file->private_data;
  857. + struct surface_dtx_dev *ddev = client->ddev;
  858. + int status;
  859. +
  860. + status = mutex_lock_interruptible(&ddev->mutex);
  861. + if (status)
  862. + return status;
  863. +
  864. + if (!ddev->active) {
  865. + mutex_unlock(&ddev->mutex);
  866. + return -ENODEV;
  867. + }
  868. +
  869. + switch (cmd) {
  870. + case DTX_CMD_LATCH_LOCK:
  871. + status = ssam_bas_latch_lock(ddev->ctrl);
  872. + break;
  873. +
  874. + case DTX_CMD_LATCH_UNLOCK:
  875. + status = ssam_bas_latch_unlock(ddev->ctrl);
  876. + break;
  877. +
  878. + case DTX_CMD_LATCH_REQUEST:
  879. + status = ssam_bas_latch_request(ddev->ctrl);
  880. + break;
  881. +
  882. + case DTX_CMD_LATCH_OPEN:
  883. + status = ssam_bas_latch_open(ddev->ctrl);
  884. + break;
  885. +
  886. + case DTX_CMD_GET_OPMODE:
  887. + status = dtx_bas_get_opmode(ddev->ctrl, (int __user *)arg);
  888. + break;
  889. +
  890. + default:
  891. + status = -EINVAL;
  892. + break;
  893. + }
  894. +
  895. + mutex_unlock(&ddev->mutex);
  896. + return status;
  897. +}
  898. +
  899. +static const struct file_operations surface_dtx_fops = {
  900. + .owner = THIS_MODULE,
  901. + .open = surface_dtx_open,
  902. + .release = surface_dtx_release,
  903. + .read = surface_dtx_read,
  904. + .poll = surface_dtx_poll,
  905. + .fasync = surface_dtx_fasync,
  906. + .unlocked_ioctl = surface_dtx_ioctl,
  907. + .llseek = no_llseek,
  908. +};
  909. +
  910. +static struct surface_dtx_dev surface_dtx_dev = {
  911. + .mdev = {
  912. + .minor = MISC_DYNAMIC_MINOR,
  913. + .name = "surface_dtx",
  914. + .fops = &surface_dtx_fops,
  915. + },
+ .client_lock = __SPIN_LOCK_UNLOCKED(surface_dtx_dev.client_lock),
+ .input_lock = __SPIN_LOCK_UNLOCKED(surface_dtx_dev.input_lock),
  918. + .mutex = __MUTEX_INITIALIZER(surface_dtx_dev.mutex),
  919. + .active = false,
  920. +};
  921. +
  922. +
  923. +static void surface_dtx_push_event(struct surface_dtx_dev *ddev, struct surface_dtx_event *event)
  924. +{
  925. + struct surface_dtx_client *client;
  926. +
  927. + rcu_read_lock();
  928. + list_for_each_entry_rcu(client, &ddev->client_list, node) {
  929. + spin_lock(&client->buffer_lock);
  930. +
  931. + client->buffer[client->buffer_head++] = *event;
  932. + client->buffer_head &= DTX_CLIENT_BUF_SIZE - 1;
  933. +
  934. + if (unlikely(client->buffer_head == client->buffer_tail)) {
  935. + printk(DTX_WARN "event buffer overrun\n");
  936. + client->buffer_tail = (client->buffer_tail + 1) & (DTX_CLIENT_BUF_SIZE - 1);
  937. + }
  938. +
  939. + spin_unlock(&client->buffer_lock);
  940. +
  941. + kill_fasync(&client->fasync, SIGIO, POLL_IN);
  942. + }
  943. + rcu_read_unlock();
  944. +
  945. + wake_up_interruptible(&ddev->waitq);
  946. +}
  947. +
  948. +
+static void surface_dtx_update_opmode(struct surface_dtx_dev *ddev)
  950. +{
  951. + struct surface_dtx_event event;
  952. + u8 opmode;
  953. + int status;
  954. +
  955. + // get operation mode
  956. + status = ssam_bas_query_opmode(ddev->ctrl, &opmode);
  957. + if (status < 0) {
  958. + printk(DTX_ERR "EC request failed with error %d\n", status);
  959. + return;
  960. + }
  961. +
  962. + // send DTX event
  963. + event.type = 0x11;
  964. + event.code = 0x0D;
  965. + event.arg0 = opmode;
  966. + event.arg1 = 0x00;
  967. +
  968. + surface_dtx_push_event(ddev, &event);
  969. +
  970. + // send SW_TABLET_MODE event
  971. + spin_lock(&ddev->input_lock);
  972. + input_report_switch(ddev->input_dev, SW_TABLET_MODE, opmode != DTX_OPMODE_LAPTOP);
  973. + input_sync(ddev->input_dev);
  974. + spin_unlock(&ddev->input_lock);
  975. +}
  976. +
  977. +static void surface_dtx_opmode_workfn(struct work_struct *work)
  978. +{
  979. + struct surface_dtx_dev *ddev = container_of(work, struct surface_dtx_dev, opmode_work.work);
  980. +
+ surface_dtx_update_opmode(ddev);
  982. +}
  983. +
  984. +static u32 surface_dtx_notification(struct ssam_notifier_block *nb, const struct ssam_event *in_event)
  985. +{
  986. + struct surface_dtx_dev *ddev = container_of(nb, struct surface_dtx_dev, notif.base);
  987. + struct surface_dtx_event event;
  988. + unsigned long delay;
  989. +
  990. + switch (in_event->command_id) {
  991. + case SAM_EVENT_DTX_CID_CONNECTION:
  992. + case SAM_EVENT_DTX_CID_BUTTON:
  993. + case SAM_EVENT_DTX_CID_ERROR:
  994. + case SAM_EVENT_DTX_CID_LATCH_STATUS:
  995. + if (in_event->length > 2) {
  996. + printk(DTX_ERR "unexpected payload size (cid: %x, len: %u)\n",
  997. + in_event->command_id, in_event->length);
  998. + return SSAM_NOTIF_HANDLED;
  999. + }
  1000. +
  1001. + event.type = in_event->target_category;
  1002. + event.code = in_event->command_id;
  1003. + event.arg0 = in_event->length >= 1 ? in_event->data[0] : 0x00;
  1004. + event.arg1 = in_event->length >= 2 ? in_event->data[1] : 0x00;
  1005. + surface_dtx_push_event(ddev, &event);
  1006. + break;
  1007. +
  1008. + default:
  1009. + return 0;
  1010. + }
  1011. +
  1012. + // update device mode
  1013. + if (in_event->command_id == SAM_EVENT_DTX_CID_CONNECTION) {
  1014. + delay = event.arg0 ? DTX_CONNECT_OPMODE_DELAY : 0;
  1015. + schedule_delayed_work(&ddev->opmode_work, delay);
  1016. + }
  1017. +
  1018. + return SSAM_NOTIF_HANDLED;
  1019. +}
  1020. +
  1021. +
  1022. +static struct input_dev *surface_dtx_register_inputdev(
  1023. + struct platform_device *pdev, struct ssam_controller *ctrl)
  1024. +{
  1025. + struct input_dev *input_dev;
  1026. + u8 opmode;
  1027. + int status;
  1028. +
  1029. + input_dev = input_allocate_device();
  1030. + if (!input_dev)
  1031. + return ERR_PTR(-ENOMEM);
  1032. +
  1033. + input_dev->name = DTX_INPUT_NAME;
  1034. + input_dev->dev.parent = &pdev->dev;
  1035. + input_dev->id.bustype = BUS_VIRTUAL;
  1036. + input_dev->id.vendor = USB_VENDOR_ID_MICROSOFT;
  1037. + input_dev->id.product = USB_DEVICE_ID_MS_SURFACE_BASE_2_INTEGRATION;
  1038. +
  1039. + input_set_capability(input_dev, EV_SW, SW_TABLET_MODE);
  1040. +
  1041. + status = ssam_bas_query_opmode(ctrl, &opmode);
  1042. + if (status < 0) {
  1043. + input_free_device(input_dev);
  1044. + return ERR_PTR(status);
  1045. + }
  1046. +
  1047. + input_report_switch(input_dev, SW_TABLET_MODE, opmode != DTX_OPMODE_LAPTOP);
  1048. +
  1049. + status = input_register_device(input_dev);
+ if (status) {
+ // not yet registered, so free instead of unregister
+ input_free_device(input_dev);
+ return ERR_PTR(status);
+ }
  1054. +
  1055. + return input_dev;
  1056. +}
  1057. +
  1058. +
  1059. +static int surface_sam_dtx_probe(struct platform_device *pdev)
  1060. +{
  1061. + struct surface_dtx_dev *ddev = &surface_dtx_dev;
  1062. + struct ssam_controller *ctrl;
  1063. + struct input_dev *input_dev;
  1064. + int status;
  1065. +
  1066. + // link to ec
  1067. + status = ssam_client_bind(&pdev->dev, &ctrl);
  1068. + if (status)
  1069. + return status == -ENXIO ? -EPROBE_DEFER : status;
  1070. +
  1071. + input_dev = surface_dtx_register_inputdev(pdev, ctrl);
  1072. + if (IS_ERR(input_dev))
  1073. + return PTR_ERR(input_dev);
  1074. +
  1075. + // initialize device
  1076. + mutex_lock(&ddev->mutex);
  1077. + if (ddev->active) {
  1078. + mutex_unlock(&ddev->mutex);
  1079. + status = -ENODEV;
  1080. + goto err_register;
  1081. + }
  1082. +
  1083. + ddev->ctrl = ctrl;
  1084. + INIT_DELAYED_WORK(&ddev->opmode_work, surface_dtx_opmode_workfn);
  1085. + INIT_LIST_HEAD(&ddev->client_list);
  1086. + init_waitqueue_head(&ddev->waitq);
  1087. + ddev->active = true;
  1088. + ddev->input_dev = input_dev;
  1089. + mutex_unlock(&ddev->mutex);
  1090. +
  1091. + status = misc_register(&ddev->mdev);
  1092. + if (status)
  1093. + goto err_register;
  1094. +
  1095. + // set up events
  1096. + ddev->notif.base.priority = 1;
  1097. + ddev->notif.base.fn = surface_dtx_notification;
  1098. + ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
  1099. + ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
  1100. + ddev->notif.event.id.instance = 0;
  1101. + ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
  1102. +
  1103. + status = ssam_notifier_register(ctrl, &ddev->notif);
  1104. + if (status)
  1105. + goto err_events_setup;
  1106. +
  1107. + return 0;
  1108. +
  1109. +err_events_setup:
  1110. + misc_deregister(&ddev->mdev);
  1111. +err_register:
  1112. + input_unregister_device(ddev->input_dev);
  1113. + return status;
  1114. +}
  1115. +
  1116. +static int surface_sam_dtx_remove(struct platform_device *pdev)
  1117. +{
  1118. + struct surface_dtx_dev *ddev = &surface_dtx_dev;
  1119. + struct surface_dtx_client *client;
  1120. +
  1121. + mutex_lock(&ddev->mutex);
  1122. + if (!ddev->active) {
  1123. + mutex_unlock(&ddev->mutex);
  1124. + return 0;
  1125. + }
  1126. +
  1127. + // mark as inactive
  1128. + ddev->active = false;
  1129. + mutex_unlock(&ddev->mutex);
  1130. +
+ // After this call we're guaranteed that no more input events will arrive
  1132. + ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
  1133. +
  1134. + // wake up clients
  1135. + spin_lock(&ddev->client_lock);
  1136. + list_for_each_entry(client, &ddev->client_list, node) {
  1137. + kill_fasync(&client->fasync, SIGIO, POLL_HUP);
  1138. + }
  1139. + spin_unlock(&ddev->client_lock);
  1140. +
  1141. + wake_up_interruptible(&ddev->waitq);
  1142. +
  1143. + // unregister user-space devices
  1144. + input_unregister_device(ddev->input_dev);
  1145. + misc_deregister(&ddev->mdev);
  1146. +
  1147. + return 0;
  1148. +}
  1149. +
  1150. +
  1151. +static const struct acpi_device_id surface_sam_dtx_match[] = {
  1152. + { "MSHW0133", 0 },
  1153. + { },
  1154. +};
  1155. +MODULE_DEVICE_TABLE(acpi, surface_sam_dtx_match);
  1156. +
  1157. +static struct platform_driver surface_sam_dtx = {
  1158. + .probe = surface_sam_dtx_probe,
  1159. + .remove = surface_sam_dtx_remove,
  1160. + .driver = {
  1161. + .name = "surface_sam_dtx",
  1162. + .acpi_match_table = surface_sam_dtx_match,
  1163. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  1164. + },
  1165. +};
  1166. +module_platform_driver(surface_sam_dtx);
  1167. +
  1168. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  1169. +MODULE_DESCRIPTION("Surface Detachment System (DTX) Driver for 5th Generation Surface Devices");
  1170. +MODULE_LICENSE("GPL");
  1171. diff --git a/drivers/platform/x86/surface_sam/surface_sam_hps.c b/drivers/platform/x86/surface_sam/surface_sam_hps.c
  1172. new file mode 100644
  1173. index 0000000000000..b11f9fa8095fb
  1174. --- /dev/null
  1175. +++ b/drivers/platform/x86/surface_sam/surface_sam_hps.c
  1176. @@ -0,0 +1,1287 @@
  1177. +// SPDX-License-Identifier: GPL-2.0-or-later
  1178. +/*
  1179. + * Surface dGPU hot-plug system driver.
  1180. + * Supports explicit setting of the dGPU power-state on the Surface Book 2 and
+ * properly handles hot-plugging triggered by detaching the base.
  1182. + */
  1183. +
  1184. +#include <linux/acpi.h>
  1185. +#include <linux/delay.h>
  1186. +#include <linux/gpio.h>
  1187. +#include <linux/kernel.h>
  1188. +#include <linux/module.h>
  1189. +#include <linux/mutex.h>
  1190. +#include <linux/pci.h>
  1191. +#include <linux/platform_device.h>
  1192. +#include <linux/sysfs.h>
  1193. +
  1194. +#include "surface_sam_ssh.h"
  1195. +#include "surface_sam_san.h"
  1196. +
  1197. +
  1198. +// TODO: vgaswitcheroo integration
  1199. +
  1200. +
  1201. +static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix);
  1202. +
  1203. +
  1204. +#define SHPS_DSM_REVISION 1
  1205. +#define SHPS_DSM_GPU_ADDRS 0x02
  1206. +#define SHPS_DSM_GPU_POWER 0x05
  1207. +static const guid_t SHPS_DSM_UUID =
  1208. + GUID_INIT(0x5515a847, 0xed55, 0x4b27, 0x83, 0x52, 0xcd,
  1209. + 0x32, 0x0e, 0x10, 0x36, 0x0a);
  1210. +
  1211. +
  1212. +#define SAM_DGPU_TC 0x13
  1213. +#define SAM_DGPU_CID_POWERON 0x02
  1214. +#define ACPI_SGCP_NOTIFY_POWER_ON 0x81
  1215. +
  1216. +#define SHPS_DSM_GPU_ADDRS_RP "RP5_PCIE"
  1217. +#define SHPS_DSM_GPU_ADDRS_DGPU "DGPU_PCIE"
  1218. +#define SHPS_PCI_GPU_ADDR_RP "\\_SB.PCI0.RP13._ADR"
  1219. +
  1220. +static const struct acpi_gpio_params gpio_base_presence_int = { 0, 0, false };
  1221. +static const struct acpi_gpio_params gpio_base_presence = { 1, 0, false };
  1222. +static const struct acpi_gpio_params gpio_dgpu_power_int = { 2, 0, false };
  1223. +static const struct acpi_gpio_params gpio_dgpu_power = { 3, 0, false };
  1224. +static const struct acpi_gpio_params gpio_dgpu_presence_int = { 4, 0, false };
  1225. +static const struct acpi_gpio_params gpio_dgpu_presence = { 5, 0, false };
  1226. +
  1227. +static const struct acpi_gpio_mapping shps_acpi_gpios[] = {
  1228. + { "base_presence-int-gpio", &gpio_base_presence_int, 1 },
  1229. + { "base_presence-gpio", &gpio_base_presence, 1 },
  1230. + { "dgpu_power-int-gpio", &gpio_dgpu_power_int, 1 },
  1231. + { "dgpu_power-gpio", &gpio_dgpu_power, 1 },
  1232. + { "dgpu_presence-int-gpio", &gpio_dgpu_presence_int, 1 },
  1233. + { "dgpu_presence-gpio", &gpio_dgpu_presence, 1 },
  1234. + { },
  1235. +};
  1236. +
  1237. +
  1238. +enum shps_dgpu_power {
  1239. + SHPS_DGPU_POWER_OFF = 0,
  1240. + SHPS_DGPU_POWER_ON = 1,
  1241. + SHPS_DGPU_POWER_UNKNOWN = 2,
  1242. +};
  1243. +
  1244. +static const char *shps_dgpu_power_str(enum shps_dgpu_power power)
  1245. +{
  1246. + if (power == SHPS_DGPU_POWER_OFF)
  1247. + return "off";
  1248. + else if (power == SHPS_DGPU_POWER_ON)
  1249. + return "on";
  1250. + else if (power == SHPS_DGPU_POWER_UNKNOWN)
  1251. + return "unknown";
  1252. + else
  1253. + return "<invalid>";
  1254. +}
  1255. +
  1256. +enum shps_notification_method {
  1257. + SHPS_NOTIFICATION_METHOD_SAN = 1,
  1258. + SHPS_NOTIFICATION_METHOD_SGCP = 2
  1259. +};
  1260. +
  1261. +struct shps_hardware_traits {
  1262. + enum shps_notification_method notification_method;
  1263. + const char *dgpu_rp_pci_address;
  1264. +};
  1265. +
  1266. +struct shps_driver_data {
  1267. + struct ssam_controller *ctrl;
  1268. +
  1269. + struct mutex lock;
  1270. + struct pci_dev *dgpu_root_port;
  1271. + struct pci_saved_state *dgpu_root_port_state;
  1272. + struct gpio_desc *gpio_dgpu_power;
  1273. + struct gpio_desc *gpio_dgpu_presence;
  1274. + struct gpio_desc *gpio_base_presence;
  1275. + unsigned int irq_dgpu_presence;
  1276. + unsigned int irq_base_presence;
  1277. + unsigned long state;
  1278. + acpi_handle sgpc_handle;
  1279. + struct shps_hardware_traits hardware_traits;
  1280. +};
  1281. +
  1282. +struct shps_hardware_probe {
  1283. + const char *hardware_id;
  1284. + int generation;
  1285. + struct shps_hardware_traits *hardware_traits;
  1286. +};
  1287. +
  1288. +static struct shps_hardware_traits shps_gen1_hwtraits = {
  1289. + .notification_method = SHPS_NOTIFICATION_METHOD_SAN
  1290. +};
  1291. +
  1292. +static struct shps_hardware_traits shps_gen2_hwtraits = {
  1293. + .notification_method = SHPS_NOTIFICATION_METHOD_SGCP,
  1294. + .dgpu_rp_pci_address = SHPS_PCI_GPU_ADDR_RP
  1295. +};
  1296. +
  1297. +static const struct shps_hardware_probe shps_hardware_probe_match[] = {
  1298. + /* Surface Book 3 */
  1299. + { "MSHW0117", 2, &shps_gen2_hwtraits },
  1300. +
  1301. + /* Surface Book 2 (default, must be last entry) */
  1302. + { NULL, 1, &shps_gen1_hwtraits }
  1303. +};
  1304. +
  1305. +#define SHPS_STATE_BIT_PWRTGT 0 /* desired power state: 1 for on, 0 for off */
  1306. +#define SHPS_STATE_BIT_RPPWRON_SYNC 1 /* synchronous/requested power-up in progress */
  1307. +#define SHPS_STATE_BIT_WAKE_ENABLED 2 /* wakeup via base-presence GPIO enabled */
  1308. +
  1309. +
  1310. +#define SHPS_DGPU_PARAM_PERM 0644
  1311. +
  1312. +enum shps_dgpu_power_mp {
  1313. + SHPS_DGPU_MP_POWER_OFF = SHPS_DGPU_POWER_OFF,
  1314. + SHPS_DGPU_MP_POWER_ON = SHPS_DGPU_POWER_ON,
  1315. + SHPS_DGPU_MP_POWER_ASIS = -1,
  1316. +
  1317. + __SHPS_DGPU_MP_POWER_START = -1,
  1318. + __SHPS_DGPU_MP_POWER_END = 1,
  1319. +};
  1320. +
  1321. +static int param_dgpu_power_set(const char *val, const struct kernel_param *kp)
  1322. +{
  1323. + int power = SHPS_DGPU_MP_POWER_OFF;
  1324. + int status;
  1325. +
  1326. + status = kstrtoint(val, 0, &power);
  1327. + if (status)
  1328. + return status;
  1329. +
  1330. + if (power < __SHPS_DGPU_MP_POWER_START || power > __SHPS_DGPU_MP_POWER_END)
  1331. + return -EINVAL;
  1332. +
  1333. + return param_set_int(val, kp);
  1334. +}
  1335. +
  1336. +static const struct kernel_param_ops param_dgpu_power_ops = {
  1337. + .set = param_dgpu_power_set,
  1338. + .get = param_get_int,
  1339. +};
  1340. +
  1341. +static int param_dgpu_power_init = SHPS_DGPU_MP_POWER_OFF;
  1342. +static int param_dgpu_power_exit = SHPS_DGPU_MP_POWER_ON;
  1343. +static int param_dgpu_power_susp = SHPS_DGPU_MP_POWER_ASIS;
  1344. +static bool param_dtx_latch = true;
  1345. +
  1346. +module_param_cb(dgpu_power_init, &param_dgpu_power_ops, &param_dgpu_power_init, SHPS_DGPU_PARAM_PERM);
  1347. +module_param_cb(dgpu_power_exit, &param_dgpu_power_ops, &param_dgpu_power_exit, SHPS_DGPU_PARAM_PERM);
  1348. +module_param_cb(dgpu_power_susp, &param_dgpu_power_ops, &param_dgpu_power_susp, SHPS_DGPU_PARAM_PERM);
  1349. +module_param_named(dtx_latch, param_dtx_latch, bool, SHPS_DGPU_PARAM_PERM);
  1350. +
+MODULE_PARM_DESC(dgpu_power_init, "dGPU power state to be set on init (0: off / 1: on / -1: as-is, default: off)");
+MODULE_PARM_DESC(dgpu_power_exit, "dGPU power state to be set on exit (0: off / 1: on / -1: as-is, default: on)");
+MODULE_PARM_DESC(dgpu_power_susp, "dGPU power state to be set on suspend (0: off / 1: on / -1: as-is, default: as-is)");
  1354. +MODULE_PARM_DESC(dtx_latch, "lock/unlock DTX base latch in accordance to power-state (Y/n)");
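+
+/*
+ * Example (a sketch; module and parameter names as defined in this file):
+ *
+ *   modprobe surface_sam_hps dgpu_power_init=1 dtx_latch=Y
+ *
+ * powers the dGPU on during probe and keeps the DTX base latch locked while
+ * the dGPU is powered.
+ */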
  1355. +
  1356. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
  1357. + .target_category = SSAM_SSH_TC_BAS,
  1358. + .command_id = 0x06,
  1359. + .instance_id = 0x00,
  1360. + .channel = 0x01,
  1361. +});
  1362. +
  1363. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
  1364. + .target_category = SSAM_SSH_TC_BAS,
  1365. + .command_id = 0x07,
  1366. + .instance_id = 0x00,
  1367. + .channel = 0x01,
  1368. +});
  1369. +
+static int shps_dgpu_dsm_get_pci_addr_from_adr(struct platform_device *pdev, const char *entry)
+{
  1371. + acpi_handle handle = ACPI_HANDLE(&pdev->dev);
  1372. + int status;
  1373. + struct acpi_object_list input;
  1374. + union acpi_object input_args[0];
  1375. + u64 device_addr;
  1376. + u8 bus, dev, fun;
  1377. +
  1378. + input.count = 0;
  1379. + input.pointer = input_args;
  1380. +
  1381. +
  1382. + status = acpi_evaluate_integer(handle, (acpi_string)entry, &input, &device_addr);
  1383. + if (status) {
  1384. + return -ENODEV;
  1385. + }
  1386. +
  1387. + bus = 0;
  1388. + dev = (device_addr & 0xFF0000) >> 16;
  1389. + fun = device_addr & 0xFF;
  1390. +
  1391. + dev_info(&pdev->dev, "found pci device at bus = %d, dev = %x, fun = %x\n",
  1392. + (u32)bus, (u32)dev, (u32)fun);
  1393. +
  1394. + return bus << 8 | PCI_DEVFN(dev, fun);
  1395. +}
  1396. +
  1397. +static int shps_dgpu_dsm_get_pci_addr_from_dsm(struct platform_device *pdev, const char *entry)
  1398. +{
  1399. + acpi_handle handle = ACPI_HANDLE(&pdev->dev);
  1400. + union acpi_object *result;
  1401. + union acpi_object *e0;
  1402. + union acpi_object *e1;
  1403. + union acpi_object *e2;
  1404. + u64 device_addr = 0;
  1405. + u8 bus, dev, fun;
  1406. + int i;
  1407. +
  1408. +
  1409. + result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION,
  1410. + SHPS_DSM_GPU_ADDRS, NULL, ACPI_TYPE_PACKAGE);
  1411. +
  1412. + if (IS_ERR_OR_NULL(result))
  1413. + return result ? PTR_ERR(result) : -EIO;
  1414. +
  1415. + // three entries per device: name, address, <integer>
  1416. + for (i = 0; i + 2 < result->package.count; i += 3) {
  1417. + e0 = &result->package.elements[i];
  1418. + e1 = &result->package.elements[i + 1];
  1419. + e2 = &result->package.elements[i + 2];
  1420. +
  1421. + if (e0->type != ACPI_TYPE_STRING) {
  1422. + ACPI_FREE(result);
  1423. + return -EIO;
  1424. + }
  1425. +
  1426. + if (e1->type != ACPI_TYPE_INTEGER) {
  1427. + ACPI_FREE(result);
  1428. + return -EIO;
  1429. + }
  1430. +
  1431. + if (e2->type != ACPI_TYPE_INTEGER) {
  1432. + ACPI_FREE(result);
  1433. + return -EIO;
  1434. + }
  1435. +
  1436. + if (strncmp(e0->string.pointer, entry, 64) == 0)
  1437. + device_addr = e1->integer.value;
  1438. + }
  1439. +
  1440. + ACPI_FREE(result);
  1441. + if (device_addr == 0)
  1442. + return -ENODEV;
  1443. +
  1444. +
  1445. + // convert address
  1446. + bus = (device_addr & 0x0FF00000) >> 20;
  1447. + dev = (device_addr & 0x000F8000) >> 15;
  1448. + fun = (device_addr & 0x00007000) >> 12;
  1449. +
  1450. + return bus << 8 | PCI_DEVFN(dev, fun);
  1451. +}
  1452. +
  1453. +static struct pci_dev *shps_dgpu_dsm_get_pci_dev(struct platform_device *pdev)
  1454. +{
  1455. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1456. + struct pci_dev *dev;
  1457. + int addr;
  1458. +
  1459. +
  1460. + if (drvdata->hardware_traits.dgpu_rp_pci_address) {
  1461. + addr = shps_dgpu_dsm_get_pci_addr_from_adr(pdev, drvdata->hardware_traits.dgpu_rp_pci_address);
  1462. + } else {
  1463. + addr = shps_dgpu_dsm_get_pci_addr_from_dsm(pdev, SHPS_DSM_GPU_ADDRS_RP);
  1464. + }
  1465. +
  1466. + if (addr < 0)
  1467. + return ERR_PTR(addr);
  1468. +
  1469. + dev = pci_get_domain_bus_and_slot(0, (addr & 0xFF00) >> 8, addr & 0xFF);
  1470. + return dev ? dev : ERR_PTR(-ENODEV);
  1471. +}
  1472. +
  1473. +
  1474. +static int shps_dgpu_dsm_get_power_unlocked(struct platform_device *pdev)
  1475. +{
  1476. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1477. + struct gpio_desc *gpio = drvdata->gpio_dgpu_power;
  1478. + int status;
  1479. +
  1480. + status = gpiod_get_value_cansleep(gpio);
  1481. + if (status < 0)
  1482. + return status;
  1483. +
  1484. + return status == 0 ? SHPS_DGPU_POWER_OFF : SHPS_DGPU_POWER_ON;
  1485. +}
  1486. +
  1487. +static int shps_dgpu_dsm_get_power(struct platform_device *pdev)
  1488. +{
  1489. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1490. + int status;
  1491. +
  1492. + mutex_lock(&drvdata->lock);
  1493. + status = shps_dgpu_dsm_get_power_unlocked(pdev);
  1494. + mutex_unlock(&drvdata->lock);
  1495. +
  1496. + return status;
  1497. +}
  1498. +
  1499. +static int __shps_dgpu_dsm_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
  1500. +{
  1501. + acpi_handle handle = ACPI_HANDLE(&pdev->dev);
  1502. + union acpi_object *result;
  1503. + union acpi_object param;
  1504. +
  1505. + dev_info(&pdev->dev, "setting dGPU direct power to \'%s\'\n", shps_dgpu_power_str(power));
  1506. +
  1507. + param.type = ACPI_TYPE_INTEGER;
  1508. + param.integer.value = power == SHPS_DGPU_POWER_ON;
  1509. +
  1510. + result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION,
  1511. + SHPS_DSM_GPU_POWER, &param, ACPI_TYPE_BUFFER);
  1512. +
  1513. + if (IS_ERR_OR_NULL(result))
  1514. + return result ? PTR_ERR(result) : -EIO;
  1515. +
  1516. + // check for the expected result
  1517. + if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) {
  1518. + ACPI_FREE(result);
  1519. + return -EIO;
  1520. + }
  1521. +
  1522. + ACPI_FREE(result);
  1523. + return 0;
  1524. +}
  1525. +
  1526. +static int shps_dgpu_dsm_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
  1527. +{
  1528. + int status;
  1529. +
  1530. + if (power != SHPS_DGPU_POWER_ON && power != SHPS_DGPU_POWER_OFF)
  1531. + return -EINVAL;
  1532. +
  1533. + status = shps_dgpu_dsm_get_power_unlocked(pdev);
  1534. + if (status < 0)
  1535. + return status;
  1536. + if (status == power)
  1537. + return 0;
  1538. +
  1539. + return __shps_dgpu_dsm_set_power_unlocked(pdev, power);
  1540. +}
  1541. +
  1542. +static int shps_dgpu_dsm_set_power(struct platform_device *pdev, enum shps_dgpu_power power)
  1543. +{
  1544. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1545. + int status;
  1546. +
  1547. + mutex_lock(&drvdata->lock);
  1548. + status = shps_dgpu_dsm_set_power_unlocked(pdev, power);
  1549. + mutex_unlock(&drvdata->lock);
  1550. +
  1551. + return status;
  1552. +}
  1553. +
  1554. +
  1555. +static bool shps_rp_link_up(struct pci_dev *rp)
  1556. +{
  1557. + u16 lnksta = 0, sltsta = 0;
  1558. +
  1559. + pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
  1560. + pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta);
  1561. +
  1562. + return (lnksta & PCI_EXP_LNKSTA_DLLLA) || (sltsta & PCI_EXP_SLTSTA_PDS);
  1563. +}
  1564. +
  1565. +
  1566. +static int shps_dgpu_rp_get_power_unlocked(struct platform_device *pdev)
  1567. +{
  1568. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1569. + struct pci_dev *rp = drvdata->dgpu_root_port;
  1570. +
  1571. + if (rp->current_state == PCI_D3hot || rp->current_state == PCI_D3cold)
  1572. + return SHPS_DGPU_POWER_OFF;
  1573. + else if (rp->current_state == PCI_UNKNOWN || rp->current_state == PCI_POWER_ERROR)
  1574. + return SHPS_DGPU_POWER_UNKNOWN;
  1575. + else
  1576. + return SHPS_DGPU_POWER_ON;
  1577. +}
  1578. +
  1579. +static int shps_dgpu_rp_get_power(struct platform_device *pdev)
  1580. +{
  1581. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1582. + int status;
  1583. +
  1584. + mutex_lock(&drvdata->lock);
  1585. + status = shps_dgpu_rp_get_power_unlocked(pdev);
  1586. + mutex_unlock(&drvdata->lock);
  1587. +
  1588. + return status;
  1589. +}
  1590. +
  1591. +static int __shps_dgpu_rp_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
  1592. +{
  1593. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1594. + struct pci_dev *rp = drvdata->dgpu_root_port;
  1595. + int status, i;
  1596. +
  1597. + dev_info(&pdev->dev, "setting dGPU power state to \'%s\'\n", shps_dgpu_power_str(power));
  1598. +
  1599. + dbg_dump_drvsta(pdev, "__shps_dgpu_rp_set_power_unlocked.1");
  1600. + if (power == SHPS_DGPU_POWER_ON) {
  1601. + set_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state);
  1602. + pci_set_power_state(rp, PCI_D0);
  1603. +
  1604. + if (drvdata->dgpu_root_port_state)
  1605. + pci_load_and_free_saved_state(rp, &drvdata->dgpu_root_port_state);
  1606. +
  1607. + pci_restore_state(rp);
  1608. +
  1609. + if (!pci_is_enabled(rp))
  1610. + pci_enable_device(rp);
  1611. +
  1612. + pci_set_master(rp);
  1613. + clear_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state);
  1614. +
  1615. + set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1616. + } else {
  1617. + if (!drvdata->dgpu_root_port_state) {
  1618. + pci_save_state(rp);
  1619. + drvdata->dgpu_root_port_state = pci_store_saved_state(rp);
  1620. + }
  1621. +
  1622. + /*
  1623. + * To properly update the hot-plug system we need to "remove" the dGPU
  1624. + * before disabling it and sending it to D3cold. Following this, we
  1625. + * need to wait for the link and slot status to actually change.
  1626. + */
  1627. + status = shps_dgpu_dsm_set_power_unlocked(pdev, SHPS_DGPU_POWER_OFF);
  1628. + if (status)
  1629. + return status;
  1630. +
  1631. + for (i = 0; i < 20 && shps_rp_link_up(rp); i++)
  1632. + msleep(50);
  1633. +
  1634. + if (shps_rp_link_up(rp))
  1635. + dev_err(&pdev->dev, "dGPU removal via DSM timed out\n");
  1636. +
  1637. + pci_clear_master(rp);
  1638. +
  1639. + if (pci_is_enabled(rp))
  1640. + pci_disable_device(rp);
  1641. +
  1642. + pci_set_power_state(rp, PCI_D3cold);
  1643. +
  1644. + clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1645. + }
  1646. + dbg_dump_drvsta(pdev, "__shps_dgpu_rp_set_power_unlocked.2");
  1647. +
  1648. + return 0;
  1649. +}
  1650. +
  1651. +static int shps_dgpu_rp_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
  1652. +{
  1653. + int status;
  1654. +
  1655. + if (power != SHPS_DGPU_POWER_ON && power != SHPS_DGPU_POWER_OFF)
  1656. + return -EINVAL;
  1657. +
  1658. + status = shps_dgpu_rp_get_power_unlocked(pdev);
  1659. + if (status < 0)
  1660. + return status;
  1661. + if (status == power)
  1662. + return 0;
  1663. +
  1664. + return __shps_dgpu_rp_set_power_unlocked(pdev, power);
  1665. +}
  1666. +
  1667. +static int shps_dgpu_rp_set_power(struct platform_device *pdev, enum shps_dgpu_power power)
  1668. +{
  1669. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1670. + int status;
  1671. +
  1672. + mutex_lock(&drvdata->lock);
  1673. + status = shps_dgpu_rp_set_power_unlocked(pdev, power);
  1674. + mutex_unlock(&drvdata->lock);
  1675. +
  1676. + return status;
  1677. +}
  1678. +
  1679. +
  1680. +static int shps_dgpu_set_power(struct platform_device *pdev, enum shps_dgpu_power power)
  1681. +{
  1682. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1683. + int status;
  1684. +
  1685. + if (!param_dtx_latch)
  1686. + return shps_dgpu_rp_set_power(pdev, power);
  1687. +
  1688. + if (power == SHPS_DGPU_POWER_ON) {
  1689. + status = ssam_bas_latch_lock(drvdata->ctrl);
  1690. + if (status)
  1691. + return status;
  1692. +
  1693. + status = shps_dgpu_rp_set_power(pdev, power);
  1694. + if (status)
  1695. + ssam_bas_latch_unlock(drvdata->ctrl);
  1696. +
  1697. + } else {
  1698. + status = shps_dgpu_rp_set_power(pdev, power);
  1699. + if (status)
  1700. + return status;
  1701. +
  1702. + status = ssam_bas_latch_unlock(drvdata->ctrl);
  1703. + }
  1704. +
  1705. + return status;
  1706. +}
  1707. +
  1708. +
  1709. +static int shps_dgpu_is_present(struct platform_device *pdev)
  1710. +{
  1711. + struct shps_driver_data *drvdata;
  1712. +
  1713. + drvdata = platform_get_drvdata(pdev);
  1714. + return gpiod_get_value_cansleep(drvdata->gpio_dgpu_presence);
  1715. +}
  1716. +
  1717. +
  1718. +static ssize_t dgpu_power_show(struct device *dev, struct device_attribute *attr, char *data)
  1719. +{
  1720. + struct platform_device *pdev = to_platform_device(dev);
  1721. + int power = shps_dgpu_rp_get_power(pdev);
  1722. +
  1723. + if (power < 0)
  1724. + return power;
  1725. +
  1726. + return sprintf(data, "%s\n", shps_dgpu_power_str(power));
  1727. +}
  1728. +
  1729. +static ssize_t dgpu_power_store(struct device *dev, struct device_attribute *attr,
  1730. + const char *data, size_t count)
  1731. +{
  1732. + struct platform_device *pdev = to_platform_device(dev);
  1733. + enum shps_dgpu_power power;
  1734. + bool b = false;
  1735. + int status;
  1736. +
  1737. + status = kstrtobool(data, &b);
  1738. + if (status)
  1739. + return status;
  1740. +
  1741. + status = shps_dgpu_is_present(pdev);
  1742. + if (status <= 0)
  1743. + return status < 0 ? status : -EPERM;
  1744. +
  1745. + power = b ? SHPS_DGPU_POWER_ON : SHPS_DGPU_POWER_OFF;
  1746. + status = shps_dgpu_set_power(pdev, power);
  1747. +
  1748. + return status < 0 ? status : count;
  1749. +}
  1750. +
  1751. +static ssize_t dgpu_power_dsm_show(struct device *dev, struct device_attribute *attr, char *data)
  1752. +{
  1753. + struct platform_device *pdev = to_platform_device(dev);
  1754. + int power = shps_dgpu_dsm_get_power(pdev);
  1755. +
  1756. + if (power < 0)
  1757. + return power;
  1758. +
  1759. + return sprintf(data, "%s\n", shps_dgpu_power_str(power));
  1760. +}
  1761. +
  1762. +static ssize_t dgpu_power_dsm_store(struct device *dev, struct device_attribute *attr,
  1763. + const char *data, size_t count)
  1764. +{
  1765. + struct platform_device *pdev = to_platform_device(dev);
  1766. + enum shps_dgpu_power power;
  1767. + bool b = false;
  1768. + int status;
  1769. +
  1770. + status = kstrtobool(data, &b);
  1771. + if (status)
  1772. + return status;
  1773. +
  1774. + status = shps_dgpu_is_present(pdev);
  1775. + if (status <= 0)
  1776. + return status < 0 ? status : -EPERM;
  1777. +
  1778. + power = b ? SHPS_DGPU_POWER_ON : SHPS_DGPU_POWER_OFF;
  1779. + status = shps_dgpu_dsm_set_power(pdev, power);
  1780. +
  1781. + return status < 0 ? status : count;
  1782. +}
  1783. +
  1784. +static DEVICE_ATTR_RW(dgpu_power);
  1785. +static DEVICE_ATTR_RW(dgpu_power_dsm);
  1786. +
  1787. +static struct attribute *shps_power_attrs[] = {
  1788. + &dev_attr_dgpu_power.attr,
  1789. + &dev_attr_dgpu_power_dsm.attr,
  1790. + NULL,
  1791. +};
  1792. +ATTRIBUTE_GROUPS(shps_power);
  1793. +
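+ /*
+ * Both attributes take kstrtobool-style input. From userspace this
+ * might look something like the following (the exact device path
+ * depends on the ACPI instance name):
+ *
+ * echo off > /sys/bus/platform/devices/MSHW0153:00/dgpu_power
+ */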
  1794. +
  1795. +static void dbg_dump_power_states(struct platform_device *pdev, const char *prefix)
  1796. +{
  1797. + enum shps_dgpu_power power_dsm;
  1798. + enum shps_dgpu_power power_rp;
  1799. + int status;
  1800. +
  1801. + status = shps_dgpu_rp_get_power_unlocked(pdev);
  1802. + if (status < 0)
  1803. + dev_err(&pdev->dev, "%s: failed to get root-port power state: %d\n", prefix, status);
  1804. + power_rp = status;
  1805. +
1806. + status = shps_dgpu_dsm_get_power_unlocked(pdev);
  1807. + if (status < 0)
  1808. + dev_err(&pdev->dev, "%s: failed to get direct power state: %d\n", prefix, status);
  1809. + power_dsm = status;
  1810. +
  1811. + dev_dbg(&pdev->dev, "%s: root-port power state: %d\n", prefix, power_rp);
  1812. + dev_dbg(&pdev->dev, "%s: direct power state: %d\n", prefix, power_dsm);
  1813. +}
  1814. +
  1815. +static void dbg_dump_pciesta(struct platform_device *pdev, const char *prefix)
  1816. +{
  1817. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1818. + struct pci_dev *rp = drvdata->dgpu_root_port;
  1819. + u16 lnksta, lnksta2, sltsta, sltsta2;
  1820. +
  1821. + pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
  1822. + pcie_capability_read_word(rp, PCI_EXP_LNKSTA2, &lnksta2);
  1823. + pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta);
  1824. + pcie_capability_read_word(rp, PCI_EXP_SLTSTA2, &sltsta2);
  1825. +
  1826. + dev_dbg(&pdev->dev, "%s: LNKSTA: 0x%04x\n", prefix, lnksta);
  1827. + dev_dbg(&pdev->dev, "%s: LNKSTA2: 0x%04x\n", prefix, lnksta2);
  1828. + dev_dbg(&pdev->dev, "%s: SLTSTA: 0x%04x\n", prefix, sltsta);
  1829. + dev_dbg(&pdev->dev, "%s: SLTSTA2: 0x%04x\n", prefix, sltsta2);
  1830. +}
  1831. +
  1832. +static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix)
  1833. +{
  1834. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1835. + struct pci_dev *rp = drvdata->dgpu_root_port;
  1836. +
  1837. + dev_dbg(&pdev->dev, "%s: RP power: %d\n", prefix, rp->current_state);
  1838. + dev_dbg(&pdev->dev, "%s: RP state saved: %d\n", prefix, rp->state_saved);
  1839. + dev_dbg(&pdev->dev, "%s: RP state stored: %d\n", prefix, !!drvdata->dgpu_root_port_state);
  1840. + dev_dbg(&pdev->dev, "%s: RP enabled: %d\n", prefix, atomic_read(&rp->enable_cnt));
  1841. + dev_dbg(&pdev->dev, "%s: RP mastered: %d\n", prefix, rp->is_busmaster);
  1842. +}
  1843. +
  1844. +static int shps_pm_prepare(struct device *dev)
  1845. +{
  1846. + struct platform_device *pdev = to_platform_device(dev);
  1847. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1848. + bool pwrtgt;
  1849. + int status = 0;
  1850. +
  1851. + dbg_dump_power_states(pdev, "shps_pm_prepare");
  1852. +
  1853. + if (param_dgpu_power_susp != SHPS_DGPU_MP_POWER_ASIS) {
  1854. + pwrtgt = test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1855. +
  1856. + status = shps_dgpu_set_power(pdev, param_dgpu_power_susp);
  1857. + if (status) {
  1858. + dev_err(&pdev->dev, "failed to power %s dGPU: %d\n",
  1859. + param_dgpu_power_susp == SHPS_DGPU_MP_POWER_OFF ? "off" : "on",
  1860. + status);
  1861. + return status;
  1862. + }
  1863. +
  1864. + if (pwrtgt)
  1865. + set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1866. + else
  1867. + clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1868. + }
  1869. +
  1870. + return 0;
  1871. +}
  1872. +
  1873. +static void shps_pm_complete(struct device *dev)
  1874. +{
  1875. + struct platform_device *pdev = to_platform_device(dev);
  1876. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1877. + int status;
  1878. +
  1879. + dbg_dump_power_states(pdev, "shps_pm_complete");
  1880. + dbg_dump_pciesta(pdev, "shps_pm_complete");
  1881. + dbg_dump_drvsta(pdev, "shps_pm_complete.1");
  1882. +
  1883. + // update power target, dGPU may have been detached while suspended
  1884. + status = shps_dgpu_is_present(pdev);
  1885. + if (status < 0) {
  1886. + dev_err(&pdev->dev, "failed to get dGPU presence: %d\n", status);
  1887. + return;
  1888. + } else if (status == 0) {
  1889. + clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1890. + }
  1891. +
  1892. + /*
  1893. + * During resume, the PCIe core will power on the root-port, which in turn
  1894. + * will power on the dGPU. Most of the state synchronization is already
  1895. + * handled via the SAN RQSG handler, so it is in a fully consistent
  1896. + * on-state here. If requested, turn it off here.
  1897. + *
  1898. + * As there seem to be some synchronization issues turning off the dGPU
  1899. + * directly after the power-on SAN RQSG notification during the resume
  1900. + * process, let's do this here.
  1901. + *
  1902. + * TODO/FIXME:
  1903. + * This does not combat unhandled power-ons when the device is not fully
  1904. + * resumed, i.e. re-suspended before shps_pm_complete is called. Those
  1905. + * should normally not be an issue, but the dGPU does get hot even though
  1906. + * it is suspended, so ideally we want to keep it off.
  1907. + */
  1908. + if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) {
  1909. + status = shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_OFF);
  1910. + if (status)
  1911. + dev_err(&pdev->dev, "failed to power-off dGPU: %d\n", status);
  1912. + }
  1913. +
  1914. + dbg_dump_drvsta(pdev, "shps_pm_complete.2");
  1915. +}
  1916. +
  1917. +static int shps_pm_suspend(struct device *dev)
  1918. +{
  1919. + struct platform_device *pdev = to_platform_device(dev);
  1920. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1921. + int status;
  1922. +
  1923. + if (device_may_wakeup(dev)) {
  1924. + status = enable_irq_wake(drvdata->irq_base_presence);
  1925. + if (status)
  1926. + return status;
  1927. +
  1928. + set_bit(SHPS_STATE_BIT_WAKE_ENABLED, &drvdata->state);
  1929. + }
  1930. +
  1931. + return 0;
  1932. +}
  1933. +
  1934. +static int shps_pm_resume(struct device *dev)
  1935. +{
  1936. + struct platform_device *pdev = to_platform_device(dev);
  1937. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1938. + int status = 0;
  1939. +
  1940. + if (test_and_clear_bit(SHPS_STATE_BIT_WAKE_ENABLED, &drvdata->state))
  1941. + status = disable_irq_wake(drvdata->irq_base_presence);
  1942. +
  1943. + return status;
  1944. +}
  1945. +
  1946. +static void shps_shutdown(struct platform_device *pdev)
  1947. +{
  1948. + int status;
  1949. +
  1950. + /*
  1951. + * Turn on dGPU before shutting down. This allows the core drivers to
  1952. + * properly shut down the device. If we don't do this, the pcieport driver
  1953. + * will complain that the device has already been disabled.
  1954. + */
  1955. + status = shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_ON);
  1956. + if (status)
  1957. + dev_err(&pdev->dev, "failed to turn on dGPU: %d\n", status);
  1958. +}
  1959. +
  1960. +static int shps_dgpu_detached(struct platform_device *pdev)
  1961. +{
  1962. + dbg_dump_power_states(pdev, "shps_dgpu_detached");
  1963. + return shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_OFF);
  1964. +}
  1965. +
  1966. +static int shps_dgpu_attached(struct platform_device *pdev)
  1967. +{
  1968. + dbg_dump_power_states(pdev, "shps_dgpu_attached");
  1969. + return 0;
  1970. +}
  1971. +
  1972. +static int shps_dgpu_powered_on(struct platform_device *pdev)
  1973. +{
  1974. + /*
  1975. + * This function gets called directly after a power-state transition of
  1976. + * the dGPU root port out of D3cold state, indicating a power-on of the
  1977. + * dGPU. Specifically, this function is called from the RQSG handler of
  1978. + * SAN, invoked by the ACPI _ON method of the dGPU root port. This means
  1979. + * that this function is run inside `pci_set_power_state(rp, ...)`
1980. + * synchronously and thus returns before the `pci_set_power_state` call
  1981. + * does.
  1982. + *
  1983. + * `pci_set_power_state` may either be called by us or when the PCI
  1984. + * subsystem decides to power up the root port (e.g. during resume). Thus
  1985. + * we should use this function to ensure that the dGPU and root port
  1986. + * states are consistent when an unexpected power-up is encountered.
  1987. + */
  1988. +
  1989. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1990. + struct pci_dev *rp = drvdata->dgpu_root_port;
  1991. + int status;
  1992. +
  1993. + dbg_dump_drvsta(pdev, "shps_dgpu_powered_on.1");
  1994. +
  1995. + // if we caused the root port to power-on, return
  1996. + if (test_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state))
  1997. + return 0;
  1998. +
  1999. + // if dGPU is not present, force power-target to off and return
  2000. + status = shps_dgpu_is_present(pdev);
  2001. + if (status == 0)
  2002. + clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  2003. + if (status <= 0)
  2004. + return status;
  2005. +
  2006. + mutex_lock(&drvdata->lock);
  2007. +
  2008. + dbg_dump_power_states(pdev, "shps_dgpu_powered_on.1");
  2009. + dbg_dump_pciesta(pdev, "shps_dgpu_powered_on.1");
  2010. + if (drvdata->dgpu_root_port_state)
  2011. + pci_load_and_free_saved_state(rp, &drvdata->dgpu_root_port_state);
  2012. + pci_restore_state(rp);
  2013. + if (!pci_is_enabled(rp))
  2014. + pci_enable_device(rp);
  2015. + pci_set_master(rp);
  2016. + dbg_dump_drvsta(pdev, "shps_dgpu_powered_on.2");
  2017. + dbg_dump_power_states(pdev, "shps_dgpu_powered_on.2");
  2018. + dbg_dump_pciesta(pdev, "shps_dgpu_powered_on.2");
  2019. +
  2020. + mutex_unlock(&drvdata->lock);
  2021. +
  2022. + if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) {
  2023. + dev_warn(&pdev->dev, "unexpected dGPU power-on detected\n");
  2024. + // TODO: schedule state re-check and update
  2025. + }
  2026. +
  2027. + return 0;
  2028. +}
  2029. +
  2030. +static int shps_dgpu_handle_rqsg(struct surface_sam_san_rqsg *rqsg, void *data)
  2031. +{
  2032. + struct platform_device *pdev = data;
  2033. +
  2034. + if (rqsg->tc == SAM_DGPU_TC && rqsg->cid == SAM_DGPU_CID_POWERON)
  2035. + return shps_dgpu_powered_on(pdev);
  2036. +
  2037. + dev_warn(&pdev->dev, "unimplemented dGPU request: RQSG(0x%02x, 0x%02x, 0x%02x)\n",
  2038. + rqsg->tc, rqsg->cid, rqsg->iid);
  2039. + return 0;
  2040. +}
  2041. +
  2042. +static irqreturn_t shps_dgpu_presence_irq(int irq, void *data)
  2043. +{
  2044. + struct platform_device *pdev = data;
  2045. + bool dgpu_present;
  2046. + int status;
  2047. +
  2048. + status = shps_dgpu_is_present(pdev);
  2049. + if (status < 0) {
  2050. + dev_err(&pdev->dev, "failed to check physical dGPU presence: %d\n", status);
  2051. + return IRQ_HANDLED;
  2052. + }
  2053. +
  2054. + dgpu_present = status != 0;
  2055. + dev_info(&pdev->dev, "dGPU physically %s\n", dgpu_present ? "attached" : "detached");
  2056. +
  2057. + if (dgpu_present)
  2058. + status = shps_dgpu_attached(pdev);
  2059. + else
  2060. + status = shps_dgpu_detached(pdev);
  2061. +
  2062. + if (status)
  2063. + dev_err(&pdev->dev, "error handling dGPU interrupt: %d\n", status);
  2064. +
  2065. + return IRQ_HANDLED;
  2066. +}
  2067. +
  2068. +static irqreturn_t shps_base_presence_irq(int irq, void *data)
  2069. +{
  2070. + return IRQ_HANDLED; // nothing to do, just wake
  2071. +}
  2072. +
  2073. +
  2074. +static int shps_gpios_setup(struct platform_device *pdev)
  2075. +{
  2076. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2077. + struct gpio_desc *gpio_dgpu_power;
  2078. + struct gpio_desc *gpio_dgpu_presence;
  2079. + struct gpio_desc *gpio_base_presence;
  2080. + int status;
  2081. +
  2082. + // get GPIOs
  2083. + gpio_dgpu_power = devm_gpiod_get(&pdev->dev, "dgpu_power", GPIOD_IN);
  2084. + if (IS_ERR(gpio_dgpu_power)) {
  2085. + status = PTR_ERR(gpio_dgpu_power);
  2086. + goto err_out;
  2087. + }
  2088. +
  2089. + gpio_dgpu_presence = devm_gpiod_get(&pdev->dev, "dgpu_presence", GPIOD_IN);
  2090. + if (IS_ERR(gpio_dgpu_presence)) {
  2091. + status = PTR_ERR(gpio_dgpu_presence);
  2092. + goto err_out;
  2093. + }
  2094. +
  2095. + gpio_base_presence = devm_gpiod_get(&pdev->dev, "base_presence", GPIOD_IN);
  2096. + if (IS_ERR(gpio_base_presence)) {
  2097. + status = PTR_ERR(gpio_base_presence);
  2098. + goto err_out;
  2099. + }
  2100. +
  2101. + // export GPIOs
  2102. + status = gpiod_export(gpio_dgpu_power, false);
  2103. + if (status)
  2104. + goto err_out;
  2105. +
  2106. + status = gpiod_export(gpio_dgpu_presence, false);
  2107. + if (status)
  2108. + goto err_export_dgpu_presence;
  2109. +
  2110. + status = gpiod_export(gpio_base_presence, false);
  2111. + if (status)
  2112. + goto err_export_base_presence;
  2113. +
  2114. + // create sysfs links
  2115. + status = gpiod_export_link(&pdev->dev, "gpio-dgpu_power", gpio_dgpu_power);
  2116. + if (status)
  2117. + goto err_link_dgpu_power;
  2118. +
  2119. + status = gpiod_export_link(&pdev->dev, "gpio-dgpu_presence", gpio_dgpu_presence);
  2120. + if (status)
  2121. + goto err_link_dgpu_presence;
  2122. +
  2123. + status = gpiod_export_link(&pdev->dev, "gpio-base_presence", gpio_base_presence);
  2124. + if (status)
  2125. + goto err_link_base_presence;
  2126. +
  2127. + drvdata->gpio_dgpu_power = gpio_dgpu_power;
  2128. + drvdata->gpio_dgpu_presence = gpio_dgpu_presence;
  2129. + drvdata->gpio_base_presence = gpio_base_presence;
  2130. + return 0;
  2131. +
  2132. +err_link_base_presence:
  2133. + sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_presence");
  2134. +err_link_dgpu_presence:
  2135. + sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_power");
  2136. +err_link_dgpu_power:
  2137. + gpiod_unexport(gpio_base_presence);
  2138. +err_export_base_presence:
  2139. + gpiod_unexport(gpio_dgpu_presence);
  2140. +err_export_dgpu_presence:
  2141. + gpiod_unexport(gpio_dgpu_power);
  2142. +err_out:
  2143. + return status;
  2144. +}
  2145. +
  2146. +static void shps_gpios_remove(struct platform_device *pdev)
  2147. +{
  2148. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2149. +
  2150. + sysfs_remove_link(&pdev->dev.kobj, "gpio-base_presence");
  2151. + sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_presence");
  2152. + sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_power");
  2153. + gpiod_unexport(drvdata->gpio_base_presence);
  2154. + gpiod_unexport(drvdata->gpio_dgpu_presence);
  2155. + gpiod_unexport(drvdata->gpio_dgpu_power);
  2156. +}
  2157. +
  2158. +static int shps_gpios_setup_irq(struct platform_device *pdev)
  2159. +{
  2160. + const int irqf_dgpu = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
  2161. + const int irqf_base = IRQF_SHARED;
  2162. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2163. + int status;
  2164. +
  2165. + status = gpiod_to_irq(drvdata->gpio_base_presence);
  2166. + if (status < 0)
  2167. + return status;
  2168. + drvdata->irq_base_presence = status;
  2169. +
  2170. + status = gpiod_to_irq(drvdata->gpio_dgpu_presence);
  2171. + if (status < 0)
  2172. + return status;
  2173. + drvdata->irq_dgpu_presence = status;
  2174. +
  2175. + status = request_irq(drvdata->irq_base_presence,
  2176. + shps_base_presence_irq, irqf_base,
  2177. + "shps_base_presence_irq", pdev);
  2178. + if (status) {
  2179. + dev_err(&pdev->dev, "base irq failed: %d\n", status);
  2180. + return status;
  2181. + }
  2182. +
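+ /*
+ * The dGPU presence handler may sleep (gpiod_get_value_cansleep() and
+ * the drvdata mutex), hence it is requested as a threaded, one-shot
+ * IRQ.
+ */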
  2183. + status = request_threaded_irq(drvdata->irq_dgpu_presence,
  2184. + NULL, shps_dgpu_presence_irq, irqf_dgpu,
  2185. + "shps_dgpu_presence_irq", pdev);
  2186. + if (status) {
  2187. + free_irq(drvdata->irq_base_presence, pdev);
  2188. + return status;
  2189. + }
  2190. +
  2191. + return 0;
  2192. +}
  2193. +
  2194. +static void shps_gpios_remove_irq(struct platform_device *pdev)
  2195. +{
  2196. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2197. +
  2198. + free_irq(drvdata->irq_base_presence, pdev);
  2199. + free_irq(drvdata->irq_dgpu_presence, pdev);
  2200. +}
  2201. +
2202. +static void shps_sgcp_notify(acpi_handle device, u32 value, void *context)
+{
  2203. + struct platform_device *pdev = context;
  2204. + switch (value) {
  2205. + case ACPI_SGCP_NOTIFY_POWER_ON:
  2206. + shps_dgpu_powered_on(pdev);
  2207. + }
  2208. +}
  2209. +
2210. +static int shps_start_sgcp_notification(struct platform_device *pdev, acpi_handle *sgpc_handle)
+{
  2211. + acpi_handle handle;
  2212. + int status;
  2213. +
  2214. + status = acpi_get_handle(NULL, "\\_SB.SGPC", &handle);
  2215. + if (status) {
  2216. + dev_err(&pdev->dev, "error in get_handle %d\n", status);
  2217. + return status;
  2218. + }
  2219. +
  2220. + status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify, pdev);
  2221. + if (status) {
  2222. + dev_err(&pdev->dev, "error in install notify %d\n", status);
  2223. + *sgpc_handle = NULL;
  2224. + return status;
  2225. + }
  2226. +
  2227. + *sgpc_handle = handle;
  2228. + return 0;
  2229. +}
  2230. +
2231. +static void shps_remove_sgcp_notification(struct platform_device *pdev)
+{
  2232. + int status;
  2233. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2234. +
  2235. + if (drvdata->sgpc_handle) {
  2236. + status = acpi_remove_notify_handler(drvdata->sgpc_handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify);
  2237. + if (status) {
  2238. + dev_err(&pdev->dev, "failed to remove notify handler: %d\n", status);
  2239. + }
  2240. + }
  2241. +}
  2242. +
2243. +static struct shps_hardware_traits shps_detect_hardware_traits(struct platform_device *pdev)
+{
  2244. + const struct shps_hardware_probe *p;
  2245. +
  2246. + for (p = shps_hardware_probe_match; p->hardware_id; ++p) {
  2247. + if (acpi_dev_present(p->hardware_id, NULL, -1)) {
  2248. + break;
  2249. + }
  2250. + }
  2251. +
  2252. + dev_info(&pdev->dev,
  2253. + "shps_detect_hardware_traits found device %s, generation %d\n",
  2254. + p->hardware_id ? p->hardware_id : "SAN (default)",
  2255. + p->generation);
  2256. +
  2257. + return *p->hardware_traits;
  2258. +}
  2259. +
  2260. +static int shps_probe(struct platform_device *pdev)
  2261. +{
  2262. + struct acpi_device *shps_dev = ACPI_COMPANION(&pdev->dev);
  2263. + struct shps_driver_data *drvdata;
  2264. + struct ssam_controller *ctrl;
  2265. + struct device_link *link;
  2266. + int power, status;
  2267. + struct shps_hardware_traits detected_traits;
  2268. +
  2269. + if (gpiod_count(&pdev->dev, NULL) < 0) {
  2270. + dev_err(&pdev->dev, "gpiod_count returned < 0\n");
  2271. + return -ENODEV;
  2272. + }
  2273. +
  2274. + // link to SSH
  2275. + status = ssam_client_bind(&pdev->dev, &ctrl);
  2276. + if (status) {
  2277. + return status == -ENXIO ? -EPROBE_DEFER : status;
  2278. + }
  2279. +
2280. + // detect what kind of hardware we're running on
  2281. + detected_traits = shps_detect_hardware_traits(pdev);
  2282. +
  2283. + if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
  2284. + // link to SAN
  2285. + status = surface_sam_san_consumer_register(&pdev->dev, 0);
  2286. + if (status) {
  2287. + dev_err(&pdev->dev, "failed to register with san consumer: %d\n", status);
  2288. + return status == -ENXIO ? -EPROBE_DEFER : status;
  2289. + }
  2290. + }
  2291. +
  2292. + status = acpi_dev_add_driver_gpios(shps_dev, shps_acpi_gpios);
  2293. + if (status) {
  2294. + dev_err(&pdev->dev, "failed to add gpios: %d\n", status);
  2295. + return status;
  2296. + }
  2297. +
  2298. + drvdata = kzalloc(sizeof(struct shps_driver_data), GFP_KERNEL);
  2299. + if (!drvdata) {
  2300. + status = -ENOMEM;
  2301. + goto err_drvdata;
  2302. + }
  2303. + mutex_init(&drvdata->lock);
  2304. + platform_set_drvdata(pdev, drvdata);
  2305. +
  2306. + drvdata->ctrl = ctrl;
  2307. + drvdata->hardware_traits = detected_traits;
  2308. +
  2309. + drvdata->dgpu_root_port = shps_dgpu_dsm_get_pci_dev(pdev);
  2310. + if (IS_ERR(drvdata->dgpu_root_port)) {
  2311. + status = PTR_ERR(drvdata->dgpu_root_port);
  2312. + dev_err(&pdev->dev, "failed to get pci dev: %d\n", status);
  2313. + goto err_rp_lookup;
  2314. + }
  2315. +
  2316. + status = shps_gpios_setup(pdev);
  2317. + if (status) {
  2318. + dev_err(&pdev->dev, "unable to set up gpios, %d\n", status);
  2319. + goto err_gpio;
  2320. + }
  2321. +
  2322. + status = shps_gpios_setup_irq(pdev);
  2323. + if (status) {
  2324. + dev_err(&pdev->dev, "unable to set up irqs %d\n", status);
  2325. + goto err_gpio_irqs;
  2326. + }
  2327. +
  2328. + status = device_add_groups(&pdev->dev, shps_power_groups);
  2329. + if (status)
  2330. + goto err_devattr;
  2331. +
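+ /*
+ * Make this device a runtime-PM consumer of the dGPU root port. The
+ * link is dropped automatically when this consumer is unbound
+ * (DL_FLAG_AUTOREMOVE_CONSUMER).
+ */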
  2332. + link = device_link_add(&pdev->dev, &drvdata->dgpu_root_port->dev,
  2333. + DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
2334. + if (!link) {
2335. + status = -EFAULT;
+ goto err_devlink;
+ }
  2336. +
  2337. + if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
  2338. + status = surface_sam_san_set_rqsg_handler(shps_dgpu_handle_rqsg, pdev);
  2339. + if (status) {
  2340. + dev_err(&pdev->dev, "unable to set SAN notification handler (%d)\n", status);
  2341. + goto err_devlink;
  2342. + }
  2343. + } else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
  2344. + status = shps_start_sgcp_notification(pdev, &drvdata->sgpc_handle);
  2345. + if (status) {
  2346. + dev_err(&pdev->dev, "unable to install SGCP notification handler (%d)\n", status);
  2347. + goto err_devlink;
  2348. + }
  2349. + }
  2350. +
2351. + // if the dGPU is not present, turn off the root port; otherwise obey the module param
  2352. + status = shps_dgpu_is_present(pdev);
  2353. + if (status < 0)
  2354. + goto err_post_notification;
  2355. +
  2356. + power = status == 0 ? SHPS_DGPU_POWER_OFF : param_dgpu_power_init;
  2357. + if (power != SHPS_DGPU_MP_POWER_ASIS) {
  2358. + status = shps_dgpu_set_power(pdev, power);
  2359. + if (status)
  2360. + goto err_post_notification;
  2361. + }
  2362. +
  2363. + // initialize power target
  2364. + status = shps_dgpu_rp_get_power(pdev);
  2365. + if (status < 0)
  2366. + goto err_pwrtgt;
  2367. +
  2368. + if (status)
  2369. + set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  2370. + else
  2371. + clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  2372. +
  2373. + device_init_wakeup(&pdev->dev, true);
  2374. + return 0;
  2375. +
  2376. +err_pwrtgt:
  2377. + if (param_dgpu_power_exit != SHPS_DGPU_MP_POWER_ASIS) {
  2378. + status = shps_dgpu_set_power(pdev, param_dgpu_power_exit);
  2379. + if (status)
  2380. + dev_err(&pdev->dev, "failed to set dGPU power state: %d\n", status);
  2381. + }
  2382. +err_post_notification:
  2383. + if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
  2384. + shps_remove_sgcp_notification(pdev);
  2385. + } else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
  2386. + surface_sam_san_set_rqsg_handler(NULL, NULL);
  2387. + }
  2388. +err_devlink:
  2389. + device_remove_groups(&pdev->dev, shps_power_groups);
  2390. +err_devattr:
  2391. + shps_gpios_remove_irq(pdev);
  2392. +err_gpio_irqs:
  2393. + shps_gpios_remove(pdev);
  2394. +err_gpio:
  2395. + pci_dev_put(drvdata->dgpu_root_port);
  2396. +err_rp_lookup:
  2397. + platform_set_drvdata(pdev, NULL);
  2398. + kfree(drvdata);
  2399. +err_drvdata:
  2400. + acpi_dev_remove_driver_gpios(shps_dev);
  2401. + return status;
  2402. +}
  2403. +
  2404. +static int shps_remove(struct platform_device *pdev)
  2405. +{
  2406. + struct acpi_device *shps_dev = ACPI_COMPANION(&pdev->dev);
  2407. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2408. + int status;
  2409. +
  2410. + if (param_dgpu_power_exit != SHPS_DGPU_MP_POWER_ASIS) {
  2411. + status = shps_dgpu_set_power(pdev, param_dgpu_power_exit);
  2412. + if (status)
  2413. + dev_err(&pdev->dev, "failed to set dGPU power state: %d\n", status);
  2414. + }
  2415. +
  2416. + device_set_wakeup_capable(&pdev->dev, false);
  2417. +
  2418. + if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
  2419. + shps_remove_sgcp_notification(pdev);
  2420. + } else if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
  2421. + surface_sam_san_set_rqsg_handler(NULL, NULL);
  2422. + }
  2423. + device_remove_groups(&pdev->dev, shps_power_groups);
  2424. + shps_gpios_remove_irq(pdev);
  2425. + shps_gpios_remove(pdev);
  2426. + pci_dev_put(drvdata->dgpu_root_port);
  2427. + platform_set_drvdata(pdev, NULL);
  2428. + kfree(drvdata);
  2429. +
  2430. + acpi_dev_remove_driver_gpios(shps_dev);
  2431. + return 0;
  2432. +}
  2433. +
  2434. +
  2435. +static const struct dev_pm_ops shps_pm_ops = {
  2436. + .prepare = shps_pm_prepare,
  2437. + .complete = shps_pm_complete,
  2438. + .suspend = shps_pm_suspend,
  2439. + .resume = shps_pm_resume,
  2440. +};
  2441. +
  2442. +static const struct acpi_device_id shps_acpi_match[] = {
  2443. + { "MSHW0153", 0 },
  2444. + { },
  2445. +};
  2446. +MODULE_DEVICE_TABLE(acpi, shps_acpi_match);
  2447. +
  2448. +static struct platform_driver surface_sam_hps = {
  2449. + .probe = shps_probe,
  2450. + .remove = shps_remove,
  2451. + .shutdown = shps_shutdown,
  2452. + .driver = {
  2453. + .name = "surface_dgpu_hps",
  2454. + .acpi_match_table = shps_acpi_match,
  2455. + .pm = &shps_pm_ops,
  2456. + },
  2457. +};
  2458. +
  2459. +module_platform_driver(surface_sam_hps);
  2460. +
  2461. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  2462. +MODULE_DESCRIPTION("Surface Hot-Plug System (HPS) and dGPU power-state Driver for Surface Book 2");
  2463. +MODULE_LICENSE("GPL");
  2464. diff --git a/drivers/platform/x86/surface_sam/surface_sam_san.c b/drivers/platform/x86/surface_sam/surface_sam_san.c
  2465. new file mode 100644
  2466. index 0000000000000..eab4e178a8450
  2467. --- /dev/null
  2468. +++ b/drivers/platform/x86/surface_sam/surface_sam_san.c
  2469. @@ -0,0 +1,930 @@
  2470. +// SPDX-License-Identifier: GPL-2.0-or-later
  2471. +/*
  2472. + * Surface ACPI Notify (SAN) and ACPI integration driver for SAM.
  2473. + * Translates communication from ACPI to SSH and back.
  2474. + */
  2475. +
  2476. +#include <asm/unaligned.h>
  2477. +#include <linux/acpi.h>
  2478. +#include <linux/delay.h>
  2479. +#include <linux/jiffies.h>
  2480. +#include <linux/kernel.h>
  2481. +#include <linux/platform_device.h>
  2482. +
  2483. +#include "surface_sam_ssh.h"
  2484. +#include "surface_sam_san.h"
  2485. +
  2486. +
  2487. +#define SAN_RQST_RETRY 5
  2488. +
  2489. +#define SAN_DSM_REVISION 0
  2490. +#define SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT 0x09
  2491. +
  2492. +static const guid_t SAN_DSM_UUID =
  2493. + GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d,
  2494. + 0x48, 0x7c, 0x91, 0xab, 0x3c);
  2495. +
  2496. +#define SAM_EVENT_DELAY_PWR_ADAPTER msecs_to_jiffies(5000)
  2497. +#define SAM_EVENT_DELAY_PWR_BST msecs_to_jiffies(2500)
  2498. +
  2499. +#define SAM_EVENT_PWR_CID_BIX 0x15
  2500. +#define SAM_EVENT_PWR_CID_BST 0x16
  2501. +#define SAM_EVENT_PWR_CID_ADAPTER 0x17
  2502. +#define SAM_EVENT_PWR_CID_DPTF 0x4f
  2503. +
  2504. +#define SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT 0x0b
  2505. +
  2506. +
  2507. +struct san_acpi_consumer {
  2508. + char *path;
  2509. + bool required;
  2510. + u32 flags;
  2511. +};
  2512. +
  2513. +struct san_handler_data {
  2514. + struct acpi_connection_info info; // must be first
  2515. +};
  2516. +
  2517. +struct san_consumer_link {
  2518. + const struct san_acpi_consumer *properties;
  2519. + struct device_link *link;
  2520. +};
  2521. +
  2522. +struct san_consumers {
  2523. + u32 num;
  2524. + struct san_consumer_link *links;
  2525. +};
  2526. +
  2527. +struct san_data {
  2528. + struct device *dev;
  2529. + struct ssam_controller *ctrl;
  2530. +
  2531. + struct san_handler_data context;
  2532. + struct san_consumers consumers;
  2533. +
  2534. + struct ssam_event_notifier nf_bat;
  2535. + struct ssam_event_notifier nf_tmp;
  2536. +};
  2537. +
  2538. +#define to_san_data(ptr, member) \
  2539. + container_of(ptr, struct san_data, member)
  2540. +
  2541. +struct san_event_work {
  2542. + struct delayed_work work;
  2543. + struct device *dev;
  2544. + struct ssam_event event; // must be last
  2545. +};
  2546. +
  2547. +struct gsb_data_in {
  2548. + u8 cv;
  2549. +} __packed;
  2550. +
  2551. +struct gsb_data_rqsx {
  2552. + u8 cv; // command value (should be 0x01 or 0x03)
  2553. + u8 tc; // target controller
2554. + u8 tid; // transport channel ID
  2555. + u8 iid; // target sub-controller (e.g. primary vs. secondary battery)
  2556. + u8 snc; // expect-response-flag
  2557. + u8 cid; // command ID
  2558. + u16 cdl; // payload length
  2559. + u8 pld[0]; // payload
  2560. +} __packed;
  2561. +
  2562. +struct gsb_data_etwl {
  2563. + u8 cv; // command value (should be 0x02)
  2564. + u8 etw3; // ?
  2565. + u8 etw4; // ?
  2566. + u8 msg[0]; // error message (ASCIIZ)
  2567. +} __packed;
  2568. +
  2569. +struct gsb_data_out {
  2570. + u8 status; // _SSH communication status
  2571. + u8 len; // _SSH payload length
  2572. + u8 pld[0]; // _SSH payload
  2573. +} __packed;
  2574. +
  2575. +union gsb_buffer_data {
  2576. + struct gsb_data_in in; // common input
  2577. + struct gsb_data_rqsx rqsx; // RQSX input
  2578. + struct gsb_data_etwl etwl; // ETWL input
  2579. + struct gsb_data_out out; // output
  2580. +};
  2581. +
  2582. +struct gsb_buffer {
  2583. + u8 status; // GSB AttribRawProcess status
  2584. + u8 len; // GSB AttribRawProcess length
  2585. + union gsb_buffer_data data;
  2586. +} __packed;
  2587. +
  2588. +#define SAN_GSB_MAX_RQSX_PAYLOAD (U8_MAX - 2 - sizeof(struct gsb_data_rqsx))
  2589. +#define SAN_GSB_MAX_RESPONSE (U8_MAX - 2 - sizeof(struct gsb_data_out))
  2590. +
  2591. +#define san_request_sync_onstack(ctrl, rqst, rsp) \
  2592. + ssam_request_sync_onstack(ctrl, rqst, rsp, SAN_GSB_MAX_RQSX_PAYLOAD)
  2593. +
  2594. +
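+ /*
+ * Note: these values double as the SAN _DSM function indices used to
+ * signal the corresponding event (see san_acpi_notify_power_event()).
+ */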
  2595. +enum san_pwr_event {
  2596. + SAN_PWR_EVENT_BAT1_STAT = 0x03,
  2597. + SAN_PWR_EVENT_BAT1_INFO = 0x04,
  2598. + SAN_PWR_EVENT_ADP1_STAT = 0x05,
  2599. + SAN_PWR_EVENT_ADP1_INFO = 0x06,
  2600. + SAN_PWR_EVENT_BAT2_STAT = 0x07,
  2601. + SAN_PWR_EVENT_BAT2_INFO = 0x08,
  2602. + SAN_PWR_EVENT_DPTF = 0x0A,
  2603. +};
  2604. +
  2605. +
  2606. +static int sam_san_default_rqsg_handler(struct surface_sam_san_rqsg *rqsg, void *data);
  2607. +
  2608. +struct sam_san_rqsg_if {
  2609. + struct mutex lock;
  2610. + struct device *san_dev;
  2611. + surface_sam_san_rqsg_handler_fn handler;
  2612. + void *handler_data;
  2613. +};
  2614. +
  2615. +static struct sam_san_rqsg_if rqsg_if = {
  2616. + .lock = __MUTEX_INITIALIZER(rqsg_if.lock),
  2617. + .san_dev = NULL,
  2618. + .handler = sam_san_default_rqsg_handler,
  2619. + .handler_data = NULL,
  2620. +};
  2621. +
  2622. +int surface_sam_san_consumer_register(struct device *consumer, u32 flags)
  2623. +{
  2624. + const u32 valid = DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE;
  2625. + int status;
  2626. +
  2627. + if ((flags | valid) != valid)
  2628. + return -EINVAL;
  2629. +
  2630. + flags |= DL_FLAG_AUTOREMOVE_CONSUMER;
  2631. +
  2632. + mutex_lock(&rqsg_if.lock);
  2633. + if (rqsg_if.san_dev)
  2634. + status = device_link_add(consumer, rqsg_if.san_dev, flags) ? 0 : -EINVAL;
  2635. + else
  2636. + status = -ENXIO;
  2637. + mutex_unlock(&rqsg_if.lock);
  2638. + return status;
  2639. +}
  2640. +EXPORT_SYMBOL_GPL(surface_sam_san_consumer_register);
  2641. +
  2642. +int surface_sam_san_set_rqsg_handler(surface_sam_san_rqsg_handler_fn fn, void *data)
  2643. +{
  2644. + int status = -EBUSY;
  2645. +
  2646. + mutex_lock(&rqsg_if.lock);
  2647. +
  2648. + if (rqsg_if.handler == sam_san_default_rqsg_handler || !fn) {
  2649. + rqsg_if.handler = fn ? fn : sam_san_default_rqsg_handler;
  2650. + rqsg_if.handler_data = fn ? data : NULL;
  2651. + status = 0;
  2652. + }
  2653. +
  2654. + mutex_unlock(&rqsg_if.lock);
  2655. + return status;
  2656. +}
  2657. +EXPORT_SYMBOL_GPL(surface_sam_san_set_rqsg_handler);
  2658. +
  2659. +int san_call_rqsg_handler(struct surface_sam_san_rqsg *rqsg)
  2660. +{
  2661. + int status;
  2662. +
  2663. + mutex_lock(&rqsg_if.lock);
  2664. + status = rqsg_if.handler(rqsg, rqsg_if.handler_data);
  2665. + mutex_unlock(&rqsg_if.lock);
  2666. +
  2667. + return status;
  2668. +}
  2669. +
  2670. +static int sam_san_default_rqsg_handler(struct surface_sam_san_rqsg *rqsg, void *data)
  2671. +{
  2672. + struct device *dev = rqsg_if.san_dev;
  2673. +
  2674. + dev_warn(dev, "unhandled request: RQSG(0x%02x, 0x%02x, 0x%02x)\n",
  2675. + rqsg->tc, rqsg->cid, rqsg->iid);
  2676. +
  2677. + return 0;
  2678. +}
  2679. +
  2680. +
  2681. +static bool san_acpi_can_notify(struct device *dev, u64 func)
  2682. +{
  2683. + acpi_handle san = ACPI_HANDLE(dev);
  2684. + return acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, 1 << func);
  2685. +}
  2686. +
  2687. +static int san_acpi_notify_power_event(struct device *dev, enum san_pwr_event event)
  2688. +{
  2689. + acpi_handle san = ACPI_HANDLE(dev);
  2690. + union acpi_object *obj;
  2691. +
  2692. + if (!san_acpi_can_notify(dev, event))
  2693. + return 0;
  2694. +
  2695. + dev_dbg(dev, "notify power event 0x%02x\n", event);
  2696. + obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
  2697. + event, NULL, ACPI_TYPE_BUFFER);
  2698. +
  2699. + if (IS_ERR_OR_NULL(obj))
  2700. + return obj ? PTR_ERR(obj) : -ENXIO;
  2701. +
  2702. + if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
  2703. + dev_err(dev, "got unexpected result from _DSM\n");
  2704. + return -EFAULT;
  2705. + }
  2706. +
  2707. + ACPI_FREE(obj);
  2708. + return 0;
  2709. +}
  2710. +
  2711. +static int san_acpi_notify_sensor_trip_point(struct device *dev, u8 iid)
  2712. +{
  2713. + acpi_handle san = ACPI_HANDLE(dev);
  2714. + union acpi_object *obj;
  2715. + union acpi_object param;
  2716. +
  2717. + if (!san_acpi_can_notify(dev, SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT))
  2718. + return 0;
  2719. +
  2720. + param.type = ACPI_TYPE_INTEGER;
  2721. + param.integer.value = iid;
  2722. +
  2723. + obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
  2724. + SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT,
  2725. + &param, ACPI_TYPE_BUFFER);
  2726. +
  2727. + if (IS_ERR_OR_NULL(obj))
  2728. + return obj ? PTR_ERR(obj) : -ENXIO;
  2729. +
  2730. + if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
  2731. + dev_err(dev, "got unexpected result from _DSM\n");
  2732. + return -EFAULT;
  2733. + }
  2734. +
  2735. + ACPI_FREE(obj);
  2736. + return 0;
  2737. +}
  2738. +
  2739. +
  2740. +static inline int san_evt_power_adapter(struct device *dev, const struct ssam_event *event)
  2741. +{
  2742. + int status;
  2743. +
  2744. + status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_ADP1_STAT);
  2745. + if (status)
  2746. + return status;
  2747. +
  2748. + /*
2749. + * Ensure that the battery states get updated correctly.
  2750. + * When the battery is fully charged and an adapter is plugged in, it
  2751. + * sometimes is not updated correctly, instead showing it as charging.
  2752. + * Explicitly trigger battery updates to fix this.
  2753. + */
  2754. +
  2755. + status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT1_STAT);
  2756. + if (status)
  2757. + return status;
  2758. +
  2759. + return san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT2_STAT);
  2760. +}
  2761. +
  2762. +static inline int san_evt_power_bix(struct device *dev, const struct ssam_event *event)
  2763. +{
  2764. + enum san_pwr_event evcode;
  2765. +
  2766. + if (event->instance_id == 0x02)
  2767. + evcode = SAN_PWR_EVENT_BAT2_INFO;
  2768. + else
  2769. + evcode = SAN_PWR_EVENT_BAT1_INFO;
  2770. +
  2771. + return san_acpi_notify_power_event(dev, evcode);
  2772. +}
  2773. +
  2774. +static inline int san_evt_power_bst(struct device *dev, const struct ssam_event *event)
  2775. +{
  2776. + enum san_pwr_event evcode;
  2777. +
  2778. + if (event->instance_id == 0x02)
  2779. + evcode = SAN_PWR_EVENT_BAT2_STAT;
  2780. + else
  2781. + evcode = SAN_PWR_EVENT_BAT1_STAT;
  2782. +
  2783. + return san_acpi_notify_power_event(dev, evcode);
  2784. +}
  2785. +
  2786. +static inline int san_evt_power_dptf(struct device *dev, const struct ssam_event *event)
  2787. +{
  2788. + union acpi_object payload;
  2789. + acpi_handle san = ACPI_HANDLE(dev);
  2790. + union acpi_object *obj;
  2791. +
  2792. + if (!san_acpi_can_notify(dev, SAN_PWR_EVENT_DPTF))
  2793. + return 0;
  2794. +
  2795. + /*
  2796. + * The Surface ACPI expects a buffer and not a package. It specifically
  2797. + * checks for ObjectType (Arg3) == 0x03. This will cause a warning in
  2798. + * acpica/nsarguments.c, but this can safely be ignored.
  2799. + */
  2800. + payload.type = ACPI_TYPE_BUFFER;
  2801. + payload.buffer.length = event->length;
  2802. + payload.buffer.pointer = (u8 *)&event->data[0];
  2803. +
  2804. + dev_dbg(dev, "notify power event 0x%02x\n", event->command_id);
  2805. + obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
  2806. + SAN_PWR_EVENT_DPTF, &payload,
  2807. + ACPI_TYPE_BUFFER);
  2808. +
  2809. + if (IS_ERR_OR_NULL(obj))
  2810. + return obj ? PTR_ERR(obj) : -ENXIO;
  2811. +
  2812. + if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
  2813. + dev_err(dev, "got unexpected result from _DSM\n");
  2814. + return -EFAULT;
  2815. + }
  2816. +
  2817. + ACPI_FREE(obj);
  2818. + return 0;
  2819. +}
  2820. +
  2821. +static unsigned long san_evt_power_delay(u8 cid)
  2822. +{
  2823. + switch (cid) {
  2824. + case SAM_EVENT_PWR_CID_ADAPTER:
  2825. + /*
  2826. + * Wait for battery state to update before signalling adapter change.
  2827. + */
  2828. + return SAM_EVENT_DELAY_PWR_ADAPTER;
  2829. +
  2830. + case SAM_EVENT_PWR_CID_BST:
  2831. + /*
  2832. + * Ensure we do not miss anything important due to caching.
  2833. + */
  2834. + return SAM_EVENT_DELAY_PWR_BST;
  2835. +
  2836. + case SAM_EVENT_PWR_CID_BIX:
  2837. + case SAM_EVENT_PWR_CID_DPTF:
  2838. + default:
  2839. + return 0;
  2840. + }
  2841. +}
  2842. +
  2843. +static bool san_evt_power(const struct ssam_event *event, struct device *dev)
  2844. +{
  2845. + int status;
  2846. +
  2847. + switch (event->command_id) {
  2848. + case SAM_EVENT_PWR_CID_BIX:
  2849. + status = san_evt_power_bix(dev, event);
  2850. + break;
  2851. +
  2852. + case SAM_EVENT_PWR_CID_BST:
  2853. + status = san_evt_power_bst(dev, event);
  2854. + break;
  2855. +
  2856. + case SAM_EVENT_PWR_CID_ADAPTER:
  2857. + status = san_evt_power_adapter(dev, event);
  2858. + break;
  2859. +
  2860. + case SAM_EVENT_PWR_CID_DPTF:
  2861. + status = san_evt_power_dptf(dev, event);
  2862. + break;
  2863. +
  2864. + default:
  2865. + return false;
  2866. + }
  2867. +
  2868. + if (status)
  2869. + dev_err(dev, "error handling power event (cid = %x)\n",
  2870. + event->command_id);
  2871. +
  2872. + return true;
  2873. +}
  2874. +
  2875. +static void san_evt_power_workfn(struct work_struct *work)
  2876. +{
  2877. + struct san_event_work *ev = container_of(work, struct san_event_work, work.work);
  2878. +
  2879. + san_evt_power(&ev->event, ev->dev);
  2880. + kfree(ev);
  2881. +}
  2882. +
  2883. +
  2884. +static u32 san_evt_power_nb(struct ssam_notifier_block *nb, const struct ssam_event *event)
  2885. +{
  2886. + struct san_data *d = to_san_data(nb, nf_bat.base);
  2887. + struct san_event_work *work;
  2888. + unsigned long delay = san_evt_power_delay(event->command_id);
  2889. +
  2890. + if (delay == 0) {
  2891. + if (san_evt_power(event, d->dev))
  2892. + return SSAM_NOTIF_HANDLED;
  2893. + else
  2894. + return 0;
  2895. + }
  2896. +
  2897. + work = kzalloc(sizeof(struct san_event_work) + event->length, GFP_KERNEL);
  2898. + if (!work)
  2899. + return ssam_notifier_from_errno(-ENOMEM);
  2900. +
  2901. + INIT_DELAYED_WORK(&work->work, san_evt_power_workfn);
  2902. + work->dev = d->dev;
  2903. +
  2904. + memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
  2905. +
  2906. + schedule_delayed_work(&work->work, delay);
  2907. + return SSAM_NOTIF_HANDLED;
  2908. +}
  2909. +
  2910. +
  2911. +static inline int san_evt_thermal_notify(struct device *dev, const struct ssam_event *event)
  2912. +{
  2913. + return san_acpi_notify_sensor_trip_point(dev, event->instance_id);
  2914. +}
  2915. +
  2916. +static bool san_evt_thermal(const struct ssam_event *event, struct device *dev)
  2917. +{
  2918. + int status;
  2919. +
  2920. + switch (event->command_id) {
  2921. + case SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT:
  2922. + status = san_evt_thermal_notify(dev, event);
  2923. + break;
  2924. +
  2925. + default:
  2926. + return false;
  2927. + }
  2928. +
  2929. + if (status) {
  2930. + dev_err(dev, "error handling thermal event (cid = %x)\n",
  2931. + event->command_id);
  2932. + }
  2933. +
  2934. + return true;
  2935. +}
  2936. +
  2937. +static u32 san_evt_thermal_nb(struct ssam_notifier_block *nb, const struct ssam_event *event)
  2938. +{
  2939. + if (san_evt_thermal(event, to_san_data(nb, nf_tmp.base)->dev))
  2940. + return SSAM_NOTIF_HANDLED;
  2941. + else
  2942. + return 0;
  2943. +}
  2944. +
  2945. +
  2946. +static struct gsb_data_rqsx
  2947. +*san_validate_rqsx(struct device *dev, const char *type, struct gsb_buffer *buffer)
  2948. +{
  2949. + struct gsb_data_rqsx *rqsx = &buffer->data.rqsx;
  2950. +
  2951. + if (buffer->len < 8) {
  2952. + dev_err(dev, "invalid %s package (len = %d)\n",
  2953. + type, buffer->len);
  2954. + return NULL;
  2955. + }
  2956. +
  2957. + if (get_unaligned(&rqsx->cdl) != buffer->len - sizeof(struct gsb_data_rqsx)) {
  2958. + dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n",
  2959. + type, buffer->len, get_unaligned(&rqsx->cdl));
  2960. + return NULL;
  2961. + }
  2962. +
  2963. + if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) {
  2964. + dev_err(dev, "payload for %s package too large (cdl = %d)\n",
  2965. + type, get_unaligned(&rqsx->cdl));
  2966. + return NULL;
  2967. + }
  2968. +
  2969. + if (rqsx->tid != 0x01) {
  2970. + dev_warn(dev, "unsupported %s package (tid = 0x%02x)\n",
  2971. + type, rqsx->tid);
  2972. + return NULL;
  2973. + }
  2974. +
  2975. + return rqsx;
  2976. +}
  2977. +
  2978. +static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *buffer)
  2979. +{
  2980. + struct gsb_data_etwl *etwl = &buffer->data.etwl;
  2981. +
  2982. + if (buffer->len < 3) {
  2983. + dev_err(d->dev, "invalid ETWL package (len = %d)\n", buffer->len);
  2984. + return AE_OK;
  2985. + }
  2986. +
  2987. + dev_err(d->dev, "ETWL(0x%02x, 0x%02x): %.*s\n",
  2988. + etwl->etw3, etwl->etw4,
  2989. + buffer->len - 3, (char *)etwl->msg);
  2990. +
  2991. + // indicate success
  2992. + buffer->status = 0x00;
  2993. + buffer->len = 0x00;
  2994. +
  2995. + return AE_OK;
  2996. +}
  2997. +
  2998. +static void gsb_response_error(struct gsb_buffer *gsb, int status)
  2999. +{
  3000. + gsb->status = 0x00;
  3001. + gsb->len = 0x02;
  3002. + gsb->data.out.status = (u8)(-status);
  3003. + gsb->data.out.len = 0x00;
  3004. +}
  3005. +
  3006. +static void gsb_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len)
  3007. +{
  3008. + gsb->status = 0x00;
  3009. + gsb->len = len + 2;
  3010. + gsb->data.out.status = 0x00;
  3011. + gsb->data.out.len = len;
  3012. +
  3013. + if (len)
  3014. + memcpy(&gsb->data.out.pld[0], ptr, len);
  3015. +}
  3016. +
  3017. +static acpi_status san_rqst_fixup_suspended(struct ssam_request *rqst,
  3018. + struct gsb_buffer *gsb)
  3019. +{
  3020. + if (rqst->target_category == 0x11 && rqst->command_id == 0x0D) {
  3021. + /* Base state quirk:
  3022. + * The base state may be queried from ACPI when the EC is still
  3023. + * suspended. In this case it will return '-EPERM'. This query
  3024. + * will only be triggered from the ACPI lid GPE interrupt, thus
  3025. + * we are either in laptop or studio mode (base status 0x01 or
  3026. + * 0x02). Furthermore, we will only get here if the device (and
  3027. + * EC) have been suspended.
  3028. + *
  3029. + * We now assume that the device is in laptop mode (0x01). This
  3030. + * has the drawback that it will wake the device when unfolding
  3031. + * it in studio mode, but it also allows us to avoid actively
  3032. + * waiting for the EC to wake up, which may incur a notable
  3033. + * delay.
  3034. + */
  3035. +
  3036. + u8 base_state = 1;
  3037. + gsb_response_success(gsb, &base_state, 1);
  3038. + return AE_OK;
  3039. + }
  3040. +
  3041. + gsb_response_error(gsb, -ENXIO);
  3042. + return AE_OK;
  3043. +}
  3044. +
  3045. +static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer)
  3046. +{
  3047. + u8 rspbuf[SAN_GSB_MAX_RESPONSE];
  3048. + struct gsb_data_rqsx *gsb_rqst;
  3049. + struct ssam_request rqst;
  3050. + struct ssam_response rsp;
  3051. + int status = 0;
  3052. + int try;
  3053. +
  3054. + gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer);
  3055. + if (!gsb_rqst)
  3056. + return AE_OK;
  3057. +
  3058. + rqst.target_category = gsb_rqst->tc;
  3059. + rqst.command_id = gsb_rqst->cid;
  3060. + rqst.instance_id = gsb_rqst->iid;
  3061. + rqst.channel = gsb_rqst->tid;
  3062. + rqst.flags = gsb_rqst->snc ? SSAM_REQUEST_HAS_RESPONSE : 0;
  3063. + rqst.length = get_unaligned(&gsb_rqst->cdl);
  3064. + rqst.payload = &gsb_rqst->pld[0];
  3065. +
  3066. + rsp.capacity = ARRAY_SIZE(rspbuf);
  3067. + rsp.length = 0;
  3068. + rsp.pointer = &rspbuf[0];
  3069. +
  3070. + // handle suspended device
  3071. + if (d->dev->power.is_suspended) {
  3072. + dev_warn(d->dev, "rqst: device is suspended, not executing\n");
  3073. + return san_rqst_fixup_suspended(&rqst, buffer);
  3074. + }
  3075. +
  3076. + for (try = 0; try < SAN_RQST_RETRY; try++) {
  3077. + if (try)
  3078. + dev_warn(d->dev, "rqst: IO error, trying again\n");
  3079. +
  3080. + status = san_request_sync_onstack(d->ctrl, &rqst, &rsp);
  3081. + if (status != -ETIMEDOUT && status != -EREMOTEIO)
  3082. + break;
  3083. + }
  3084. +
  3085. + if (!status) {
  3086. + gsb_response_success(buffer, rsp.pointer, rsp.length);
  3087. + } else {
  3088. + dev_err(d->dev, "rqst: failed with error %d\n", status);
  3089. + gsb_response_error(buffer, status);
  3090. + }
  3091. +
  3092. + return AE_OK;
  3093. +}
  3094. +
  3095. +static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer)
  3096. +{
  3097. + struct gsb_data_rqsx *gsb_rqsg;
  3098. + struct surface_sam_san_rqsg rqsg;
  3099. + int status;
  3100. +
  3101. + gsb_rqsg = san_validate_rqsx(d->dev, "RQSG", buffer);
  3102. + if (!gsb_rqsg)
  3103. + return AE_OK;
  3104. +
  3105. + rqsg.tc = gsb_rqsg->tc;
  3106. + rqsg.cid = gsb_rqsg->cid;
  3107. + rqsg.iid = gsb_rqsg->iid;
  3108. + rqsg.cdl = get_unaligned(&gsb_rqsg->cdl);
  3109. + rqsg.pld = &gsb_rqsg->pld[0];
  3110. +
  3111. + status = san_call_rqsg_handler(&rqsg);
  3112. + if (!status) {
  3113. + gsb_response_success(buffer, NULL, 0);
  3114. + } else {
  3115. + dev_err(d->dev, "rqsg: failed with error %d\n", status);
  3116. + gsb_response_error(buffer, status);
  3117. + }
  3118. +
  3119. + return AE_OK;
  3120. +}
  3121. +
  3122. +
  3123. +static acpi_status
  3124. +san_opreg_handler(u32 function, acpi_physical_address command,
  3125. + u32 bits, u64 *value64,
  3126. + void *opreg_context, void *region_context)
  3127. +{
  3128. + struct san_data *d = to_san_data(opreg_context, context);
  3129. + struct gsb_buffer *buffer = (struct gsb_buffer *)value64;
  3130. + int accessor_type = (0xFFFF0000 & function) >> 16;
  3131. +
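+ // ACPICA passes the GSB accessor type in the upper 16 bits of the function code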
  3132. + if (command != 0) {
  3133. + dev_warn(d->dev, "unsupported command: 0x%02llx\n", command);
  3134. + return AE_OK;
  3135. + }
  3136. +
  3137. + if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
  3138. + dev_err(d->dev, "invalid access type: 0x%02x\n", accessor_type);
  3139. + return AE_OK;
  3140. + }
  3141. +
3142. + // buffer must at least contain the command value
  3143. + if (buffer->len == 0) {
  3144. + dev_err(d->dev, "request-package too small\n");
  3145. + return AE_OK;
  3146. + }
  3147. +
  3148. + switch (buffer->data.in.cv) {
  3149. + case 0x01: return san_rqst(d, buffer);
  3150. + case 0x02: return san_etwl(d, buffer);
  3151. + case 0x03: return san_rqsg(d, buffer);
  3152. + }
  3153. +
  3154. + dev_warn(d->dev, "unsupported SAN0 request (cv: 0x%02x)\n", buffer->data.in.cv);
  3155. + return AE_OK;
  3156. +}
  3157. +
  3158. +static int san_events_register(struct platform_device *pdev)
  3159. +{
  3160. + struct san_data *d = platform_get_drvdata(pdev);
  3161. + int status;
  3162. +
  3163. + d->nf_bat.base.priority = 1;
  3164. + d->nf_bat.base.fn = san_evt_power_nb;
  3165. + d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;
  3166. + d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;
  3167. + d->nf_bat.event.id.instance = 0;
  3168. + d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;
  3169. +
  3170. + d->nf_tmp.base.priority = 1;
  3171. + d->nf_tmp.base.fn = san_evt_thermal_nb;
  3172. + d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;
  3173. + d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;
  3174. + d->nf_tmp.event.id.instance = 0;
  3175. + d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;
  3176. +
  3177. + status = ssam_notifier_register(d->ctrl, &d->nf_bat);
  3178. + if (status)
  3179. + return status;
  3180. +
  3181. + status = ssam_notifier_register(d->ctrl, &d->nf_tmp);
  3182. + if (status)
  3183. + ssam_notifier_unregister(d->ctrl, &d->nf_bat);
  3184. +
  3185. + return status;
  3186. +}
  3187. +
  3188. +static void san_events_unregister(struct platform_device *pdev)
  3189. +{
  3190. + struct san_data *d = platform_get_drvdata(pdev);
  3191. +
  3192. + ssam_notifier_unregister(d->ctrl, &d->nf_bat);
  3193. + ssam_notifier_unregister(d->ctrl, &d->nf_tmp);
  3194. +}
  3195. +
  3196. +
  3197. +static int san_consumers_link(struct platform_device *pdev,
  3198. + const struct san_acpi_consumer *cons,
  3199. + struct san_consumers *out)
  3200. +{
  3201. + const struct san_acpi_consumer *con;
  3202. + struct san_consumer_link *links, *link;
  3203. + struct acpi_device *adev;
  3204. + acpi_handle handle;
  3205. + u32 max_links = 0;
  3206. + int status;
  3207. +
  3208. + if (!cons)
  3209. + return 0;
  3210. +
  3211. + // count links
  3212. + for (con = cons; con->path; ++con)
  3213. + max_links += 1;
  3214. +
  3215. + // allocate
3216. + links = kcalloc(max_links, sizeof(struct san_consumer_link), GFP_KERNEL);
3217. + if (!links)
3218. + return -ENOMEM;
3219. +
3220. + link = &links[0];
  3221. +
  3222. + // create links
  3223. + for (con = cons; con->path; ++con) {
  3224. + status = acpi_get_handle(NULL, con->path, &handle);
  3225. + if (status) {
  3226. + if (con->required || status != AE_NOT_FOUND) {
  3227. + status = -ENXIO;
  3228. + goto cleanup;
  3229. + } else {
  3230. + continue;
  3231. + }
  3232. + }
  3233. +
  3234. + status = acpi_bus_get_device(handle, &adev);
  3235. + if (status)
  3236. + goto cleanup;
  3237. +
  3238. + link->link = device_link_add(&adev->dev, &pdev->dev, con->flags);
  3239. + if (!(link->link)) {
  3240. + status = -EFAULT;
  3241. + goto cleanup;
  3242. + }
  3243. + link->properties = con;
  3244. +
  3245. + link += 1;
  3246. + }
  3247. +
  3248. + out->num = link - links;
  3249. + out->links = links;
  3250. +
  3251. + return 0;
  3252. +
  3253. +cleanup:
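+ // only stateless links may be deleted via device_link_del(); managed links are cleaned up by the driver core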
  3254. + for (link = link - 1; link >= links; --link) {
  3255. + if (link->properties->flags & DL_FLAG_STATELESS)
  3256. + device_link_del(link->link);
  3257. + }
  3258. +
  3259. + return status;
  3260. +}
  3261. +
  3262. +static void san_consumers_unlink(struct san_consumers *consumers)
  3263. +{
  3264. + u32 i;
  3265. +
  3266. + if (!consumers)
  3267. + return;
  3268. +
  3269. + for (i = 0; i < consumers->num; ++i) {
  3270. + if (consumers->links[i].properties->flags & DL_FLAG_STATELESS)
  3271. + device_link_del(consumers->links[i].link);
  3272. + }
  3273. +
  3274. + kfree(consumers->links);
  3275. +
  3276. + consumers->num = 0;
  3277. + consumers->links = NULL;
  3278. +}
  3279. +
  3280. +static int surface_sam_san_probe(struct platform_device *pdev)
  3281. +{
  3282. + const struct san_acpi_consumer *cons;
  3283. + acpi_handle san = ACPI_HANDLE(&pdev->dev); // _SAN device node
  3284. + struct ssam_controller *ctrl;
  3285. + struct san_data *data;
  3286. + int status;
  3287. +
  3288. + status = ssam_client_bind(&pdev->dev, &ctrl);
  3289. + if (status)
  3290. + return status == -ENXIO ? -EPROBE_DEFER : status;
  3291. +
  3292. + data = kzalloc(sizeof(struct san_data), GFP_KERNEL);
  3293. + if (!data)
  3294. + return -ENOMEM;
  3295. +
  3296. + data->dev = &pdev->dev;
  3297. + data->ctrl = ctrl;
  3298. +
  3299. + cons = acpi_device_get_match_data(&pdev->dev);
  3300. + status = san_consumers_link(pdev, cons, &data->consumers);
  3301. + if (status)
  3302. + goto err_consumers;
  3303. +
  3304. + platform_set_drvdata(pdev, data);
  3305. +
  3306. + status = acpi_install_address_space_handler(san,
  3307. + ACPI_ADR_SPACE_GSBUS,
  3308. + &san_opreg_handler,
  3309. + NULL, &data->context);
  3310. +
  3311. + if (ACPI_FAILURE(status)) {
  3312. + status = -ENODEV;
  3313. + goto err_install_handler;
  3314. + }
  3315. +
  3316. + status = san_events_register(pdev);
  3317. + if (status)
  3318. + goto err_enable_events;
  3319. +
  3320. + mutex_lock(&rqsg_if.lock);
  3321. + if (!rqsg_if.san_dev)
  3322. + rqsg_if.san_dev = &pdev->dev;
  3323. + else
  3324. + status = -EBUSY;
  3325. + mutex_unlock(&rqsg_if.lock);
  3326. +
  3327. + if (status)
  3328. + goto err_install_dev;
  3329. +
  3330. + acpi_walk_dep_device_list(san);
  3331. + return 0;
  3332. +
  3333. +err_install_dev:
  3334. + san_events_unregister(pdev);
  3335. +err_enable_events:
  3336. + acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS, &san_opreg_handler);
  3337. +err_install_handler:
  3338. + platform_set_drvdata(pdev, NULL);
  3339. + san_consumers_unlink(&data->consumers);
  3340. +err_consumers:
  3341. + kfree(data);
  3342. + return status;
  3343. +}
  3344. +
  3345. +static int surface_sam_san_remove(struct platform_device *pdev)
  3346. +{
  3347. + struct san_data *data = platform_get_drvdata(pdev);
  3348. + acpi_handle san = ACPI_HANDLE(&pdev->dev); // _SAN device node
  3350. +
  3351. + mutex_lock(&rqsg_if.lock);
  3352. + rqsg_if.san_dev = NULL;
  3353. + mutex_unlock(&rqsg_if.lock);
  3354. +
  3355. + acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS, &san_opreg_handler);
  3356. + san_events_unregister(pdev);
  3357. +
  3358. + /*
  3359. + * We have unregistered our event sources. Now we need to ensure that
  3360. + * all delayed work they may have spawned has run to completion.
  3361. + */
  3362. + flush_scheduled_work();
  3363. +
  3364. + san_consumers_unlink(&data->consumers);
  3365. + kfree(data);
  3366. +
  3367. + platform_set_drvdata(pdev, NULL);
  3368. + return 0;
  3369. +}
  3370. +
  3371. +
  3372. +static const struct san_acpi_consumer san_mshw0091_consumers[] = {
  3373. + { "\\_SB.SRTC", true, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
  3374. + { "\\ADP1", true, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
  3375. + { "\\_SB.BAT1", true, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
  3376. + { "\\_SB.BAT2", false, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
  3377. + { },
  3378. +};
  3379. +
  3380. +static const struct acpi_device_id surface_sam_san_match[] = {
  3381. + { "MSHW0091", (unsigned long) san_mshw0091_consumers },
  3382. + { },
  3383. +};
  3384. +MODULE_DEVICE_TABLE(acpi, surface_sam_san_match);
  3385. +
  3386. +static struct platform_driver surface_sam_san = {
  3387. + .probe = surface_sam_san_probe,
  3388. + .remove = surface_sam_san_remove,
  3389. + .driver = {
  3390. + .name = "surface_sam_san",
  3391. + .acpi_match_table = surface_sam_san_match,
  3392. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  3393. + },
  3394. +};
  3395. +module_platform_driver(surface_sam_san);
  3396. +
  3397. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  3398. +MODULE_DESCRIPTION("Surface ACPI Notify Driver for 5th Generation Surface Devices");
  3399. +MODULE_LICENSE("GPL");
  3400. diff --git a/drivers/platform/x86/surface_sam/surface_sam_san.h b/drivers/platform/x86/surface_sam/surface_sam_san.h
  3401. new file mode 100644
  3402. index 0000000000000..3408dde964b3c
  3403. --- /dev/null
  3404. +++ b/drivers/platform/x86/surface_sam/surface_sam_san.h
  3405. @@ -0,0 +1,30 @@
  3406. +/* SPDX-License-Identifier: GPL-2.0-or-later */
  3407. +/*
  3408. + * Interface for Surface ACPI/Notify (SAN).
  3409. + *
  3410. + * The SAN is the main interface between the Surface Serial Hub (SSH) and the
  3411. + * Surface/System Aggregator Module (SAM). It allows requests to be translated
  3412. + * from ACPI to SSH/SAM. It also interfaces with the discrete GPU hot-plug
  3413. + * driver.
  3414. + */
  3415. +
  3416. +#ifndef _SURFACE_SAM_SAN_H
  3417. +#define _SURFACE_SAM_SAN_H
  3418. +
  3419. +#include <linux/types.h>
  3420. +
  3421. +
  3422. +struct surface_sam_san_rqsg {
  3423. + u8 tc; // target category
  3424. + u8 cid; // command ID
  3425. + u8 iid; // instance ID
  3426. + u16 cdl; // command data length (length of payload)
  3427. + u8 *pld; // pointer to payload of length cdl
  3428. +};
  3429. +
  3430. +typedef int (*surface_sam_san_rqsg_handler_fn)(struct surface_sam_san_rqsg *rqsg, void *data);
  3431. +
  3432. +int surface_sam_san_consumer_register(struct device *consumer, u32 flags);
  3433. +int surface_sam_san_set_rqsg_handler(surface_sam_san_rqsg_handler_fn fn, void *data);
  3434. +
  3435. +#endif /* _SURFACE_SAM_SAN_H */
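For illustration, a minimal sketch of how a consumer of this header might use the interface declared above: a hypothetical driver (for example the discrete-GPU hot-plug driver mentioned in the file comment) registers itself against the SAN device and installs an RQSG handler. This is not part of the patch; the example_ identifiers and the flags value are placeholders, and only the two functions and the struct declared in this header are assumed.

/*
 * Editorial sketch, not part of the patch: a hypothetical SAN consumer.
 * All example_ names and the flags value are illustrative only.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

#include "surface_sam_san.h"

static int example_rqsg_handler(struct surface_sam_san_rqsg *rqsg, void *data)
{
        struct device *dev = data;

        dev_dbg(dev, "rqsg: tc=0x%02x cid=0x%02x iid=0x%02x cdl=%d\n",
                rqsg->tc, rqsg->cid, rqsg->iid, rqsg->cdl);

        /* Inspect the cdl bytes at rqsg->pld and act on the request here. */
        return 0;
}

static int example_consumer_probe(struct platform_device *pdev)
{
        int status;

        /* Ties this device to the SAN platform device via a device link. */
        status = surface_sam_san_consumer_register(&pdev->dev, 0);
        if (status)
                return status;

        return surface_sam_san_set_rqsg_handler(example_rqsg_handler, &pdev->dev);
}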
  3436. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid.c b/drivers/platform/x86/surface_sam/surface_sam_sid.c
  3437. new file mode 100644
  3438. index 0000000000000..bcf9a569ee719
  3439. --- /dev/null
  3440. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid.c
  3441. @@ -0,0 +1,283 @@
  3442. +// SPDX-License-Identifier: GPL-2.0-or-later
  3443. +/*
  3444. + * Surface Integration Driver.
  3445. + * MFD driver to provide device/model-dependent functionality.
  3446. + */
  3447. +
  3448. +#include <linux/acpi.h>
  3449. +#include <linux/kernel.h>
  3450. +#include <linux/module.h>
  3451. +#include <linux/platform_device.h>
  3452. +#include <linux/mfd/core.h>
  3453. +
  3454. +#include "surface_sam_sid_power.h"
  3455. +#include "surface_sam_sid_vhf.h"
  3456. +
  3457. +
  3458. +static const struct ssam_battery_properties ssam_battery_props_bat1 = {
  3459. + .registry = SSAM_EVENT_REGISTRY_SAM,
  3460. + .num = 0,
  3461. + .channel = 1,
  3462. + .instance = 1,
  3463. +};
  3464. +
  3465. +static const struct ssam_battery_properties ssam_battery_props_bat2_sb3 = {
  3466. + .registry = SSAM_EVENT_REGISTRY_KIP,
  3467. + .num = 1,
  3468. + .channel = 2,
  3469. + .instance = 1,
  3470. +};
  3471. +
  3472. +
  3473. +static const struct ssam_hid_properties ssam_hid_props_keyboard = {
  3474. + .registry = SSAM_EVENT_REGISTRY_REG,
  3475. + .instance = 1,
  3476. +};
  3477. +
  3478. +static const struct ssam_hid_properties ssam_hid_props_touchpad = {
  3479. + .registry = SSAM_EVENT_REGISTRY_REG,
  3480. + .instance = 3,
  3481. +};
  3482. +
  3483. +static const struct ssam_hid_properties ssam_hid_props_iid5 = {
  3484. + .registry = SSAM_EVENT_REGISTRY_REG,
  3485. + .instance = 5,
  3486. +};
  3487. +
  3488. +static const struct ssam_hid_properties ssam_hid_props_iid6 = {
  3489. + .registry = SSAM_EVENT_REGISTRY_REG,
  3490. + .instance = 6,
  3491. +};
  3492. +
  3493. +
  3494. +static const struct mfd_cell sid_devs_sp4[] = {
  3495. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3496. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3497. + { },
  3498. +};
  3499. +
  3500. +static const struct mfd_cell sid_devs_sp6[] = {
  3501. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3502. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3503. + { },
  3504. +};
  3505. +
  3506. +static const struct mfd_cell sid_devs_sp7[] = {
  3507. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3508. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3509. + { .name = "surface_sam_sid_ac", .id = -1 },
  3510. + {
  3511. + .name = "surface_sam_sid_battery",
  3512. + .id = -1,
  3513. + .platform_data = (void *)&ssam_battery_props_bat1,
  3514. + .pdata_size = sizeof(struct ssam_battery_properties),
  3515. + },
  3516. + { },
  3517. +};
  3518. +
  3519. +static const struct mfd_cell sid_devs_sb1[] = {
  3520. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3521. + { },
  3522. +};
  3523. +
  3524. +static const struct mfd_cell sid_devs_sb2[] = {
  3525. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3526. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3527. + { },
  3528. +};
  3529. +
  3530. +static const struct mfd_cell sid_devs_sb3[] = {
  3531. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3532. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3533. + { .name = "surface_sam_sid_ac", .id = -1 },
  3534. + {
  3535. + .name = "surface_sam_sid_battery",
  3536. + .id = 1,
  3537. + .platform_data = (void *)&ssam_battery_props_bat1,
  3538. + .pdata_size = sizeof(struct ssam_battery_properties),
  3539. + },
  3540. + {
  3541. + .name = "surface_sam_sid_battery",
  3542. + .id = 2,
  3543. + .platform_data = (void *)&ssam_battery_props_bat2_sb3,
  3544. + .pdata_size = sizeof(struct ssam_battery_properties),
  3545. + },
  3546. + {
  3547. + .name = "surface_sam_sid_vhf",
  3548. + .id = 1,
  3549. + .platform_data = (void *)&ssam_hid_props_keyboard,
  3550. + .pdata_size = sizeof(struct ssam_hid_properties),
  3551. + },
  3552. + {
  3553. + .name = "surface_sam_sid_vhf",
  3554. + .id = 3,
  3555. + .platform_data = (void *)&ssam_hid_props_touchpad,
  3556. + .pdata_size = sizeof(struct ssam_hid_properties),
  3557. + },
  3558. + {
  3559. + .name = "surface_sam_sid_vhf",
  3560. + .id = 5,
  3561. + .platform_data = (void *)&ssam_hid_props_iid5,
  3562. + .pdata_size = sizeof(struct ssam_hid_properties),
  3563. + },
  3564. + {
  3565. + .name = "surface_sam_sid_vhf",
  3566. + .id = 6,
  3567. + .platform_data = (void *)&ssam_hid_props_iid6,
  3568. + .pdata_size = sizeof(struct ssam_hid_properties),
  3569. + },
  3570. + { },
  3571. +};
  3572. +
  3573. +static const struct mfd_cell sid_devs_sl1[] = {
  3574. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3575. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3576. + { },
  3577. +};
  3578. +
  3579. +static const struct mfd_cell sid_devs_sl2[] = {
  3580. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3581. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3582. + { },
  3583. +};
  3584. +
  3585. +static const struct mfd_cell sid_devs_sl3_13[] = {
  3586. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3587. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3588. + { .name = "surface_sam_sid_ac", .id = -1 },
  3589. + {
  3590. + .name = "surface_sam_sid_battery",
  3591. + .id = -1,
  3592. + .platform_data = (void *)&ssam_battery_props_bat1,
  3593. + .pdata_size = sizeof(struct ssam_battery_properties),
  3594. + },
  3595. + {
  3596. + .name = "surface_sam_sid_vhf",
  3597. + .id = 1,
  3598. + .platform_data = (void *)&ssam_hid_props_keyboard,
  3599. + .pdata_size = sizeof(struct ssam_hid_properties),
  3600. + },
  3601. + {
  3602. + .name = "surface_sam_sid_vhf",
  3603. + .id = 3,
  3604. + .platform_data = (void *)&ssam_hid_props_touchpad,
  3605. + .pdata_size = sizeof(struct ssam_hid_properties),
  3606. + },
  3607. + {
  3608. + .name = "surface_sam_sid_vhf",
  3609. + .id = 5,
  3610. + .platform_data = (void *)&ssam_hid_props_iid5,
  3611. + .pdata_size = sizeof(struct ssam_hid_properties),
  3612. + },
  3613. + { },
  3614. +};
  3615. +
  3616. +static const struct mfd_cell sid_devs_sl3_15[] = {
  3617. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3618. + { .name = "surface_sam_sid_ac", .id = -1 },
  3619. + {
  3620. + .name = "surface_sam_sid_battery",
  3621. + .id = -1,
  3622. + .platform_data = (void *)&ssam_battery_props_bat1,
  3623. + .pdata_size = sizeof(struct ssam_battery_properties),
  3624. + },
  3625. + {
  3626. + .name = "surface_sam_sid_vhf",
  3627. + .id = 1,
  3628. + .platform_data = (void *)&ssam_hid_props_keyboard,
  3629. + .pdata_size = sizeof(struct ssam_hid_properties),
  3630. + },
  3631. + {
  3632. + .name = "surface_sam_sid_vhf",
  3633. + .id = 3,
  3634. + .platform_data = (void *)&ssam_hid_props_touchpad,
  3635. + .pdata_size = sizeof(struct ssam_hid_properties),
  3636. + },
  3637. + {
  3638. + .name = "surface_sam_sid_vhf",
  3639. + .id = 5,
  3640. + .platform_data = (void *)&ssam_hid_props_iid5,
  3641. + .pdata_size = sizeof(struct ssam_hid_properties),
  3642. + },
  3643. + { },
  3644. +};
  3645. +
  3646. +static const struct acpi_device_id surface_sam_sid_match[] = {
  3647. + /* Surface Pro 4, 5, and 6 */
  3648. + { "MSHW0081", (unsigned long)sid_devs_sp4 },
  3649. +
  3650. + /* Surface Pro 6 (OMBR >= 0x10) */
  3651. + { "MSHW0111", (unsigned long)sid_devs_sp6 },
  3652. +
  3653. + /* Surface Pro 7 */
  3654. + { "MSHW0116", (unsigned long)sid_devs_sp7 },
  3655. +
  3656. + /* Surface Book 1 */
  3657. + { "MSHW0080", (unsigned long)sid_devs_sb1 },
  3658. +
  3659. + /* Surface Book 2 */
  3660. + { "MSHW0107", (unsigned long)sid_devs_sb2 },
  3661. +
  3662. + /* Surface Book 3 */
  3663. + { "MSHW0117", (unsigned long)sid_devs_sb3 },
  3664. +
  3665. + /* Surface Laptop 1 */
  3666. + { "MSHW0086", (unsigned long)sid_devs_sl1 },
  3667. +
  3668. + /* Surface Laptop 2 */
  3669. + { "MSHW0112", (unsigned long)sid_devs_sl2 },
  3670. +
  3671. + /* Surface Laptop 3 (13") */
  3672. + { "MSHW0114", (unsigned long)sid_devs_sl3_13 },
  3673. +
  3674. + /* Surface Laptop 3 (15") */
  3675. + { "MSHW0110", (unsigned long)sid_devs_sl3_15 },
  3676. +
  3677. + { },
  3678. +};
  3679. +MODULE_DEVICE_TABLE(acpi, surface_sam_sid_match);
  3680. +
  3681. +
  3682. +static int surface_sam_sid_probe(struct platform_device *pdev)
  3683. +{
  3684. + const struct acpi_device_id *match;
  3685. + const struct mfd_cell *cells, *p;
  3686. +
  3687. + match = acpi_match_device(surface_sam_sid_match, &pdev->dev);
  3688. + if (!match)
  3689. + return -ENODEV;
  3690. +
  3691. + cells = (struct mfd_cell *)match->driver_data;
  3692. + if (!cells)
  3693. + return -ENODEV;
  3694. +
  3695. + for (p = cells; p->name; ++p) {
  3696. + /* just count */
  3697. + }
  3698. +
  3699. + if (p == cells)
  3700. + return -ENODEV;
  3701. +
  3702. + return mfd_add_devices(&pdev->dev, 0, cells, p - cells, NULL, 0, NULL);
  3703. +}
  3704. +
  3705. +static int surface_sam_sid_remove(struct platform_device *pdev)
  3706. +{
  3707. + mfd_remove_devices(&pdev->dev);
  3708. + return 0;
  3709. +}
  3710. +
  3711. +static struct platform_driver surface_sam_sid = {
  3712. + .probe = surface_sam_sid_probe,
  3713. + .remove = surface_sam_sid_remove,
  3714. + .driver = {
  3715. + .name = "surface_sam_sid",
  3716. + .acpi_match_table = surface_sam_sid_match,
  3717. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  3718. + },
  3719. +};
  3720. +module_platform_driver(surface_sam_sid);
  3721. +
  3722. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  3723. +MODULE_DESCRIPTION("Surface Integration Driver for 5th Generation Surface Devices");
  3724. +MODULE_LICENSE("GPL");
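The cells above hand per-device properties to their children through mfd_cell.platform_data. As a minimal sketch (not part of the patch), a child platform driver spawned from one of these cells would typically pick the properties up in its probe routine via dev_get_platdata(); the probe function name below is hypothetical, and the real battery/HID child drivers are added elsewhere in this series.

/*
 * Editorial sketch, not part of the patch: retrieving the per-cell platform
 * data in a hypothetical child driver.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

#include "surface_sam_sid_power.h"

static int example_sid_battery_probe(struct platform_device *pdev)
{
        const struct ssam_battery_properties *p;

        /* Set via mfd_cell.platform_data / .pdata_size in the tables above. */
        p = dev_get_platdata(&pdev->dev);
        if (!p)
                return -ENODEV;

        dev_dbg(&pdev->dev, "battery: num=%d, channel=%d, instance=%d\n",
                (int)p->num, (int)p->channel, (int)p->instance);

        /* Register power supply, event notifier, etc., using p. */
        return 0;
}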
  3725. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c b/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c
  3726. new file mode 100644
  3727. index 0000000000000..f0cee43c859b4
  3728. --- /dev/null
  3729. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c
  3730. @@ -0,0 +1,232 @@
  3731. +// SPDX-License-Identifier: GPL-2.0-or-later
  3732. +/*
  3733. + * Surface Lid driver to enable wakeup from suspend via the lid.
  3734. + */
  3735. +
  3736. +#include <linux/acpi.h>
  3737. +#include <linux/dmi.h>
  3738. +#include <linux/kernel.h>
  3739. +#include <linux/module.h>
  3740. +#include <linux/platform_device.h>
  3741. +
  3742. +
  3743. +struct sid_lid_device {
  3744. + const char *acpi_path;
  3745. + const u32 gpe_number;
  3746. +};
  3747. +
  3748. +
  3749. +static const struct sid_lid_device lid_device_l17 = {
  3750. + .acpi_path = "\\_SB.LID0",
  3751. + .gpe_number = 0x17,
  3752. +};
  3753. +
  3754. +static const struct sid_lid_device lid_device_l4D = {
  3755. + .acpi_path = "\\_SB.LID0",
  3756. + .gpe_number = 0x4D,
  3757. +};
  3758. +
  3759. +static const struct sid_lid_device lid_device_l4F = {
  3760. + .acpi_path = "\\_SB.LID0",
  3761. + .gpe_number = 0x4F,
  3762. +};
  3763. +
  3764. +static const struct sid_lid_device lid_device_l57 = {
  3765. + .acpi_path = "\\_SB.LID0",
  3766. + .gpe_number = 0x57,
  3767. +};
  3768. +
  3769. +
  3770. +static const struct dmi_system_id dmi_lid_device_table[] = {
  3771. + {
  3772. + .ident = "Surface Pro 4",
  3773. + .matches = {
  3774. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3775. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
  3776. + },
  3777. + .driver_data = (void *)&lid_device_l17,
  3778. + },
  3779. + {
  3780. + .ident = "Surface Pro 5",
  3781. + .matches = {
  3782. + /* match for SKU here due to generic product name "Surface Pro" */
  3783. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3784. + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
  3785. + },
  3786. + .driver_data = (void *)&lid_device_l4F,
  3787. + },
  3788. + {
  3789. + .ident = "Surface Pro 5 (LTE)",
  3790. + .matches = {
  3791. + /* match for SKU here due to generic product name "Surface Pro" */
  3792. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3793. + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
  3794. + },
  3795. + .driver_data = (void *)&lid_device_l4F,
  3796. + },
  3797. + {
  3798. + .ident = "Surface Pro 6",
  3799. + .matches = {
  3800. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3801. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
  3802. + },
  3803. + .driver_data = (void *)&lid_device_l4F,
  3804. + },
  3805. + {
  3806. + .ident = "Surface Pro 7",
  3807. + .matches = {
  3808. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3809. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 7"),
  3810. + },
  3811. + .driver_data = (void *)&lid_device_l4D,
  3812. + },
  3813. + {
  3814. + .ident = "Surface Book 1",
  3815. + .matches = {
  3816. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3817. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
  3818. + },
  3819. + .driver_data = (void *)&lid_device_l17,
  3820. + },
  3821. + {
  3822. + .ident = "Surface Book 2",
  3823. + .matches = {
  3824. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3825. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
  3826. + },
  3827. + .driver_data = (void *)&lid_device_l17,
  3828. + },
  3829. + {
  3830. + .ident = "Surface Book 3",
  3831. + .matches = {
  3832. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3833. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 3"),
  3834. + },
  3835. + .driver_data = (void *)&lid_device_l4D,
  3836. + },
  3837. + {
  3838. + .ident = "Surface Laptop 1",
  3839. + .matches = {
  3840. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3841. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
  3842. + },
  3843. + .driver_data = (void *)&lid_device_l57,
  3844. + },
  3845. + {
  3846. + .ident = "Surface Laptop 2",
  3847. + .matches = {
  3848. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3849. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
  3850. + },
  3851. + .driver_data = (void *)&lid_device_l57,
  3852. + },
  3853. + {
  3854. + .ident = "Surface Laptop 3 (13\")",
  3855. + .matches = {
  3856. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3857. + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_3_1867:1868"),
  3858. + },
  3859. + .driver_data = (void *)&lid_device_l4D,
  3860. + },
  3861. + { }
  3862. +};
  3863. +
  3864. +
  3865. +static int sid_lid_enable_wakeup(const struct sid_lid_device *dev, bool enable)
  3866. +{
  3867. + int action = enable ? ACPI_GPE_ENABLE : ACPI_GPE_DISABLE;
  3868. + int status;
  3869. +
  3870. + status = acpi_set_gpe_wake_mask(NULL, dev->gpe_number, action);
  3871. + if (status)
  3872. + return -EFAULT;
  3873. +
  3874. + return 0;
  3875. +}
  3876. +
  3877. +
  3878. +static int surface_sam_sid_gpelid_suspend(struct device *dev)
  3879. +{
  3880. + const struct sid_lid_device *ldev;
  3881. +
  3882. + ldev = dev_get_drvdata(dev);
  3883. + return sid_lid_enable_wakeup(ldev, true);
  3884. +}
  3885. +
  3886. +static int surface_sam_sid_gpelid_resume(struct device *dev)
  3887. +{
  3888. + const struct sid_lid_device *ldev;
  3889. +
  3890. + ldev = dev_get_drvdata(dev);
  3891. + return sid_lid_enable_wakeup(ldev, false);
  3892. +}
  3893. +
  3894. +static SIMPLE_DEV_PM_OPS(surface_sam_sid_gpelid_pm,
  3895. + surface_sam_sid_gpelid_suspend,
  3896. + surface_sam_sid_gpelid_resume);
  3897. +
  3898. +
  3899. +static int surface_sam_sid_gpelid_probe(struct platform_device *pdev)
  3900. +{
  3901. + const struct dmi_system_id *match;
  3902. + struct sid_lid_device *dev;
  3903. + acpi_handle lid_handle;
  3904. + int status;
  3905. +
  3906. + match = dmi_first_match(dmi_lid_device_table);
  3907. + if (!match)
  3908. + return -ENODEV;
  3909. +
  3910. + dev = match->driver_data;
  3911. + if (!dev)
  3912. + return -ENODEV;
  3913. +
  3914. + status = acpi_get_handle(NULL, (acpi_string)dev->acpi_path, &lid_handle);
  3915. + if (status)
  3916. + return -EFAULT;
  3917. +
  3918. + status = acpi_setup_gpe_for_wake(lid_handle, NULL, dev->gpe_number);
  3919. + if (status)
  3920. + return -EFAULT;
  3921. +
  3922. + status = acpi_enable_gpe(NULL, dev->gpe_number);
  3923. + if (status)
  3924. + return -EFAULT;
  3925. +
  3926. + status = sid_lid_enable_wakeup(dev, false);
  3927. + if (status) {
  3928. + acpi_disable_gpe(NULL, dev->gpe_number);
  3929. + return status;
  3930. + }
  3931. +
  3932. + platform_set_drvdata(pdev, dev);
  3933. + return 0;
  3934. +}
  3935. +
  3936. +static int surface_sam_sid_gpelid_remove(struct platform_device *pdev)
  3937. +{
  3938. + struct sid_lid_device *dev = platform_get_drvdata(pdev);
  3939. +
  3940. + /* restore default behavior without this module */
  3941. + sid_lid_enable_wakeup(dev, false);
  3942. + acpi_disable_gpe(NULL, dev->gpe_number);
  3943. +
  3944. + platform_set_drvdata(pdev, NULL);
  3945. + return 0;
  3946. +}
  3947. +
  3948. +static struct platform_driver surface_sam_sid_gpelid = {
  3949. + .probe = surface_sam_sid_gpelid_probe,
  3950. + .remove = surface_sam_sid_gpelid_remove,
  3951. + .driver = {
  3952. + .name = "surface_sam_sid_gpelid",
  3953. + .pm = &surface_sam_sid_gpelid_pm,
  3954. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  3955. + },
  3956. +};
  3957. +module_platform_driver(surface_sam_sid_gpelid);
  3958. +
  3959. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  3960. +MODULE_DESCRIPTION("Surface Lid Driver for 5th Generation Surface Devices");
  3961. +MODULE_LICENSE("GPL");
  3962. +MODULE_ALIAS("platform:surface_sam_sid_gpelid");
  3963. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c b/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c
  3964. new file mode 100644
  3965. index 0000000000000..e0b1e42c2087f
  3966. --- /dev/null
  3967. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c
  3968. @@ -0,0 +1,214 @@
  3969. +// SPDX-License-Identifier: GPL-2.0-or-later
  3970. +/*
  3971. + * Surface Performance Mode Driver.
  3972. + * Allows changing the cooling capabilities based on user preference.
  3973. + */
  3974. +
  3975. +#include <asm/unaligned.h>
  3976. +#include <linux/kernel.h>
  3977. +#include <linux/module.h>
  3978. +#include <linux/platform_device.h>
  3979. +
  3980. +#include "surface_sam_ssh.h"
  3981. +
  3982. +
  3983. +#define SID_PARAM_PERM 0644
  3984. +
  3985. +enum sam_perf_mode {
  3986. + SAM_PERF_MODE_NORMAL = 1,
  3987. + SAM_PERF_MODE_BATTERY = 2,
  3988. + SAM_PERF_MODE_PERF1 = 3,
  3989. + SAM_PERF_MODE_PERF2 = 4,
  3990. +
  3991. + __SAM_PERF_MODE__START = 1,
  3992. + __SAM_PERF_MODE__END = 4,
  3993. +};
  3994. +
  3995. +enum sid_param_perf_mode {
  3996. + SID_PARAM_PERF_MODE_AS_IS = 0,
  3997. + SID_PARAM_PERF_MODE_NORMAL = SAM_PERF_MODE_NORMAL,
  3998. + SID_PARAM_PERF_MODE_BATTERY = SAM_PERF_MODE_BATTERY,
  3999. + SID_PARAM_PERF_MODE_PERF1 = SAM_PERF_MODE_PERF1,
  4000. + SID_PARAM_PERF_MODE_PERF2 = SAM_PERF_MODE_PERF2,
  4001. +
  4002. + __SID_PARAM_PERF_MODE__START = 0,
  4003. + __SID_PARAM_PERF_MODE__END = 4,
  4004. +};
  4005. +
  4006. +struct spm_data {
  4007. + struct ssam_controller *ctrl;
  4008. +};
  4009. +
  4010. +
  4011. +struct ssam_perf_info {
  4012. + __le32 mode;
  4013. + __le16 unknown1;
  4014. + __le16 unknown2;
  4015. +} __packed;
  4016. +
  4017. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_tmp_perf_mode_get, struct ssam_perf_info, {
  4018. + .target_category = SSAM_SSH_TC_TMP,
  4019. + .command_id = 0x02,
  4020. + .instance_id = 0x00,
  4021. + .channel = 0x01,
  4022. +});
  4023. +
  4024. +static SSAM_DEFINE_SYNC_REQUEST_W(__ssam_tmp_perf_mode_set, __le32, {
  4025. + .target_category = SSAM_SSH_TC_TMP,
  4026. + .command_id = 0x03,
  4027. + .instance_id = 0x00,
  4028. + .channel = 0x01,
  4029. +});
  4030. +
  4031. +static int ssam_tmp_perf_mode_set(struct ssam_controller *ctrl, u32 mode)
  4032. +{
  4033. + __le32 mode_le = cpu_to_le32(mode);
  4034. +
  4035. + if (mode < __SAM_PERF_MODE__START || mode > __SAM_PERF_MODE__END)
  4036. + return -EINVAL;
  4037. +
  4038. + return __ssam_tmp_perf_mode_set(ctrl, &mode_le);
  4039. +}
  4040. +
  4041. +
  4042. +static int param_perf_mode_set(const char *val, const struct kernel_param *kp)
  4043. +{
  4044. + int perf_mode;
  4045. + int status;
  4046. +
  4047. + status = kstrtoint(val, 0, &perf_mode);
  4048. + if (status)
  4049. + return status;
  4050. +
  4051. + if (perf_mode < __SID_PARAM_PERF_MODE__START || perf_mode > __SID_PARAM_PERF_MODE__END)
  4052. + return -EINVAL;
  4053. +
  4054. + return param_set_int(val, kp);
  4055. +}
  4056. +
  4057. +static const struct kernel_param_ops param_perf_mode_ops = {
  4058. + .set = param_perf_mode_set,
  4059. + .get = param_get_int,
  4060. +};
  4061. +
  4062. +static int param_perf_mode_init = SID_PARAM_PERF_MODE_AS_IS;
  4063. +static int param_perf_mode_exit = SID_PARAM_PERF_MODE_AS_IS;
  4064. +
  4065. +module_param_cb(perf_mode_init, &param_perf_mode_ops, &param_perf_mode_init, SID_PARAM_PERM);
  4066. +module_param_cb(perf_mode_exit, &param_perf_mode_ops, &param_perf_mode_exit, SID_PARAM_PERM);
  4067. +
  4068. +MODULE_PARM_DESC(perf_mode_init, "Performance-mode to be set on module initialization");
  4069. +MODULE_PARM_DESC(perf_mode_exit, "Performance-mode to be set on module exit");
  4070. +
  4071. +
  4072. +static ssize_t perf_mode_show(struct device *dev, struct device_attribute *attr, char *data)
  4073. +{
  4074. + struct spm_data *d = dev_get_drvdata(dev);
  4075. + struct ssam_perf_info info;
  4076. + int status;
  4077. +
  4078. + status = ssam_tmp_perf_mode_get(d->ctrl, &info);
  4079. + if (status) {
  4080. + dev_err(dev, "failed to get current performance mode: %d\n", status);
  4081. + return -EIO;
  4082. + }
  4083. +
  4084. + return sprintf(data, "%d\n", le32_to_cpu(info.mode));
  4085. +}
  4086. +
  4087. +static ssize_t perf_mode_store(struct device *dev, struct device_attribute *attr,
  4088. + const char *data, size_t count)
  4089. +{
  4090. + struct spm_data *d = dev_get_drvdata(dev);
  4091. + int perf_mode;
  4092. + int status;
  4093. +
  4094. + status = kstrtoint(data, 0, &perf_mode);
  4095. + if (status)
  4096. + return status;
  4097. +
  4098. + status = ssam_tmp_perf_mode_set(d->ctrl, perf_mode);
  4099. + if (status)
  4100. + return status;
  4101. +
  4102. + // TODO: Should we notify ACPI here?
  4103. + //
  4104. + // There is a _DSM call described as
  4105. + // WSID._DSM: Notify DPTF on Slider State change
  4106. + // which calls
  4107. + // ODV3 = ToInteger (Arg3)
  4108. + // Notify(IETM, 0x88)
  4109. + // IETM is an INT3400 Intel Dynamic Power Performance Management
  4110. + // device, part of the DPTF framework. From the corresponding
  4111. + // kernel driver, it looks like event 0x88 is being ignored. Also
  4112. + // it is currently unknown what the consequences of setting ODV3
  4113. + // are.
  4114. +
  4115. + return count;
  4116. +}
  4117. +
  4118. +static const DEVICE_ATTR_RW(perf_mode);
  4119. +
  4120. +
  4121. +static int surface_sam_sid_perfmode_probe(struct platform_device *pdev)
  4122. +{
  4123. + struct ssam_controller *ctrl;
  4124. + struct spm_data *data;
  4125. + int status;
  4126. +
  4127. + // link to ec
  4128. + status = ssam_client_bind(&pdev->dev, &ctrl);
  4129. + if (status)
  4130. + return status == -ENXIO ? -EPROBE_DEFER : status;
  4131. +
  4132. + data = devm_kzalloc(&pdev->dev, sizeof(struct spm_data), GFP_KERNEL);
  4133. + if (!data)
  4134. + return -ENOMEM;
  4135. +
  4136. + data->ctrl = ctrl;
  4137. + platform_set_drvdata(pdev, data);
  4138. +
  4139. + // set initial perf_mode
  4140. + if (param_perf_mode_init != SID_PARAM_PERF_MODE_AS_IS) {
  4141. + status = ssam_tmp_perf_mode_set(ctrl, param_perf_mode_init);
  4142. + if (status)
  4143. + return status;
  4144. + }
  4145. +
  4146. + // register perf_mode attribute
  4147. + status = sysfs_create_file(&pdev->dev.kobj, &dev_attr_perf_mode.attr);
  4148. + if (status)
  4149. + goto err_sysfs;
  4150. +
  4151. + return 0;
  4152. +
  4153. +err_sysfs:
  4154. + ssam_tmp_perf_mode_set(ctrl, param_perf_mode_exit);
  4155. + return status;
  4156. +}
  4157. +
  4158. +static int surface_sam_sid_perfmode_remove(struct platform_device *pdev)
  4159. +{
  4160. + struct spm_data *data = platform_get_drvdata(pdev);
  4161. +
  4162. + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_perf_mode.attr);
  4163. + ssam_tmp_perf_mode_set(data->ctrl, param_perf_mode_exit);
  4164. +
  4165. + platform_set_drvdata(pdev, NULL);
  4166. + return 0;
  4167. +}
  4168. +
  4169. +static struct platform_driver surface_sam_sid_perfmode = {
  4170. + .probe = surface_sam_sid_perfmode_probe,
  4171. + .remove = surface_sam_sid_perfmode_remove,
  4172. + .driver = {
  4173. + .name = "surface_sam_sid_perfmode",
  4174. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  4175. + },
  4176. +};
  4177. +module_platform_driver(surface_sam_sid_perfmode);
  4178. +
  4179. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  4180. +MODULE_DESCRIPTION("Surface Performance Mode Driver for 5th Generation Surface Devices");
  4181. +MODULE_LICENSE("GPL");
  4182. +MODULE_ALIAS("platform:surface_sam_sid_perfmode");
  4183. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_power.c b/drivers/platform/x86/surface_sam/surface_sam_sid_power.c
  4184. new file mode 100644
  4185. index 0000000000000..64a3d46a128cc
  4186. --- /dev/null
  4187. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_power.c
  4188. @@ -0,0 +1,1054 @@
  4189. +// SPDX-License-Identifier: GPL-2.0-or-later
  4190. +/*
  4191. + * Surface SID Battery/AC Driver.
  4192. + * Provides support for the battery and AC on 7th generation Surface devices.
  4193. + */
  4194. +
  4195. +#include <asm/unaligned.h>
  4196. +#include <linux/kernel.h>
  4197. +#include <linux/delay.h>
  4198. +#include <linux/jiffies.h>
  4199. +#include <linux/module.h>
  4200. +#include <linux/platform_device.h>
  4201. +#include <linux/power_supply.h>
  4202. +#include <linux/workqueue.h>
  4203. +
  4204. +#include "surface_sam_ssh.h"
  4205. +#include "surface_sam_sid_power.h"
  4206. +
  4207. +#define SPWR_WARN KERN_WARNING KBUILD_MODNAME ": "
  4208. +#define SPWR_DEBUG KERN_DEBUG KBUILD_MODNAME ": "
  4209. +
  4210. +
  4211. +// TODO: check BIX/BST for unknown/unsupported 0xffffffff entries
  4212. +// TODO: DPTF (/SAN notifications)?
  4213. +// TODO: other properties?
  4214. +
  4215. +
  4216. +static unsigned int cache_time = 1000;
  4217. +module_param(cache_time, uint, 0644);
  4218. +MODULE_PARM_DESC(cache_time, "battery state caching time in milliseconds [default: 1000]");
  4219. +
  4220. +#define SPWR_AC_BAT_UPDATE_DELAY msecs_to_jiffies(5000)
  4221. +
  4222. +
  4223. +/*
  4224. + * SAM Interface.
  4225. + */
  4226. +
  4227. +#define SAM_EVENT_PWR_CID_BIX 0x15
  4228. +#define SAM_EVENT_PWR_CID_BST 0x16
  4229. +#define SAM_EVENT_PWR_CID_ADAPTER 0x17
  4230. +
  4231. +#define SAM_BATTERY_STA_OK 0x0f
  4232. +#define SAM_BATTERY_STA_PRESENT 0x10
  4233. +
  4234. +#define SAM_BATTERY_STATE_DISCHARGING 0x01
  4235. +#define SAM_BATTERY_STATE_CHARGING 0x02
  4236. +#define SAM_BATTERY_STATE_CRITICAL 0x04
  4237. +
  4238. +#define SAM_BATTERY_POWER_UNIT_MA 1
  4239. +
  4240. +
  4241. +/* Equivalent to data returned in ACPI _BIX method */
  4242. +struct spwr_bix {
  4243. + u8 revision;
  4244. + __le32 power_unit;
  4245. + __le32 design_cap;
  4246. + __le32 last_full_charge_cap;
  4247. + __le32 technology;
  4248. + __le32 design_voltage;
  4249. + __le32 design_cap_warn;
  4250. + __le32 design_cap_low;
  4251. + __le32 cycle_count;
  4252. + __le32 measurement_accuracy;
  4253. + __le32 max_sampling_time;
  4254. + __le32 min_sampling_time;
  4255. + __le32 max_avg_interval;
  4256. + __le32 min_avg_interval;
  4257. + __le32 bat_cap_granularity_1;
  4258. + __le32 bat_cap_granularity_2;
  4259. + u8 model[21];
  4260. + u8 serial[11];
  4261. + u8 type[5];
  4262. + u8 oem_info[21];
  4263. +} __packed;
  4264. +
  4265. +/* Equivalent to data returned in ACPI _BST method */
  4266. +struct spwr_bst {
  4267. + __le32 state;
  4268. + __le32 present_rate;
  4269. + __le32 remaining_cap;
  4270. + __le32 present_voltage;
  4271. +} __packed;
  4272. +
  4273. +/* DPTF event payload */
  4274. +struct spwr_event_dptf {
  4275. + __le32 pmax;
  4276. + __le32 _1; /* currently unknown */
  4277. + __le32 _2; /* currently unknown */
  4278. +} __packed;
  4279. +
  4280. +
  4281. +/* Get battery status (_STA) */
  4282. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_sta, __le32, {
  4283. + .target_category = SSAM_SSH_TC_BAT,
  4284. + .command_id = 0x01,
  4285. +});
  4286. +
  4287. +/* Get battery static information (_BIX) */
  4288. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_bix, struct spwr_bix, {
  4289. + .target_category = SSAM_SSH_TC_BAT,
  4290. + .command_id = 0x02,
  4291. +});
  4292. +
  4293. +/* Get battery dynamic information (_BST) */
  4294. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_bst, struct spwr_bst, {
  4295. + .target_category = SSAM_SSH_TC_BAT,
  4296. + .command_id = 0x03,
  4297. +});
  4298. +
  4299. +/* Set battery trip point (_BTP) */
  4300. +static SSAM_DEFINE_SYNC_REQUEST_MD_W(ssam_bat_set_btp, __le32, {
  4301. + .target_category = SSAM_SSH_TC_BAT,
  4302. + .command_id = 0x04,
  4303. +});
  4304. +
  4305. +/* Get platform power source for battery (DPTF PSRC) */
  4306. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_psrc, __le32, {
  4307. + .target_category = SSAM_SSH_TC_BAT,
  4308. + .command_id = 0x0d,
  4309. +});
  4310. +
  4311. +/* Get maximum platform power for battery (DPTF PMAX) */
  4312. +__always_unused
  4313. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_pmax, __le32, {
  4314. + .target_category = SSAM_SSH_TC_BAT,
  4315. + .command_id = 0x0b,
  4316. +});
  4317. +
  4318. +/* Get adapter rating (DPTF ARTG) */
  4319. +__always_unused
  4320. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_artg, __le32, {
  4321. + .target_category = SSAM_SSH_TC_BAT,
  4322. + .command_id = 0x0f,
  4323. +});
  4324. +
  4325. +/* Unknown (DPTF PSOC) */
  4326. +__always_unused
  4327. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_psoc, __le32, {
  4328. + .target_category = SSAM_SSH_TC_BAT,
  4329. + .command_id = 0x0c,
  4330. +});
  4331. +
  4332. +/* Unknown (DPTF CHGI / INT3403 SPPC) */
  4333. +__always_unused
  4334. +static SSAM_DEFINE_SYNC_REQUEST_MD_W(ssam_bat_set_chgi, __le32, {
  4335. + .target_category = SSAM_SSH_TC_BAT,
  4336. + .command_id = 0x0e,
  4337. +});
  4338. +
  4339. +
  4340. +/*
  4341. + * Common Power-Subsystem Interface.
  4342. + */
  4343. +
  4344. +struct spwr_battery_device {
  4345. + struct platform_device *pdev;
  4346. + struct ssam_controller *ctrl;
  4347. + const struct ssam_battery_properties *p;
  4348. +
  4349. + char name[32];
  4350. + struct power_supply *psy;
  4351. + struct power_supply_desc psy_desc;
  4352. +
  4353. + struct delayed_work update_work;
  4354. +
  4355. + struct ssam_event_notifier notif;
  4356. +
  4357. + struct mutex lock;
  4358. + unsigned long timestamp;
  4359. +
  4360. + __le32 sta;
  4361. + struct spwr_bix bix;
  4362. + struct spwr_bst bst;
  4363. + u32 alarm;
  4364. +};
  4365. +
  4366. +struct spwr_ac_device {
  4367. + struct platform_device *pdev;
  4368. + struct ssam_controller *ctrl;
  4369. +
  4370. + char name[32];
  4371. + struct power_supply *psy;
  4372. + struct power_supply_desc psy_desc;
  4373. +
  4374. + struct ssam_event_notifier notif;
  4375. +
  4376. + struct mutex lock;
  4377. +
  4378. + __le32 state;
  4379. +};
  4380. +
  4381. +static enum power_supply_property spwr_ac_props[] = {
  4382. + POWER_SUPPLY_PROP_ONLINE,
  4383. +};
  4384. +
  4385. +static enum power_supply_property spwr_battery_props_chg[] = {
  4386. + POWER_SUPPLY_PROP_STATUS,
  4387. + POWER_SUPPLY_PROP_PRESENT,
  4388. + POWER_SUPPLY_PROP_TECHNOLOGY,
  4389. + POWER_SUPPLY_PROP_CYCLE_COUNT,
  4390. + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
  4391. + POWER_SUPPLY_PROP_VOLTAGE_NOW,
  4392. + POWER_SUPPLY_PROP_CURRENT_NOW,
  4393. + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
  4394. + POWER_SUPPLY_PROP_CHARGE_FULL,
  4395. + POWER_SUPPLY_PROP_CHARGE_NOW,
  4396. + POWER_SUPPLY_PROP_CAPACITY,
  4397. + POWER_SUPPLY_PROP_CAPACITY_LEVEL,
  4398. + POWER_SUPPLY_PROP_MODEL_NAME,
  4399. + POWER_SUPPLY_PROP_MANUFACTURER,
  4400. + POWER_SUPPLY_PROP_SERIAL_NUMBER,
  4401. +};
  4402. +
  4403. +static enum power_supply_property spwr_battery_props_eng[] = {
  4404. + POWER_SUPPLY_PROP_STATUS,
  4405. + POWER_SUPPLY_PROP_PRESENT,
  4406. + POWER_SUPPLY_PROP_TECHNOLOGY,
  4407. + POWER_SUPPLY_PROP_CYCLE_COUNT,
  4408. + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
  4409. + POWER_SUPPLY_PROP_VOLTAGE_NOW,
  4410. + POWER_SUPPLY_PROP_POWER_NOW,
  4411. + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
  4412. + POWER_SUPPLY_PROP_ENERGY_FULL,
  4413. + POWER_SUPPLY_PROP_ENERGY_NOW,
  4414. + POWER_SUPPLY_PROP_CAPACITY,
  4415. + POWER_SUPPLY_PROP_CAPACITY_LEVEL,
  4416. + POWER_SUPPLY_PROP_MODEL_NAME,
  4417. + POWER_SUPPLY_PROP_MANUFACTURER,
  4418. + POWER_SUPPLY_PROP_SERIAL_NUMBER,
  4419. +};
  4420. +
  4421. +
  4422. +static int spwr_battery_register(struct spwr_battery_device *bat,
  4423. + struct platform_device *pdev,
  4424. + struct ssam_controller *ctrl,
  4425. + const struct ssam_battery_properties *p);
  4426. +
  4427. +static void spwr_battery_unregister(struct spwr_battery_device *bat);
  4428. +
  4429. +
  4430. +static inline bool spwr_battery_present(struct spwr_battery_device *bat)
  4431. +{
  4432. + return le32_to_cpu(bat->sta) & SAM_BATTERY_STA_PRESENT;
  4433. +}
  4434. +
  4435. +
  4436. +static inline int spwr_battery_load_sta(struct spwr_battery_device *bat)
  4437. +{
  4438. + return ssam_bat_get_sta(bat->ctrl, bat->p->channel, bat->p->instance,
  4439. + &bat->sta);
  4440. +}
  4441. +
  4442. +static inline int spwr_battery_load_bix(struct spwr_battery_device *bat)
  4443. +{
  4444. + if (!spwr_battery_present(bat))
  4445. + return 0;
  4446. +
  4447. + return ssam_bat_get_bix(bat->ctrl, bat->p->channel, bat->p->instance,
  4448. + &bat->bix);
  4449. +}
  4450. +
  4451. +static inline int spwr_battery_load_bst(struct spwr_battery_device *bat)
  4452. +{
  4453. + if (!spwr_battery_present(bat))
  4454. + return 0;
  4455. +
  4456. + return ssam_bat_get_bst(bat->ctrl, bat->p->channel, bat->p->instance,
  4457. + &bat->bst);
  4458. +}
  4459. +
  4460. +
  4461. +static inline int spwr_battery_set_alarm_unlocked(struct spwr_battery_device *bat, u32 value)
  4462. +{
  4463. + __le32 alarm = cpu_to_le32(value);
  4464. +
  4465. + bat->alarm = value;
  4466. + return ssam_bat_set_btp(bat->ctrl, bat->p->channel, bat->p->instance,
  4467. + &alarm);
  4468. +}
  4469. +
  4470. +static inline int spwr_battery_set_alarm(struct spwr_battery_device *bat, u32 value)
  4471. +{
  4472. + int status;
  4473. +
  4474. + mutex_lock(&bat->lock);
  4475. + status = spwr_battery_set_alarm_unlocked(bat, value);
  4476. + mutex_unlock(&bat->lock);
  4477. +
  4478. + return status;
  4479. +}
  4480. +
  4481. +static inline int spwr_battery_update_bst_unlocked(struct spwr_battery_device *bat, bool cached)
  4482. +{
  4483. + unsigned long cache_deadline = bat->timestamp + msecs_to_jiffies(cache_time);
  4484. + int status;
  4485. +
  4486. + if (cached && bat->timestamp && time_is_after_jiffies(cache_deadline))
  4487. + return 0;
  4488. +
  4489. + status = spwr_battery_load_sta(bat);
  4490. + if (status)
  4491. + return status;
  4492. +
  4493. + status = spwr_battery_load_bst(bat);
  4494. + if (status)
  4495. + return status;
  4496. +
  4497. + bat->timestamp = jiffies;
  4498. + return 0;
  4499. +}
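The check above is the usual jiffies cache-window idiom: state is refreshed only once the previous update lies more than cache_time milliseconds in the past. A standalone sketch of that idiom (not part of the patch, helper name hypothetical):

/*
 * Editorial sketch, not part of the patch: the freshness test used in
 * spwr_battery_update_bst_unlocked(), isolated into a hypothetical helper.
 * A timestamp of zero is treated as "never updated".
 */
#include <linux/jiffies.h>
#include <linux/types.h>

static bool example_cache_is_fresh(unsigned long timestamp, unsigned int cache_ms)
{
        /* Fresh as long as timestamp + cache window still lies in the future. */
        return timestamp &&
               time_is_after_jiffies(timestamp + msecs_to_jiffies(cache_ms));
}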
  4500. +
  4501. +static int spwr_battery_update_bst(struct spwr_battery_device *bat, bool cached)
  4502. +{
  4503. + int status;
  4504. +
  4505. + mutex_lock(&bat->lock);
  4506. + status = spwr_battery_update_bst_unlocked(bat, cached);
  4507. + mutex_unlock(&bat->lock);
  4508. +
  4509. + return status;
  4510. +}
  4511. +
  4512. +static inline int spwr_battery_update_bix_unlocked(struct spwr_battery_device *bat)
  4513. +{
  4514. + int status;
  4515. +
  4516. + status = spwr_battery_load_sta(bat);
  4517. + if (status)
  4518. + return status;
  4519. +
  4520. + status = spwr_battery_load_bix(bat);
  4521. + if (status)
  4522. + return status;
  4523. +
  4524. + status = spwr_battery_load_bst(bat);
  4525. + if (status)
  4526. + return status;
  4527. +
  4528. + bat->timestamp = jiffies;
  4529. + return 0;
  4530. +}
  4531. +
  4532. +static int spwr_battery_update_bix(struct spwr_battery_device *bat)
  4533. +{
  4534. + int status;
  4535. +
  4536. + mutex_lock(&bat->lock);
  4537. + status = spwr_battery_update_bix_unlocked(bat);
  4538. + mutex_unlock(&bat->lock);
  4539. +
  4540. + return status;
  4541. +}
  4542. +
  4543. +static inline int spwr_ac_update_unlocked(struct spwr_ac_device *ac)
  4544. +{
  4545. + return ssam_bat_get_psrc(ac->ctrl, 0x01, 0x01, &ac->state);
  4546. +}
  4547. +
  4548. +static int spwr_ac_update(struct spwr_ac_device *ac)
  4549. +{
  4550. + int status;
  4551. +
  4552. + mutex_lock(&ac->lock);
  4553. + status = spwr_ac_update_unlocked(ac);
  4554. + mutex_unlock(&ac->lock);
  4555. +
  4556. + return status;
  4557. +}
  4558. +
  4559. +
  4560. +static int spwr_battery_recheck(struct spwr_battery_device *bat)
  4561. +{
  4562. + bool present = spwr_battery_present(bat);
  4563. + u32 unit = get_unaligned_le32(&bat->bix.power_unit);
  4564. + int status;
  4565. +
  4566. + status = spwr_battery_update_bix(bat);
  4567. + if (status)
  4568. + return status;
  4569. +
  4570. + // if battery has been attached, (re-)initialize alarm
  4571. + if (!present && spwr_battery_present(bat)) {
  4572. + u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
  4573. + status = spwr_battery_set_alarm(bat, cap_warn);
  4574. + if (status)
  4575. + return status;
  4576. + }
  4577. +
  4578. + // if the unit has changed, re-add the battery
  4579. + if (unit != get_unaligned_le32(&bat->bix.power_unit)) {
  4580. + spwr_battery_unregister(bat);
  4581. + status = spwr_battery_register(bat, bat->pdev, bat->ctrl, bat->p);
  4582. + }
  4583. +
  4584. + return status;
  4585. +}
  4586. +
  4587. +
  4588. +static inline int spwr_notify_bix(struct spwr_battery_device *bat)
  4589. +{
  4590. + int status;
  4591. +
  4592. + status = spwr_battery_recheck(bat);
  4593. + if (!status)
  4594. + power_supply_changed(bat->psy);
  4595. +
  4596. + return status;
  4597. +}
  4598. +
  4599. +static inline int spwr_notify_bst(struct spwr_battery_device *bat)
  4600. +{
  4601. + int status;
  4602. +
  4603. + status = spwr_battery_update_bst(bat, false);
  4604. + if (!status)
  4605. + power_supply_changed(bat->psy);
  4606. +
  4607. + return status;
  4608. +}
  4609. +
  4610. +static inline int spwr_notify_adapter_bat(struct spwr_battery_device *bat)
  4611. +{
  4612. + u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
  4613. + u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
  4614. +
  4615. + /*
  4616. + * Handle battery update quirk:
  4617. + * When the battery is fully charged and the adapter is plugged in or
  4618. + * removed, the EC does not send a separate event for the state
  4619. + * (charging/discharging) change. Furthermore, it may take some time until
  4620. + * the battery state itself is updated. Schedule an update to handle this.
  4621. + */
  4622. +
  4623. + if (remaining_cap >= last_full_cap)
  4624. + schedule_delayed_work(&bat->update_work, SPWR_AC_BAT_UPDATE_DELAY);
  4625. +
  4626. + return 0;
  4627. +}
  4628. +
  4629. +static inline int spwr_notify_adapter_ac(struct spwr_ac_device *ac)
  4630. +{
  4631. + int status;
  4632. +
  4633. + status = spwr_ac_update(ac);
  4634. + if (!status)
  4635. + power_supply_changed(ac->psy);
  4636. +
  4637. + return status;
  4638. +}
  4639. +
  4640. +static u32 spwr_notify_bat(struct ssam_notifier_block *nb, const struct ssam_event *event)
  4641. +{
  4642. + struct spwr_battery_device *bat = container_of(nb, struct spwr_battery_device, notif.base);
  4643. + int status;
  4644. +
  4645. + dev_dbg(&bat->pdev->dev, "power event (cid = 0x%02x, iid = %d, chn = %d)\n",
  4646. + event->command_id, event->instance_id, event->channel);
  4647. +
  4648. + // handled here because it needs to be handled for all channels/instances
  4649. + if (event->command_id == SAM_EVENT_PWR_CID_ADAPTER) {
  4650. + status = spwr_notify_adapter_bat(bat);
  4651. + return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
  4652. + }
  4653. +
  4654. + // check for the correct channel and instance ID
  4655. + if (event->channel != bat->p->channel)
  4656. + return 0;
  4657. +
  4658. + if (event->instance_id != bat->p->instance)
  4659. + return 0;
  4660. +
  4661. + switch (event->command_id) {
  4662. + case SAM_EVENT_PWR_CID_BIX:
  4663. + status = spwr_notify_bix(bat);
  4664. + break;
  4665. +
  4666. + case SAM_EVENT_PWR_CID_BST:
  4667. + status = spwr_notify_bst(bat);
  4668. + break;
  4669. +
  4670. + default:
  4671. + return 0;
  4672. + }
  4673. +
  4674. + return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
  4675. +}
  4676. +
  4677. +static u32 spwr_notify_ac(struct ssam_notifier_block *nb, const struct ssam_event *event)
  4678. +{
  4679. + struct spwr_ac_device *ac = container_of(nb, struct spwr_ac_device, notif.base);
  4680. + int status;
  4681. +
  4682. + dev_dbg(&ac->pdev->dev, "power event (cid = 0x%02x, iid = %d, chn = %d)\n",
  4683. + event->command_id, event->instance_id, event->channel);
  4684. +
  4685. + // AC has IID = 0
  4686. + if (event->instance_id != 0)
  4687. + return 0;
  4688. +
  4689. + switch (event->command_id) {
  4690. + case SAM_EVENT_PWR_CID_ADAPTER:
  4691. + status = spwr_notify_adapter_ac(ac);
  4692. + return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
  4693. +
  4694. + default:
  4695. + return 0;
  4696. + }
  4697. +}
  4698. +
  4699. +static void spwr_battery_update_bst_workfn(struct work_struct *work)
  4700. +{
  4701. + struct delayed_work *dwork = to_delayed_work(work);
  4702. + struct spwr_battery_device *bat = container_of(dwork, struct spwr_battery_device, update_work);
  4703. + int status;
  4704. +
  4705. + status = spwr_battery_update_bst(bat, false);
  4706. + if (status)
  4707. + dev_err(&bat->pdev->dev, "failed to update battery state: %d\n", status);
  4708. + else
  4709. + power_supply_changed(bat->psy);
  4711. +}
  4712. +
  4713. +
  4714. +static inline int spwr_battery_prop_status(struct spwr_battery_device *bat)
  4715. +{
  4716. + u32 state = get_unaligned_le32(&bat->bst.state);
  4717. + u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
  4718. + u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
  4719. + u32 present_rate = get_unaligned_le32(&bat->bst.present_rate);
  4720. +
  4721. + if (state & SAM_BATTERY_STATE_DISCHARGING)
  4722. + return POWER_SUPPLY_STATUS_DISCHARGING;
  4723. +
  4724. + if (state & SAM_BATTERY_STATE_CHARGING)
  4725. + return POWER_SUPPLY_STATUS_CHARGING;
  4726. +
  4727. + if (last_full_cap == remaining_cap)
  4728. + return POWER_SUPPLY_STATUS_FULL;
  4729. +
  4730. + if (present_rate == 0)
  4731. + return POWER_SUPPLY_STATUS_NOT_CHARGING;
  4732. +
  4733. + return POWER_SUPPLY_STATUS_UNKNOWN;
  4734. +}
  4735. +
  4736. +static inline int spwr_battery_prop_technology(struct spwr_battery_device *bat)
  4737. +{
  4738. + if (!strcasecmp("NiCd", bat->bix.type))
  4739. + return POWER_SUPPLY_TECHNOLOGY_NiCd;
  4740. +
  4741. + if (!strcasecmp("NiMH", bat->bix.type))
  4742. + return POWER_SUPPLY_TECHNOLOGY_NiMH;
  4743. +
  4744. + if (!strcasecmp("LION", bat->bix.type))
  4745. + return POWER_SUPPLY_TECHNOLOGY_LION;
  4746. +
  4747. + if (!strncasecmp("LI-ION", bat->bix.type, 6))
  4748. + return POWER_SUPPLY_TECHNOLOGY_LION;
  4749. +
  4750. + if (!strcasecmp("LiP", bat->bix.type))
  4751. + return POWER_SUPPLY_TECHNOLOGY_LIPO;
  4752. +
  4753. + return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
  4754. +}
  4755. +
  4756. +static inline int spwr_battery_prop_capacity(struct spwr_battery_device *bat)
  4757. +{
  4758. + u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
  4759. + u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
  4760. +
  4761. + if (remaining_cap && last_full_cap)
  4762. + return remaining_cap * 100 / last_full_cap;
  4763. + else
  4764. + return 0;
  4765. +}
  4766. +
  4767. +static inline int spwr_battery_prop_capacity_level(struct spwr_battery_device *bat)
  4768. +{
  4769. + u32 state = get_unaligned_le32(&bat->bst.state);
  4770. + u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
  4771. + u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
  4772. +
  4773. + if (state & SAM_BATTERY_STATE_CRITICAL)
  4774. + return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
  4775. +
  4776. + if (remaining_cap >= last_full_cap)
  4777. + return POWER_SUPPLY_CAPACITY_LEVEL_FULL;
  4778. +
  4779. + if (remaining_cap <= bat->alarm)
  4780. + return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
  4781. +
  4782. + return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
  4783. +}
  4784. +
  4785. +static int spwr_ac_get_property(struct power_supply *psy,
  4786. + enum power_supply_property psp,
  4787. + union power_supply_propval *val)
  4788. +{
  4789. + struct spwr_ac_device *ac = power_supply_get_drvdata(psy);
  4790. + int status;
  4791. +
  4792. + mutex_lock(&ac->lock);
  4793. +
  4794. + status = spwr_ac_update_unlocked(ac);
  4795. + if (status)
  4796. + goto out;
  4797. +
  4798. + switch (psp) {
  4799. + case POWER_SUPPLY_PROP_ONLINE:
  4800. + val->intval = le32_to_cpu(ac->state) == 1;
  4801. + break;
  4802. +
  4803. + default:
  4804. + status = -EINVAL;
  4805. + goto out;
  4806. + }
  4807. +
  4808. +out:
  4809. + mutex_unlock(&ac->lock);
  4810. + return status;
  4811. +}
  4812. +
  4813. +static int spwr_battery_get_property(struct power_supply *psy,
  4814. + enum power_supply_property psp,
  4815. + union power_supply_propval *val)
  4816. +{
  4817. + struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
  4818. + int status;
  4819. +
  4820. + mutex_lock(&bat->lock);
  4821. +
  4822. + status = spwr_battery_update_bst_unlocked(bat, true);
  4823. + if (status)
  4824. + goto out;
  4825. +
  4826. + // abort if battery is not present
  4827. + if (!spwr_battery_present(bat) && psp != POWER_SUPPLY_PROP_PRESENT) {
  4828. + status = -ENODEV;
  4829. + goto out;
  4830. + }
  4831. +
  4832. + switch (psp) {
  4833. + case POWER_SUPPLY_PROP_STATUS:
  4834. + val->intval = spwr_battery_prop_status(bat);
  4835. + break;
  4836. +
  4837. + case POWER_SUPPLY_PROP_PRESENT:
  4838. + val->intval = spwr_battery_present(bat);
  4839. + break;
  4840. +
  4841. + case POWER_SUPPLY_PROP_TECHNOLOGY:
  4842. + val->intval = spwr_battery_prop_technology(bat);
  4843. + break;
  4844. +
  4845. + case POWER_SUPPLY_PROP_CYCLE_COUNT:
  4846. + val->intval = get_unaligned_le32(&bat->bix.cycle_count);
  4847. + break;
  4848. +
  4849. + case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
  4850. + val->intval = get_unaligned_le32(&bat->bix.design_voltage)
  4851. + * 1000;
  4852. + break;
  4853. +
  4854. + case POWER_SUPPLY_PROP_VOLTAGE_NOW:
  4855. + val->intval = get_unaligned_le32(&bat->bst.present_voltage)
  4856. + * 1000;
  4857. + break;
  4858. +
  4859. + case POWER_SUPPLY_PROP_CURRENT_NOW:
  4860. + case POWER_SUPPLY_PROP_POWER_NOW:
  4861. + val->intval = get_unaligned_le32(&bat->bst.present_rate) * 1000;
  4862. + break;
  4863. +
  4864. + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
  4865. + case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
  4866. + val->intval = get_unaligned_le32(&bat->bix.design_cap) * 1000;
  4867. + break;
  4868. +
  4869. + case POWER_SUPPLY_PROP_CHARGE_FULL:
  4870. + case POWER_SUPPLY_PROP_ENERGY_FULL:
  4871. + val->intval = get_unaligned_le32(&bat->bix.last_full_charge_cap)
  4872. + * 1000;
  4873. + break;
  4874. +
  4875. + case POWER_SUPPLY_PROP_CHARGE_NOW:
  4876. + case POWER_SUPPLY_PROP_ENERGY_NOW:
  4877. + val->intval = get_unaligned_le32(&bat->bst.remaining_cap)
  4878. + * 1000;
  4879. + break;
  4880. +
  4881. + case POWER_SUPPLY_PROP_CAPACITY:
  4882. + val->intval = spwr_battery_prop_capacity(bat);
  4883. + break;
  4884. +
  4885. + case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
  4886. + val->intval = spwr_battery_prop_capacity_level(bat);
  4887. + break;
  4888. +
  4889. + case POWER_SUPPLY_PROP_MODEL_NAME:
  4890. + val->strval = bat->bix.model;
  4891. + break;
  4892. +
  4893. + case POWER_SUPPLY_PROP_MANUFACTURER:
  4894. + val->strval = bat->bix.oem_info;
  4895. + break;
  4896. +
  4897. + case POWER_SUPPLY_PROP_SERIAL_NUMBER:
  4898. + val->strval = bat->bix.serial;
  4899. + break;
  4900. +
  4901. + default:
  4902. + status = -EINVAL;
  4903. + goto out;
  4904. + }
  4905. +
  4906. +out:
  4907. + mutex_unlock(&bat->lock);
  4908. + return status;
  4909. +}
  4910. +
  4911. +
  4912. +static ssize_t spwr_battery_alarm_show(struct device *dev,
  4913. + struct device_attribute *attr,
  4914. + char *buf)
  4915. +{
  4916. + struct power_supply *psy = dev_get_drvdata(dev);
  4917. + struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
  4918. +
  4919. + return sprintf(buf, "%d\n", bat->alarm * 1000);
  4920. +}
  4921. +
  4922. +static ssize_t spwr_battery_alarm_store(struct device *dev,
  4923. + struct device_attribute *attr,
  4924. + const char *buf, size_t count)
  4925. +{
  4926. + struct power_supply *psy = dev_get_drvdata(dev);
  4927. + struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
  4928. + unsigned long value;
  4929. + int status;
  4930. +
  4931. + status = kstrtoul(buf, 0, &value);
  4932. + if (status)
  4933. + return status;
  4934. +
  4935. + if (!spwr_battery_present(bat))
  4936. + return -ENODEV;
  4937. +
  4938. + status = spwr_battery_set_alarm(bat, value / 1000);
  4939. + if (status)
  4940. + return status;
  4941. +
  4942. + return count;
  4943. +}
  4944. +
  4945. +static const struct device_attribute alarm_attr = {
  4946. + .attr = {.name = "alarm", .mode = 0644},
  4947. + .show = spwr_battery_alarm_show,
  4948. + .store = spwr_battery_alarm_store,
  4949. +};
  4950. +
  4951. +
  4952. +static int spwr_ac_register(struct spwr_ac_device *ac,
  4953. + struct platform_device *pdev,
  4954. + struct ssam_controller *ctrl)
  4955. +{
  4956. + struct power_supply_config psy_cfg = {};
  4957. + __le32 sta;
  4958. + int status;
  4959. +
  4960. + // make sure the device is there and functioning properly
  4961. + status = ssam_bat_get_sta(ctrl, 0x01, 0x01, &sta);
  4962. + if (status)
  4963. + return status;
  4964. +
  4965. + if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
  4966. + return -ENODEV;
  4967. +
  4968. + psy_cfg.drv_data = ac;
  4969. +
  4970. + ac->pdev = pdev;
  4971. + ac->ctrl = ctrl;
  4972. + mutex_init(&ac->lock);
  4973. +
  4974. + snprintf(ac->name, ARRAY_SIZE(ac->name), "ADP0");
  4975. +
  4976. + ac->psy_desc.name = ac->name;
  4977. + ac->psy_desc.type = POWER_SUPPLY_TYPE_MAINS;
  4978. + ac->psy_desc.properties = spwr_ac_props;
  4979. + ac->psy_desc.num_properties = ARRAY_SIZE(spwr_ac_props);
  4980. + ac->psy_desc.get_property = spwr_ac_get_property;
  4981. +
  4982. + ac->psy = power_supply_register(&ac->pdev->dev, &ac->psy_desc, &psy_cfg);
  4983. + if (IS_ERR(ac->psy)) {
  4984. + status = PTR_ERR(ac->psy);
  4985. + goto err_psy;
  4986. + }
  4987. +
  4988. + ac->notif.base.priority = 1;
  4989. + ac->notif.base.fn = spwr_notify_ac;
  4990. + ac->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
  4991. + ac->notif.event.id.target_category = SSAM_SSH_TC_BAT;
  4992. + ac->notif.event.id.instance = 0;
  4993. + ac->notif.event.flags = SSAM_EVENT_SEQUENCED;
  4994. +
  4995. + status = ssam_notifier_register(ctrl, &ac->notif);
  4996. + if (status)
  4997. + goto err_notif;
  4998. +
  4999. + return 0;
  5000. +
  5001. +err_notif:
  5002. + power_supply_unregister(ac->psy);
  5003. +err_psy:
  5004. + mutex_destroy(&ac->lock);
  5005. + return status;
  5006. +}
  5007. +
  5008. +static int spwr_ac_unregister(struct spwr_ac_device *ac)
  5009. +{
  5010. + ssam_notifier_unregister(ac->ctrl, &ac->notif);
  5011. + power_supply_unregister(ac->psy);
  5012. + mutex_destroy(&ac->lock);
  5013. + return 0;
  5014. +}
  5015. +
  5016. +static int spwr_battery_register(struct spwr_battery_device *bat,
  5017. + struct platform_device *pdev,
  5018. + struct ssam_controller *ctrl,
  5019. + const struct ssam_battery_properties *p)
  5020. +{
  5021. + struct power_supply_config psy_cfg = {};
  5022. + __le32 sta;
  5023. + int status;
  5024. +
  5025. + bat->pdev = pdev;
  5026. + bat->ctrl = ctrl;
  5027. + bat->p = p;
  5028. +
  5029. + // make sure the device is there and functioning properly
  5030. + status = ssam_bat_get_sta(ctrl, bat->p->channel, bat->p->instance, &sta);
  5031. + if (status)
  5032. + return status;
  5033. +
  5034. + if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
  5035. + return -ENODEV;
  5036. +
  5037. + status = spwr_battery_update_bix_unlocked(bat);
  5038. + if (status)
  5039. + return status;
  5040. +
  5041. + if (spwr_battery_present(bat)) {
  5042. + u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
  5043. + status = spwr_battery_set_alarm_unlocked(bat, cap_warn);
  5044. + if (status)
  5045. + return status;
  5046. + }
  5047. +
  5048. + snprintf(bat->name, ARRAY_SIZE(bat->name), "BAT%d", bat->p->num);
  5049. + bat->psy_desc.name = bat->name;
  5050. + bat->psy_desc.type = POWER_SUPPLY_TYPE_BATTERY;
  5051. +
  5052. + if (get_unaligned_le32(&bat->bix.power_unit) == SAM_BATTERY_POWER_UNIT_MA) {
  5053. + bat->psy_desc.properties = spwr_battery_props_chg;
  5054. + bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_chg);
  5055. + } else {
  5056. + bat->psy_desc.properties = spwr_battery_props_eng;
  5057. + bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_eng);
  5058. + }
  5059. +
  5060. + bat->psy_desc.get_property = spwr_battery_get_property;
  5061. +
  5062. + mutex_init(&bat->lock);
  5063. + psy_cfg.drv_data = bat;
  5064. +
  5065. + INIT_DELAYED_WORK(&bat->update_work, spwr_battery_update_bst_workfn);
  5066. +
  5067. + bat->psy = power_supply_register(&bat->pdev->dev, &bat->psy_desc, &psy_cfg);
  5068. + if (IS_ERR(bat->psy)) {
  5069. + status = PTR_ERR(bat->psy);
  5070. + goto err_psy;
  5071. + }
  5072. +
  5073. + bat->notif.base.priority = 1;
  5074. + bat->notif.base.fn = spwr_notify_bat;
  5075. + bat->notif.event.reg = p->registry;
  5076. + bat->notif.event.id.target_category = SSAM_SSH_TC_BAT;
  5077. + bat->notif.event.id.instance = 0;
  5078. + bat->notif.event.flags = SSAM_EVENT_SEQUENCED;
  5079. +
  5080. + status = ssam_notifier_register(ctrl, &bat->notif);
  5081. + if (status)
  5082. + goto err_notif;
  5083. +
  5084. + status = device_create_file(&bat->psy->dev, &alarm_attr);
  5085. + if (status)
  5086. + goto err_file;
  5087. +
  5088. + return 0;
  5089. +
  5090. +err_file:
  5091. + ssam_notifier_unregister(ctrl, &bat->notif);
  5092. +err_notif:
  5093. + power_supply_unregister(bat->psy);
  5094. +err_psy:
  5095. + mutex_destroy(&bat->lock);
  5096. + return status;
  5097. +}
  5098. +
  5099. +static void spwr_battery_unregister(struct spwr_battery_device *bat)
  5100. +{
  5101. + ssam_notifier_unregister(bat->ctrl, &bat->notif);
  5102. + cancel_delayed_work_sync(&bat->update_work);
  5103. + device_remove_file(&bat->psy->dev, &alarm_attr);
  5104. + power_supply_unregister(bat->psy);
  5105. + mutex_destroy(&bat->lock);
  5106. +}
  5107. +
  5108. +
  5109. +/*
  5110. + * Battery Driver.
  5111. + */
  5112. +
  5113. +#ifdef CONFIG_PM_SLEEP
  5114. +static int surface_sam_sid_battery_resume(struct device *dev)
  5115. +{
  5116. + struct spwr_battery_device *bat;
  5117. +
  5118. + bat = dev_get_drvdata(dev);
  5119. + return spwr_battery_recheck(bat);
  5120. +}
  5121. +#else
  5122. +#define surface_sam_sid_battery_resume NULL
  5123. +#endif
  5124. +
  5125. +SIMPLE_DEV_PM_OPS(surface_sam_sid_battery_pm, NULL, surface_sam_sid_battery_resume);
  5126. +
  5127. +static int surface_sam_sid_battery_probe(struct platform_device *pdev)
  5128. +{
  5129. + struct spwr_battery_device *bat;
  5130. + struct ssam_controller *ctrl;
  5131. + int status;
  5132. +
  5133. + // link to ec
  5134. + status = ssam_client_bind(&pdev->dev, &ctrl);
  5135. + if (status)
  5136. + return status == -ENXIO ? -EPROBE_DEFER : status;
  5137. +
  5138. + bat = devm_kzalloc(&pdev->dev, sizeof(struct spwr_battery_device), GFP_KERNEL);
  5139. + if (!bat)
  5140. + return -ENOMEM;
  5141. +
  5142. + platform_set_drvdata(pdev, bat);
  5143. + return spwr_battery_register(bat, pdev, ctrl, pdev->dev.platform_data);
  5144. +}
  5145. +
  5146. +static int surface_sam_sid_battery_remove(struct platform_device *pdev)
  5147. +{
  5148. + struct spwr_battery_device *bat;
  5149. +
  5150. + bat = platform_get_drvdata(pdev);
  5151. + spwr_battery_unregister(bat);
  5152. +
  5153. + return 0;
  5154. +}
  5155. +
  5156. +static struct platform_driver surface_sam_sid_battery = {
  5157. + .probe = surface_sam_sid_battery_probe,
  5158. + .remove = surface_sam_sid_battery_remove,
  5159. + .driver = {
  5160. + .name = "surface_sam_sid_battery",
  5161. + .pm = &surface_sam_sid_battery_pm,
  5162. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  5163. + },
  5164. +};
  5165. +
  5166. +
  5167. +/*
  5168. + * AC Driver.
  5169. + */
  5170. +
  5171. +static int surface_sam_sid_ac_probe(struct platform_device *pdev)
  5172. +{
  5173. + struct spwr_ac_device *ac;
  5174. + struct ssam_controller *ctrl;
  5175. + int status;
  5176. +
  5177. + // link to ec
  5178. + status = ssam_client_bind(&pdev->dev, &ctrl);
  5179. + if (status)
  5180. + return status == -ENXIO ? -EPROBE_DEFER : status;
  5181. +
  5182. + ac = devm_kzalloc(&pdev->dev, sizeof(struct spwr_ac_device), GFP_KERNEL);
  5183. + if (!ac)
  5184. + return -ENOMEM;
  5185. +
  5186. + status = spwr_ac_register(ac, pdev, ctrl);
  5187. + if (status)
  5188. + return status;
  5189. +
  5190. + platform_set_drvdata(pdev, ac);
  5191. + return 0;
  5192. +}
  5193. +
  5194. +static int surface_sam_sid_ac_remove(struct platform_device *pdev)
  5195. +{
  5196. + struct spwr_ac_device *ac;
  5197. +
  5198. + ac = platform_get_drvdata(pdev);
  5199. + return spwr_ac_unregister(ac);
  5200. +}
  5201. +
  5202. +static struct platform_driver surface_sam_sid_ac = {
  5203. + .probe = surface_sam_sid_ac_probe,
  5204. + .remove = surface_sam_sid_ac_remove,
  5205. + .driver = {
  5206. + .name = "surface_sam_sid_ac",
  5207. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  5208. + },
  5209. +};
  5210. +
  5211. +
  5212. +static int __init surface_sam_sid_power_init(void)
  5213. +{
  5214. + int status;
  5215. +
  5216. + status = platform_driver_register(&surface_sam_sid_battery);
  5217. + if (status)
  5218. + return status;
  5219. +
  5220. + status = platform_driver_register(&surface_sam_sid_ac);
  5221. + if (status) {
  5222. + platform_driver_unregister(&surface_sam_sid_battery);
  5223. + return status;
  5224. + }
  5225. +
  5226. + return 0;
  5227. +}
  5228. +
  5229. +static void __exit surface_sam_sid_power_exit(void)
  5230. +{
  5231. + platform_driver_unregister(&surface_sam_sid_battery);
  5232. + platform_driver_unregister(&surface_sam_sid_ac);
  5233. +}
  5234. +
  5235. +module_init(surface_sam_sid_power_init);
  5236. +module_exit(surface_sam_sid_power_exit);
  5237. +
  5238. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  5239. +MODULE_DESCRIPTION("Surface Battery/AC Driver for 7th Generation Surface Devices");
  5240. +MODULE_LICENSE("GPL");
  5241. +MODULE_ALIAS("platform:surface_sam_sid_ac");
  5242. +MODULE_ALIAS("platform:surface_sam_sid_battery");
  5243. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_power.h b/drivers/platform/x86/surface_sam/surface_sam_sid_power.h
  5244. new file mode 100644
  5245. index 0000000000000..d8d9509b7d122
  5246. --- /dev/null
  5247. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_power.h
  5248. @@ -0,0 +1,16 @@
  5249. +
  5250. +#ifndef _SURFACE_SAM_SID_POWER_H
  5251. +#define _SURFACE_SAM_SID_POWER_H
  5252. +
  5253. +#include <linux/types.h>
  5254. +#include "surface_sam_ssh.h"
  5255. +
  5256. +
  5257. +struct ssam_battery_properties {
  5258. + struct ssam_event_registry registry;
  5259. + u8 num;
  5260. + u8 channel;
  5261. + u8 instance;
  5262. +};
  5263. +
  5264. +#endif /* _SURFACE_SAM_SID_POWER_H */
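For illustration only, and not part of the patch: a hypothetical initializer for the platform data consumed by spwr_battery_register() above. All field values are made up; SSAM_EVENT_REGISTRY_SAM is the registry already used by the AC notifier in this patch.

/* Hypothetical example; all values are placeholders. */
static struct ssam_battery_properties example_bat_props;

static void example_init_bat_props(void)
{
	example_bat_props.registry = SSAM_EVENT_REGISTRY_SAM;
	example_bat_props.num      = 1;    /* results in power supply name "BAT1" */
	example_bat_props.channel  = 0x02; /* EC channel, example value */
	example_bat_props.instance = 0x01; /* EC instance, example value */
}

The battery driver then picks this structure up via pdev->dev.platform_data in surface_sam_sid_battery_probe().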
  5265. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c
  5266. new file mode 100644
  5267. index 0000000000000..a6059d6796619
  5268. --- /dev/null
  5269. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c
  5270. @@ -0,0 +1,429 @@
  5271. +// SPDX-License-Identifier: GPL-2.0-or-later
  5272. +/*
5273. + * Microsoft Surface HID (VHF) driver for HID input events via SAM.
  5274. + * Used for keyboard input events on the 7th generation Surface Laptops.
  5275. + */
  5276. +
  5277. +#include <linux/acpi.h>
  5278. +#include <linux/hid.h>
  5279. +#include <linux/input.h>
  5280. +#include <linux/platform_device.h>
  5281. +#include <linux/types.h>
  5282. +
  5283. +#include "surface_sam_ssh.h"
  5284. +#include "surface_sam_sid_vhf.h"
  5285. +
  5286. +#define SID_VHF_INPUT_NAME "Microsoft Surface HID"
  5287. +
  5288. +#define SAM_EVENT_SID_VHF_TC 0x15
  5289. +
  5290. +#define VHF_HID_STARTED 0
  5291. +
  5292. +struct sid_vhf {
  5293. + struct platform_device *dev;
  5294. + struct ssam_controller *ctrl;
  5295. + const struct ssam_hid_properties *p;
  5296. +
  5297. + struct ssam_event_notifier notif;
  5298. +
  5299. + struct hid_device *hid;
  5300. + unsigned long state;
  5301. +};
  5302. +
  5303. +
  5304. +static int sid_vhf_hid_start(struct hid_device *hid)
  5305. +{
  5306. + hid_dbg(hid, "%s\n", __func__);
  5307. + return 0;
  5308. +}
  5309. +
  5310. +static void sid_vhf_hid_stop(struct hid_device *hid)
  5311. +{
  5312. + hid_dbg(hid, "%s\n", __func__);
  5313. +}
  5314. +
  5315. +static int sid_vhf_hid_open(struct hid_device *hid)
  5316. +{
  5317. + struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
  5318. +
  5319. + hid_dbg(hid, "%s\n", __func__);
  5320. +
  5321. + set_bit(VHF_HID_STARTED, &vhf->state);
  5322. + return 0;
  5323. +}
  5324. +
  5325. +static void sid_vhf_hid_close(struct hid_device *hid)
  5326. +{
  5327. +
  5328. + struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
  5329. +
  5330. + hid_dbg(hid, "%s\n", __func__);
  5331. +
  5332. + clear_bit(VHF_HID_STARTED, &vhf->state);
  5333. +}
  5334. +
  5335. +struct surface_sam_sid_vhf_meta_rqst {
  5336. + u8 id;
  5337. + u32 offset;
  5338. + u32 length; // buffer limit on send, length of data received on receive
  5339. + u8 end; // 0x01 if end was reached
  5340. +} __packed;
  5341. +
  5342. +struct vhf_device_metadata_info {
  5343. + u8 len;
  5344. + u8 _2;
  5345. + u8 _3;
  5346. + u8 _4;
  5347. + u8 _5;
  5348. + u8 _6;
  5349. + u8 _7;
  5350. + u16 hid_len; // hid descriptor length
  5351. +} __packed;
  5352. +
  5353. +struct vhf_device_metadata {
  5354. + u32 len;
  5355. + u16 vendor_id;
  5356. + u16 product_id;
  5357. + u8 _1[24];
  5358. +} __packed;
  5359. +
  5360. +union vhf_buffer_data {
  5361. + struct vhf_device_metadata_info info;
  5362. + u8 pld[0x76];
  5363. + struct vhf_device_metadata meta;
  5364. +};
  5365. +
  5366. +struct surface_sam_sid_vhf_meta_resp {
  5367. + struct surface_sam_sid_vhf_meta_rqst rqst;
  5368. + union vhf_buffer_data data;
  5369. +} __packed;
  5370. +
  5371. +
  5372. +static int vhf_get_metadata(struct ssam_controller *ctrl, u8 iid,
  5373. + struct vhf_device_metadata *meta)
  5374. +{
  5375. + struct surface_sam_sid_vhf_meta_resp data = {};
  5376. + struct ssam_request rqst;
  5377. + struct ssam_response rsp;
  5378. + int status;
  5379. +
  5380. + data.rqst.id = 2;
  5381. + data.rqst.offset = 0;
  5382. + data.rqst.length = 0x76;
  5383. + data.rqst.end = 0;
  5384. +
5385. + rqst.target_category = SSAM_SSH_TC_HID;
  5386. + rqst.command_id = 0x04;
  5387. + rqst.instance_id = iid;
  5388. + rqst.channel = 0x02;
  5389. + rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
  5390. + rqst.length = sizeof(struct surface_sam_sid_vhf_meta_rqst);
  5391. + rqst.payload = (u8 *)&data.rqst;
  5392. +
  5393. + rsp.capacity = sizeof(struct surface_sam_sid_vhf_meta_resp);
  5394. + rsp.length = 0;
  5395. + rsp.pointer = (u8 *)&data;
  5396. +
  5397. + status = ssam_request_sync(ctrl, &rqst, &rsp);
  5398. + if (status)
  5399. + return status;
  5400. +
  5401. + *meta = data.data.meta;
  5402. +
  5403. + return 0;
  5404. +}
  5405. +
  5406. +static int vhf_get_hid_descriptor(struct hid_device *hid, u8 iid, u8 **desc, int *size)
  5407. +{
  5408. + struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
  5409. + struct surface_sam_sid_vhf_meta_resp data = {};
  5410. + struct ssam_request rqst;
  5411. + struct ssam_response rsp;
  5412. + int status, len;
  5413. + u8 *buf;
  5414. +
  5415. + data.rqst.id = 0;
  5416. + data.rqst.offset = 0;
  5417. + data.rqst.length = 0x76;
  5418. + data.rqst.end = 0;
  5419. +
  5420. + rqst.target_category = SSAM_SSH_TC_HID;
  5421. + rqst.command_id = 0x04;
  5422. + rqst.instance_id = iid;
  5423. + rqst.channel = 0x02;
  5424. + rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
  5425. + rqst.length = sizeof(struct surface_sam_sid_vhf_meta_rqst);
  5426. + rqst.payload = (u8 *)&data.rqst;
  5427. +
  5428. + rsp.capacity = sizeof(struct surface_sam_sid_vhf_meta_resp);
  5429. + rsp.length = 0;
  5430. + rsp.pointer = (u8 *)&data;
  5431. +
  5432. + // first fetch 00 to get the total length
  5433. + status = ssam_request_sync(vhf->ctrl, &rqst, &rsp);
  5434. + if (status)
  5435. + return status;
  5436. +
  5437. + len = data.data.info.hid_len;
  5438. +
  5439. + // allocate a buffer for the descriptor
5440. + buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
5441. +
  5442. + // then, iterate and write into buffer, copying out bytes
  5443. + data.rqst.id = 1;
  5444. + data.rqst.offset = 0;
  5445. + data.rqst.length = 0x76;
  5446. + data.rqst.end = 0;
  5447. +
  5448. + while (!data.rqst.end && data.rqst.offset < len) {
  5449. + status = ssam_request_sync(vhf->ctrl, &rqst, &rsp);
  5450. + if (status) {
  5451. + kfree(buf);
  5452. + return status;
  5453. + }
  5454. + memcpy(buf + data.rqst.offset, data.data.pld, data.rqst.length);
  5455. +
  5456. + data.rqst.offset += data.rqst.length;
  5457. + }
  5458. +
  5459. + *desc = buf;
  5460. + *size = len;
  5461. +
  5462. + return 0;
  5463. +}
  5464. +
  5465. +static int sid_vhf_hid_parse(struct hid_device *hid)
  5466. +{
  5467. + struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
  5468. + int ret = 0, size;
  5469. + u8 *buf;
  5470. +
  5471. + ret = vhf_get_hid_descriptor(hid, vhf->p->instance, &buf, &size);
  5472. + if (ret != 0) {
  5473. + hid_err(hid, "Failed to read HID descriptor from device: %d\n", ret);
  5474. + return -EIO;
  5475. + }
  5476. + hid_dbg(hid, "HID descriptor of device:");
  5477. + print_hex_dump_debug("descriptor:", DUMP_PREFIX_OFFSET, 16, 1, buf, size, false);
  5478. +
  5479. + ret = hid_parse_report(hid, buf, size);
  5480. + kfree(buf);
  5481. + return ret;
  5482. +
  5483. +}
  5484. +
  5485. +static int sid_vhf_hid_raw_request(struct hid_device *hid, unsigned char
  5486. + reportnum, u8 *buf, size_t len, unsigned char rtype, int
  5487. + reqtype)
  5488. +{
  5489. + struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
  5490. + struct ssam_request rqst;
  5491. + struct ssam_response rsp;
  5492. + int status;
  5493. + u8 cid;
  5494. +
  5495. + hid_dbg(hid, "%s: reportnum=%#04x rtype=%i reqtype=%i\n", __func__, reportnum, rtype, reqtype);
  5496. + print_hex_dump_debug("report:", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
  5497. +
  5498. + // Byte 0 is the report number. Report data starts at byte 1.
  5499. + buf[0] = reportnum;
  5500. +
  5501. + switch (rtype) {
  5502. + case HID_OUTPUT_REPORT:
  5503. + cid = 0x01;
  5504. + break;
  5505. + case HID_FEATURE_REPORT:
  5506. + switch (reqtype) {
  5507. + case HID_REQ_GET_REPORT:
5508. + // The EC doesn't respond to GET FEATURE requests for these touchpad
5509. + // reports, so discard them immediately to avoid waiting for a timeout.
  5510. + if (reportnum == 6 || reportnum == 7 || reportnum == 8 || reportnum == 9 || reportnum == 0x0b) {
  5511. + hid_dbg(hid, "%s: skipping get feature report for 0x%02x\n", __func__, reportnum);
  5512. + return 0;
  5513. + }
  5514. +
  5515. + cid = 0x02;
  5516. + break;
  5517. + case HID_REQ_SET_REPORT:
  5518. + cid = 0x03;
  5519. + break;
  5520. + default:
5521. + hid_err(hid, "%s: unknown request type 0x%02x\n", __func__, reqtype);
  5522. + return -EIO;
  5523. + }
  5524. + break;
  5525. + default:
5526. + hid_err(hid, "%s: unknown report type 0x%02x\n", __func__, rtype);
  5527. + return -EIO;
  5528. + }
  5529. +
  5530. + rqst.target_category = SSAM_SSH_TC_HID;
  5531. + rqst.channel = 0x02;
  5532. + rqst.instance_id = vhf->p->instance;
  5533. + rqst.command_id = cid;
  5534. + rqst.flags = reqtype == HID_REQ_GET_REPORT ? SSAM_REQUEST_HAS_RESPONSE : 0;
  5535. + rqst.length = reqtype == HID_REQ_GET_REPORT ? 1 : len;
  5536. + rqst.payload = buf;
  5537. +
  5538. + rsp.capacity = len;
  5539. + rsp.length = 0;
  5540. + rsp.pointer = buf;
  5541. +
  5542. + hid_dbg(hid, "%s: sending to cid=%#04x snc=%#04x\n", __func__, cid, HID_REQ_GET_REPORT == reqtype);
  5543. +
  5544. + status = ssam_request_sync(vhf->ctrl, &rqst, &rsp);
  5545. + hid_dbg(hid, "%s: status %i\n", __func__, status);
  5546. +
  5547. + if (status)
  5548. + return status;
  5549. +
  5550. + if (rsp.length > 0)
  5551. + print_hex_dump_debug("response:", DUMP_PREFIX_OFFSET, 16, 1, rsp.pointer, rsp.length, false);
  5552. +
  5553. + return rsp.length;
  5554. +}
  5555. +
  5556. +static struct hid_ll_driver sid_vhf_hid_ll_driver = {
  5557. + .start = sid_vhf_hid_start,
  5558. + .stop = sid_vhf_hid_stop,
  5559. + .open = sid_vhf_hid_open,
  5560. + .close = sid_vhf_hid_close,
  5561. + .parse = sid_vhf_hid_parse,
  5562. + .raw_request = sid_vhf_hid_raw_request,
  5563. +};
  5564. +
  5565. +
  5566. +static struct hid_device *sid_vhf_create_hid_device(struct platform_device *pdev, struct vhf_device_metadata *meta)
  5567. +{
  5568. + struct hid_device *hid;
  5569. +
  5570. + hid = hid_allocate_device();
  5571. + if (IS_ERR(hid))
  5572. + return hid;
  5573. +
  5574. + hid->dev.parent = &pdev->dev;
  5575. +
  5576. + hid->bus = BUS_VIRTUAL;
  5577. + hid->vendor = meta->vendor_id;
  5578. + hid->product = meta->product_id;
  5579. +
  5580. + hid->ll_driver = &sid_vhf_hid_ll_driver;
  5581. +
  5582. + sprintf(hid->name, "%s", SID_VHF_INPUT_NAME);
  5583. +
  5584. + return hid;
  5585. +}
  5586. +
  5587. +static u32 sid_vhf_event_handler(struct ssam_notifier_block *nb, const struct ssam_event *event)
  5588. +{
  5589. + struct sid_vhf *vhf = container_of(nb, struct sid_vhf, notif.base);
  5590. + int status;
  5591. +
  5592. + if (event->target_category != SSAM_SSH_TC_HID)
  5593. + return 0;
  5594. +
  5595. + if (event->channel != 0x02)
  5596. + return 0;
  5597. +
  5598. + if (event->instance_id != vhf->p->instance)
  5599. + return 0;
  5600. +
  5601. + if (event->command_id != 0x00 && event->command_id != 0x03 && event->command_id != 0x04)
  5602. + return 0;
  5603. +
  5604. + // skip if HID hasn't started yet
  5605. + if (!test_bit(VHF_HID_STARTED, &vhf->state))
  5606. + return SSAM_NOTIF_HANDLED;
  5607. +
  5608. + status = hid_input_report(vhf->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0);
  5609. + return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
  5610. +}
  5611. +
  5612. +static int surface_sam_sid_vhf_probe(struct platform_device *pdev)
  5613. +{
  5614. + const struct ssam_hid_properties *p = pdev->dev.platform_data;
  5615. + struct ssam_controller *ctrl;
  5616. + struct sid_vhf *vhf;
  5617. + struct vhf_device_metadata meta = {};
  5618. + struct hid_device *hid;
  5619. + int status;
  5620. +
  5621. + // add device link to EC
  5622. + status = ssam_client_bind(&pdev->dev, &ctrl);
  5623. + if (status)
  5624. + return status == -ENXIO ? -EPROBE_DEFER : status;
  5625. +
  5626. + vhf = kzalloc(sizeof(struct sid_vhf), GFP_KERNEL);
  5627. + if (!vhf)
  5628. + return -ENOMEM;
  5629. +
  5630. + status = vhf_get_metadata(ctrl, p->instance, &meta);
  5631. + if (status)
  5632. + goto err_create_hid;
  5633. +
  5634. + hid = sid_vhf_create_hid_device(pdev, &meta);
  5635. + if (IS_ERR(hid)) {
  5636. + status = PTR_ERR(hid);
  5637. + goto err_create_hid;
  5638. + }
  5639. +
  5640. + vhf->dev = pdev;
  5641. + vhf->ctrl = ctrl;
  5642. + vhf->p = pdev->dev.platform_data;
  5643. + vhf->hid = hid;
  5644. +
  5645. + vhf->notif.base.priority = 1;
  5646. + vhf->notif.base.fn = sid_vhf_event_handler;
  5647. + vhf->notif.event.reg = p->registry;
  5648. + vhf->notif.event.id.target_category = SSAM_SSH_TC_HID;
  5649. + vhf->notif.event.id.instance = p->instance;
  5650. + vhf->notif.event.flags = 0;
  5651. +
  5652. + platform_set_drvdata(pdev, vhf);
  5653. +
  5654. + status = ssam_notifier_register(ctrl, &vhf->notif);
  5655. + if (status)
  5656. + goto err_notif;
  5657. +
  5658. + status = hid_add_device(hid);
  5659. + if (status)
  5660. + goto err_add_hid;
  5661. +
  5662. + return 0;
  5663. +
  5664. +err_add_hid:
  5665. + ssam_notifier_unregister(ctrl, &vhf->notif);
  5666. +err_notif:
  5667. + hid_destroy_device(hid);
  5668. + platform_set_drvdata(pdev, NULL);
  5669. +err_create_hid:
  5670. + kfree(vhf);
  5671. + return status;
  5672. +}
  5673. +
  5674. +static int surface_sam_sid_vhf_remove(struct platform_device *pdev)
  5675. +{
  5676. + struct sid_vhf *vhf = platform_get_drvdata(pdev);
  5677. +
  5678. + ssam_notifier_unregister(vhf->ctrl, &vhf->notif);
  5679. + hid_destroy_device(vhf->hid);
  5680. + kfree(vhf);
  5681. +
  5682. + platform_set_drvdata(pdev, NULL);
  5683. + return 0;
  5684. +}
  5685. +
  5686. +static struct platform_driver surface_sam_sid_vhf = {
  5687. + .probe = surface_sam_sid_vhf_probe,
  5688. + .remove = surface_sam_sid_vhf_remove,
  5689. + .driver = {
  5690. + .name = "surface_sam_sid_vhf",
  5691. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  5692. + },
  5693. +};
  5694. +module_platform_driver(surface_sam_sid_vhf);
  5695. +
  5696. +MODULE_AUTHOR("Blaž Hrastnik <blaz@mxxn.io>");
  5697. +MODULE_DESCRIPTION("Driver for HID devices connected via Surface SAM");
  5698. +MODULE_LICENSE("GPL");
  5699. +MODULE_ALIAS("platform:surface_sam_sid_vhf");
  5700. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h
  5701. new file mode 100644
  5702. index 0000000000000..d956de5cf877a
  5703. --- /dev/null
  5704. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h
  5705. @@ -0,0 +1,14 @@
  5706. +
  5707. +#ifndef _SURFACE_SAM_SID_VHF_H
  5708. +#define _SURFACE_SAM_SID_VHF_H
  5709. +
  5710. +#include <linux/types.h>
  5711. +#include "surface_sam_ssh.h"
  5712. +
  5713. +
  5714. +struct ssam_hid_properties {
  5715. + struct ssam_event_registry registry;
  5716. + u8 instance;
  5717. +};
  5718. +
  5719. +#endif /* _SURFACE_SAM_SID_VHF_H */
  5720. diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh.c b/drivers/platform/x86/surface_sam/surface_sam_ssh.c
  5721. new file mode 100644
  5722. index 0000000000000..4551b75570f22
  5723. --- /dev/null
  5724. +++ b/drivers/platform/x86/surface_sam/surface_sam_ssh.c
  5725. @@ -0,0 +1,5329 @@
  5726. +// SPDX-License-Identifier: GPL-2.0-or-later
  5727. +/*
  5728. + * Surface Serial Hub (SSH) driver for communication with the Surface/System
  5729. + * Aggregator Module.
  5730. + */
  5731. +
  5732. +#include <asm/unaligned.h>
  5733. +#include <linux/acpi.h>
  5734. +#include <linux/atomic.h>
  5735. +#include <linux/completion.h>
  5736. +#include <linux/crc-ccitt.h>
  5737. +#include <linux/dmaengine.h>
  5738. +#include <linux/gpio/consumer.h>
  5739. +#include <linux/interrupt.h>
  5740. +#include <linux/jiffies.h>
  5741. +#include <linux/kernel.h>
  5742. +#include <linux/kfifo.h>
  5743. +#include <linux/kref.h>
  5744. +#include <linux/kthread.h>
  5745. +#include <linux/ktime.h>
  5746. +#include <linux/list.h>
  5747. +#include <linux/mutex.h>
  5748. +#include <linux/pm.h>
  5749. +#include <linux/refcount.h>
  5750. +#include <linux/serdev.h>
  5751. +#include <linux/spinlock.h>
  5752. +#include <linux/sysfs.h>
  5753. +#include <linux/workqueue.h>
  5754. +
  5755. +#include "surface_sam_ssh.h"
  5756. +
  5757. +#define CREATE_TRACE_POINTS
  5758. +#include "surface_sam_ssh_trace.h"
  5759. +
  5760. +
  5761. +/* -- Error injection helpers. ---------------------------------------------- */
  5762. +
  5763. +#ifdef CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION
  5764. +#define noinline_if_inject noinline
  5765. +#else /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */
  5766. +#define noinline_if_inject inline
  5767. +#endif /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */
  5768. +
  5769. +
  5770. +/* -- SSH protocol utility functions and definitions. ----------------------- */
  5771. +
  5772. +/*
  5773. + * The number of reserved event IDs, used for registering an SSH event
  5774. + * handler. Valid event IDs are numbers below or equal to this value, with
  5775. + * exception of zero, which is not an event ID. Thus, this is also the
  5776. + * absolute maximum number of event handlers that can be registered.
  5777. + */
  5778. +#define SSH_NUM_EVENTS 34
  5779. +
  5780. +/*
  5781. + * The number of communication channels used in the protocol.
  5782. + */
  5783. +#define SSH_NUM_CHANNELS 2
  5784. +
  5785. +
  5786. +static inline u16 ssh_crc(const u8 *buf, size_t len)
  5787. +{
  5788. + return crc_ccitt_false(0xffff, buf, len);
  5789. +}
  5790. +
  5791. +static inline u16 ssh_rqid_next_valid(u16 rqid)
  5792. +{
  5793. + return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u;
  5794. +}
  5795. +
  5796. +static inline u16 ssh_rqid_to_event(u16 rqid)
  5797. +{
  5798. + return rqid - 1u;
  5799. +}
  5800. +
  5801. +static inline bool ssh_rqid_is_event(u16 rqid)
  5802. +{
  5803. + return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS;
  5804. +}
  5805. +
  5806. +static inline int ssh_tc_to_rqid(u8 tc)
  5807. +{
  5808. + return tc;
  5809. +}
  5810. +
  5811. +static inline u8 ssh_channel_to_index(u8 channel)
  5812. +{
  5813. + return channel - 1u;
  5814. +}
  5815. +
  5816. +static inline bool ssh_channel_is_valid(u8 channel)
  5817. +{
  5818. + return ssh_channel_to_index(channel) < SSH_NUM_CHANNELS;
  5819. +}
  5820. +
  5821. +
  5822. +/* -- Safe counters. -------------------------------------------------------- */
  5823. +
  5824. +struct ssh_seq_counter {
  5825. + u8 value;
  5826. +};
  5827. +
  5828. +struct ssh_rqid_counter {
  5829. + u16 value;
  5830. +};
  5831. +
  5832. +static inline void ssh_seq_reset(struct ssh_seq_counter *c)
  5833. +{
  5834. + WRITE_ONCE(c->value, 0);
  5835. +}
  5836. +
  5837. +static inline u8 ssh_seq_next(struct ssh_seq_counter *c)
  5838. +{
  5839. + u8 old = READ_ONCE(c->value);
  5840. + u8 new = old + 1;
  5841. + u8 ret;
  5842. +
  5843. + while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
  5844. + old = ret;
  5845. + new = old + 1;
  5846. + }
  5847. +
  5848. + return old;
  5849. +}
  5850. +
  5851. +static inline void ssh_rqid_reset(struct ssh_rqid_counter *c)
  5852. +{
  5853. + WRITE_ONCE(c->value, 0);
  5854. +}
  5855. +
  5856. +static inline u16 ssh_rqid_next(struct ssh_rqid_counter *c)
  5857. +{
  5858. + u16 old = READ_ONCE(c->value);
  5859. + u16 new = ssh_rqid_next_valid(old);
  5860. + u16 ret;
  5861. +
  5862. + while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
  5863. + old = ret;
  5864. + new = ssh_rqid_next_valid(old);
  5865. + }
  5866. +
  5867. + return old;
  5868. +}
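To make the counter semantics above concrete, an illustration only (the values follow directly from the definitions; nothing here is part of the patch):

/*
 * After ssh_seq_reset(), successive ssh_seq_next() calls return
 *   0, 1, 2, ..., 255, 0, ...                       (u8 wrap-around)
 *
 * With SSH_NUM_EVENTS = 34, request IDs 1..34 are reserved for events:
 *   ssh_rqid_next_valid(0)   -> 35   (skips the reserved event range)
 *   ssh_rqid_next_valid(35)  -> 36
 *   ssh_rqid_to_event(0x15)  -> 0x14, and ssh_rqid_is_event(0x15) is true
 */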
  5869. +
  5870. +
  5871. +/* -- Builder functions for SAM-over-SSH messages. -------------------------- */
  5872. +
  5873. +struct msgbuf {
  5874. + u8 *begin;
  5875. + u8 *end;
  5876. + u8 *ptr;
  5877. +};
  5878. +
  5879. +static inline void msgb_init(struct msgbuf *msgb, u8 *ptr, size_t cap)
  5880. +{
  5881. + msgb->begin = ptr;
  5882. + msgb->end = ptr + cap;
  5883. + msgb->ptr = ptr;
  5884. +}
  5885. +
  5886. +static inline size_t msgb_bytes_used(const struct msgbuf *msgb)
  5887. +{
  5888. + return msgb->ptr - msgb->begin;
  5889. +}
  5890. +
  5891. +static inline void msgb_push_u16(struct msgbuf *msgb, u16 value)
  5892. +{
  5893. + if (WARN_ON(msgb->ptr + sizeof(u16) > msgb->end))
  5894. + return;
  5895. +
  5896. + put_unaligned_le16(value, msgb->ptr);
  5897. + msgb->ptr += sizeof(u16);
  5898. +}
  5899. +
  5900. +static inline void msgb_push_syn(struct msgbuf *msgb)
  5901. +{
  5902. + msgb_push_u16(msgb, SSH_MSG_SYN);
  5903. +}
  5904. +
  5905. +static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len)
  5906. +{
  5907. + msgb->ptr = memcpy(msgb->ptr, buf, len) + len;
  5908. +}
  5909. +
  5910. +static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len)
  5911. +{
  5912. + msgb_push_u16(msgb, ssh_crc(buf, len));
  5913. +}
  5914. +
  5915. +static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq)
  5916. +{
  5917. + struct ssh_frame *frame = (struct ssh_frame *)msgb->ptr;
  5918. + const u8 *const begin = msgb->ptr;
  5919. +
  5920. + if (WARN_ON(msgb->ptr + sizeof(*frame) > msgb->end))
  5921. + return;
  5922. +
  5923. + frame->type = ty;
  5924. + put_unaligned_le16(len, &frame->len);
  5925. + frame->seq = seq;
  5926. +
  5927. + msgb->ptr += sizeof(*frame);
  5928. + msgb_push_crc(msgb, begin, msgb->ptr - begin);
  5929. +}
  5930. +
  5931. +static inline void msgb_push_ack(struct msgbuf *msgb, u8 seq)
  5932. +{
  5933. + // SYN
  5934. + msgb_push_syn(msgb);
  5935. +
  5936. + // ACK-type frame + CRC
  5937. + msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq);
  5938. +
  5939. + // payload CRC (ACK-type frames do not have a payload)
  5940. + msgb_push_crc(msgb, msgb->ptr, 0);
  5941. +}
  5942. +
  5943. +static inline void msgb_push_nak(struct msgbuf *msgb)
  5944. +{
  5945. + // SYN
  5946. + msgb_push_syn(msgb);
  5947. +
  5948. + // NAK-type frame + CRC
  5949. + msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00);
  5950. +
5951. + // payload CRC (NAK-type frames do not have a payload)
  5952. + msgb_push_crc(msgb, msgb->ptr, 0);
  5953. +}
  5954. +
  5955. +static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid,
  5956. + const struct ssam_request *rqst)
  5957. +{
  5958. + struct ssh_command *cmd;
  5959. + const u8 *cmd_begin;
  5960. + const u8 type = SSH_FRAME_TYPE_DATA_SEQ;
  5961. +
  5962. + // SYN
  5963. + msgb_push_syn(msgb);
  5964. +
  5965. + // command frame + crc
  5966. + msgb_push_frame(msgb, type, sizeof(*cmd) + rqst->length, seq);
  5967. +
  5968. + // frame payload: command struct + payload
  5969. + if (WARN_ON(msgb->ptr + sizeof(*cmd) > msgb->end))
  5970. + return;
  5971. +
  5972. + cmd_begin = msgb->ptr;
  5973. + cmd = (struct ssh_command *)msgb->ptr;
  5974. +
  5975. + cmd->type = SSH_PLD_TYPE_CMD;
  5976. + cmd->tc = rqst->target_category;
  5977. + cmd->chn_out = rqst->channel;
  5978. + cmd->chn_in = 0x00;
  5979. + cmd->iid = rqst->instance_id;
  5980. + put_unaligned_le16(rqid, &cmd->rqid);
  5981. + cmd->cid = rqst->command_id;
  5982. +
  5983. + msgb->ptr += sizeof(*cmd);
  5984. +
  5985. + // command payload
  5986. + msgb_push_buf(msgb, rqst->payload, rqst->length);
  5987. +
  5988. + // crc for command struct + payload
  5989. + msgb_push_crc(msgb, cmd_begin, msgb->ptr - cmd_begin);
  5990. +}
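As a usage sketch for the message builder above (illustration only, not part of the patch; buffer size, sequence number, request ID, and request field values are all examples):

static void example_build_command(void)
{
	u8 raw[128];                 /* example scratch buffer */
	u8 payload[] = { 0x01 };     /* example command payload */
	struct ssam_request rqst = {
		.target_category = 0x02,
		.command_id      = 0x0b,
		.instance_id     = 0x00,
		.channel         = 0x01,
		.flags           = SSAM_REQUEST_HAS_RESPONSE,
		.length          = sizeof(payload),
		.payload         = payload,
	};
	struct msgbuf msgb;

	msgb_init(&msgb, raw, sizeof(raw));

	/* SYN + frame header + frame CRC + command header + payload + payload CRC */
	msgb_push_cmd(&msgb, 0x00 /* seq */, 0x0400 /* rqid */, &rqst);

	/* msgb_bytes_used(&msgb) now gives the number of bytes to transmit. */
}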
  5991. +
  5992. +
  5993. +/* -- Parser functions and utilities for SAM-over-SSH messages. ------------- */
  5994. +
  5995. +struct sshp_buf {
  5996. + u8 *ptr;
  5997. + size_t len;
  5998. + size_t cap;
  5999. +};
  6000. +
  6001. +
  6002. +static inline bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc)
  6003. +{
  6004. + u16 actual = ssh_crc(src->ptr, src->len);
  6005. + u16 expected = get_unaligned_le16(crc);
  6006. +
  6007. + return actual == expected;
  6008. +}
  6009. +
  6010. +static bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem)
  6011. +{
  6012. + size_t i;
  6013. +
  6014. + for (i = 0; i < src->len - 1; i++) {
  6015. + if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) {
  6016. + rem->ptr = src->ptr + i;
  6017. + rem->len = src->len - i;
  6018. + return true;
  6019. + }
  6020. + }
  6021. +
  6022. + if (unlikely(src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) {
  6023. + rem->ptr = src->ptr + src->len - 1;
  6024. + rem->len = 1;
  6025. + return false;
  6026. + } else {
  6027. + rem->ptr = src->ptr + src->len;
  6028. + rem->len = 0;
  6029. + return false;
  6030. + }
  6031. +}
  6032. +
  6033. +static bool sshp_starts_with_syn(const struct ssam_span *src)
  6034. +{
  6035. + return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN;
  6036. +}
  6037. +
  6038. +static int sshp_parse_frame(const struct device *dev,
  6039. + const struct ssam_span *source,
  6040. + struct ssh_frame **frame,
  6041. + struct ssam_span *payload,
  6042. + size_t maxlen)
  6043. +{
  6044. + struct ssam_span sf;
  6045. + struct ssam_span sp;
  6046. +
  6047. + // initialize output
  6048. + *frame = NULL;
  6049. + payload->ptr = NULL;
  6050. + payload->len = 0;
  6051. +
  6052. + if (!sshp_starts_with_syn(source)) {
  6053. + dev_warn(dev, "rx: parser: invalid start of frame\n");
  6054. + return -ENOMSG;
  6055. + }
  6056. +
6057. + // check for minimum packet length
  6058. + if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) {
  6059. + dev_dbg(dev, "rx: parser: not enough data for frame\n");
  6060. + return 0;
  6061. + }
  6062. +
  6063. + // pin down frame
  6064. + sf.ptr = source->ptr + sizeof(u16);
  6065. + sf.len = sizeof(struct ssh_frame);
  6066. +
  6067. + // validate frame CRC
  6068. + if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) {
  6069. + dev_warn(dev, "rx: parser: invalid frame CRC\n");
  6070. + return -EBADMSG;
  6071. + }
  6072. +
  6073. + // ensure packet does not exceed maximum length
6074. + if (unlikely(get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len) > maxlen)) {
6075. + dev_warn(dev, "rx: parser: frame too large: %u bytes\n",
6076. + get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len));
  6077. + return -EMSGSIZE;
  6078. + }
  6079. +
  6080. + // pin down payload
  6081. + sp.ptr = sf.ptr + sf.len + sizeof(u16);
  6082. + sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len);
  6083. +
  6084. + // check for frame + payload length
  6085. + if (source->len < SSH_MESSAGE_LENGTH(sp.len)) {
  6086. + dev_dbg(dev, "rx: parser: not enough data for payload\n");
  6087. + return 0;
  6088. + }
  6089. +
  6090. + // validate payload crc
  6091. + if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) {
  6092. + dev_warn(dev, "rx: parser: invalid payload CRC\n");
  6093. + return -EBADMSG;
  6094. + }
  6095. +
  6096. + *frame = (struct ssh_frame *)sf.ptr;
  6097. + *payload = sp;
  6098. +
  6099. + dev_dbg(dev, "rx: parser: valid frame found (type: 0x%02x, len: %u)\n",
6100. + (*frame)->type, get_unaligned_le16(&(*frame)->len));
  6101. +
  6102. + return 0;
  6103. +}
  6104. +
  6105. +static int sshp_parse_command(const struct device *dev,
  6106. + const struct ssam_span *source,
  6107. + struct ssh_command **command,
  6108. + struct ssam_span *command_data)
  6109. +{
  6110. + // check for minimum length
  6111. + if (unlikely(source->len < sizeof(struct ssh_command))) {
  6112. + *command = NULL;
  6113. + command_data->ptr = NULL;
  6114. + command_data->len = 0;
  6115. +
  6116. + dev_err(dev, "rx: parser: command payload is too short\n");
  6117. + return -ENOMSG;
  6118. + }
  6119. +
  6120. + *command = (struct ssh_command *)source->ptr;
  6121. + command_data->ptr = source->ptr + sizeof(struct ssh_command);
  6122. + command_data->len = source->len - sizeof(struct ssh_command);
  6123. +
  6124. + dev_dbg(dev, "rx: parser: valid command found (tc: 0x%02x,"
  6125. + " cid: 0x%02x)\n", (*command)->tc, (*command)->cid);
  6126. +
  6127. + return 0;
  6128. +}
  6129. +
  6130. +
  6131. +static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap)
  6132. +{
  6133. + buf->ptr = ptr;
  6134. + buf->len = 0;
  6135. + buf->cap = cap;
  6136. +}
  6137. +
  6138. +static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags)
  6139. +{
  6140. + u8 *ptr;
  6141. +
  6142. + ptr = kzalloc(cap, flags);
  6143. + if (!ptr)
  6144. + return -ENOMEM;
  6145. +
  6146. + sshp_buf_init(buf, ptr, cap);
  6147. + return 0;
  6148. +
  6149. +}
  6150. +
  6151. +static inline void sshp_buf_free(struct sshp_buf *buf)
  6152. +{
  6153. + kfree(buf->ptr);
  6154. + buf->ptr = NULL;
  6155. + buf->len = 0;
  6156. + buf->cap = 0;
  6157. +}
  6158. +
  6159. +static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n)
  6160. +{
  6161. + memmove(buf->ptr, buf->ptr + n, buf->len - n);
  6162. + buf->len -= n;
  6163. +}
  6164. +
  6165. +static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf,
  6166. + struct kfifo *fifo)
  6167. +{
  6168. + size_t n;
  6169. +
  6170. + n = kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len);
  6171. + buf->len += n;
  6172. +
  6173. + return n;
  6174. +}
  6175. +
  6176. +static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset,
  6177. + struct ssam_span *span)
  6178. +{
  6179. + span->ptr = buf->ptr + offset;
  6180. + span->len = buf->len - offset;
  6181. +}
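A usage sketch for the parser helpers above (illustration only, not part of the patch; the device pointer and the 4096-byte frame limit are placeholders):

static void example_parse(const struct device *dev, struct sshp_buf *buf)
{
	struct ssam_span src, rem, payload, cmd_data;
	struct ssh_frame *frame;
	struct ssh_command *cmd;

	sshp_buf_span_from(buf, 0, &src);

	/* Skip leading garbage until a SYN sequence is found. */
	if (!sshp_find_syn(&src, &rem))
		return;

	/* Returns 0 with *frame == NULL if more data is still needed. */
	if (sshp_parse_frame(dev, &rem, &frame, &payload, 4096) || !frame)
		return;

	/* For sequenced data frames, the payload embeds a command. */
	if (frame->type == SSH_FRAME_TYPE_DATA_SEQ)
		sshp_parse_command(dev, &payload, &cmd, &cmd_data);
}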
  6182. +
  6183. +
  6184. +/* -- Packet transport layer (ptl). ----------------------------------------- */
  6185. +/*
  6186. + * To simplify reasoning about the code below, we define a few concepts. The
  6187. + * system below is similar to a state-machine for packets, however, there are
  6188. + * too many states to explicitly write them down. To (somewhat) manage the
  6189. + * states and packets we rely on flags, reference counting, and some simple
  6190. + * concepts. State transitions are triggered by actions.
  6191. + *
  6192. + * >> Actions <<
  6193. + *
  6194. + * - submit
  6195. + * - transmission start (process next item in queue)
  6196. + * - transmission finished (guaranteed to never be parallel to transmission
  6197. + * start)
  6198. + * - ACK received
  6199. + * - NAK received (this is equivalent to issuing re-submit for all pending
  6200. + * packets)
  6201. + * - timeout (this is equivalent to re-issuing a submit or canceling)
  6202. + * - cancel (non-pending and pending)
  6203. + *
  6204. + * >> Data Structures, Packet Ownership, General Overview <<
  6205. + *
  6206. + * The code below employs two main data structures: The packet queue, containing
  6207. + * all packets scheduled for transmission, and the set of pending packets,
  6208. + * containing all packets awaiting an ACK.
  6209. + *
  6210. + * Shared ownership of a packet is controlled via reference counting. Inside the
  6211. + * transmission system are a total of five packet owners:
  6212. + *
  6213. + * - the packet queue,
  6214. + * - the pending set,
  6215. + * - the transmitter thread,
  6216. + * - the receiver thread (via ACKing), and
  6217. + * - the timeout work item.
  6218. + *
  6219. + * Normal operation is as follows: The initial reference of the packet is
6220. + * obtained by submitting the packet and queueing it. The transmitter thread
6221. + * takes packets from the queue. By doing this, it does not increment the
6222. + * refcount but takes over the reference (removing it from the queue).
6223. + * If the packet is sequenced (i.e. needs to be ACKed by the client), the
6224. + * transmitter thread sets up the timeout and adds the packet to the pending set
  6225. + * before starting to transmit it. As the timeout is handled by a reaper task,
  6226. + * no additional reference for it is needed. After the transmit is done, the
  6227. + * reference hold by the transmitter thread is dropped. If the packet is
  6228. + * unsequenced (i.e. does not need an ACK), the packet is completed by the
  6229. + * transmitter thread before dropping that reference.
  6230. + *
6231. + * On receipt of an ACK, the receiver thread removes and obtains the reference
6232. + * to the packet from the pending set. On success, the receiver thread will
6233. + * then complete the packet and drop its reference.
6234. + *
6235. + * On error, the completion callback is run immediately on the thread on which
6236. + * the error was detected.
  6237. + *
  6238. + * To ensure that a packet eventually leaves the system it is marked as "locked"
  6239. + * directly before it is going to be completed or when it is canceled. Marking a
  6240. + * packet as "locked" has the effect that passing and creating new references
  6241. + * of the packet will be blocked. This means that the packet cannot be added
  6242. + * to the queue, the pending set, and the timeout, or be picked up by the
  6243. + * transmitter thread or receiver thread. To remove a packet from the system it
  6244. + * has to be marked as locked and subsequently all references from the data
  6245. + * structures (queue, pending) have to be removed. References held by threads
  6246. + * will eventually be dropped automatically as their execution progresses.
  6247. + *
  6248. + * Note that the packet completion callback is, in case of success and for a
  6249. + * sequenced packet, guaranteed to run on the receiver thread, thus providing a
  6250. + * way to reliably identify responses to the packet. The packet completion
  6251. + * callback is only run once and it does not indicate that the packet has fully
  6252. + * left the system. In case of re-submission (and with somewhat unlikely
  6253. + * timing), it may be possible that the packet is being re-transmitted while the
  6254. + * completion callback runs. Completion will occur both on success and internal
  6255. + * error, as well as when the packet is canceled.
  6256. + *
  6257. + * >> Flags <<
  6258. + *
  6259. + * Flags are used to indicate the state and progression of a packet. Some flags
6260. + * have stricter guarantees than others:
  6261. + *
  6262. + * - locked
  6263. + * Indicates if the packet is locked. If the packet is locked, passing and/or
  6264. + * creating additional references to the packet is forbidden. The packet thus
  6265. + * may not be queued, dequeued, or removed or added to the pending set. Note
  6266. + * that the packet state flags may still change (e.g. it may be marked as
  6267. + * ACKed, transmitted, ...).
  6268. + *
  6269. + * - completed
  6270. + * Indicates if the packet completion has been run or is about to be run. This
  6271. + * flag is used to ensure that the packet completion callback is only run
  6272. + * once.
  6273. + *
  6274. + * - queued
  6275. + * Indicates if a packet is present in the submission queue or not. This flag
6276. + * must only be modified with the queue lock held, and must be coherent with
6277. + * the presence of the packet in the queue.
  6278. + *
  6279. + * - pending
  6280. + * Indicates if a packet is present in the set of pending packets or not.
6281. + * This flag must only be modified with the pending lock held, and must be
6282. + * coherent with the presence of the packet in the pending set.
  6283. + *
  6284. + * - transmitting
  6285. + * Indicates if the packet is currently transmitting. In case of
  6286. + * re-transmissions, it is only safe to wait on the "transmitted" completion
6287. + * after this flag has been set. The completion will be set in both the
6288. + * success and the error case.
  6289. + *
  6290. + * - transmitted
  6291. + * Indicates if the packet has been transmitted. This flag is not cleared by
  6292. + * the system, thus it indicates the first transmission only.
  6293. + *
  6294. + * - acked
  6295. + * Indicates if the packet has been acknowledged by the client. There are no
  6296. + * other guarantees given. For example, the packet may still be canceled
6297. + * and/or the completion may be triggered with an error even though this bit
6298. + * is set. Rely on the status provided by the completion callback instead.
  6299. + *
  6300. + * - canceled
  6301. + * Indicates if the packet has been canceled from the outside. There are no
  6302. + * other guarantees given. Specifically, the packet may be completed by
  6303. + * another part of the system before the cancellation attempts to complete it.
  6304. + *
  6305. + * >> General Notes <<
  6306. + *
  6307. + * To avoid deadlocks, if both queue and pending locks are required, the pending
  6308. + * lock must be acquired before the queue lock.
  6309. + */
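The locking/completion discipline described above boils down to two atomic bit operations; the sketch below is illustrative only and uses hypothetical names (example_packet, EX_LOCKED_BIT, EX_COMPLETED_BIT), not the driver's actual identifiers:

#include <linux/bitops.h>

enum { EX_LOCKED_BIT, EX_COMPLETED_BIT };

struct example_packet {
	unsigned long state;
	void (*complete)(struct example_packet *p, int status);
};

static void example_complete_once(struct example_packet *p, int status)
{
	/* Lock the packet first: no new references may be handed out. */
	set_bit(EX_LOCKED_BIT, &p->state);

	/* test_and_set_bit() guarantees the callback runs exactly once. */
	if (test_and_set_bit(EX_COMPLETED_BIT, &p->state))
		return;

	p->complete(p, status);
}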
  6310. +
  6311. +/**
6312. + * Maximum number of transmission attempts per sequenced packet in case of
  6313. + * time-outs. Must be smaller than 16.
  6314. + */
  6315. +#define SSH_PTL_MAX_PACKET_TRIES 3
  6316. +
  6317. +/**
  6318. + * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
  6319. + * time-frame after starting transmission, the packet will be re-submitted.
  6320. + */
  6321. +#define SSH_PTL_PACKET_TIMEOUT ms_to_ktime(1000)
  6322. +
  6323. +/**
  6324. + * Maximum time resolution for timeouts. Currently set to max(2 jiffies, 50ms).
  6325. + * Should be larger than one jiffy to avoid direct re-scheduling of reaper
  6326. + * work_struct.
  6327. + */
  6328. +#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
  6329. +
  6330. +/**
  6331. + * Maximum number of sequenced packets concurrently waiting for an ACK.
  6332. + * Packets marked as blocking will not be transmitted while this limit is
  6333. + * reached.
  6334. + */
  6335. +#define SSH_PTL_MAX_PENDING 1
  6336. +
  6337. +#define SSH_PTL_RX_BUF_LEN 4096
  6338. +
  6339. +#define SSH_PTL_RX_FIFO_LEN 4096
  6340. +
  6341. +
  6342. +enum ssh_ptl_state_flags {
  6343. + SSH_PTL_SF_SHUTDOWN_BIT,
  6344. +};
  6345. +
  6346. +struct ssh_ptl_ops {
  6347. + void (*data_received)(struct ssh_ptl *p, const struct ssam_span *data);
  6348. +};
  6349. +
  6350. +struct ssh_ptl {
  6351. + struct serdev_device *serdev;
  6352. + unsigned long state;
  6353. +
  6354. + struct {
  6355. + spinlock_t lock;
  6356. + struct list_head head;
  6357. + } queue;
  6358. +
  6359. + struct {
  6360. + spinlock_t lock;
  6361. + struct list_head head;
  6362. + atomic_t count;
  6363. + } pending;
  6364. +
  6365. + struct {
  6366. + bool thread_signal;
  6367. + struct task_struct *thread;
  6368. + struct wait_queue_head thread_wq;
  6369. + struct wait_queue_head packet_wq;
  6370. + struct ssh_packet *packet;
  6371. + size_t offset;
  6372. + } tx;
  6373. +
  6374. + struct {
  6375. + struct task_struct *thread;
  6376. + struct wait_queue_head wq;
  6377. + struct kfifo fifo;
  6378. + struct sshp_buf buf;
  6379. +
  6380. + struct {
  6381. + u16 seqs[8];
  6382. + u16 offset;
  6383. + } blocked;
  6384. + } rx;
  6385. +
  6386. + struct {
  6387. + ktime_t timeout;
  6388. + ktime_t expires;
  6389. + struct delayed_work reaper;
  6390. + } rtx_timeout;
  6391. +
  6392. + struct ssh_ptl_ops ops;
  6393. +};
  6394. +
  6395. +
  6396. +#define __ssam_prcond(func, p, fmt, ...) \
  6397. + do { \
  6398. + if ((p)) \
  6399. + func((p), fmt, ##__VA_ARGS__); \
6400. + } while (0)
  6401. +
  6402. +#define ptl_dbg(p, fmt, ...) dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
  6403. +#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
  6404. +#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
  6405. +#define ptl_err(p, fmt, ...) dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
  6406. +#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__)
  6407. +
  6408. +#define to_ssh_packet(ptr, member) \
  6409. + container_of(ptr, struct ssh_packet, member)
  6410. +
  6411. +#define to_ssh_ptl(ptr, member) \
  6412. + container_of(ptr, struct ssh_ptl, member)
  6413. +
  6414. +
  6415. +#ifdef CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION
  6416. +
  6417. +/**
  6418. + * ssh_ptl_should_drop_ack_packet - error injection hook to drop ACK packets
  6419. + *
  6420. + * Useful to test detection and handling of automated re-transmits by the EC.
6421. + * Specifically of packets that the EC considers not-ACKed but the driver
6422. + * already considers ACKed (due to a dropped ACK). In this case, the EC
  6423. + * re-transmits the packet-to-be-ACKed and the driver should detect it as
  6424. + * duplicate/already handled. Note that the driver should still send an ACK
  6425. + * for the re-transmitted packet.
  6426. + */
  6427. +static noinline bool ssh_ptl_should_drop_ack_packet(void)
  6428. +{
  6429. + return false;
  6430. +}
  6431. +ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);
  6432. +
  6433. +/**
  6434. + * ssh_ptl_should_drop_nak_packet - error injection hook to drop NAK packets
  6435. + *
  6436. + * Useful to test/force automated (timeout-based) re-transmit by the EC.
  6437. + * Specifically, packets that have not reached the driver completely/with valid
6438. + * checksums. Only useful in combination with receipt of (injected) bad data.
  6439. + */
  6440. +static noinline bool ssh_ptl_should_drop_nak_packet(void)
  6441. +{
  6442. + return false;
  6443. +}
  6444. +ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);
  6445. +
  6446. +/**
  6447. + * ssh_ptl_should_drop_dsq_packet - error injection hook to drop sequenced data
  6448. + * packet
  6449. + *
  6450. + * Useful to test re-transmit timeout of the driver. If the data packet has not
  6451. + * been ACKed after a certain time, the driver should re-transmit the packet up
6452. + * to a limited number of times, defined by SSH_PTL_MAX_PACKET_TRIES.
  6453. + */
  6454. +static noinline bool ssh_ptl_should_drop_dsq_packet(void)
  6455. +{
  6456. + return false;
  6457. +}
  6458. +ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);
  6459. +
  6460. +/**
  6461. + * ssh_ptl_should_fail_write - error injection hook to make serdev_device_write
  6462. + * fail
  6463. + *
  6464. + * Hook to simulate errors in serdev_device_write when transmitting packets.
  6465. + */
  6466. +static noinline int ssh_ptl_should_fail_write(void)
  6467. +{
  6468. + return 0;
  6469. +}
  6470. +ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);
  6471. +
  6472. +/**
6473. + * ssh_ptl_should_corrupt_tx_data - error injection hook to simulate invalid
  6474. + * data being sent to the EC
  6475. + *
  6476. + * Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
  6477. + * Causes the packet data to be actively corrupted by overwriting it with
  6478. + * pre-defined values, such that it becomes invalid, causing the EC to respond
  6479. + * with a NAK packet. Useful to test handling of NAK packets received by the
  6480. + * driver.
  6481. + */
  6482. +static noinline bool ssh_ptl_should_corrupt_tx_data(void)
  6483. +{
  6484. + return false;
  6485. +}
  6486. +ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);
  6487. +
  6488. +/**
  6489. + * ssh_ptl_should_corrupt_rx_syn - error injection hook to simulate invalid
  6490. + * data being sent by the EC
  6491. + *
6492. + * Hook to simulate invalid SYN bytes, i.e. an invalid start of a message, and
6493. + * to test handling thereof in the driver.
  6494. + */
  6495. +static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
  6496. +{
  6497. + return false;
  6498. +}
  6499. +ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);
  6500. +
  6501. +/**
  6502. + * ssh_ptl_should_corrupt_rx_data - error injection hook to simulate invalid
  6503. + * data being sent by the EC
  6504. + *
  6505. + * Hook to simulate invalid data/checksum of the message frame and test handling
  6506. + * thereof in the driver.
  6507. + */
  6508. +static noinline bool ssh_ptl_should_corrupt_rx_data(void)
  6509. +{
  6510. + return false;
  6511. +}
  6512. +ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE);
  6513. +
  6514. +
  6515. +static inline bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet)
  6516. +{
  6517. + if (likely(!ssh_ptl_should_drop_ack_packet()))
  6518. + return false;
  6519. +
  6520. + trace_ssam_ei_tx_drop_ack_packet(packet);
  6521. + ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n",
  6522. + packet);
  6523. +
  6524. + return true;
  6525. +}
  6526. +
  6527. +static inline bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet)
  6528. +{
  6529. + if (likely(!ssh_ptl_should_drop_nak_packet()))
  6530. + return false;
  6531. +
  6532. + trace_ssam_ei_tx_drop_nak_packet(packet);
  6533. + ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n",
  6534. + packet);
  6535. +
  6536. + return true;
  6537. +}
  6538. +
  6539. +static inline bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet)
  6540. +{
  6541. + if (likely(!ssh_ptl_should_drop_dsq_packet()))
  6542. + return false;
  6543. +
  6544. + trace_ssam_ei_tx_drop_dsq_packet(packet);
  6545. + ptl_info(packet->ptl,
  6546. + "packet error injection: dropping sequenced data packet %p\n",
  6547. + packet);
  6548. +
  6549. + return true;
  6550. +}
  6551. +
  6552. +static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
  6553. +{
  6554. + // ignore packets that don't carry any data (i.e. flush)
  6555. + if (!packet->data.ptr || !packet->data.len)
  6556. + return false;
  6557. +
  6558. + switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) {
  6559. + case SSH_FRAME_TYPE_ACK:
  6560. + return __ssh_ptl_should_drop_ack_packet(packet);
  6561. +
  6562. + case SSH_FRAME_TYPE_NAK:
  6563. + return __ssh_ptl_should_drop_nak_packet(packet);
  6564. +
  6565. + case SSH_FRAME_TYPE_DATA_SEQ:
  6566. + return __ssh_ptl_should_drop_dsq_packet(packet);
  6567. +
  6568. + default:
  6569. + return false;
  6570. + }
  6571. +}
  6572. +
  6573. +static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet,
  6574. + const unsigned char *buf, size_t count)
  6575. +{
  6576. + int status;
  6577. +
  6578. + status = ssh_ptl_should_fail_write();
  6579. + if (unlikely(status)) {
  6580. + trace_ssam_ei_tx_fail_write(packet, status);
  6581. + ptl_info(packet->ptl,
  6582. + "packet error injection: simulating transmit error %d, packet %p\n",
  6583. + status, packet);
  6584. +
  6585. + return status;
  6586. + }
  6587. +
  6588. + return serdev_device_write_buf(ptl->serdev, buf, count);
  6589. +}
  6590. +
  6591. +static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
  6592. +{
  6593. + // ignore packets that don't carry any data (i.e. flush)
  6594. + if (!packet->data.ptr || !packet->data.len)
  6595. + return;
  6596. +
  6597. + // only allow sequenced data packets to be modified
  6598. + if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ)
  6599. + return;
  6600. +
  6601. + if (likely(!ssh_ptl_should_corrupt_tx_data()))
  6602. + return;
  6603. +
  6604. + trace_ssam_ei_tx_corrupt_data(packet);
  6605. + ptl_info(packet->ptl,
  6606. + "packet error injection: simulating invalid transmit data on packet %p\n",
  6607. + packet);
  6608. +
  6609. + /*
  6610. + * NB: The value 0xb3 has been chosen more or less randomly so that it
  6611. + * doesn't have any (major) overlap with the SYN bytes (aa 55) and is
  6612. + * non-trivial (i.e. non-zero, non-0xff).
  6613. + */
  6614. + memset(packet->data.ptr, 0xb3, packet->data.len);
  6615. +}
  6616. +
  6617. +static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
  6618. + struct ssam_span *data)
  6619. +{
  6620. + struct ssam_span frame;
  6621. +
  6622. + // check if there actually is something to corrupt
  6623. + if (!sshp_find_syn(data, &frame))
  6624. + return;
  6625. +
  6626. + if (likely(!ssh_ptl_should_corrupt_rx_syn()))
  6627. + return;
  6628. +
  6629. + trace_ssam_ei_rx_corrupt_syn("data_length", data->len);
  6630. +
  6631. + data->ptr[1] = 0xb3; // set second byte of SYN to "random" value
  6632. +}
  6633. +
  6634. +static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
  6635. + struct ssam_span *frame)
  6636. +{
  6637. + size_t payload_len, message_len;
  6638. + struct ssh_frame *sshf;
  6639. +
6640. + // ignore incomplete messages; they will be handled once complete
  6641. + if (frame->len < SSH_MESSAGE_LENGTH(0))
  6642. + return;
  6643. +
  6644. + // ignore incomplete messages, part 2
  6645. + payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]);
  6646. + message_len = SSH_MESSAGE_LENGTH(payload_len);
  6647. + if (frame->len < message_len)
  6648. + return;
  6649. +
  6650. + if (likely(!ssh_ptl_should_corrupt_rx_data()))
  6651. + return;
  6652. +
  6653. + sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)];
  6654. + trace_ssam_ei_rx_corrupt_data(sshf);
  6655. +
  6656. + /*
  6657. + * Flip bits in first byte of payload checksum. This is basically
  6658. + * equivalent to a payload/frame data error without us having to worry
  6659. + * about (the, arguably pretty small, probability of) accidental
  6660. + * checksum collisions.
  6661. + */
  6662. + frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2];
  6663. +}
  6664. +
  6665. +#else /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */
  6666. +
  6667. +static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
  6668. +{
  6669. + return false;
  6670. +}
  6671. +
  6672. +static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl,
  6673. + struct ssh_packet *packet,
  6674. + const unsigned char *buf,
  6675. + size_t count)
  6676. +{
  6677. + return serdev_device_write_buf(ptl->serdev, buf, count);
  6678. +}
  6679. +
  6680. +static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
  6681. +{
  6682. +}
  6683. +
  6684. +static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
  6685. + struct ssam_span *data)
  6686. +{
  6687. +}
  6688. +
  6689. +static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
  6690. + struct ssam_span *frame)
  6691. +{
  6692. +}
  6693. +
  6694. +#endif /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */
  6695. +
  6696. +
  6697. +static void __ssh_ptl_packet_release(struct kref *kref)
  6698. +{
  6699. + struct ssh_packet *p = to_ssh_packet(kref, refcnt);
  6700. +
  6701. + trace_ssam_packet_release(p);
  6702. +
  6703. + ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
  6704. + p->ops->release(p);
  6705. +}
  6706. +
  6707. +void ssh_packet_get(struct ssh_packet *packet)
  6708. +{
  6709. + kref_get(&packet->refcnt);
  6710. +}
  6711. +EXPORT_SYMBOL_GPL(ssh_packet_get);
  6712. +
  6713. +void ssh_packet_put(struct ssh_packet *packet)
  6714. +{
  6715. + kref_put(&packet->refcnt, __ssh_ptl_packet_release);
  6716. +}
  6717. +EXPORT_SYMBOL_GPL(ssh_packet_put);
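ssh_packet_get() and ssh_packet_put() wrap the packet's kref so that every holder (the queue, the pending set, the transmitter, external callers) owns one reference and the release callback runs exactly once, when the last reference is dropped. A minimal stand-alone sketch of that discipline, using C11 atomics in place of kref (the names and output are illustrative assumptions, not the driver's API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct packet {
	atomic_int refcnt;
	/* ... payload ... */
};

static struct packet *packet_create(void)
{
	struct packet *p = malloc(sizeof(*p));

	if (p)
		atomic_init(&p->refcnt, 1);	/* creator holds one reference */
	return p;
}

static void packet_get(struct packet *p)
{
	atomic_fetch_add(&p->refcnt, 1);
}

static void packet_put(struct packet *p)
{
	/* dropping the last reference releases the packet */
	if (atomic_fetch_sub(&p->refcnt, 1) == 1) {
		printf("releasing packet %p\n", (void *)p);
		free(p);
	}
}

int main(void)
{
	struct packet *p = packet_create();

	if (!p)
		return 1;

	packet_get(p);	/* e.g. the reference taken when queueing */
	packet_put(p);	/* queue removal drops that reference */
	packet_put(p);	/* creator's reference: triggers the release */
	return 0;
}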
  6718. +
  6719. +static inline u8 ssh_packet_get_seq(struct ssh_packet *packet)
  6720. +{
  6721. + return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
  6722. +}
  6723. +
  6724. +
  6725. +struct ssh_packet_args {
  6726. + unsigned long type;
  6727. + u8 priority;
  6728. + const struct ssh_packet_ops *ops;
  6729. +};
  6730. +
  6731. +static void ssh_packet_init(struct ssh_packet *packet,
  6732. + const struct ssh_packet_args *args)
  6733. +{
  6734. + kref_init(&packet->refcnt);
  6735. +
  6736. + packet->ptl = NULL;
  6737. + INIT_LIST_HEAD(&packet->queue_node);
  6738. + INIT_LIST_HEAD(&packet->pending_node);
  6739. +
  6740. + packet->state = args->type & SSH_PACKET_FLAGS_TY_MASK;
  6741. + packet->priority = args->priority;
  6742. + packet->timestamp = KTIME_MAX;
  6743. +
  6744. + packet->data.ptr = NULL;
  6745. + packet->data.len = 0;
  6746. +
  6747. + packet->ops = args->ops;
  6748. +}
  6749. +
  6750. +
  6751. +static struct kmem_cache *ssh_ctrl_packet_cache;
  6752. +
  6753. +static int __init ssh_ctrl_packet_cache_init(void)
  6754. +{
  6755. + const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
  6756. + const unsigned int align = __alignof__(struct ssh_packet);
  6757. + struct kmem_cache *cache;
  6758. +
  6759. + cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
  6760. + if (!cache)
  6761. + return -ENOMEM;
  6762. +
  6763. + ssh_ctrl_packet_cache = cache;
  6764. + return 0;
  6765. +}
  6766. +
  6767. +static void __exit ssh_ctrl_packet_cache_destroy(void)
  6768. +{
  6769. + kmem_cache_destroy(ssh_ctrl_packet_cache);
  6770. + ssh_ctrl_packet_cache = NULL;
  6771. +}
  6772. +
  6773. +static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
  6774. + struct ssam_span *buffer, gfp_t flags)
  6775. +{
  6776. + *packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
  6777. + if (!*packet)
  6778. + return -ENOMEM;
  6779. +
  6780. + buffer->ptr = (u8 *)(*packet + 1);
  6781. + buffer->len = SSH_MSG_LEN_CTRL;
  6782. +
  6783. + trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
  6784. + return 0;
  6785. +}
  6786. +
  6787. +static void ssh_ctrl_packet_free(struct ssh_packet *p)
  6788. +{
  6789. + trace_ssam_ctrl_packet_free(p);
  6790. + kmem_cache_free(ssh_ctrl_packet_cache, p);
  6791. +}
  6792. +
  6793. +static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
  6794. + .complete = NULL,
  6795. + .release = ssh_ctrl_packet_free,
  6796. +};
  6797. +
  6798. +
  6799. +static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
  6800. + ktime_t expires)
  6801. +{
  6802. + unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
  6803. + ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);
  6804. + ktime_t old;
  6805. +
  6806. + // re-adjust / schedule reaper if it is above resolution delta
  6807. + old = READ_ONCE(ptl->rtx_timeout.expires);
  6808. + while (ktime_before(aexp, old))
  6809. + old = cmpxchg64(&ptl->rtx_timeout.expires, old, expires);
  6810. +
  6811. + // if we updated the reaper expiration, modify work timeout
  6812. + if (old == expires)
  6813. + mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
  6814. +}
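ssh_ptl_timeout_reaper_mod() lowers the shared reaper expiration without holding a lock: it only ever moves the stored value earlier, and it only reschedules the work item if it was the caller that actually performed the update. A minimal stand-alone sketch of the same pattern, assuming a plain nanosecond counter and using the portable C11 compare-exchange loop in place of the kernel's cmpxchg64 idiom (helper name and values are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* shared expiration in nanoseconds; INT64_MAX means "nothing scheduled" */
static _Atomic int64_t reaper_expires = INT64_MAX;

/*
 * Move the shared expiration earlier, but only if the new value undercuts
 * the current one by more than @resolution. Returns true if this caller
 * performed the update and should therefore (re-)schedule the work item.
 */
static bool reaper_mod(int64_t expires, int64_t resolution)
{
	int64_t old = atomic_load(&reaper_expires);

	while (expires + resolution < old) {
		if (atomic_compare_exchange_weak(&reaper_expires, &old, expires))
			return true;	/* we lowered it: reschedule */
		/* @old now holds the concurrently updated value; re-check */
	}
	return false;	/* an earlier (or close enough) expiration is already set */
}

int main(void)
{
	printf("%d\n", reaper_mod(1000, 50));	/* 1: schedule */
	printf("%d\n", reaper_mod(990, 50));	/* 0: within resolution */
	printf("%d\n", reaper_mod(100, 50));	/* 1: earlier, reschedule */
	return 0;
}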
  6815. +
  6816. +static void ssh_ptl_timeout_start(struct ssh_packet *packet)
  6817. +{
  6818. + struct ssh_ptl *ptl = packet->ptl;
  6819. + ktime_t timestamp = ktime_get_coarse_boottime();
  6820. + ktime_t timeout = ptl->rtx_timeout.timeout;
  6821. +
  6822. + if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
  6823. + return;
  6824. +
  6825. + WRITE_ONCE(packet->timestamp, timestamp);
  6826. + smp_mb__after_atomic();
  6827. +
  6828. + ssh_ptl_timeout_reaper_mod(packet->ptl, timestamp, timestamp + timeout);
  6829. +}
  6830. +
  6831. +
  6832. +static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
  6833. +{
  6834. + struct list_head *head;
  6835. + u8 priority = READ_ONCE(p->priority);
  6836. +
  6837. + /*
6838. + * We generally assume that there are fewer control (ACK/NAK) packets and
6839. + * re-submitted data packets than normal data packets (at least in
6840. + * situations in which many packets are queued; if there aren't many
6841. + * packets queued, the decision on how to iterate should be basically
6842. + * irrelevant; the number of control/data packets is more or less
6843. + * limited via the maximum number of pending packets). Thus, when
6844. + * inserting a control or re-submitted data packet (determined by its
6845. + * priority), we search from front to back. Normal data packets are
6846. + * usually queued directly at the tail of the queue, so for those we
6847. + * search from back to front. See the sketch after this function.
  6848. + */
  6849. +
  6850. + if (priority > SSH_PACKET_PRIORITY_DATA) {
  6851. + list_for_each(head, &p->ptl->queue.head) {
  6852. + p = list_entry(head, struct ssh_packet, queue_node);
  6853. +
  6854. + if (READ_ONCE(p->priority) < priority)
  6855. + break;
  6856. + }
  6857. + } else {
  6858. + list_for_each_prev(head, &p->ptl->queue.head) {
  6859. + p = list_entry(head, struct ssh_packet, queue_node);
  6860. +
  6861. + if (READ_ONCE(p->priority) >= priority) {
  6862. + head = head->next;
  6863. + break;
  6864. + }
  6865. + }
  6866. + }
  6867. +
  6868. +
  6869. + return head;
  6870. +}
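Both search directions above compute the same insertion point: directly behind the last queued packet whose priority is greater than or equal to the new packet's, so the queue stays sorted by descending priority. A minimal stand-alone sketch of that insertion point, with an array standing in for the linked list and made-up numeric priorities:

#include <stdio.h>

/*
 * Return the index at which a packet of priority @prio is inserted so the
 * queue stays sorted by descending priority: behind the last entry whose
 * priority is greater than or equal to @prio.
 */
static size_t find_entrypoint(const int *queue, size_t n, int prio)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (queue[i] < prio)
			break;
	return i;
}

int main(void)
{
	int queue[] = { 7, 7, 5, 5, 5, 2, 2 };
	size_t n = sizeof(queue) / sizeof(queue[0]);

	/* a re-submitted packet of priority 5 lands behind the existing 5s */
	printf("insert at %zu\n", find_entrypoint(queue, n, 5));	/* 5 */
	/* a fresh data packet of priority 2 lands at the tail */
	printf("insert at %zu\n", find_entrypoint(queue, n, 2));	/* 7 */
	return 0;
}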
  6871. +
  6872. +static int ssh_ptl_queue_push(struct ssh_packet *packet)
  6873. +{
  6874. + struct ssh_ptl *ptl = packet->ptl;
  6875. + struct list_head *head;
  6876. +
  6877. + spin_lock(&ptl->queue.lock);
  6878. +
  6879. + if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state)) {
  6880. + spin_unlock(&ptl->queue.lock);
  6881. + return -ESHUTDOWN;
  6882. + }
  6883. +
  6884. + // avoid further transitions when cancelling/completing
  6885. + if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) {
  6886. + spin_unlock(&ptl->queue.lock);
  6887. + return -EINVAL;
  6888. + }
  6889. +
  6890. + // if this packet has already been queued, do not add it
  6891. + if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
  6892. + spin_unlock(&ptl->queue.lock);
  6893. + return -EALREADY;
  6894. + }
  6895. +
  6896. + head = __ssh_ptl_queue_find_entrypoint(packet);
  6897. +
  6898. + ssh_packet_get(packet);
6899. + list_add_tail(&packet->queue_node, head);
  6900. +
  6901. + spin_unlock(&ptl->queue.lock);
  6902. + return 0;
  6903. +}
  6904. +
  6905. +static void ssh_ptl_queue_remove(struct ssh_packet *packet)
  6906. +{
  6907. + struct ssh_ptl *ptl = packet->ptl;
  6908. + bool remove;
  6909. +
  6910. + spin_lock(&ptl->queue.lock);
  6911. +
  6912. + remove = test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state);
  6913. + if (remove)
  6914. + list_del(&packet->queue_node);
  6915. +
  6916. + spin_unlock(&ptl->queue.lock);
  6917. +
  6918. + if (remove)
  6919. + ssh_packet_put(packet);
  6920. +}
  6921. +
  6922. +
  6923. +static void ssh_ptl_pending_push(struct ssh_packet *packet)
  6924. +{
  6925. + struct ssh_ptl *ptl = packet->ptl;
  6926. +
  6927. + spin_lock(&ptl->pending.lock);
  6928. +
  6929. + // if we are cancelling/completing this packet, do not add it
  6930. + if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) {
  6931. + spin_unlock(&ptl->pending.lock);
  6932. + return;
  6933. + }
  6934. +
  6935. + // in case it is already pending (e.g. re-submission), do not add it
  6936. + if (test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
  6937. + spin_unlock(&ptl->pending.lock);
  6938. + return;
  6939. + }
  6940. +
  6941. + atomic_inc(&ptl->pending.count);
  6942. + ssh_packet_get(packet);
  6943. + list_add_tail(&packet->pending_node, &ptl->pending.head);
  6944. +
  6945. + spin_unlock(&ptl->pending.lock);
  6946. +}
  6947. +
  6948. +static void ssh_ptl_pending_remove(struct ssh_packet *packet)
  6949. +{
  6950. + struct ssh_ptl *ptl = packet->ptl;
  6951. + bool remove;
  6952. +
  6953. + spin_lock(&ptl->pending.lock);
  6954. +
  6955. + remove = test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state);
  6956. + if (remove) {
  6957. + list_del(&packet->pending_node);
  6958. + atomic_dec(&ptl->pending.count);
  6959. + }
  6960. +
  6961. + spin_unlock(&ptl->pending.lock);
  6962. +
  6963. + if (remove)
  6964. + ssh_packet_put(packet);
  6965. +}
  6966. +
  6967. +
  6968. +static void __ssh_ptl_complete(struct ssh_packet *p, int status)
  6969. +{
  6970. + struct ssh_ptl *ptl = READ_ONCE(p->ptl);
  6971. +
  6972. + trace_ssam_packet_complete(p, status);
  6973. +
  6974. + ptl_dbg_cond(ptl, "ptl: completing packet %p\n", p);
  6975. + if (status && status != -ECANCELED)
  6976. + ptl_dbg_cond(ptl, "ptl: packet error: %d\n", status);
  6977. +
  6978. + if (p->ops->complete)
  6979. + p->ops->complete(p, status);
  6980. +}
  6981. +
  6982. +static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
  6983. +{
  6984. + /*
6985. + * A call to this function should in general be preceded by
6986. + * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state) to avoid re-adding the
6987. + * packet to the structures it's going to be removed from.
6988. + *
6989. + * The set_bit call does not need explicit memory barriers as the
6990. + * implicit barrier of the test_and_set_bit call below ensures that the
6991. + * flag is visible before we actually attempt to remove the packet.
  6992. + */
  6993. +
  6994. + if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
  6995. + return;
  6996. +
  6997. + ssh_ptl_queue_remove(p);
  6998. + ssh_ptl_pending_remove(p);
  6999. +
  7000. + __ssh_ptl_complete(p, status);
  7001. +}
  7002. +
  7003. +
  7004. +static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
  7005. +{
  7006. + struct ssh_ptl *ptl = packet->ptl;
  7007. +
  7008. + if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
  7009. + return !atomic_read(&ptl->pending.count);
  7010. +
7011. + // we can always process non-blocking packets
  7012. + if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
  7013. + return true;
  7014. +
  7015. + // if we are already waiting for this packet, send it again
  7016. + if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
  7017. + return true;
  7018. +
  7019. + // otherwise: check if we have the capacity to send
  7020. + return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
  7021. +}
  7022. +
  7023. +static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
  7024. +{
  7025. + struct ssh_packet *packet = ERR_PTR(-ENOENT);
  7026. + struct ssh_packet *p, *n;
  7027. +
  7028. + spin_lock(&ptl->queue.lock);
  7029. + list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
  7030. + /*
  7031. + * If we are cancelling or completing this packet, ignore it.
  7032. + * It's going to be removed from this queue shortly.
  7033. + */
  7034. + if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
  7035. + continue;
  7036. +
  7037. + /*
  7038. + * Packets should be ordered non-blocking/to-be-resent first.
  7039. + * If we cannot process this packet, assume that we can't
  7040. + * process any following packet either and abort.
  7041. + */
  7042. + if (!ssh_ptl_tx_can_process(p)) {
  7043. + packet = ERR_PTR(-EBUSY);
  7044. + break;
  7045. + }
  7046. +
  7047. + /*
  7048. + * We are allowed to change the state now. Remove it from the
  7049. + * queue and mark it as being transmitted. Note that we cannot
  7050. + * add it to the set of pending packets yet, as queue locks must
  7051. + * always be acquired before packet locks (otherwise we might
  7052. + * run into a deadlock).
  7053. + */
  7054. +
  7055. + list_del(&p->queue_node);
  7056. +
  7057. + /*
  7058. + * Ensure that the "queued" bit gets cleared after setting the
  7059. + * "transmitting" bit to guaranteee non-zero flags.
  7060. + */
  7061. + set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
  7062. + smp_mb__before_atomic();
  7063. + clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
  7064. +
  7065. + packet = p;
  7066. + break;
  7067. + }
  7068. + spin_unlock(&ptl->queue.lock);
  7069. +
  7070. + return packet;
  7071. +}
  7072. +
  7073. +static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
  7074. +{
  7075. + struct ssh_packet *p;
  7076. +
  7077. + p = ssh_ptl_tx_pop(ptl);
  7078. + if (IS_ERR(p))
  7079. + return p;
  7080. +
  7081. + if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
  7082. + ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
  7083. + ssh_ptl_pending_push(p);
  7084. + ssh_ptl_timeout_start(p);
  7085. + } else {
  7086. + ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
  7087. + }
  7088. +
  7089. + /*
  7090. + * Update number of tries. This directly influences the priority in case
  7091. + * the packet is re-submitted (e.g. via timeout/NAK). Note that this is
  7092. + * the only place where we update the priority in-flight. As this runs
  7093. + * only on the tx-thread, this read-modify-write procedure is safe.
  7094. + */
  7095. + WRITE_ONCE(p->priority, READ_ONCE(p->priority) + 1);
  7096. +
  7097. + return p;
  7098. +}
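The priority increment above doubles as the try counter that ssh_packet_priority_get_try() reads and that the resubmission and timeout paths compare against SSH_PTL_MAX_PACKET_TRIES. The actual encoding is defined earlier in the patch; the sketch below merely assumes a plausible layout (base class in the high nibble, try count in the low nibble) to illustrate why adding one bumps the try count while keeping the packet's class, and why re-submitted packets sort above fresh data packets:

#include <stdio.h>

/* assumed layout, for illustration only: base class high nibble, try low nibble */
#define PRIORITY(base, try)	(((base) << 4) | ((try) & 0x0f))
#define PRIORITY_GET_BASE(p)	((p) >> 4)
#define PRIORITY_GET_TRY(p)	((p) & 0x0f)

int main(void)
{
	unsigned char prio = PRIORITY(1, 0);	/* data packet, not yet tried */

	/* each pass through the transmitter adds one ... */
	prio += 1;
	prio += 1;

	/* ... which counts tries while leaving the base class intact */
	printf("base=%d try=%d\n", PRIORITY_GET_BASE(prio),
	       PRIORITY_GET_TRY(prio));	/* base=1 try=2 */
	return 0;
}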
  7099. +
  7100. +static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
  7101. +{
  7102. + struct ssh_ptl *ptl = packet->ptl;
  7103. +
  7104. + ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);
  7105. +
  7106. + /*
7107. + * Transition state to "transmitted". Ensure that the flags never get
  7108. + * zero with barrier.
  7109. + */
  7110. + set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state);
  7111. + smp_mb__before_atomic();
  7112. + clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
  7113. +
  7114. + // if the packet is unsequenced, we're done: lock and complete
  7115. + if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) {
  7116. + set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
  7117. + ssh_ptl_remove_and_complete(packet, 0);
  7118. + }
  7119. +
  7120. + /*
  7121. + * Notify that a packet transmission has finished. In general we're only
  7122. + * waiting for one packet (if any), so wake_up_all should be fine.
  7123. + */
  7124. + wake_up_all(&ptl->tx.packet_wq);
  7125. +}
  7126. +
  7127. +static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status)
  7128. +{
  7129. + /*
  7130. + * Transmission failure: Lock the packet and try to complete it. Ensure
  7131. + * that the flags never get zero with barrier.
  7132. + */
  7133. + set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
  7134. + smp_mb__before_atomic();
  7135. + clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
  7136. +
  7137. + ptl_err(packet->ptl, "ptl: transmission error: %d\n", status);
  7138. + ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet);
  7139. +
  7140. + ssh_ptl_remove_and_complete(packet, status);
  7141. +
  7142. + /*
  7143. + * Notify that a packet transmission has finished. In general we're only
  7144. + * waiting for one packet (if any), so wake_up_all should be fine.
  7145. + */
  7146. + wake_up_all(&packet->ptl->tx.packet_wq);
  7147. +}
  7148. +
  7149. +static void ssh_ptl_tx_threadfn_wait(struct ssh_ptl *ptl)
  7150. +{
  7151. + wait_event_interruptible(ptl->tx.thread_wq,
  7152. + READ_ONCE(ptl->tx.thread_signal) || kthread_should_stop());
  7153. + WRITE_ONCE(ptl->tx.thread_signal, false);
  7154. +}
  7155. +
  7156. +static int ssh_ptl_tx_threadfn(void *data)
  7157. +{
  7158. + struct ssh_ptl *ptl = data;
  7159. +
  7160. + while (!kthread_should_stop()) {
  7161. + unsigned char *buf;
  7162. + bool drop = false;
  7163. + size_t len = 0;
  7164. + int status = 0;
  7165. +
  7166. + // if we don't have a packet, get the next and add it to pending
  7167. + if (IS_ERR_OR_NULL(ptl->tx.packet)) {
  7168. + ptl->tx.packet = ssh_ptl_tx_next(ptl);
  7169. + ptl->tx.offset = 0;
  7170. +
  7171. + // if no packet is available, we are done
  7172. + if (IS_ERR(ptl->tx.packet)) {
  7173. + ssh_ptl_tx_threadfn_wait(ptl);
  7174. + continue;
  7175. + }
  7176. + }
  7177. +
  7178. + // error injection: drop packet to simulate transmission problem
  7179. + if (ptl->tx.offset == 0)
  7180. + drop = ssh_ptl_should_drop_packet(ptl->tx.packet);
  7181. +
  7182. + // error injection: simulate invalid packet data
  7183. + if (ptl->tx.offset == 0 && !drop)
  7184. + ssh_ptl_tx_inject_invalid_data(ptl->tx.packet);
  7185. +
  7186. + // flush-packets don't have any data
  7187. + if (likely(ptl->tx.packet->data.ptr && !drop)) {
  7188. + buf = ptl->tx.packet->data.ptr + ptl->tx.offset;
  7189. + len = ptl->tx.packet->data.len - ptl->tx.offset;
  7190. +
  7191. + ptl_dbg(ptl, "tx: sending data (length: %zu)\n", len);
  7192. + print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
  7193. + buf, len, false);
  7194. +
  7195. + status = ssh_ptl_write_buf(ptl, ptl->tx.packet, buf, len);
  7196. + }
  7197. +
  7198. + if (status < 0) {
  7199. + // complete packet with error
  7200. + ssh_ptl_tx_compl_error(ptl->tx.packet, status);
  7201. + ssh_packet_put(ptl->tx.packet);
  7202. + ptl->tx.packet = NULL;
  7203. +
  7204. + } else if (status == len) {
  7205. + // complete packet and/or mark as transmitted
  7206. + ssh_ptl_tx_compl_success(ptl->tx.packet);
  7207. + ssh_packet_put(ptl->tx.packet);
  7208. + ptl->tx.packet = NULL;
  7209. +
  7210. + } else { // need more buffer space
  7211. + ptl->tx.offset += status;
  7212. + ssh_ptl_tx_threadfn_wait(ptl);
  7213. + }
  7214. + }
  7215. +
  7216. + // cancel active packet before we actually stop
  7217. + if (!IS_ERR_OR_NULL(ptl->tx.packet)) {
  7218. + ssh_ptl_tx_compl_error(ptl->tx.packet, -ESHUTDOWN);
  7219. + ssh_packet_put(ptl->tx.packet);
  7220. + ptl->tx.packet = NULL;
  7221. + }
  7222. +
  7223. + return 0;
  7224. +}
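The transmitter loop above copes with short writes from ssh_ptl_write_buf() by remembering tx.offset and resuming once it is woken again, rather than busy-looping. A synchronous user-space sketch of the same short-write handling (write_all() and the use of write(2) are illustrative assumptions, not the driver's API):

#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Write all @len bytes to @fd, resuming after short writes. The tx thread
 * above does the same asynchronously: it stores the offset and waits to be
 * woken instead of looping.
 */
static int write_all(int fd, const unsigned char *buf, size_t len)
{
	size_t offset = 0;

	while (offset < len) {
		ssize_t n = write(fd, buf + offset, len - offset);

		if (n < 0) {
			if (errno == EINTR)
				continue;	/* interrupted: retry */
			return -errno;		/* complete with error */
		}
		offset += (size_t)n;		/* short write: advance and retry */
	}
	return 0;				/* complete with success */
}

int main(void)
{
	static const char msg[] = "hello\n";

	return write_all(STDOUT_FILENO, (const unsigned char *)msg,
			 sizeof(msg) - 1) ? 1 : 0;
}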
  7225. +
  7226. +static inline void ssh_ptl_tx_wakeup(struct ssh_ptl *ptl, bool force)
  7227. +{
  7228. + if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
  7229. + return;
  7230. +
  7231. + if (force || atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING) {
  7232. + WRITE_ONCE(ptl->tx.thread_signal, true);
  7233. + smp_mb__after_atomic();
  7234. + wake_up(&ptl->tx.thread_wq);
  7235. + }
  7236. +}
  7237. +
  7238. +static int ssh_ptl_tx_start(struct ssh_ptl *ptl)
  7239. +{
  7240. + ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "surface-sh-tx");
  7241. + if (IS_ERR(ptl->tx.thread))
  7242. + return PTR_ERR(ptl->tx.thread);
  7243. +
  7244. + return 0;
  7245. +}
  7246. +
  7247. +static int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
  7248. +{
  7249. + int status = 0;
  7250. +
  7251. + if (ptl->tx.thread) {
  7252. + status = kthread_stop(ptl->tx.thread);
  7253. + ptl->tx.thread = NULL;
  7254. + }
  7255. +
  7256. + return status;
  7257. +}
  7258. +
  7259. +
  7260. +static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
  7261. +{
  7262. + struct ssh_packet *packet = ERR_PTR(-ENOENT);
  7263. + struct ssh_packet *p, *n;
  7264. +
  7265. + spin_lock(&ptl->pending.lock);
  7266. + list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
  7267. + /*
7268. + * We generally expect packets to be in order, so the first packet
7269. + * added to pending is the first to be sent and the first to be
7270. + * ACKed.
  7271. + */
  7272. + if (unlikely(ssh_packet_get_seq(p) != seq_id))
  7273. + continue;
  7274. +
  7275. + /*
7276. + * This can happen if we receive an ACK while handling a transmission
7277. + * error completion. The packet will be removed shortly.
  7278. + */
  7279. + if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
  7280. + packet = ERR_PTR(-EPERM);
  7281. + break;
  7282. + }
  7283. +
  7284. + /*
  7285. + * Mark packet as ACKed and remove it from pending. Ensure that
  7286. + * the flags never get zero with barrier.
  7287. + */
  7288. + set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
  7289. + smp_mb__before_atomic();
  7290. + clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
  7291. +
  7292. + atomic_dec(&ptl->pending.count);
  7293. + list_del(&p->pending_node);
  7294. + packet = p;
  7295. +
  7296. + break;
  7297. + }
  7298. + spin_unlock(&ptl->pending.lock);
  7299. +
  7300. + return packet;
  7301. +}
  7302. +
  7303. +static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
  7304. +{
  7305. + wait_event(packet->ptl->tx.packet_wq,
  7306. + test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state)
  7307. + || test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
  7308. +}
  7309. +
  7310. +static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
  7311. +{
  7312. + struct ssh_packet *p;
  7313. + int status = 0;
  7314. +
  7315. + p = ssh_ptl_ack_pop(ptl, seq);
  7316. + if (IS_ERR(p)) {
  7317. + if (PTR_ERR(p) == -ENOENT) {
  7318. + /*
  7319. + * The packet has not been found in the set of pending
  7320. + * packets.
  7321. + */
  7322. + ptl_warn(ptl, "ptl: received ACK for non-pending"
  7323. + " packet\n");
  7324. + } else {
  7325. + /*
  7326. + * The packet is pending, but we are not allowed to take
  7327. + * it because it has been locked.
  7328. + */
  7329. + }
  7330. + return;
  7331. + }
  7332. +
  7333. + ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);
  7334. +
  7335. + /*
  7336. + * It is possible that the packet has been transmitted, but the state
  7337. + * has not been updated from "transmitting" to "transmitted" yet.
7338. + * In that case, we need to wait for this transition to occur in order
7339. + * to distinguish between success and failure.
  7340. + */
  7341. + if (test_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state))
  7342. + ssh_ptl_wait_until_transmitted(p);
  7343. +
  7344. + /*
  7345. + * The packet will already be locked in case of a transmission error or
  7346. + * cancellation. Let the transmitter or cancellation issuer complete the
  7347. + * packet.
  7348. + */
  7349. + if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
  7350. + ssh_packet_put(p);
  7351. + return;
  7352. + }
  7353. +
  7354. + if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state))) {
  7355. + ptl_err(ptl, "ptl: received ACK before packet had been fully"
  7356. + " transmitted\n");
  7357. + status = -EREMOTEIO;
  7358. + }
  7359. +
  7360. + ssh_ptl_remove_and_complete(p, status);
  7361. + ssh_packet_put(p);
  7362. +
  7363. + ssh_ptl_tx_wakeup(ptl, false);
  7364. +}
  7365. +
  7366. +
  7367. +static int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
  7368. +{
  7369. + struct ssh_ptl *ptl_old;
  7370. + int status;
  7371. +
  7372. + trace_ssam_packet_submit(p);
  7373. +
  7374. + // validate packet fields
  7375. + if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
  7376. + if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
  7377. + return -EINVAL;
  7378. + } else if (!p->data.ptr) {
  7379. + return -EINVAL;
  7380. + }
  7381. +
  7382. + /*
  7383. + * The ptl reference only gets set on or before the first submission.
  7384. + * After the first submission, it has to be read-only.
  7385. + */
  7386. + ptl_old = READ_ONCE(p->ptl);
  7387. + if (ptl_old == NULL)
  7388. + WRITE_ONCE(p->ptl, ptl);
  7389. + else if (ptl_old != ptl)
  7390. + return -EALREADY;
  7391. +
  7392. + status = ssh_ptl_queue_push(p);
  7393. + if (status)
  7394. + return status;
  7395. +
  7396. + ssh_ptl_tx_wakeup(ptl, !test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state));
  7397. + return 0;
  7398. +}
  7399. +
  7400. +static void __ssh_ptl_resubmit(struct ssh_packet *packet)
  7401. +{
  7402. + struct list_head *head;
  7403. +
  7404. + trace_ssam_packet_resubmit(packet);
  7405. +
  7406. + spin_lock(&packet->ptl->queue.lock);
  7407. +
  7408. + // if this packet has already been queued, do not add it
  7409. + if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
  7410. + spin_unlock(&packet->ptl->queue.lock);
  7411. + return;
  7412. + }
  7413. +
  7414. + // find first node with lower priority
  7415. + head = __ssh_ptl_queue_find_entrypoint(packet);
  7416. +
  7417. + WRITE_ONCE(packet->timestamp, KTIME_MAX);
  7418. + smp_mb__after_atomic();
  7419. +
  7420. + // add packet
  7421. + ssh_packet_get(packet);
  7422. + list_add_tail(&packet->queue_node, head);
  7423. +
  7424. + spin_unlock(&packet->ptl->queue.lock);
  7425. +}
  7426. +
  7427. +static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
  7428. +{
  7429. + struct ssh_packet *p;
  7430. + bool resub = false;
  7431. + u8 try;
  7432. +
  7433. + /*
  7434. + * Note: We deliberately do not remove/attempt to cancel and complete
7435. + * packets that are out of tries in this function. The packet will
7436. + * eventually be canceled and completed by the timeout. Removing the
7437. + * packet here could lead to overly eager cancellation if the packet
7438. + * has not been re-transmitted yet but the tries-counter has already
7439. + * been updated (i.e. ssh_ptl_tx_next removed the packet from the
7440. + * queue and updated the counter, but re-transmission for the last
7441. + * try has not actually started yet).
  7442. + */
  7443. +
  7444. + spin_lock(&ptl->pending.lock);
  7445. +
  7446. + // re-queue all pending packets
  7447. + list_for_each_entry(p, &ptl->pending.head, pending_node) {
  7448. + // avoid further transitions if locked
  7449. + if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
  7450. + continue;
  7451. +
  7452. + // do not re-schedule if packet is out of tries
  7453. + try = ssh_packet_priority_get_try(READ_ONCE(p->priority));
  7454. + if (try >= SSH_PTL_MAX_PACKET_TRIES)
  7455. + continue;
  7456. +
  7457. + resub = true;
  7458. + __ssh_ptl_resubmit(p);
  7459. + }
  7460. +
  7461. + spin_unlock(&ptl->pending.lock);
  7462. +
  7463. + ssh_ptl_tx_wakeup(ptl, resub);
  7464. +}
  7465. +
  7466. +static void ssh_ptl_cancel(struct ssh_packet *p)
  7467. +{
  7468. + if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
  7469. + return;
  7470. +
  7471. + trace_ssam_packet_cancel(p);
  7472. +
  7473. + /*
  7474. + * Lock packet and commit with memory barrier. If this packet has
  7475. + * already been locked, it's going to be removed and completed by
  7476. + * another party, which should have precedence.
  7477. + */
  7478. + if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
  7479. + return;
  7480. +
  7481. + /*
  7482. + * By marking the packet as locked and employing the implicit memory
  7483. + * barrier of test_and_set_bit, we have guaranteed that, at this point,
  7484. + * the packet cannot be added to the queue any more.
  7485. + *
  7486. + * In case the packet has never been submitted, packet->ptl is NULL. If
  7487. + * the packet is currently being submitted, packet->ptl may be NULL or
7488. + * non-NULL. Due to marking the packet as locked above and committing with
  7489. + * the memory barrier, we have guaranteed that, if packet->ptl is NULL,
  7490. + * the packet will never be added to the queue. If packet->ptl is
  7491. + * non-NULL, we don't have any guarantees.
  7492. + */
  7493. +
  7494. + if (READ_ONCE(p->ptl)) {
  7495. + ssh_ptl_remove_and_complete(p, -ECANCELED);
  7496. + ssh_ptl_tx_wakeup(p->ptl, false);
  7497. + } else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
  7498. + __ssh_ptl_complete(p, -ECANCELED);
  7499. + }
  7500. +}
  7501. +
  7502. +
  7503. +static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
  7504. +{
  7505. + ktime_t timestamp = READ_ONCE(p->timestamp);
  7506. +
  7507. + if (timestamp != KTIME_MAX)
  7508. + return ktime_add(timestamp, timeout);
  7509. + else
  7510. + return KTIME_MAX;
  7511. +}
  7512. +
  7513. +static void ssh_ptl_timeout_reap(struct work_struct *work)
  7514. +{
  7515. + struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
  7516. + struct ssh_packet *p, *n;
  7517. + LIST_HEAD(claimed);
  7518. + ktime_t now = ktime_get_coarse_boottime();
  7519. + ktime_t timeout = ptl->rtx_timeout.timeout;
  7520. + ktime_t next = KTIME_MAX;
  7521. + bool resub = false;
  7522. +
  7523. + trace_ssam_ptl_timeout_reap("pending", atomic_read(&ptl->pending.count));
  7524. +
  7525. + /*
  7526. + * Mark reaper as "not pending". This is done before checking any
  7527. + * packets to avoid lost-update type problems.
  7528. + */
  7529. + WRITE_ONCE(ptl->rtx_timeout.expires, KTIME_MAX);
  7530. + smp_mb__after_atomic();
  7531. +
  7532. + spin_lock(&ptl->pending.lock);
  7533. +
  7534. + list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
  7535. + ktime_t expires = ssh_packet_get_expiration(p, timeout);
  7536. + u8 try;
  7537. +
  7538. + /*
  7539. + * Check if the timeout hasn't expired yet. Find out next
  7540. + * expiration date to be handled after this run.
  7541. + */
  7542. + if (ktime_after(expires, now)) {
  7543. + next = ktime_before(expires, next) ? expires : next;
  7544. + continue;
  7545. + }
  7546. +
  7547. + // avoid further transitions if locked
  7548. + if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
  7549. + continue;
  7550. +
  7551. + trace_ssam_packet_timeout(p);
  7552. +
  7553. + // check if we still have some tries left
  7554. + try = ssh_packet_priority_get_try(READ_ONCE(p->priority));
  7555. + if (likely(try < SSH_PTL_MAX_PACKET_TRIES)) {
  7556. + resub = true;
  7557. + __ssh_ptl_resubmit(p);
  7558. + continue;
  7559. + }
  7560. +
  7561. + // no more tries left: cancel the packet
  7562. +
  7563. + // if someone else has locked the packet already, don't use it
  7564. + if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
  7565. + continue;
  7566. +
  7567. + /*
  7568. + * We have now marked the packet as locked. Thus it cannot be
  7569. + * added to the pending list again after we've removed it here.
  7570. + * We can therefore re-use the pending_node of this packet
  7571. + * temporarily.
  7572. + */
  7573. +
  7574. + clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
  7575. +
  7576. + atomic_dec(&ptl->pending.count);
  7577. + list_del(&p->pending_node);
  7578. +
  7579. + list_add_tail(&p->pending_node, &claimed);
  7580. + }
  7581. +
  7582. + spin_unlock(&ptl->pending.lock);
  7583. +
  7584. + // cancel and complete the packet
  7585. + list_for_each_entry_safe(p, n, &claimed, pending_node) {
  7586. + if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
  7587. + ssh_ptl_queue_remove(p);
  7588. + __ssh_ptl_complete(p, -ETIMEDOUT);
  7589. + }
  7590. +
7591. + // drop the reference we took when the packet was added to pending
  7592. + list_del(&p->pending_node);
  7593. + ssh_packet_put(p);
  7594. + }
  7595. +
  7596. + // ensure that reaper doesn't run again immediately
  7597. + next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
  7598. + if (next != KTIME_MAX)
  7599. + ssh_ptl_timeout_reaper_mod(ptl, now, next);
  7600. +
  7601. + // force-wakeup to properly handle re-transmits if we've re-submitted
  7602. + ssh_ptl_tx_wakeup(ptl, resub);
  7603. +}
  7604. +
  7605. +
  7606. +static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
  7607. +{
  7608. + int i;
  7609. +
  7610. + // check if SEQ has been seen recently (i.e. packet was re-transmitted)
  7611. + for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
  7612. + if (likely(ptl->rx.blocked.seqs[i] != seq))
  7613. + continue;
  7614. +
  7615. + ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
  7616. + return true;
  7617. + }
  7618. +
7619. + // update list of blocked sequence IDs
  7620. + ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq;
  7621. + ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
  7622. + % ARRAY_SIZE(ptl->rx.blocked.seqs);
  7623. +
  7624. + return false;
  7625. +}
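ssh_ptl_rx_retransmit_check() keeps a small ring of recently seen SEQ values so that a re-transmitted data frame is ACKed again (the ACK is sent in ssh_ptl_rx_eval() before this check) but its payload is not delivered twice. A stand-alone sketch of that window; the window size of eight is an assumption, while the 0xffff sentinel mirrors the initialization in ssh_ptl_init() further below:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_BLOCKED	8	/* assumed size of the recent-SEQ window */

static uint16_t blocked_seqs[NUM_BLOCKED];	/* 0xffff marks an unused slot */
static unsigned int blocked_offset;

/* Return true if @seq was seen recently; otherwise remember it. */
static bool seen_recently(uint8_t seq)
{
	unsigned int i;

	for (i = 0; i < NUM_BLOCKED; i++)
		if (blocked_seqs[i] == seq)
			return true;	/* retransmission: ACK again, drop payload */

	blocked_seqs[blocked_offset] = seq;
	blocked_offset = (blocked_offset + 1) % NUM_BLOCKED;
	return false;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < NUM_BLOCKED; i++)
		blocked_seqs[i] = 0xffff;	/* no valid u8 SEQ matches this */

	printf("%d %d %d\n", seen_recently(5), seen_recently(6),
	       seen_recently(5));	/* 0 0 1 */
	return 0;
}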
  7626. +
  7627. +static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
  7628. + const struct ssh_frame *frame,
  7629. + const struct ssam_span *payload)
  7630. +{
  7631. + if (ssh_ptl_rx_retransmit_check(ptl, frame->seq))
  7632. + return;
  7633. +
  7634. + ptl->ops.data_received(ptl, payload);
  7635. +}
  7636. +
  7637. +static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
  7638. +{
  7639. + struct ssh_packet_args args;
  7640. + struct ssh_packet *packet;
  7641. + struct ssam_span buf;
  7642. + struct msgbuf msgb;
  7643. + int status;
  7644. +
  7645. + status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
  7646. + if (status) {
  7647. + ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
  7648. + return;
  7649. + }
  7650. +
  7651. + args.type = 0;
  7652. + args.priority = SSH_PACKET_PRIORITY(ACK, 0);
  7653. + args.ops = &ssh_ptl_ctrl_packet_ops;
  7654. + ssh_packet_init(packet, &args);
  7655. +
  7656. + msgb_init(&msgb, buf.ptr, buf.len);
  7657. + msgb_push_ack(&msgb, seq);
  7658. + ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
  7659. +
  7660. + ssh_ptl_submit(ptl, packet);
  7661. + ssh_packet_put(packet);
  7662. +}
  7663. +
  7664. +static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
  7665. +{
  7666. + struct ssh_packet_args args;
  7667. + struct ssh_packet *packet;
  7668. + struct ssam_span buf;
  7669. + struct msgbuf msgb;
  7670. + int status;
  7671. +
  7672. + status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
  7673. + if (status) {
  7674. + ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
  7675. + return;
  7676. + }
  7677. +
  7678. + args.type = 0;
  7679. + args.priority = SSH_PACKET_PRIORITY(NAK, 0);
  7680. + args.ops = &ssh_ptl_ctrl_packet_ops;
  7681. + ssh_packet_init(packet, &args);
  7682. +
  7683. + msgb_init(&msgb, buf.ptr, buf.len);
  7684. + msgb_push_nak(&msgb);
  7685. + ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
  7686. +
  7687. + ssh_ptl_submit(ptl, packet);
  7688. + ssh_packet_put(packet);
  7689. +}
  7690. +
  7691. +static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
  7692. +{
  7693. + struct ssh_frame *frame;
  7694. + struct ssam_span payload;
  7695. + struct ssam_span aligned;
  7696. + bool syn_found;
  7697. + int status;
  7698. +
  7699. + // error injection: modify data to simulate corrupt SYN bytes
  7700. + ssh_ptl_rx_inject_invalid_syn(ptl, source);
  7701. +
  7702. + // find SYN
  7703. + syn_found = sshp_find_syn(source, &aligned);
  7704. +
7705. + if (unlikely(aligned.ptr - source->ptr > 0)) {
  7706. + ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
  7707. +
  7708. + /*
  7709. + * Notes:
  7710. + * - This might send multiple NAKs in case the communication
  7711. + * starts with an invalid SYN and is broken down into multiple
7712. + * pieces. This should generally be handled fine; we just
7713. + * might receive duplicate data in this case, which is
7714. + * detected when handling data frames.
7715. + * - This path will also be executed on invalid CRCs: when an
7716. + * invalid CRC is encountered, the code below will skip data
7717. + * until directly after the SYN. This triggers the search for
7718. + * the next SYN, which is generally not located directly after
7719. + * the last one.
  7720. + */
  7721. + ssh_ptl_send_nak(ptl);
  7722. + }
  7723. +
  7724. + if (unlikely(!syn_found))
  7725. + return aligned.ptr - source->ptr;
  7726. +
  7727. + // error injection: modify data to simulate corruption
  7728. + ssh_ptl_rx_inject_invalid_data(ptl, &aligned);
  7729. +
  7730. + // parse and validate frame
  7731. + status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
  7732. + SSH_PTL_RX_BUF_LEN);
  7733. + if (status) // invalid frame: skip to next syn
  7734. + return aligned.ptr - source->ptr + sizeof(u16);
  7735. + if (!frame) // not enough data
  7736. + return aligned.ptr - source->ptr;
  7737. +
  7738. + trace_ssam_rx_frame_received(frame);
  7739. +
  7740. + switch (frame->type) {
  7741. + case SSH_FRAME_TYPE_ACK:
  7742. + ssh_ptl_acknowledge(ptl, frame->seq);
  7743. + break;
  7744. +
  7745. + case SSH_FRAME_TYPE_NAK:
  7746. + ssh_ptl_resubmit_pending(ptl);
  7747. + break;
  7748. +
  7749. + case SSH_FRAME_TYPE_DATA_SEQ:
  7750. + ssh_ptl_send_ack(ptl, frame->seq);
  7751. + /* fallthrough */
  7752. +
  7753. + case SSH_FRAME_TYPE_DATA_NSQ:
  7754. + ssh_ptl_rx_dataframe(ptl, frame, &payload);
  7755. + break;
  7756. +
  7757. + default:
  7758. + ptl_warn(ptl, "ptl: received frame with unknown type 0x%02x\n",
  7759. + frame->type);
  7760. + break;
  7761. + }
  7762. +
  7763. + return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(frame->len);
  7764. +}
  7765. +
  7766. +static int ssh_ptl_rx_threadfn(void *data)
  7767. +{
  7768. + struct ssh_ptl *ptl = data;
  7769. +
  7770. + while (true) {
  7771. + struct ssam_span span;
  7772. + size_t offs = 0;
  7773. + size_t n;
  7774. +
  7775. + wait_event_interruptible(ptl->rx.wq,
  7776. + !kfifo_is_empty(&ptl->rx.fifo)
  7777. + || kthread_should_stop());
  7778. + if (kthread_should_stop())
  7779. + break;
  7780. +
  7781. + // copy from fifo to evaluation buffer
  7782. + n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);
  7783. +
  7784. + ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
  7785. + print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
  7786. + ptl->rx.buf.ptr + ptl->rx.buf.len - n,
  7787. + n, false);
  7788. +
  7789. + // parse until we need more bytes or buffer is empty
  7790. + while (offs < ptl->rx.buf.len) {
  7791. + sshp_buf_span_from(&ptl->rx.buf, offs, &span);
  7792. + n = ssh_ptl_rx_eval(ptl, &span);
  7793. + if (n == 0)
  7794. + break; // need more bytes
  7795. +
  7796. + offs += n;
  7797. + }
  7798. +
  7799. + // throw away the evaluated parts
  7800. + sshp_buf_drop(&ptl->rx.buf, offs);
  7801. + }
  7802. +
  7803. + return 0;
  7804. +}
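The receiver thread drains the fifo into a staging buffer, lets the evaluator consume as many complete messages as it can, and keeps the unconsumed tail for the next wake-up. A minimal user-space sketch of that consume-or-wait loop, with a newline-delimited eval() standing in for ssh_ptl_rx_eval() and buffer bounds checking omitted:

#include <stdio.h>
#include <string.h>

static unsigned char buf[64];	/* staging buffer (stand-in for ptl->rx.buf) */
static size_t buf_len;

/*
 * Stand-in evaluator: consume one complete newline-terminated record and
 * return the number of bytes consumed, or 0 if more data is needed.
 */
static size_t eval(const unsigned char *data, size_t len)
{
	const unsigned char *end = memchr(data, '\n', len);

	if (!end)
		return 0;		/* incomplete record: wait for more */
	printf("record: %.*s\n", (int)(end - data), data);
	return (size_t)(end - data) + 1;
}

/* Feed newly received bytes and parse as much as possible. */
static void receive(const unsigned char *data, size_t len)
{
	size_t offs = 0, n;

	memcpy(buf + buf_len, data, len);	/* append (bounds omitted) */
	buf_len += len;

	while (offs < buf_len) {
		n = eval(buf + offs, buf_len - offs);
		if (n == 0)
			break;			/* need more bytes */
		offs += n;
	}

	/* drop the evaluated part, keep the remainder for the next call */
	memmove(buf, buf + offs, buf_len - offs);
	buf_len -= offs;
}

int main(void)
{
	receive((const unsigned char *)"ab", 2);
	receive((const unsigned char *)"c\nde\nf", 6);	/* prints "abc", "de" */
	return 0;
}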
  7805. +
  7806. +static inline void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
  7807. +{
  7808. + wake_up(&ptl->rx.wq);
  7809. +}
  7810. +
  7811. +static int ssh_ptl_rx_start(struct ssh_ptl *ptl)
  7812. +{
  7813. + if (ptl->rx.thread)
  7814. + return 0;
  7815. +
  7816. + ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl, "surface-sh-rx");
  7817. + if (IS_ERR(ptl->rx.thread))
  7818. + return PTR_ERR(ptl->rx.thread);
  7819. +
  7820. + return 0;
  7821. +}
  7822. +
  7823. +static int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
  7824. +{
  7825. + int status = 0;
  7826. +
  7827. + if (ptl->rx.thread) {
  7828. + status = kthread_stop(ptl->rx.thread);
  7829. + ptl->rx.thread = NULL;
  7830. + }
  7831. +
  7832. + return status;
  7833. +}
  7834. +
  7835. +static int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
  7836. +{
  7837. + int used;
  7838. +
  7839. + if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
  7840. + return -ESHUTDOWN;
  7841. +
  7842. + used = kfifo_in(&ptl->rx.fifo, buf, n);
  7843. + if (used)
  7844. + ssh_ptl_rx_wakeup(ptl);
  7845. +
  7846. + return used;
  7847. +}
  7848. +
  7849. +
  7850. +struct ssh_flush_packet {
  7851. + struct ssh_packet base;
  7852. + struct completion completion;
  7853. + int status;
  7854. +};
  7855. +
  7856. +static void ssh_ptl_flush_complete(struct ssh_packet *p, int status)
  7857. +{
  7858. + struct ssh_flush_packet *packet;
  7859. +
  7860. + packet = container_of(p, struct ssh_flush_packet, base);
  7861. + packet->status = status;
  7862. +}
  7863. +
  7864. +static void ssh_ptl_flush_release(struct ssh_packet *p)
  7865. +{
  7866. + struct ssh_flush_packet *packet;
  7867. +
  7868. + packet = container_of(p, struct ssh_flush_packet, base);
  7869. + complete_all(&packet->completion);
  7870. +}
  7871. +
  7872. +static const struct ssh_packet_ops ssh_flush_packet_ops = {
  7873. + .complete = ssh_ptl_flush_complete,
  7874. + .release = ssh_ptl_flush_release,
  7875. +};
  7876. +
  7877. +/**
  7878. + * ssh_ptl_shutdown - shut down the packet transmission layer
  7879. + * @ptl: packet transmission layer
  7880. + *
  7881. + * Shuts down the packet transmission layer, removing and canceling all queued
  7882. + * and pending packets. Packets canceled by this operation will be completed
  7883. + * with -ESHUTDOWN as status.
  7884. + *
  7885. + * As a result of this function, the transmission layer will be marked as shut
  7886. + * down. Submission of packets after the transmission layer has been shut down
  7887. + * will fail with -ESHUTDOWN.
  7888. + */
  7889. +static void ssh_ptl_shutdown(struct ssh_ptl *ptl)
  7890. +{
  7891. + LIST_HEAD(complete_q);
  7892. + LIST_HEAD(complete_p);
  7893. + struct ssh_packet *p, *n;
  7894. + int status;
  7895. +
  7896. + // ensure that no new packets (including ACK/NAK) can be submitted
  7897. + set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
  7898. + smp_mb__after_atomic();
  7899. +
  7900. + status = ssh_ptl_rx_stop(ptl);
  7901. + if (status)
  7902. + ptl_err(ptl, "ptl: failed to stop receiver thread\n");
  7903. +
  7904. + status = ssh_ptl_tx_stop(ptl);
  7905. + if (status)
  7906. + ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
  7907. +
  7908. + cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
  7909. +
  7910. + /*
  7911. + * At this point, all threads have been stopped. This means that the
  7912. + * only references to packets from inside the system are in the queue
  7913. + * and pending set.
  7914. + *
  7915. + * Note: We still need locks here because someone could still be
  7916. + * cancelling packets.
  7917. + *
  7918. + * Note 2: We can re-use queue_node (or pending_node) if we mark the
7919. + * packet as locked and then remove it from the queue (or pending set
7920. + * respectively). Marking the packet as locked avoids re-queueing
7921. + * (which should already be prevented by having stopped the threads...)
7922. + * and leaving QUEUED_BIT (or PENDING_BIT) cleared prevents removal
7923. + * from the new list via other threads (e.g. cancellation).
7924. + *
7925. + * Note 3: There may be overlap between complete_p and complete_q.
7926. + * This is handled via test_and_set_bit on the "completed" flag
7927. + * (also handles cancellation).
  7928. + */
  7929. +
  7930. + // mark queued packets as locked and move them to complete_q
  7931. + spin_lock(&ptl->queue.lock);
  7932. + list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
  7933. + set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
  7934. + smp_mb__before_atomic();
  7935. + clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
  7936. +
  7937. + list_del(&p->queue_node);
  7938. + list_add_tail(&p->queue_node, &complete_q);
  7939. + }
  7940. + spin_unlock(&ptl->queue.lock);
  7941. +
  7942. + // mark pending packets as locked and move them to complete_p
  7943. + spin_lock(&ptl->pending.lock);
  7944. + list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
  7945. + set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
  7946. + smp_mb__before_atomic();
  7947. + clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
  7948. +
  7949. + list_del(&p->pending_node);
7950. + list_add_tail(&p->pending_node, &complete_p);
  7951. + }
  7952. + atomic_set(&ptl->pending.count, 0);
  7953. + spin_unlock(&ptl->pending.lock);
  7954. +
  7955. + // complete and drop packets on complete_q
7956. + list_for_each_entry_safe(p, n, &complete_q, queue_node) {
  7957. + if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
  7958. + __ssh_ptl_complete(p, -ESHUTDOWN);
  7959. +
  7960. + ssh_packet_put(p);
  7961. + }
  7962. +
  7963. + // complete and drop packets on complete_p
7964. + list_for_each_entry_safe(p, n, &complete_p, pending_node) {
  7965. + if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
  7966. + __ssh_ptl_complete(p, -ESHUTDOWN);
  7967. +
  7968. + ssh_packet_put(p);
  7969. + }
  7970. +
  7971. + /*
  7972. + * At this point we have guaranteed that the system doesn't reference
  7973. + * any packets any more.
  7974. + */
  7975. +}
  7976. +
  7977. +static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl)
  7978. +{
  7979. + return ptl->serdev ? &ptl->serdev->dev : NULL;
  7980. +}
  7981. +
  7982. +static int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
  7983. + struct ssh_ptl_ops *ops)
  7984. +{
  7985. + int i, status;
  7986. +
  7987. + ptl->serdev = serdev;
  7988. + ptl->state = 0;
  7989. +
  7990. + spin_lock_init(&ptl->queue.lock);
  7991. + INIT_LIST_HEAD(&ptl->queue.head);
  7992. +
  7993. + spin_lock_init(&ptl->pending.lock);
  7994. + INIT_LIST_HEAD(&ptl->pending.head);
  7995. + atomic_set_release(&ptl->pending.count, 0);
  7996. +
  7997. + ptl->tx.thread = NULL;
  7998. + ptl->tx.thread_signal = false;
  7999. + ptl->tx.packet = NULL;
  8000. + ptl->tx.offset = 0;
  8001. + init_waitqueue_head(&ptl->tx.thread_wq);
  8002. + init_waitqueue_head(&ptl->tx.packet_wq);
  8003. +
  8004. + ptl->rx.thread = NULL;
  8005. + init_waitqueue_head(&ptl->rx.wq);
  8006. +
  8007. + ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
  8008. + ptl->rtx_timeout.expires = KTIME_MAX;
  8009. + INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
  8010. +
  8011. + ptl->ops = *ops;
  8012. +
  8013. + // initialize list of recent/blocked SEQs with invalid sequence IDs
  8014. + for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
  8015. + ptl->rx.blocked.seqs[i] = 0xFFFF;
  8016. + ptl->rx.blocked.offset = 0;
  8017. +
  8018. + status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
  8019. + if (status)
  8020. + return status;
  8021. +
  8022. + status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
  8023. + if (status)
  8024. + kfifo_free(&ptl->rx.fifo);
  8025. +
  8026. + return status;
  8027. +}
  8028. +
  8029. +static void ssh_ptl_destroy(struct ssh_ptl *ptl)
  8030. +{
  8031. + kfifo_free(&ptl->rx.fifo);
  8032. + sshp_buf_free(&ptl->rx.buf);
  8033. +}
  8034. +
  8035. +
  8036. +/* -- Request transport layer (rtl). ---------------------------------------- */
  8037. +
  8038. +#define SSH_RTL_REQUEST_TIMEOUT ms_to_ktime(3000)
  8039. +#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
  8040. +
  8041. +#define SSH_RTL_MAX_PENDING 3
  8042. +
  8043. +
  8044. +enum ssh_rtl_state_flags {
  8045. + SSH_RTL_SF_SHUTDOWN_BIT,
  8046. +};
  8047. +
  8048. +struct ssh_rtl_ops {
  8049. + void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd,
  8050. + const struct ssam_span *data);
  8051. +};
  8052. +
  8053. +struct ssh_rtl {
  8054. + struct ssh_ptl ptl;
  8055. + unsigned long state;
  8056. +
  8057. + struct {
  8058. + spinlock_t lock;
  8059. + struct list_head head;
  8060. + } queue;
  8061. +
  8062. + struct {
  8063. + spinlock_t lock;
  8064. + struct list_head head;
  8065. + atomic_t count;
  8066. + } pending;
  8067. +
  8068. + struct {
  8069. + struct work_struct work;
  8070. + } tx;
  8071. +
  8072. + struct {
  8073. + ktime_t timeout;
  8074. + ktime_t expires;
  8075. + struct delayed_work reaper;
  8076. + } rtx_timeout;
  8077. +
  8078. + struct ssh_rtl_ops ops;
  8079. +};
  8080. +
  8081. +
  8082. +#define rtl_dbg(r, fmt, ...) ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__)
8083. +#define rtl_info(r, fmt, ...) ptl_info(&(r)->ptl, fmt, ##__VA_ARGS__)
  8084. +#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__)
  8085. +#define rtl_err(r, fmt, ...) ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__)
  8086. +#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__)
  8087. +
  8088. +#define to_ssh_rtl(ptr, member) \
  8089. + container_of(ptr, struct ssh_rtl, member)
  8090. +
  8091. +#define to_ssh_request(ptr, member) \
  8092. + container_of(ptr, struct ssh_request, member)
  8093. +
  8094. +static inline struct ssh_rtl *ssh_request_rtl(struct ssh_request *rqst)
  8095. +{
  8096. + struct ssh_ptl *ptl = READ_ONCE(rqst->packet.ptl);
  8097. + return likely(ptl) ? to_ssh_rtl(ptl, ptl) : NULL;
  8098. +}
  8099. +
  8100. +
  8101. +/**
  8102. + * ssh_rtl_should_drop_response - error injection hook to drop request responses
  8103. + *
  8104. + * Useful to cause request transmission timeouts in the driver by dropping the
  8105. + * response to a request.
  8106. + */
  8107. +static noinline_if_inject bool ssh_rtl_should_drop_response(void)
  8108. +{
  8109. + return false;
  8110. +}
  8111. +ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);
  8112. +
  8113. +
  8114. +static inline u16 ssh_request_get_rqid(struct ssh_request *rqst)
  8115. +{
  8116. + return get_unaligned_le16(rqst->packet.data.ptr
  8117. + + SSH_MSGOFFSET_COMMAND(rqid));
  8118. +}
  8119. +
  8120. +static inline u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
  8121. +{
  8122. + if (!rqst->packet.data.ptr)
  8123. + return -1;
  8124. +
  8125. + return ssh_request_get_rqid(rqst);
  8126. +}
  8127. +
  8128. +
  8129. +static void ssh_rtl_queue_remove(struct ssh_request *rqst)
  8130. +{
  8131. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8132. + bool remove;
  8133. +
  8134. + spin_lock(&rtl->queue.lock);
  8135. +
  8136. + remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
  8137. + if (remove)
  8138. + list_del(&rqst->node);
  8139. +
  8140. + spin_unlock(&rtl->queue.lock);
  8141. +
  8142. + if (remove)
  8143. + ssh_request_put(rqst);
  8144. +}
  8145. +
  8146. +static void ssh_rtl_pending_remove(struct ssh_request *rqst)
  8147. +{
  8148. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8149. + bool remove;
  8150. +
  8151. + spin_lock(&rtl->pending.lock);
  8152. +
  8153. + remove = test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state);
  8154. + if (remove) {
  8155. + atomic_dec(&rtl->pending.count);
  8156. + list_del(&rqst->node);
  8157. + }
  8158. +
  8159. + spin_unlock(&rtl->pending.lock);
  8160. +
  8161. + if (remove)
  8162. + ssh_request_put(rqst);
  8163. +}
  8164. +
  8165. +
  8166. +static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
  8167. +{
  8168. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8169. +
  8170. + trace_ssam_request_complete(rqst, status);
  8171. +
  8172. + // rtl/ptl may not be set if we're cancelling before submitting
  8173. + rtl_dbg_cond(rtl, "rtl: completing request (rqid: 0x%04x,"
  8174. + " status: %d)\n", ssh_request_get_rqid_safe(rqst), status);
  8175. +
  8176. + if (status && status != -ECANCELED)
  8177. + rtl_dbg_cond(rtl, "rtl: request error: %d\n", status);
  8178. +
  8179. + rqst->ops->complete(rqst, NULL, NULL, status);
  8180. +}
  8181. +
  8182. +static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
  8183. + const struct ssh_command *cmd,
  8184. + const struct ssam_span *data)
  8185. +{
  8186. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8187. +
  8188. + trace_ssam_request_complete(rqst, 0);
  8189. +
  8190. + rtl_dbg(rtl, "rtl: completing request with response"
  8191. + " (rqid: 0x%04x)\n", ssh_request_get_rqid(rqst));
  8192. +
  8193. + rqst->ops->complete(rqst, cmd, data, 0);
  8194. +}
  8195. +
  8196. +
  8197. +static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
  8198. +{
  8199. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8200. +
  8201. + if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
  8202. + return !atomic_read(&rtl->pending.count);
  8203. +
  8204. + return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
  8205. +}
  8206. +
  8207. +static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
  8208. +{
  8209. + struct ssh_request *rqst = ERR_PTR(-ENOENT);
  8210. + struct ssh_request *p, *n;
  8211. +
  8212. + spin_lock(&rtl->queue.lock);
  8213. +
  8214. + // find first non-locked request and remove it
  8215. + list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
  8216. + if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
  8217. + continue;
  8218. +
  8219. + if (!ssh_rtl_tx_can_process(p)) {
  8220. + rqst = ERR_PTR(-EBUSY);
  8221. + break;
  8222. + }
  8223. +
  8224. + /*
  8225. + * Remove from queue and mark as transmitting. Ensure that the
  8226. + * state does not get zero via memory barrier.
  8227. + */
  8228. + set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
  8229. + smp_mb__before_atomic();
  8230. + clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
  8231. +
  8232. + list_del(&p->node);
  8233. +
  8234. + rqst = p;
  8235. + break;
  8236. + }
  8237. +
  8238. + spin_unlock(&rtl->queue.lock);
  8239. + return rqst;
  8240. +}
  8241. +
  8242. +static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
  8243. +{
  8244. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8245. +
  8246. + spin_lock(&rtl->pending.lock);
  8247. +
  8248. + if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
  8249. + spin_unlock(&rtl->pending.lock);
  8250. + return -EINVAL;
  8251. + }
  8252. +
  8253. + if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
  8254. + spin_unlock(&rtl->pending.lock);
  8255. + return -EALREADY;
  8256. + }
  8257. +
  8258. + atomic_inc(&rtl->pending.count);
  8259. + ssh_request_get(rqst);
  8260. + list_add_tail(&rqst->node, &rtl->pending.head);
  8261. +
  8262. + spin_unlock(&rtl->pending.lock);
  8263. + return 0;
  8264. +}
  8265. +
  8266. +static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
  8267. +{
  8268. + struct ssh_request *rqst;
  8269. + int status;
  8270. +
  8271. + // get and prepare next request for transmit
  8272. + rqst = ssh_rtl_tx_next(rtl);
  8273. + if (IS_ERR(rqst))
  8274. + return PTR_ERR(rqst);
  8275. +
  8276. + // add to/mark as pending
  8277. + status = ssh_rtl_tx_pending_push(rqst);
  8278. + if (status) {
  8279. + ssh_request_put(rqst);
  8280. + return -EAGAIN;
  8281. + }
  8282. +
  8283. + // submit packet
  8284. + status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
  8285. + if (status == -ESHUTDOWN) {
  8286. + /*
  8287. + * Packet has been refused due to the packet layer shutting
  8288. + * down. Complete it here.
  8289. + */
  8290. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
  8291. + smp_mb__after_atomic();
  8292. +
  8293. + ssh_rtl_pending_remove(rqst);
  8294. + ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
  8295. +
  8296. + ssh_request_put(rqst);
  8297. + return -ESHUTDOWN;
  8298. +
  8299. + } else if (status) {
  8300. + /*
  8301. + * If submitting the packet failed and the packet layer isn't
  8302. + * shutting down, the packet has either been submitted/queued
  8303. + * before (-EALREADY, which cannot happen as we have guaranteed
  8304. + * that requests cannot be re-submitted), or the packet was
  8305. + * marked as locked (-EINVAL). To mark the packet locked at this
  8306. + * stage, the request, and thus the packet itself, had to have
  8307. + * been canceled. Simply drop the reference. Cancellation itself
  8308. + * will remove it from the set of pending requests.
  8309. + */
  8310. +
  8311. + WARN_ON(status != -EINVAL);
  8312. +
  8313. + ssh_request_put(rqst);
  8314. + return -EAGAIN;
  8315. + }
  8316. +
  8317. + ssh_request_put(rqst);
  8318. + return 0;
  8319. +}
  8320. +
  8321. +static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
  8322. +{
  8323. + bool empty;
  8324. +
  8325. + spin_lock(&rtl->queue.lock);
  8326. + empty = list_empty(&rtl->queue.head);
  8327. + spin_unlock(&rtl->queue.lock);
  8328. +
  8329. + return empty;
  8330. +}
  8331. +
  8332. +static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
  8333. +{
  8334. + if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
  8335. + return false;
  8336. +
  8337. + if (ssh_rtl_queue_empty(rtl))
  8338. + return false;
  8339. +
  8340. + return schedule_work(&rtl->tx.work);
  8341. +}
  8342. +
  8343. +static void ssh_rtl_tx_work_fn(struct work_struct *work)
  8344. +{
  8345. + struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
  8346. + int i, status;
  8347. +
  8348. + /*
  8349. + * Try to be nice and not block the workqueue: Run a maximum of 10
  8350. + * tries, then re-submit if necessary. This should not be necessary
  8351. + * for normal execution, but guarantee it anyway.
  8352. + */
  8353. + for (i = 0; i < 10; i++) {
  8354. + status = ssh_rtl_tx_try_process_one(rtl);
  8355. + if (status == -ENOENT || status == -EBUSY)
  8356. + return; // no more requests to process
  8357. +
  8358. + if (status == -ESHUTDOWN) {
  8359. + /*
  8360. + * Packet system shutting down. No new packets can be
  8361. + * transmitted. Return silently; the party initiating
  8362. + * the shutdown should handle the rest.
  8363. + */
  8364. + return;
  8365. + }
  8366. +
  8367. + WARN_ON(status != 0 && status != -EAGAIN);
  8368. + }
  8369. +
  8370. + // out of tries, reschedule
  8371. + ssh_rtl_tx_schedule(rtl);
  8372. +}
  8373. +
  8374. +
  8375. +static int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
  8376. +{
  8377. + trace_ssam_request_submit(rqst);
  8378. +
  8379. + /*
  8380. + * Ensure that requests expecting a response are sequenced. If this
  8381. + * invariant ever changes, see the comment in ssh_rtl_complete on what
  8382. + * is required to be changed in the code.
  8383. + */
  8384. + if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
  8385. + if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
  8386. + return -EINVAL;
  8387. +
  8388. + // try to set ptl and check if this request has already been submitted
  8389. + if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl) != NULL)
  8390. + return -EALREADY;
  8391. +
  8392. + spin_lock(&rtl->queue.lock);
  8393. +
  8394. + if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
  8395. + spin_unlock(&rtl->queue.lock);
  8396. + return -ESHUTDOWN;
  8397. + }
  8398. +
  8399. + if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
  8400. + spin_unlock(&rtl->queue.lock);
  8401. + return -EINVAL;
  8402. + }
  8403. +
  8404. + ssh_request_get(rqst);
  8405. + set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
  8406. + list_add_tail(&rqst->node, &rtl->queue.head);
  8407. +
  8408. + spin_unlock(&rtl->queue.lock);
  8409. +
  8410. + ssh_rtl_tx_schedule(rtl);
  8411. + return 0;
  8412. +}
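
For illustration, a minimal caller sketch for the submission path above, using only ssh_request_init() (defined further below in this patch) and ssh_rtl_submit(); message-buffer setup and reference management are intentionally omitted here, see ssh_rtl_flush() further below for a complete in-tree user.

/*
 * Illustrative sketch only: submit a request that expects a response.
 * ssh_request_init() marks the underlying packet as sequenced unless
 * SSAM_REQUEST_UNSEQUENCED is passed, which satisfies the invariant checked
 * at the top of ssh_rtl_submit(). The ops table is assumed to be provided
 * by the caller.
 */
static int example_submit_request(struct ssh_rtl *rtl, struct ssh_request *rqst,
				  const struct ssh_request_ops *ops)
{
	ssh_request_init(rqst, SSAM_REQUEST_HAS_RESPONSE, ops);

	/* ... set up the request's message buffer (not shown in this hunk) ... */

	/* Returns 0, or -EALREADY / -ESHUTDOWN / -EINVAL as described above. */
	return ssh_rtl_submit(rtl, rqst);
}
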
  8413. +
  8414. +
  8415. +static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
  8416. + ktime_t expires)
  8417. +{
  8418. + unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
  8419. + ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);
  8420. + ktime_t old;
  8421. +
  8422. + // re-adjust/schedule reaper only if the new expiration is earlier by more than the resolution delta
  8423. + old = READ_ONCE(rtl->rtx_timeout.expires);
  8424. + while (ktime_before(aexp, old))
  8425. + old = cmpxchg64(&rtl->rtx_timeout.expires, old, expires);
  8426. +
  8427. + // if we updated the reaper expiration, modify work timeout
  8428. + if (old == expires)
  8429. + mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
  8430. +}
  8431. +
  8432. +static void ssh_rtl_timeout_start(struct ssh_request *rqst)
  8433. +{
  8434. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8435. + ktime_t timestamp = ktime_get_coarse_boottime();
  8436. + ktime_t timeout = rtl->rtx_timeout.timeout;
  8437. +
  8438. + if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
  8439. + return;
  8440. +
  8441. + WRITE_ONCE(rqst->timestamp, timestamp);
  8442. + smp_mb__after_atomic();
  8443. +
  8444. + ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
  8445. +}
  8446. +
  8447. +
  8448. +static void ssh_rtl_complete(struct ssh_rtl *rtl,
  8449. + const struct ssh_command *command,
  8450. + const struct ssam_span *command_data)
  8451. +{
  8452. + struct ssh_request *r = NULL;
  8453. + struct ssh_request *p, *n;
  8454. + u16 rqid = get_unaligned_le16(&command->rqid);
  8455. +
  8456. + trace_ssam_rx_response_received(command, command_data->len);
  8457. +
  8458. + /*
  8459. + * Get request from pending based on request ID and mark it as response
  8460. + * received and locked.
  8461. + */
  8462. + spin_lock(&rtl->pending.lock);
  8463. + list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
  8464. + // we generally expect requests to be processed in order
  8465. + if (unlikely(ssh_request_get_rqid(p) != rqid))
  8466. + continue;
  8467. +
  8468. + // simulate response timeout
  8469. + if (ssh_rtl_should_drop_response()) {
  8470. + spin_unlock(&rtl->pending.lock);
  8471. +
  8472. + trace_ssam_ei_rx_drop_response(p);
  8473. + rtl_info(rtl, "request error injection: "
  8474. + "dropping response for request %p\n",
  8475. + &p->packet);
  8476. + return;
  8477. + }
  8478. +
  8479. + /*
  8480. + * Mark as "response received" and "locked" as we're going to
  8481. + * complete it. Ensure that the state doesn't get zero by
  8482. + * employing a memory barrier.
  8483. + */
  8484. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
  8485. + set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
  8486. + smp_mb__before_atomic();
  8487. + clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);
  8488. +
  8489. + atomic_dec(&rtl->pending.count);
  8490. + list_del(&p->node);
  8491. +
  8492. + r = p;
  8493. + break;
  8494. + }
  8495. + spin_unlock(&rtl->pending.lock);
  8496. +
  8497. + if (!r) {
  8498. + rtl_warn(rtl, "rtl: dropping unexpected command message"
  8499. + " (rqid = 0x%04x)\n", rqid);
  8500. + return;
  8501. + }
  8502. +
  8503. + // if the request hasn't been completed yet, we will do this now
  8504. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
  8505. + ssh_request_put(r);
  8506. + ssh_rtl_tx_schedule(rtl);
  8507. + return;
  8508. + }
  8509. +
  8510. + /*
  8511. + * Make sure the request has been transmitted. In case of a sequenced
  8512. + * request, we are guaranteed that the completion callback will run on
  8513. + * the receiver thread directly when the ACK for the packet has been
  8514. + * received. Similarly, this function is guaranteed to run on the
  8515. + * receiver thread. Thus we are guaranteed that if the packet has been
  8516. + * successfully transmitted and received an ACK, the transmitted flag
  8517. + * has been set and is visible here.
  8518. + *
  8519. + * We are currently not handling unsequenced packets here, as those
  8520. + * should never expect a response as ensured in ssh_rtl_submit. If this
  8521. + * ever changes, one would have to test for
  8522. + *
  8523. + * (r->state & (transmitting | transmitted))
  8524. + *
  8525. + * on unsequenced packets to determine if they could have been
  8526. + * transmitted. There are no synchronization guarantees as in the
  8527. + * sequenced case, since, in this case, the callback function will not
  8528. + * run on the same thread. Thus an exact determination is impossible.
  8529. + */
  8530. + if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
  8531. + rtl_err(rtl, "rtl: received response before ACK for request"
  8532. + " (rqid = 0x%04x)\n", rqid);
  8533. +
  8534. + /*
  8535. + * NB: The timeout has already been canceled and the request has already
  8536. + * been removed from pending and marked as locked and completed. As
  8537. + * we receive a "false" response, the packet might still be
  8538. + * queued though.
  8539. + */
  8540. + ssh_rtl_queue_remove(r);
  8541. +
  8542. + ssh_rtl_complete_with_status(r, -EREMOTEIO);
  8543. + ssh_request_put(r);
  8544. +
  8545. + ssh_rtl_tx_schedule(rtl);
  8546. + return;
  8547. + }
  8548. +
  8549. + /*
  8550. + * NB: The timeout has already been canceled and the request has already
  8551. + * been removed from pending and marked as locked and completed. The request
  8552. + * can also not be queued any more, as it has been marked as
  8553. + * transmitting and later transmitted. Thus no need to remove it from
  8554. + * anywhere.
  8555. + */
  8556. +
  8557. + ssh_rtl_complete_with_rsp(r, command, command_data);
  8558. + ssh_request_put(r);
  8559. +
  8560. + ssh_rtl_tx_schedule(rtl);
  8561. +}
  8562. +
  8563. +
  8564. +static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
  8565. +{
  8566. + struct ssh_rtl *rtl;
  8567. + unsigned long state, fixed;
  8568. + bool remove;
  8569. +
  8570. + /*
  8571. + * Handle unsubmitted request: Try to mark the packet as locked,
  8572. + * expecting the state to be zero (i.e. unsubmitted). Note that, if
  8573. + * setting the state worked, we might still be adding the packet to the
  8574. + * queue in a currently executing submit call. In that case, however,
  8575. + * ptl reference must have been set previously, as locked is checked
  8576. + * after setting ptl. Thus only if we successfully lock this request and
  8577. + * ptl is NULL, we have successfully removed the request.
  8578. + * Otherwise we need to try and grab it from the queue.
  8579. + *
  8580. + * Note that if the CMPXCHG fails, we are guaranteed that ptl has
  8581. + * been set and is non-NULL, as states can only be nonzero after this
  8582. + * has been set. Also note that we need to fetch the static (type) flags
  8583. + * to ensure that they don't cause the cmpxchg to fail.
  8584. + */
  8585. + fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
  8586. + state = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
  8587. + if (!state && !READ_ONCE(r->packet.ptl)) {
  8588. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8589. + return true;
  8590. +
  8591. + ssh_rtl_complete_with_status(r, -ECANCELED);
  8592. + return true;
  8593. + }
  8594. +
  8595. + rtl = ssh_request_rtl(r);
  8596. + spin_lock(&rtl->queue.lock);
  8597. +
  8598. + /*
  8599. + * Note: 1) Requests cannot be re-submitted. 2) If a request is queued,
  8600. + * it cannot be "transmitting"/"pending" yet. Thus, if we successfully
  8601. + * remove the request here, we have removed all its occurrences in
  8602. + * the system.
  8603. + */
  8604. +
  8605. + remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
  8606. + if (!remove) {
  8607. + spin_unlock(&rtl->queue.lock);
  8608. + return false;
  8609. + }
  8610. +
  8611. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
  8612. + list_del(&r->node);
  8613. +
  8614. + spin_unlock(&rtl->queue.lock);
  8615. +
  8616. + ssh_request_put(r); // drop reference obtained from queue
  8617. +
  8618. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8619. + return true;
  8620. +
  8621. + ssh_rtl_complete_with_status(r, -ECANCELED);
  8622. + return true;
  8623. +}
  8624. +
  8625. +static bool ssh_rtl_cancel_pending(struct ssh_request *r)
  8626. +{
  8627. + // if the packet is already locked, it's going to be removed shortly
  8628. + if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
  8629. + return true;
  8630. +
  8631. + /*
  8632. + * Now that we have locked the packet, we have guaranteed that it can't
  8633. + * be added to the system any more. If rtl is NULL, the locked
  8634. + * check in ssh_rtl_submit has not been run and any submission,
  8635. + * currently in progress or called later, won't add the packet. Thus we
  8636. + * can directly complete it.
  8637. + */
  8638. + if (!ssh_request_rtl(r)) {
  8639. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8640. + return true;
  8641. +
  8642. + ssh_rtl_complete_with_status(r, -ECANCELED);
  8643. + return true;
  8644. + }
  8645. +
  8646. + /*
  8647. + * Try to cancel the packet. If the packet has not been completed yet,
  8648. + * this will subsequently (and synchronously) call the completion
  8649. + * callback of the packet, which will complete the request.
  8650. + */
  8651. + ssh_ptl_cancel(&r->packet);
  8652. +
  8653. + /*
  8654. + * If the packet has been completed with success, i.e. has not been
  8655. + * canceled by the above call, the request may not have been completed
  8656. + * yet (may be waiting for a response). Check if we need to do this
  8657. + * here.
  8658. + */
  8659. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8660. + return true;
  8661. +
  8662. + ssh_rtl_queue_remove(r);
  8663. + ssh_rtl_pending_remove(r);
  8664. + ssh_rtl_complete_with_status(r, -ECANCELED);
  8665. +
  8666. + return true;
  8667. +}
  8668. +
  8669. +static bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
  8670. +{
  8671. + struct ssh_rtl *rtl;
  8672. + bool canceled;
  8673. +
  8674. + if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
  8675. + return true;
  8676. +
  8677. + trace_ssam_request_cancel(rqst);
  8678. +
  8679. + if (pending)
  8680. + canceled = ssh_rtl_cancel_pending(rqst);
  8681. + else
  8682. + canceled = ssh_rtl_cancel_nonpending(rqst);
  8683. +
  8684. + // note: rtl may be NULL if request has not been submitted yet
  8685. + rtl = ssh_request_rtl(rqst);
  8686. + if (canceled && rtl)
  8687. + ssh_rtl_tx_schedule(rtl);
  8688. +
  8689. + return canceled;
  8690. +}
  8691. +
  8692. +
  8693. +static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
  8694. +{
  8695. + struct ssh_request *r = to_ssh_request(p, packet);
  8696. +
  8697. + if (unlikely(status)) {
  8698. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
  8699. +
  8700. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8701. + return;
  8702. +
  8703. + /*
  8704. + * The packet may get cancelled even though it has not been
  8705. + * submitted yet. The request may still be queued. Check the
  8706. + * queue and remove it if necessary. As the timeout would have
  8707. + * been started in this function on success, there's no need to
  8708. + * cancel it here.
  8709. + */
  8710. + ssh_rtl_queue_remove(r);
  8711. + ssh_rtl_pending_remove(r);
  8712. + ssh_rtl_complete_with_status(r, status);
  8713. +
  8714. + ssh_rtl_tx_schedule(ssh_request_rtl(r));
  8715. + return;
  8716. + }
  8717. +
  8718. + /*
  8719. + * Mark as transmitted, ensure that state doesn't get zero by inserting
  8720. + * a memory barrier.
  8721. + */
  8722. + set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
  8723. + smp_mb__before_atomic();
  8724. + clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);
  8725. +
  8726. + // if we expect a response, we just need to start the timeout
  8727. + if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
  8728. + ssh_rtl_timeout_start(r);
  8729. + return;
  8730. + }
  8731. +
  8732. + /*
  8733. + * If we don't expect a response, lock, remove, and complete the
  8734. + * request. Note that, at this point, the request is guaranteed to have
  8735. + * left the queue and no timeout has been started. Thus we only need to
  8736. + * remove it from pending. If the request has already been completed (it
  8737. + * may have been canceled), return.
  8738. + */
  8739. +
  8740. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
  8741. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8742. + return;
  8743. +
  8744. + ssh_rtl_pending_remove(r);
  8745. + ssh_rtl_complete_with_status(r, 0);
  8746. +
  8747. + ssh_rtl_tx_schedule(ssh_request_rtl(r));
  8748. +}
  8749. +
  8750. +
  8751. +static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeo)
  8752. +{
  8753. + ktime_t timestamp = READ_ONCE(r->timestamp);
  8754. +
  8755. + if (timestamp != KTIME_MAX)
  8756. + return ktime_add(timestamp, timeo);
  8757. + else
  8758. + return KTIME_MAX;
  8759. +}
  8760. +
  8761. +static void ssh_rtl_timeout_reap(struct work_struct *work)
  8762. +{
  8763. + struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
  8764. + struct ssh_request *r, *n;
  8765. + LIST_HEAD(claimed);
  8766. + ktime_t now = ktime_get_coarse_boottime();
  8767. + ktime_t timeout = rtl->rtx_timeout.timeout;
  8768. + ktime_t next = KTIME_MAX;
  8769. +
  8770. + trace_ssam_rtl_timeout_reap("pending", atomic_read(&rtl->pending.count));
  8771. +
  8772. + /*
  8773. + * Mark reaper as "not pending". This is done before checking any
  8774. + * requests to avoid lost-update type problems.
  8775. + */
  8776. + WRITE_ONCE(rtl->rtx_timeout.expires, KTIME_MAX);
  8777. + smp_mb__after_atomic();
  8778. +
  8779. + spin_lock(&rtl->pending.lock);
  8780. + list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
  8781. + ktime_t expires = ssh_request_get_expiration(r, timeout);
  8782. +
  8783. + /*
  8784. + * Check if the timeout hasn't expired yet. Find out the next
  8785. + * expiration date to be handled after this run.
  8786. + */
  8787. + if (ktime_after(expires, now)) {
  8788. + next = ktime_before(expires, next) ? expires : next;
  8789. + continue;
  8790. + }
  8791. +
  8792. + // avoid further transitions if locked
  8793. + if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
  8794. + continue;
  8795. +
  8796. + /*
  8797. + * We have now marked the packet as locked. Thus it cannot be
  8798. + * added to the pending or queued lists again after we've
  8799. + * removed it here. We can therefore re-use the node of this
  8800. + * packet temporarily.
  8801. + */
  8802. +
  8803. + clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
  8804. +
  8805. + atomic_dec(&rtl->pending.count);
  8806. + list_del(&r->node);
  8807. +
  8808. + list_add_tail(&r->node, &claimed);
  8809. + }
  8810. + spin_unlock(&rtl->pending.lock);
  8811. +
  8812. + // cancel and complete the request
  8813. + list_for_each_entry_safe(r, n, &claimed, node) {
  8814. + trace_ssam_request_timeout(r);
  8815. +
  8816. + /*
  8817. + * At this point we've removed the packet from pending. This
  8818. + * means that we've obtained the last (only) reference of the
  8819. + * system to it. Thus we can just complete it.
  8820. + */
  8821. + if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8822. + ssh_rtl_complete_with_status(r, -ETIMEDOUT);
  8823. +
  8824. + // drop the reference we've obtained by removing it from pending
  8825. + list_del(&r->node);
  8826. + ssh_request_put(r);
  8827. + }
  8828. +
  8829. + // ensure that reaper doesn't run again immediately
  8830. + next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
  8831. + if (next != KTIME_MAX)
  8832. + ssh_rtl_timeout_reaper_mod(rtl, now, next);
  8833. +
  8834. + ssh_rtl_tx_schedule(rtl);
  8835. +}
  8836. +
  8837. +
  8838. +static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
  8839. + const struct ssam_span *data)
  8840. +{
  8841. + trace_ssam_rx_event_received(cmd, data->len);
  8842. +
  8843. + rtl_dbg(rtl, "rtl: handling event (rqid: 0x%04x)\n",
  8844. + get_unaligned_le16(&cmd->rqid));
  8845. +
  8846. + rtl->ops.handle_event(rtl, cmd, data);
  8847. +}
  8848. +
  8849. +static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
  8850. +{
  8851. + struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
  8852. + struct device *dev = &p->serdev->dev;
  8853. + struct ssh_command *command;
  8854. + struct ssam_span command_data;
  8855. +
  8856. + if (sshp_parse_command(dev, data, &command, &command_data))
  8857. + return;
  8858. +
  8859. + if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
  8860. + ssh_rtl_rx_event(rtl, command, &command_data);
  8861. + else
  8862. + ssh_rtl_complete(rtl, command, &command_data);
  8863. +}
  8864. +
  8865. +static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
  8866. +{
  8867. + switch (data->ptr[0]) {
  8868. + case SSH_PLD_TYPE_CMD:
  8869. + ssh_rtl_rx_command(p, data);
  8870. + break;
  8871. +
  8872. + default:
  8873. + ptl_err(p, "rtl: rx: unknown frame payload type"
  8874. + " (type: 0x%02x)\n", data->ptr[0]);
  8875. + break;
  8876. + }
  8877. +}
  8878. +
  8879. +
  8880. +static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl)
  8881. +{
  8882. + return ssh_ptl_get_device(&rtl->ptl);
  8883. +}
  8884. +
  8885. +static inline bool ssh_rtl_tx_flush(struct ssh_rtl *rtl)
  8886. +{
  8887. + return flush_work(&rtl->tx.work);
  8888. +}
  8889. +
  8890. +static inline int ssh_rtl_tx_start(struct ssh_rtl *rtl)
  8891. +{
  8892. + int status;
  8893. + bool sched;
  8894. +
  8895. + status = ssh_ptl_tx_start(&rtl->ptl);
  8896. + if (status)
  8897. + return status;
  8898. +
  8899. + /*
  8900. + * If the packet layer has been shut down and restarted without shutting
  8901. + * down the request layer, there may still be requests queued and not
  8902. + * handled.
  8903. + */
  8904. + spin_lock(&rtl->queue.lock);
  8905. + sched = !list_empty(&rtl->queue.head);
  8906. + spin_unlock(&rtl->queue.lock);
  8907. +
  8908. + if (sched)
  8909. + ssh_rtl_tx_schedule(rtl);
  8910. +
  8911. + return 0;
  8912. +}
  8913. +
  8914. +static inline int ssh_rtl_rx_start(struct ssh_rtl *rtl)
  8915. +{
  8916. + return ssh_ptl_rx_start(&rtl->ptl);
  8917. +}
  8918. +
  8919. +static int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
  8920. + const struct ssh_rtl_ops *ops)
  8921. +{
  8922. + struct ssh_ptl_ops ptl_ops;
  8923. + int status;
  8924. +
  8925. + ptl_ops.data_received = ssh_rtl_rx_data;
  8926. +
  8927. + status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
  8928. + if (status)
  8929. + return status;
  8930. +
  8931. + spin_lock_init(&rtl->queue.lock);
  8932. + INIT_LIST_HEAD(&rtl->queue.head);
  8933. +
  8934. + spin_lock_init(&rtl->pending.lock);
  8935. + INIT_LIST_HEAD(&rtl->pending.head);
  8936. + atomic_set_release(&rtl->pending.count, 0);
  8937. +
  8938. + INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
  8939. +
  8940. + rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
  8941. + rtl->rtx_timeout.expires = KTIME_MAX;
  8942. + INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
  8943. +
  8944. + rtl->ops = *ops;
  8945. +
  8946. + return 0;
  8947. +}
  8948. +
  8949. +static void ssh_rtl_destroy(struct ssh_rtl *rtl)
  8950. +{
  8951. + ssh_ptl_destroy(&rtl->ptl);
  8952. +}
  8953. +
  8954. +
  8955. +static void ssh_rtl_packet_release(struct ssh_packet *p)
  8956. +{
  8957. + struct ssh_request *rqst = to_ssh_request(p, packet);
  8958. + rqst->ops->release(rqst);
  8959. +}
  8960. +
  8961. +static const struct ssh_packet_ops ssh_rtl_packet_ops = {
  8962. + .complete = ssh_rtl_packet_callback,
  8963. + .release = ssh_rtl_packet_release,
  8964. +};
  8965. +
  8966. +static void ssh_request_init(struct ssh_request *rqst,
  8967. + enum ssam_request_flags flags,
  8968. + const struct ssh_request_ops *ops)
  8969. +{
  8970. + struct ssh_packet_args packet_args;
  8971. +
  8972. + packet_args.type = BIT(SSH_PACKET_TY_BLOCKING_BIT);
  8973. + if (!(flags & SSAM_REQUEST_UNSEQUENCED))
  8974. + packet_args.type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);
  8975. +
  8976. + packet_args.priority = SSH_PACKET_PRIORITY(DATA, 0);
  8977. + packet_args.ops = &ssh_rtl_packet_ops;
  8978. +
  8979. + ssh_packet_init(&rqst->packet, &packet_args);
  8980. + INIT_LIST_HEAD(&rqst->node);
  8981. +
  8982. + rqst->state = 0;
  8983. + if (flags & SSAM_REQUEST_HAS_RESPONSE)
  8984. + rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
  8985. +
  8986. + rqst->timestamp = KTIME_MAX;
  8987. + rqst->ops = ops;
  8988. +}
  8989. +
  8990. +
  8991. +struct ssh_flush_request {
  8992. + struct ssh_request base;
  8993. + struct completion completion;
  8994. + int status;
  8995. +};
  8996. +
  8997. +static void ssh_rtl_flush_request_complete(struct ssh_request *r,
  8998. + const struct ssh_command *cmd,
  8999. + const struct ssam_span *data,
  9000. + int status)
  9001. +{
  9002. + struct ssh_flush_request *rqst;
  9003. +
  9004. + rqst = container_of(r, struct ssh_flush_request, base);
  9005. + rqst->status = status;
  9006. +}
  9007. +
  9008. +static void ssh_rtl_flush_request_release(struct ssh_request *r)
  9009. +{
  9010. + struct ssh_flush_request *rqst;
  9011. +
  9012. + rqst = container_of(r, struct ssh_flush_request, base);
  9013. + complete_all(&rqst->completion);
  9014. +}
  9015. +
  9016. +static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
  9017. + .complete = ssh_rtl_flush_request_complete,
  9018. + .release = ssh_rtl_flush_request_release,
  9019. +};
  9020. +
  9021. +/**
  9022. + * ssh_rtl_flush - flush the request transmission layer
  9023. + * @rtl: request transmission layer
  9024. + * @timeout: timeout for the flush operation in jiffies
  9025. + *
  9026. + * Queue a special flush request and wait for its completion. This request
  9027. + * will be completed after all other currently queued and pending requests
  9028. + * have been completed. Instead of a normal data packet, this request submits
  9029. + * a special flush packet, meaning that, upon completion, the underlying
  9030. + * packet transmission layer has also been flushed.
  9031. + *
  9032. + * Flushing the request layer guarantees that all previously submitted
  9033. + * requests have been fully completed before this call returns. Additionally,
  9034. + * flushing blocks execution of all later submitted requests until the flush
  9035. + * has been completed.
  9036. + *
  9037. + * If the caller ensures that no new requests are submitted after a call to
  9038. + * this function, the request transmission layer is guaranteed to have no
  9039. + * remaining requests when this call returns. The same guarantee does not hold
  9040. + * for the packet layer, on which control packets may still be queued after
  9041. + * this call. See the documentation of ssh_ptl_flush for more details on
  9042. + * packet layer flushing.
  9043. + *
  9044. + * Return: Zero on success, -ETIMEDOUT if the flush timed out and has been
  9045. + * canceled as a result of the timeout, or -ESHUTDOWN if the packet and/or
  9046. + * request transmission layer has been shut down before this call. May also
  9047. + * return -EINTR if the underlying packet transmission has been interrupted.
  9048. + */
  9049. +static int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
  9050. +{
  9051. + const unsigned init_flags = SSAM_REQUEST_UNSEQUENCED;
  9052. + struct ssh_flush_request rqst;
  9053. + int status;
  9054. +
  9055. + ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
  9056. + rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
  9057. + rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
  9058. + rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);
  9059. +
  9060. + init_completion(&rqst.completion);
  9061. +
  9062. + status = ssh_rtl_submit(rtl, &rqst.base);
  9063. + if (status)
  9064. + return status;
  9065. +
  9066. + ssh_request_put(&rqst.base);
  9067. +
  9068. + if (wait_for_completion_timeout(&rqst.completion, timeout))
  9069. + return 0;
  9070. +
  9071. + ssh_rtl_cancel(&rqst.base, true);
  9072. + wait_for_completion(&rqst.completion);
  9073. +
  9074. + WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED
  9075. + && rqst.status != -ESHUTDOWN && rqst.status != -EINTR);
  9076. +
  9077. + return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
  9078. +}
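
As a usage note, the sketch below shows how this flush is typically invoked during teardown, mirroring the call in ssam_controller_shutdown() further below; the five-second timeout is the value used there.

/* Illustrative sketch only: flush during teardown, a failure is only logged. */
static void example_flush_before_shutdown(struct ssh_rtl *rtl)
{
	int status;

	status = ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
	if (status)
		rtl_err(rtl, "rtl: failed to flush request layer: %d\n", status);
}
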
  9079. +
  9080. +
  9081. +static void ssh_rtl_shutdown(struct ssh_rtl *rtl)
  9082. +{
  9083. + struct ssh_request *r, *n;
  9084. + LIST_HEAD(claimed);
  9085. + int pending;
  9086. +
  9087. + set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
  9088. + smp_mb__after_atomic();
  9089. +
  9090. + // remove requests from queue
  9091. + spin_lock(&rtl->queue.lock);
  9092. + list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
  9093. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
  9094. + smp_mb__before_atomic();
  9095. + clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
  9096. +
  9097. + list_del(&r->node);
  9098. + list_add_tail(&r->node, &claimed);
  9099. + }
  9100. + spin_unlock(&rtl->queue.lock);
  9101. +
  9102. + /*
  9103. + * We have now guaranteed that the queue is empty and no more new
  9104. + * requests can be submitted (i.e. it will stay empty). This means that
  9105. + * calling ssh_rtl_tx_schedule will not schedule tx.work any more. So we
  9106. + * can simply call cancel_work_sync on tx.work here and when that
  9107. + * returns, we've locked it down. This also means that after this call,
  9108. + * we don't submit any more packets to the underlying packet layer, so
  9109. + * we can also shut that down.
  9110. + */
  9111. +
  9112. + cancel_work_sync(&rtl->tx.work);
  9113. + ssh_ptl_shutdown(&rtl->ptl);
  9114. + cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
  9115. +
  9116. + /*
  9117. + * Shutting down the packet layer should also have canceled all requests.
  9118. + * Thus the pending set should be empty. Attempt to handle this
  9119. + * gracefully anyway, even though this should be dead code.
  9120. + */
  9121. +
  9122. + pending = atomic_read(&rtl->pending.count);
  9123. + if (WARN_ON(pending)) {
  9124. + spin_lock(&rtl->pending.lock);
  9125. + list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
  9126. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
  9127. + smp_mb__before_atomic();
  9128. + clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
  9129. +
  9130. + list_del(&r->node);
  9131. + list_add_tail(&r->node, &claimed);
  9132. + }
  9133. + spin_unlock(&rtl->pending.lock);
  9134. + }
  9135. +
  9136. + // finally cancel and complete requests
  9137. + list_for_each_entry_safe(r, n, &claimed, node) {
  9138. + // test_and_set because we still might compete with cancellation
  9139. + if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  9140. + ssh_rtl_complete_with_status(r, -ESHUTDOWN);
  9141. +
  9142. + // drop the reference we've obtained by removing it from list
  9143. + list_del(&r->node);
  9144. + ssh_request_put(r);
  9145. + }
  9146. +}
  9147. +
  9148. +
  9149. +/* -- Event notifier/callbacks. --------------------------------------------- */
  9150. +/*
  9151. + * The notifier system is based on linux/notifier.h, specifically the SRCU
  9152. + * implementation. The difference to that is that some bits of the notifier
  9153. + * call return value can be tracked across multiple calls. This is done so that
  9154. + * handling of events can be tracked and a warning can be issued in case an
  9155. + * event goes unhandled. The idea of that warning is that it should help discover
  9156. + * and identify new/currently unimplemented features.
  9157. + */
  9158. +
  9159. +struct ssam_nf_head {
  9160. + struct srcu_struct srcu;
  9161. + struct ssam_notifier_block __rcu *head;
  9162. +};
  9163. +
  9164. +
  9165. +int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
  9166. +{
  9167. + struct ssam_notifier_block *nb, *next_nb;
  9168. + int ret = 0, idx;
  9169. +
  9170. + idx = srcu_read_lock(&nh->srcu);
  9171. +
  9172. + nb = rcu_dereference_raw(nh->head);
  9173. + while (nb) {
  9174. + next_nb = rcu_dereference_raw(nb->next);
  9175. +
  9176. + ret = (ret & SSAM_NOTIF_STATE_MASK) | nb->fn(nb, event);
  9177. + if (ret & SSAM_NOTIF_STOP)
  9178. + break;
  9179. +
  9180. + nb = next_nb;
  9181. + }
  9182. +
  9183. + srcu_read_unlock(&nh->srcu, idx);
  9184. + return ret;
  9185. +}
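
To illustrate the return-value convention consumed by this chain, a hypothetical notifier callback is sketched below. The exact prototype of the fn member is defined in the patch's header and is not shown in this hunk; the sketch assumes it matches the call made above, and the target category value is a placeholder.

/*
 * Illustrative sketch only. Returning SSAM_NOTIF_HANDLED marks the event as
 * handled (suppressing the "unhandled event" warning emitted in
 * ssam_nf_call() below); OR-ing in SSAM_NOTIF_STOP would stop the chain
 * early. Returning 0 leaves the event to later notifier blocks.
 */
static u32 example_notifier_fn(struct ssam_notifier_block *nb,
			       struct ssam_event *event)
{
	if (event->target_category != 0x01)	/* hypothetical category */
		return 0;			/* not ours, keep going   */

	/* ... process event->data[0 .. event->length - 1] ... */

	return SSAM_NOTIF_HANDLED;
}
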
  9186. +
  9187. +/*
  9188. + * Note: This function must be synchronized by the caller with respect to other
  9189. + * insert and/or remove calls.
  9190. + */
  9191. +int __ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
  9192. +{
  9193. + struct ssam_notifier_block **link = &nh->head;
  9194. +
  9195. + while ((*link) != NULL) {
  9196. + if (unlikely((*link) == nb)) {
  9197. + WARN(1, "double register detected");
  9198. + return -EINVAL;
  9199. + }
  9200. +
  9201. + if (nb->priority > (*link)->priority)
  9202. + break;
  9203. +
  9204. + link = &((*link)->next);
  9205. + }
  9206. +
  9207. + nb->next = *link;
  9208. + rcu_assign_pointer(*link, nb);
  9209. +
  9210. + return 0;
  9211. +}
  9212. +
  9213. +/*
  9214. + * Note: This function must be synchronized by the caller with respect to other
  9215. + * insert and/or remove calls. On success, the caller _must_ ensure SRCU
  9216. + * synchronization by calling `synchronize_srcu(&nh->srcu)` after leaving the
  9217. + * critical section, to ensure that the removed notifier block is not in use any
  9218. + * more.
  9219. + */
  9220. +int __ssam_nfblk_remove(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
  9221. +{
  9222. + struct ssam_notifier_block **link = &nh->head;
  9223. +
  9224. + while ((*link) != NULL) {
  9225. + if ((*link) == nb) {
  9226. + rcu_assign_pointer(*link, nb->next);
  9227. + return 0;
  9228. + }
  9229. +
  9230. + link = &((*link)->next);
  9231. + }
  9232. +
  9233. + return -ENOENT;
  9234. +}
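
The removal contract described in the comment above can be made concrete with a small sketch; the serializing mutex parameter is a stand-in for whatever lock the caller uses (e.g. the nf->lock introduced further below).

/*
 * Illustrative sketch only: remove a notifier block and make sure no reader
 * in ssam_nfblk_call_chain() can still be using it before the caller frees
 * or reuses it.
 */
static int example_nfblk_remove_sync(struct mutex *lock,
				     struct ssam_nf_head *nh,
				     struct ssam_notifier_block *nb)
{
	int status;

	mutex_lock(lock);
	status = __ssam_nfblk_remove(nh, nb);
	mutex_unlock(lock);

	if (!status)
		synchronize_srcu(&nh->srcu);

	return status;
}
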
  9235. +
  9236. +static int ssam_nf_head_init(struct ssam_nf_head *nh)
  9237. +{
  9238. + int status;
  9239. +
  9240. + status = init_srcu_struct(&nh->srcu);
  9241. + if (status)
  9242. + return status;
  9243. +
  9244. + nh->head = NULL;
  9245. + return 0;
  9246. +}
  9247. +
  9248. +static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
  9249. +{
  9250. + cleanup_srcu_struct(&nh->srcu);
  9251. +}
  9252. +
  9253. +
  9254. +/* -- Event/notification registry. ------------------------------------------ */
  9255. +
  9256. +struct ssam_nf_refcount_key {
  9257. + struct ssam_event_registry reg;
  9258. + struct ssam_event_id id;
  9259. +};
  9260. +
  9261. +struct ssam_nf_refcount_entry {
  9262. + struct rb_node node;
  9263. + struct ssam_nf_refcount_key key;
  9264. + int refcount;
  9265. +};
  9266. +
  9267. +struct ssam_nf {
  9268. + struct mutex lock;
  9269. + struct rb_root refcount;
  9270. + struct ssam_nf_head head[SSH_NUM_EVENTS];
  9271. +};
  9272. +
  9273. +
  9274. +static int ssam_nf_refcount_inc(struct ssam_nf *nf,
  9275. + struct ssam_event_registry reg,
  9276. + struct ssam_event_id id)
  9277. +{
  9278. + struct ssam_nf_refcount_entry *entry;
  9279. + struct ssam_nf_refcount_key key;
  9280. + struct rb_node **link = &nf->refcount.rb_node;
  9281. + struct rb_node *parent = NULL;
  9282. + int cmp;
  9283. +
  9284. + key.reg = reg;
  9285. + key.id = id;
  9286. +
  9287. + while (*link) {
  9288. + entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
  9289. + parent = *link;
  9290. +
  9291. + cmp = memcmp(&key, &entry->key, sizeof(key));
  9292. + if (cmp < 0) {
  9293. + link = &(*link)->rb_left;
  9294. + } else if (cmp > 0) {
  9295. + link = &(*link)->rb_right;
  9296. + } else if (entry->refcount < INT_MAX) {
  9297. + return ++entry->refcount;
  9298. + } else {
  9299. + return -ENOSPC;
  9300. + }
  9301. + }
  9302. +
  9303. + entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  9304. + if (!entry)
  9305. + return -ENOMEM;
  9306. +
  9307. + entry->key = key;
  9308. + entry->refcount = 1;
  9309. +
  9310. + rb_link_node(&entry->node, parent, link);
  9311. + rb_insert_color(&entry->node, &nf->refcount);
  9312. +
  9313. + return entry->refcount;
  9314. +}
  9315. +
  9316. +static int ssam_nf_refcount_dec(struct ssam_nf *nf,
  9317. + struct ssam_event_registry reg,
  9318. + struct ssam_event_id id)
  9319. +{
  9320. + struct ssam_nf_refcount_entry *entry;
  9321. + struct ssam_nf_refcount_key key;
  9322. + struct rb_node *node = nf->refcount.rb_node;
  9323. + int cmp, rc;
  9324. +
  9325. + key.reg = reg;
  9326. + key.id = id;
  9327. +
  9328. + while (node) {
  9329. + entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
  9330. +
  9331. + cmp = memcmp(&key, &entry->key, sizeof(key));
  9332. + if (cmp < 0) {
  9333. + node = node->rb_left;
  9334. + } else if (cmp > 0) {
  9335. + node = node->rb_right;
  9336. + } else {
  9337. + rc = --entry->refcount;
  9338. +
  9339. + if (rc == 0) {
  9340. + rb_erase(&entry->node, &nf->refcount);
  9341. + kfree(entry);
  9342. + }
  9343. +
  9344. + return rc;
  9345. + }
  9346. + }
  9347. +
  9348. + return -ENOENT;
  9349. +}
  9350. +
  9351. +static bool ssam_nf_refcount_empty(struct ssam_nf *nf)
  9352. +{
  9353. + return RB_EMPTY_ROOT(&nf->refcount);
  9354. +}
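
For context, a sketch of how the refcount helpers above are meant to be paired when a user of a given (registry, id) event is added; the actual EC enable/disable requests are implemented elsewhere in this patch and are only hinted at here.

/*
 * Illustrative sketch only: the first user of an event (refcount becomes 1)
 * is the one that would enable the event source in the EC; the last user
 * (refcount drops to 0 in ssam_nf_refcount_dec()) would disable it again.
 */
static int example_event_user_add(struct ssam_nf *nf,
				  struct ssam_event_registry reg,
				  struct ssam_event_id id)
{
	int rc;

	mutex_lock(&nf->lock);
	rc = ssam_nf_refcount_inc(nf, reg, id);
	mutex_unlock(&nf->lock);

	if (rc < 0)
		return rc;	/* -ENOMEM or -ENOSPC */

	if (rc == 1) {
		/* first user: enable the event in the EC (hypothetical call) */
	}

	return 0;
}
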
  9355. +
  9356. +static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid,
  9357. + struct ssam_event *event)
  9358. +{
  9359. + struct ssam_nf_head *nf_head;
  9360. + int status, nf_ret;
  9361. +
  9362. + if (!ssh_rqid_is_event(rqid)) {
  9363. + dev_warn(dev, "event: unsupported rqid: 0x%04x\n", rqid);
  9364. + return;
  9365. + }
  9366. +
  9367. + nf_head = &nf->head[ssh_rqid_to_event(rqid)];
  9368. + nf_ret = ssam_nfblk_call_chain(nf_head, event);
  9369. + status = ssam_notifier_to_errno(nf_ret);
  9370. +
  9371. + if (status < 0) {
  9372. + dev_err(dev, "event: error handling event: %d "
  9373. + "(tc: 0x%02x, cid: 0x%02x, iid: 0x%02x, chn: 0x%02x)\n",
  9374. + status, event->target_category, event->command_id,
  9375. + event->instance_id, event->channel);
  9376. + }
  9377. +
  9378. + if (!(nf_ret & SSAM_NOTIF_HANDLED)) {
  9379. + dev_warn(dev, "event: unhandled event (rqid: 0x%02x, "
  9380. + "tc: 0x%02x, cid: 0x%02x, iid: 0x%02x, chn: 0x%02x)\n",
  9381. + rqid, event->target_category, event->command_id,
  9382. + event->instance_id, event->channel);
  9383. + }
  9384. +}
  9385. +
  9386. +static int ssam_nf_init(struct ssam_nf *nf)
  9387. +{
  9388. + int i, status;
  9389. +
  9390. + for (i = 0; i < SSH_NUM_EVENTS; i++) {
  9391. + status = ssam_nf_head_init(&nf->head[i]);
  9392. + if (status)
  9393. + break;
  9394. + }
  9395. +
  9396. + if (status) {
  9397. + for (i = i - 1; i >= 0; i--)
  9398. + ssam_nf_head_destroy(&nf->head[i]);
  9399. +
  9400. + return status;
  9401. + }
  9402. +
  9403. + mutex_init(&nf->lock);
  9404. + return 0;
  9405. +}
  9406. +
  9407. +static void ssam_nf_destroy(struct ssam_nf *nf)
  9408. +{
  9409. + int i;
  9410. +
  9411. + for (i = 0; i < SSH_NUM_EVENTS; i++)
  9412. + ssam_nf_head_destroy(&nf->head[i]);
  9413. +
  9414. + mutex_destroy(&nf->lock);
  9415. +}
  9416. +
  9417. +
  9418. +/* -- Event/async request completion system. -------------------------------- */
  9419. +
  9420. +#define SSAM_CPLT_WQ_NAME "ssam_cpltq"
  9421. +
  9422. +
  9423. +struct ssam_cplt;
  9424. +struct ssam_event_item;
  9425. +
  9426. +struct ssam_event_item_ops {
  9427. + void (*free)(struct ssam_event_item *);
  9428. +};
  9429. +
  9430. +struct ssam_event_item {
  9431. + struct list_head node;
  9432. + u16 rqid;
  9433. +
  9434. + struct ssam_event_item_ops ops;
  9435. + struct ssam_event event; // must be last
  9436. +};
  9437. +
  9438. +struct ssam_event_queue {
  9439. + struct ssam_cplt *cplt;
  9440. +
  9441. + spinlock_t lock;
  9442. + struct list_head head;
  9443. + struct work_struct work;
  9444. +};
  9445. +
  9446. +struct ssam_event_channel {
  9447. + struct ssam_event_queue queue[SSH_NUM_EVENTS];
  9448. +};
  9449. +
  9450. +struct ssam_cplt {
  9451. + struct device *dev;
  9452. + struct workqueue_struct *wq;
  9453. +
  9454. + struct {
  9455. + struct ssam_event_channel channel[SSH_NUM_CHANNELS];
  9456. + struct ssam_nf notif;
  9457. + } event;
  9458. +};
  9459. +
  9460. +
  9461. +/**
  9462. + * Maximum payload length for cached `ssam_event_item`s.
  9463. + *
  9464. + * This length has been chosen to accommodate standard touchpad and keyboard
  9465. + * input events. Events with larger payloads will be allocated separately.
  9466. + */
  9467. +#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN 32
  9468. +
  9469. +static struct kmem_cache *ssam_event_item_cache;
  9470. +
  9471. +static int ssam_event_item_cache_init(void)
  9472. +{
  9473. + const unsigned int size = sizeof(struct ssam_event_item)
  9474. + + SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
  9475. + const unsigned int align = __alignof__(struct ssam_event_item);
  9476. + struct kmem_cache *cache;
  9477. +
  9478. + cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
  9479. + if (!cache)
  9480. + return -ENOMEM;
  9481. +
  9482. + ssam_event_item_cache = cache;
  9483. + return 0;
  9484. +}
  9485. +
  9486. +static void ssam_event_item_cache_destroy(void)
  9487. +{
  9488. + kmem_cache_destroy(ssam_event_item_cache);
  9489. + ssam_event_item_cache = NULL;
  9490. +}
  9491. +
  9492. +static void __ssam_event_item_free_cached(struct ssam_event_item *item)
  9493. +{
  9494. + kmem_cache_free(ssam_event_item_cache, item);
  9495. +}
  9496. +
  9497. +static void __ssam_event_item_free_generic(struct ssam_event_item *item)
  9498. +{
  9499. + kfree(item);
  9500. +}
  9501. +
  9502. +static inline void ssam_event_item_free(struct ssam_event_item *item)
  9503. +{
  9504. + trace_ssam_event_item_free(item);
  9505. + item->ops.free(item);
  9506. +}
  9507. +
  9508. +static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
  9509. +{
  9510. + struct ssam_event_item *item;
  9511. +
  9512. + if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
  9513. + item = kmem_cache_alloc(ssam_event_item_cache, flags);
  9514. + if (!item)
  9515. + return NULL;
  9516. +
  9517. + item->ops.free = __ssam_event_item_free_cached;
  9518. + } else {
  9519. + const size_t n = sizeof(struct ssam_event_item) + len;
  9520. + item = kzalloc(n, flags);
  9521. + if (!item)
  9522. + return NULL;
  9523. +
  9524. + item->ops.free = __ssam_event_item_free_generic;
  9525. + }
  9526. +
  9527. + item->event.length = len;
  9528. +
  9529. + trace_ssam_event_item_alloc(item, len);
  9530. + return item;
  9531. +}
  9532. +
  9533. +
  9534. +static void ssam_event_queue_push(struct ssam_event_queue *q,
  9535. + struct ssam_event_item *item)
  9536. +{
  9537. + spin_lock(&q->lock);
  9538. + list_add_tail(&item->node, &q->head);
  9539. + spin_unlock(&q->lock);
  9540. +}
  9541. +
  9542. +static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
  9543. +{
  9544. + struct ssam_event_item *item;
  9545. +
  9546. + spin_lock(&q->lock);
  9547. + item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
  9548. + if (item)
  9549. + list_del(&item->node);
  9550. + spin_unlock(&q->lock);
  9551. +
  9552. + return item;
  9553. +}
  9554. +
  9555. +static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
  9556. +{
  9557. + bool empty;
  9558. +
  9559. + spin_lock(&q->lock);
  9560. + empty = list_empty(&q->head);
  9561. + spin_unlock(&q->lock);
  9562. +
  9563. + return empty;
  9564. +}
  9565. +
  9566. +static struct ssam_event_queue *ssam_cplt_get_event_queue(
  9567. + struct ssam_cplt *cplt, u8 channel, u16 rqid)
  9568. +{
  9569. + u16 event = ssh_rqid_to_event(rqid);
  9570. + u16 chidx = ssh_channel_to_index(channel);
  9571. +
  9572. + if (!ssh_rqid_is_event(rqid)) {
  9573. + dev_err(cplt->dev, "event: unsupported rqid: 0x%04x\n", rqid);
  9574. + return NULL;
  9575. + }
  9576. +
  9577. + if (!ssh_channel_is_valid(channel)) {
  9578. + dev_warn(cplt->dev, "event: unsupported channel: %u\n",
  9579. + channel);
  9580. + chidx = 0;
  9581. + }
  9582. +
  9583. + return &cplt->event.channel[chidx].queue[event];
  9584. +}
  9585. +
  9586. +static inline bool ssam_cplt_submit(struct ssam_cplt *cplt,
  9587. + struct work_struct *work)
  9588. +{
  9589. + return queue_work(cplt->wq, work);
  9590. +}
  9591. +
  9592. +static int ssam_cplt_submit_event(struct ssam_cplt *cplt,
  9593. + struct ssam_event_item *item)
  9594. +{
  9595. + struct ssam_event_queue *evq;
  9596. +
  9597. + evq = ssam_cplt_get_event_queue(cplt, item->event.channel, item->rqid);
  9598. + if (!evq)
  9599. + return -EINVAL;
  9600. +
  9601. + ssam_event_queue_push(evq, item);
  9602. + ssam_cplt_submit(cplt, &evq->work);
  9603. + return 0;
  9604. +}
  9605. +
  9606. +static void ssam_cplt_flush(struct ssam_cplt *cplt)
  9607. +{
  9608. + flush_workqueue(cplt->wq);
  9609. +}
  9610. +
  9611. +static void ssam_event_queue_work_fn(struct work_struct *work)
  9612. +{
  9613. + struct ssam_event_queue *queue;
  9614. + struct ssam_event_item *item;
  9615. + struct ssam_nf *nf;
  9616. + struct device *dev;
  9617. + int i;
  9618. +
  9619. + queue = container_of(work, struct ssam_event_queue, work);
  9620. + nf = &queue->cplt->event.notif;
  9621. + dev = queue->cplt->dev;
  9622. +
  9623. + for (i = 0; i < 10; i++) {
  9624. + item = ssam_event_queue_pop(queue);
  9625. + if (item == NULL)
  9626. + return;
  9627. +
  9628. + ssam_nf_call(nf, dev, item->rqid, &item->event);
  9629. + ssam_event_item_free(item);
  9630. + }
  9631. +
  9632. + if (!ssam_event_queue_is_empty(queue))
  9633. + ssam_cplt_submit(queue->cplt, &queue->work);
  9634. +}
  9635. +
  9636. +static void ssam_event_queue_init(struct ssam_cplt *cplt,
  9637. + struct ssam_event_queue *evq)
  9638. +{
  9639. + evq->cplt = cplt;
  9640. + spin_lock_init(&evq->lock);
  9641. + INIT_LIST_HEAD(&evq->head);
  9642. + INIT_WORK(&evq->work, ssam_event_queue_work_fn);
  9643. +}
  9644. +
  9645. +static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
  9646. +{
  9647. + struct ssam_event_channel *channel;
  9648. + int status, c, i;
  9649. +
  9650. + cplt->dev = dev;
  9651. +
  9652. + cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
  9653. + if (!cplt->wq)
  9654. + return -ENOMEM;
  9655. +
  9656. + for (c = 0; c < ARRAY_SIZE(cplt->event.channel); c++) {
  9657. + channel = &cplt->event.channel[c];
  9658. +
  9659. + for (i = 0; i < ARRAY_SIZE(channel->queue); i++)
  9660. + ssam_event_queue_init(cplt, &channel->queue[i]);
  9661. + }
  9662. +
  9663. + status = ssam_nf_init(&cplt->event.notif);
  9664. + if (status)
  9665. + destroy_workqueue(cplt->wq);
  9666. +
  9667. + return status;
  9668. +}
  9669. +
  9670. +static void ssam_cplt_destroy(struct ssam_cplt *cplt)
  9671. +{
  9672. + /*
  9673. + * Note: destroy_workqueue ensures that all currently queued work will
  9674. + * be fully completed and the workqueue drained. This means that this
  9675. + * call will inherently also free any queued ssam_event_items, thus we
  9676. + * don't have to take care of that here explicitly.
  9677. + */
  9678. + destroy_workqueue(cplt->wq);
  9679. + ssam_nf_destroy(&cplt->event.notif);
  9680. +}
  9681. +
  9682. +
  9683. +/* -- Main SSAM device structures. ------------------------------------------ */
  9684. +
  9685. +enum ssam_controller_state {
  9686. + SSAM_CONTROLLER_UNINITIALIZED,
  9687. + SSAM_CONTROLLER_INITIALIZED,
  9688. + SSAM_CONTROLLER_STARTED,
  9689. + SSAM_CONTROLLER_STOPPED,
  9690. + SSAM_CONTROLLER_SUSPENDED,
  9691. +};
  9692. +
  9693. +struct ssam_device_caps {
  9694. + u32 notif_display:1;
  9695. + u32 notif_d0exit:1;
  9696. +};
  9697. +
  9698. +struct ssam_controller {
  9699. + enum ssam_controller_state state;
  9700. +
  9701. + struct ssh_rtl rtl;
  9702. + struct ssam_cplt cplt;
  9703. +
  9704. + struct {
  9705. + struct ssh_seq_counter seq;
  9706. + struct ssh_rqid_counter rqid;
  9707. + } counter;
  9708. +
  9709. + struct {
  9710. + int num;
  9711. + bool wakeup_enabled;
  9712. + } irq;
  9713. +
  9714. + struct ssam_device_caps caps;
  9715. +};
  9716. +
  9717. +
  9718. +#define ssam_dbg(ctrl, fmt, ...) rtl_dbg(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
  9719. +#define ssam_info(ctrl, fmt, ...) rtl_info(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
  9720. +#define ssam_warn(ctrl, fmt, ...) rtl_warn(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
  9721. +#define ssam_err(ctrl, fmt, ...) rtl_err(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
  9722. +
  9723. +#define to_ssam_controller(ptr, member) \
  9724. + container_of(ptr, struct ssam_controller, member)
  9725. +
  9726. +struct device *ssam_controller_device(struct ssam_controller *c)
  9727. +{
  9728. + return ssh_rtl_get_device(&c->rtl);
  9729. +}
  9730. +EXPORT_SYMBOL_GPL(ssam_controller_device);
  9731. +
  9732. +
  9733. +static void ssam_handle_event(struct ssh_rtl *rtl,
  9734. + const struct ssh_command *cmd,
  9735. + const struct ssam_span *data)
  9736. +{
  9737. + struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl);
  9738. + struct ssam_event_item *item;
  9739. +
  9740. + item = ssam_event_item_alloc(data->len, GFP_KERNEL);
  9741. + if (!item)
  9742. + return;
  9743. +
  9744. + item->rqid = get_unaligned_le16(&cmd->rqid);
  9745. + item->event.target_category = cmd->tc;
  9746. + item->event.command_id = cmd->cid;
  9747. + item->event.instance_id = cmd->iid;
  9748. + item->event.channel = cmd->chn_in;
  9749. + memcpy(&item->event.data[0], data->ptr, data->len);
  9750. +
  9751. + ssam_cplt_submit_event(&ctrl->cplt, item);
  9752. +}
  9753. +
  9754. +static const struct ssh_rtl_ops ssam_rtl_ops = {
  9755. + .handle_event = ssam_handle_event,
  9756. +};
  9757. +
  9758. +
  9759. +static bool ssam_notifier_empty(struct ssam_controller *ctrl);
  9760. +static void ssam_notifier_unregister_all(struct ssam_controller *ctrl);
  9761. +
  9762. +
  9763. +#define SSAM_SSH_DSM_REVISION 0
  9764. +#define SSAM_SSH_DSM_NOTIF_D0 8
  9765. +static const guid_t SSAM_SSH_DSM_UUID = GUID_INIT(0xd5e383e1, 0xd892, 0x4a76,
  9766. + 0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5);
  9767. +
  9768. +static int ssam_device_caps_load_from_acpi(acpi_handle handle,
  9769. + struct ssam_device_caps *caps)
  9770. +{
  9771. + union acpi_object *obj;
  9772. + u64 funcs = 0;
  9773. + int i;
  9774. +
  9775. + // set defaults
  9776. + caps->notif_display = true;
  9777. + caps->notif_d0exit = false;
  9778. +
  9779. + if (!acpi_has_method(handle, "_DSM"))
  9780. + return 0;
  9781. +
  9782. + // get function availability bitfield
  9783. + obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_UUID, 0, 0, NULL,
  9784. + ACPI_TYPE_BUFFER);
  9785. + if (!obj)
  9786. + return -EFAULT;
  9787. +
  9788. + for (i = 0; i < obj->buffer.length && i < 8; i++)
  9789. + funcs |= (((u64)obj->buffer.pointer[i]) << (i * 8));
  9790. +
  9791. + ACPI_FREE(obj);
  9792. +
  9793. + // D0 exit/entry notification
  9794. + if (funcs & BIT(SSAM_SSH_DSM_NOTIF_D0)) {
  9795. + obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_UUID,
  9796. + SSAM_SSH_DSM_REVISION, SSAM_SSH_DSM_NOTIF_D0,
  9797. + NULL, ACPI_TYPE_INTEGER);
  9798. + if (!obj)
  9799. + return -EFAULT;
  9800. +
  9801. + caps->notif_d0exit = !!obj->integer.value;
  9802. + ACPI_FREE(obj);
  9803. + }
  9804. +
  9805. + return 0;
  9806. +}
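
A short worked example of the availability-bitfield decoding above; the buffer contents are hypothetical.

/* Illustrative worked example only (hypothetical _DSM buffer contents). */
static u64 example_dsm_funcs(void)
{
	const u8 buf[] = { 0x01, 0x01 };	/* functions 0 and 8 advertised */

	/* same decoding as the loop above: byte i contributes bits i*8..i*8+7 */
	return ((u64)buf[0] << 0) | ((u64)buf[1] << 8);	/* == 0x0101 */
}

With such a value, BIT(SSAM_SSH_DSM_NOTIF_D0) (bit 8) is set, so the second _DSM evaluation above would run and set notif_d0exit from its integer result.
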
  9807. +
  9808. +static int ssam_controller_init(struct ssam_controller *ctrl,
  9809. + struct serdev_device *serdev)
  9810. +{
  9811. + acpi_handle handle = ACPI_HANDLE(&serdev->dev);
  9812. + int status;
  9813. +
  9814. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_UNINITIALIZED) {
  9815. + dev_err(&serdev->dev, "embedded controller already initialized\n");
  9816. + return -EBUSY;
  9817. + }
  9818. +
  9819. + status = ssam_device_caps_load_from_acpi(handle, &ctrl->caps);
  9820. + if (status)
  9821. + return status;
  9822. +
  9823. + dev_dbg(&serdev->dev, "device capabilities:\n");
  9824. + dev_dbg(&serdev->dev, " notif_display: %u\n", ctrl->caps.notif_display);
  9825. + dev_dbg(&serdev->dev, " notif_d0exit: %u\n", ctrl->caps.notif_d0exit);
  9826. +
  9827. + ssh_seq_reset(&ctrl->counter.seq);
  9828. + ssh_rqid_reset(&ctrl->counter.rqid);
  9829. +
  9830. + // initialize event/request completion system
  9831. + status = ssam_cplt_init(&ctrl->cplt, &serdev->dev);
  9832. + if (status)
  9833. + return status;
  9834. +
  9835. + // initialize request and packet transmission layers
  9836. + status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops);
  9837. + if (status) {
  9838. + ssam_cplt_destroy(&ctrl->cplt);
  9839. + return status;
  9840. + }
  9841. +
  9842. + // update state
  9843. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_INITIALIZED);
  9844. + return 0;
  9845. +}
  9846. +
  9847. +static int ssam_controller_start(struct ssam_controller *ctrl)
  9848. +{
  9849. + int status;
  9850. +
  9851. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_INITIALIZED)
  9852. + return -EINVAL;
  9853. +
  9854. + status = ssh_rtl_tx_start(&ctrl->rtl);
  9855. + if (status)
  9856. + return status;
  9857. +
  9858. + status = ssh_rtl_rx_start(&ctrl->rtl);
  9859. + if (status) {
  9860. + ssh_rtl_tx_flush(&ctrl->rtl);
  9861. + return status;
  9862. + }
  9863. +
  9864. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_STARTED);
  9865. + return 0;
  9866. +}
  9867. +
  9868. +static void ssam_controller_shutdown(struct ssam_controller *ctrl)
  9869. +{
  9870. + enum ssam_controller_state s = smp_load_acquire(&ctrl->state);
  9871. + int status;
  9872. +
  9873. + if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED)
  9874. + return;
  9875. +
  9876. + // try to flush pending events and requests while everything still works
  9877. + status = ssh_rtl_flush(&ctrl->rtl, msecs_to_jiffies(5000));
  9878. + if (status) {
  9879. + ssam_err(ctrl, "failed to flush request transmission layer: %d\n",
  9880. + status);
  9881. + }
  9882. +
  9883. + // try to flush out all currently completing requests and events
  9884. + ssam_cplt_flush(&ctrl->cplt);
  9885. +
  9886. + /*
  9887. + * We expect all notifiers to have been removed by the respective client
  9888. + * drivers that set them up by this point. If this warning occurs, some
  9889. + * client driver has not done that...
  9890. + */
  9891. + WARN_ON(!ssam_notifier_empty(ctrl));
  9892. +
  9893. + /*
  9894. + * Nevertheless, we should still take care of drivers that don't behave
  9895. + * well. Thus disable all enabled events, unregister all notifiers.
  9896. + */
  9897. + ssam_notifier_unregister_all(ctrl);
  9898. +
  9899. + // cancel remaining requests, ensure no new ones can be queued, stop threads
  9900. + ssh_rtl_tx_flush(&ctrl->rtl);
  9901. + ssh_rtl_shutdown(&ctrl->rtl);
  9902. +
  9903. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_STOPPED);
  9904. +}
  9905. +
  9906. +static void ssam_controller_destroy(struct ssam_controller *ctrl)
  9907. +{
  9908. + if (smp_load_acquire(&ctrl->state) == SSAM_CONTROLLER_UNINITIALIZED)
  9909. + return;
  9910. +
  9911. + /*
  9912. + * Note: New events could still have been received after the previous
  9913. + * flush in ssam_controller_shutdown, before the request transport layer
  9914. + * has been shut down. At this point, after the shutdown, we can be sure
  9915. + * that no new events will be queued. The call to ssam_cplt_destroy will
  9916. + * ensure that those remaining are being completed and freed.
  9917. + */
  9918. +
  9919. + // actually free resources
  9920. + ssam_cplt_destroy(&ctrl->cplt);
  9921. + ssh_rtl_destroy(&ctrl->rtl);
  9922. +
  9923. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_UNINITIALIZED);
  9924. +}
  9925. +
  9926. +static int ssam_controller_suspend(struct ssam_controller *ctrl)
  9927. +{
  9928. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_STARTED)
  9929. + return -EINVAL;
  9930. +
  9931. + ssam_dbg(ctrl, "pm: suspending controller\n");
  9932. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_SUSPENDED);
  9933. + return 0;
  9934. +}
  9935. +
  9936. +static int ssam_controller_resume(struct ssam_controller *ctrl)
  9937. +{
  9938. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_SUSPENDED)
  9939. + return -EINVAL;
  9940. +
  9941. + ssam_dbg(ctrl, "pm: resuming controller\n");
  9942. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_STARTED);
  9943. + return 0;
  9944. +}
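+
+/*
+ * Summary of the controller state machine implemented above (derived from
+ * the state checks in the init/start/shutdown/destroy/suspend/resume
+ * functions; informational only):
+ *
+ *	UNINITIALIZED --init()--> INITIALIZED --start()--> STARTED
+ *	STARTED --suspend()--> SUSPENDED --resume()--> STARTED
+ *	INITIALIZED/STARTED/SUSPENDED --shutdown()--> STOPPED
+ *	STOPPED --destroy()--> UNINITIALIZED
+ *
+ * All state updates are published via smp_store_release() and read via
+ * smp_load_acquire() on ctrl->state.
+ */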
  9945. +
  9946. +
  9947. +static inline
  9948. +int ssam_controller_receive_buf(struct ssam_controller *ctrl,
  9949. + const unsigned char *buf, size_t n)
  9950. +{
  9951. + return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n);
  9952. +}
  9953. +
  9954. +static inline void ssam_controller_write_wakeup(struct ssam_controller *ctrl)
  9955. +{
  9956. + ssh_ptl_tx_wakeup(&ctrl->rtl.ptl, true);
  9957. +}
  9958. +
  9959. +
  9960. +/* -- Top-level request interface ------------------------------------------- */
  9961. +
  9962. +ssize_t ssam_request_write_data(struct ssam_span *buf,
  9963. + struct ssam_controller *ctrl,
  9964. + struct ssam_request *spec)
  9965. +{
  9966. + struct msgbuf msgb;
  9967. + u16 rqid;
  9968. + u8 seq;
  9969. +
  9970. + if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE)
  9971. + return -EINVAL;
  9972. +
  9973. + msgb_init(&msgb, buf->ptr, buf->len);
  9974. + seq = ssh_seq_next(&ctrl->counter.seq);
  9975. + rqid = ssh_rqid_next(&ctrl->counter.rqid);
  9976. + msgb_push_cmd(&msgb, seq, rqid, spec);
  9977. +
  9978. + return msgb_bytes_used(&msgb);
  9979. +}
  9980. +EXPORT_SYMBOL_GPL(ssam_request_write_data);
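+
+/*
+ * Note (informational): the caller is expected to provide a buffer of at
+ * least SSH_COMMAND_MESSAGE_LENGTH(spec->length) bytes; the return value is
+ * the number of bytes of the encoded message actually written to it.
+ */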
  9981. +
  9982. +
  9983. +static void ssam_request_sync_complete(struct ssh_request *rqst,
  9984. + const struct ssh_command *cmd,
  9985. + const struct ssam_span *data, int status)
  9986. +{
  9987. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  9988. + struct ssam_request_sync *r;
  9989. +
  9990. + r = container_of(rqst, struct ssam_request_sync, base);
  9991. + r->status = status;
  9992. +
  9993. + if (r->resp)
  9994. + r->resp->length = 0;
  9995. +
  9996. + if (status) {
  9997. + rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status);
  9998. + return;
  9999. + }
  10000. +
  10001. + if (!data) // handle requests without a response
  10002. + return;
  10003. +
  10004. + if (!r->resp || !r->resp->pointer) {
  10005. + if (data->len) {
  10006. + rtl_warn(rtl, "rsp: no response buffer provided, "
  10007. + "dropping data\n");
  10008. + }
  10009. + return;
  10010. + }
  10011. +
  10012. + if (data->len > r->resp->capacity) {
  10013. + rtl_err(rtl, "rsp: response buffer too small, "
  10014. + "capacity: %zu bytes, got: %zu bytes\n",
  10015. + r->resp->capacity, data->len);
  10016. + r->status = -ENOSPC;
  10017. + return;
  10018. + }
  10019. +
  10020. + r->resp->length = data->len;
  10021. + memcpy(r->resp->pointer, data->ptr, data->len);
  10022. +}
  10023. +
  10024. +static void ssam_request_sync_release(struct ssh_request *rqst)
  10025. +{
  10026. + complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp);
  10027. +}
  10028. +
  10029. +static const struct ssh_request_ops ssam_request_sync_ops = {
  10030. + .release = ssam_request_sync_release,
  10031. + .complete = ssam_request_sync_complete,
  10032. +};
  10033. +
  10034. +
  10035. +int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
  10036. + struct ssam_request_sync **rqst,
  10037. + struct ssam_span *buffer)
  10038. +{
  10039. + size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len);
  10040. +
  10041. + *rqst = kzalloc(sizeof(struct ssam_request_sync) + msglen, flags);
  10042. + if (!*rqst)
  10043. + return -ENOMEM;
  10044. +
  10045. + buffer->ptr = (u8 *)(*rqst + 1);
  10046. + buffer->len = msglen;
  10047. +
  10048. + return 0;
  10049. +}
  10050. +EXPORT_SYMBOL_GPL(ssam_request_sync_alloc);
  10051. +
  10052. +void ssam_request_sync_init(struct ssam_request_sync *rqst,
  10053. + enum ssam_request_flags flags)
  10054. +{
  10055. + ssh_request_init(&rqst->base, flags, &ssam_request_sync_ops);
  10056. + init_completion(&rqst->comp);
  10057. + rqst->resp = NULL;
  10058. + rqst->status = 0;
  10059. +}
  10060. +EXPORT_SYMBOL_GPL(ssam_request_sync_init);
  10061. +
  10062. +int ssam_request_sync_submit(struct ssam_controller *ctrl,
  10063. + struct ssam_request_sync *rqst)
  10064. +{
  10065. + enum ssam_controller_state state = smp_load_acquire(&ctrl->state);
  10066. + int status;
  10067. +
  10068. + if (state == SSAM_CONTROLLER_SUSPENDED) {
  10069. + ssam_warn(ctrl, "rqst: embedded controller is suspended\n");
  10070. + ssh_request_put(&rqst->base);
  10071. + return -EPERM;
  10072. + }
  10073. +
  10074. + if (state != SSAM_CONTROLLER_STARTED) {
  10075. + ssam_warn(ctrl, "rqst: embedded controller is uninitialized\n");
  10076. + ssh_request_put(&rqst->base);
  10077. + return -ENXIO;
  10078. + }
  10079. +
  10080. + status = ssh_rtl_submit(&ctrl->rtl, &rqst->base);
  10081. + ssh_request_put(&rqst->base);
  10082. +
  10083. + return status;
  10084. +}
  10085. +EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
  10086. +
  10087. +int ssam_request_sync(struct ssam_controller *ctrl, struct ssam_request *spec,
  10088. + struct ssam_response *rsp)
  10089. +{
  10090. + struct ssam_request_sync *rqst;
  10091. + struct ssam_span buf;
  10092. + size_t len;
  10093. + int status;
  10094. +
  10095. + // prevent overflow, allows us to skip checks later on
  10096. + if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE) {
  10097. + ssam_err(ctrl, "rqst: request payload too large\n");
  10098. + return -EINVAL;
  10099. + }
  10100. +
  10101. + status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
  10102. + if (status)
  10103. + return status;
  10104. +
  10105. + ssam_request_sync_init(rqst, spec->flags);
  10106. + ssam_request_sync_set_resp(rqst, rsp);
  10107. +
  10108. + len = ssam_request_write_data(&buf, ctrl, spec);
  10109. + ssam_request_sync_set_data(rqst, buf.ptr, len);
  10110. +
  10111. + status = ssam_request_sync_submit(ctrl, rqst);
  10112. + if (!status)
  10113. + status = ssam_request_sync_wait(rqst);
  10114. +
  10115. + kfree(rqst);
  10116. + return status;
  10117. +}
  10118. +EXPORT_SYMBOL_GPL(ssam_request_sync);
  10119. +
  10120. +int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
  10121. + struct ssam_request *spec,
  10122. + struct ssam_response *rsp,
  10123. + struct ssam_span *buf)
  10124. +{
  10125. + struct ssam_request_sync rqst;
  10126. + size_t len;
  10127. + int status;
  10128. +
  10129. + // prevent overflow, allows us to skip checks later on
  10130. + if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE) {
  10131. + ssam_err(ctrl, "rqst: request payload too large\n");
  10132. + return -EINVAL;
  10133. + }
  10134. +
  10135. + ssam_request_sync_init(&rqst, spec->flags);
  10136. + ssam_request_sync_set_resp(&rqst, rsp);
  10137. +
  10138. + len = ssam_request_write_data(buf, ctrl, spec);
  10139. + ssam_request_sync_set_data(&rqst, buf->ptr, len);
  10140. +
  10141. + status = ssam_request_sync_submit(ctrl, &rqst);
  10142. + if (!status)
  10143. + status = ssam_request_sync_wait(&rqst);
  10144. +
  10145. + return status;
  10146. +}
  10147. +EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
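+
+/*
+ * Usage sketch (illustration only, not part of this interface): a
+ * hypothetical client issuing a synchronous request with a one-byte response
+ * buffer. The target category, command ID and other values below are made up
+ * for the example.
+ *
+ *	static int example_query(struct ssam_controller *ctrl, u8 *out)
+ *	{
+ *		struct ssam_request rqst;
+ *		struct ssam_response rsp;
+ *
+ *		rqst.target_category = SSAM_SSH_TC_SAM;
+ *		rqst.command_id = 0x01;		// hypothetical command
+ *		rqst.instance_id = 0x00;
+ *		rqst.channel = 0x01;
+ *		rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ *		rqst.length = 0;
+ *		rqst.payload = NULL;
+ *
+ *		rsp.capacity = sizeof(*out);
+ *		rsp.length = 0;
+ *		rsp.pointer = out;
+ *
+ *		return ssam_request_sync(ctrl, &rqst, &rsp);
+ *	}
+ */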
  10148. +
  10149. +
  10150. +/* -- Internal SAM requests. ------------------------------------------------ */
  10151. +
  10152. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
  10153. + .target_category = SSAM_SSH_TC_SAM,
  10154. + .command_id = 0x13,
  10155. + .instance_id = 0x00,
  10156. + .channel = 0x01,
  10157. +});
  10158. +
  10159. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
  10160. + .target_category = SSAM_SSH_TC_SAM,
  10161. + .command_id = 0x15,
  10162. + .instance_id = 0x00,
  10163. + .channel = 0x01,
  10164. +});
  10165. +
  10166. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
  10167. + .target_category = SSAM_SSH_TC_SAM,
  10168. + .command_id = 0x16,
  10169. + .instance_id = 0x00,
  10170. + .channel = 0x01,
  10171. +});
  10172. +
  10173. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
  10174. + .target_category = SSAM_SSH_TC_SAM,
  10175. + .command_id = 0x33,
  10176. + .instance_id = 0x00,
  10177. + .channel = 0x01,
  10178. +});
  10179. +
  10180. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
  10181. + .target_category = SSAM_SSH_TC_SAM,
  10182. + .command_id = 0x34,
  10183. + .instance_id = 0x00,
  10184. + .channel = 0x01,
  10185. +});
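+
+/*
+ * Note (informational): each SSAM_DEFINE_SYNC_REQUEST_R() use above expands,
+ * via the macro in surface_sam_ssh.h, to a function of the form
+ *
+ *	static int ssam_ssh_notif_d0_entry(struct ssam_controller *ctrl, u8 *ret);
+ *
+ * which synchronously issues the specified request and stores the
+ * fixed-size response value in *ret.
+ */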
  10186. +
  10187. +static int ssam_ssh_event_enable(struct ssam_controller *ctrl,
  10188. + struct ssam_event_registry reg,
  10189. + struct ssam_event_id id, u8 flags)
  10190. +{
  10191. + struct ssh_notification_params params;
  10192. + struct ssam_request rqst;
  10193. + struct ssam_response result;
  10194. + int status;
  10195. +
  10196. + u16 rqid = ssh_tc_to_rqid(id.target_category);
  10197. + u8 buf[1] = { 0x00 };
  10198. +
  10199. + // only allow RQIDs that lie within event spectrum
  10200. + if (!ssh_rqid_is_event(rqid))
  10201. + return -EINVAL;
  10202. +
  10203. + params.target_category = id.target_category;
  10204. + params.instance_id = id.instance;
  10205. + params.flags = flags;
  10206. + put_unaligned_le16(rqid, &params.request_id);
  10207. +
  10208. + rqst.target_category = reg.target_category;
  10209. + rqst.command_id = reg.cid_enable;
  10210. + rqst.instance_id = 0x00;
  10211. + rqst.channel = reg.channel;
  10212. + rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
  10213. + rqst.length = sizeof(params);
  10214. + rqst.payload = (u8 *)&params;
  10215. +
  10216. + result.capacity = ARRAY_SIZE(buf);
  10217. + result.length = 0;
  10218. + result.pointer = buf;
  10219. +
  10220. + status = ssam_request_sync_onstack(ctrl, &rqst, &result, sizeof(params));
  10221. + if (status) {
  10222. + ssam_err(ctrl, "failed to enable event source "
  10223. + "(tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
  10224. + id.target_category, id.instance, reg.target_category);
  10225. + }
  10226. +
  10227. + if (buf[0] != 0x00) {
  10228. + ssam_warn(ctrl, "unexpected result while enabling event source: "
  10229. + "0x%02x (tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
  10230. + buf[0], id.target_category, id.instance,
  10231. + reg.target_category);
  10232. + }
  10233. +
  10234. + return status;
  10236. +}
  10237. +
  10238. +static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
  10239. + struct ssam_event_registry reg,
  10240. + struct ssam_event_id id, u8 flags)
  10241. +{
  10242. + struct ssh_notification_params params;
  10243. + struct ssam_request rqst;
  10244. + struct ssam_response result;
  10245. + int status;
  10246. +
  10247. + u16 rqid = ssh_tc_to_rqid(id.target_category);
  10248. + u8 buf[1] = { 0x00 };
  10249. +
  10250. + // only allow RQIDs that lie within event spectrum
  10251. + if (!ssh_rqid_is_event(rqid))
  10252. + return -EINVAL;
  10253. +
  10254. + params.target_category = id.target_category;
  10255. + params.instance_id = id.instance;
  10256. + params.flags = flags;
  10257. + put_unaligned_le16(rqid, &params.request_id);
  10258. +
  10259. + rqst.target_category = reg.target_category;
  10260. + rqst.command_id = reg.cid_disable;
  10261. + rqst.instance_id = 0x00;
  10262. + rqst.channel = reg.channel;
  10263. + rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
  10264. + rqst.length = sizeof(params);
  10265. + rqst.payload = (u8 *)&params;
  10266. +
  10267. + result.capacity = ARRAY_SIZE(buf);
  10268. + result.length = 0;
  10269. + result.pointer = buf;
  10270. +
  10271. + status = ssam_request_sync_onstack(ctrl, &rqst, &result, sizeof(params));
  10272. + if (status) {
  10273. + ssam_err(ctrl, "failed to disable event source "
  10274. + "(tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
  10275. + id.target_category, id.instance, reg.target_category);
  10276. + }
  10277. +
  10278. + if (buf[0] != 0x00) {
  10279. + ssam_warn(ctrl, "unexpected result while disabling event source: "
  10280. + "0x%02x (tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
  10281. + buf[0], id.target_category, id.instance,
  10282. + reg.target_category);
  10283. + }
  10284. +
  10285. + return status;
  10286. +}
  10287. +
  10288. +
  10289. +/* -- Wrappers for internal SAM requests. ----------------------------------- */
  10290. +
  10291. +static int ssam_log_firmware_version(struct ssam_controller *ctrl)
  10292. +{
  10293. + __le32 __version;
  10294. + u32 version, a, b, c;
  10295. + int status;
  10296. +
  10297. + status = ssam_ssh_get_firmware_version(ctrl, &__version);
  10298. + if (status)
  10299. + return status;
  10300. +
  10301. + version = le32_to_cpu(__version);
  10302. + a = (version >> 24) & 0xff;
  10303. + b = ((version >> 8) & 0xffff);
  10304. + c = version & 0xff;
  10305. +
  10306. + ssam_info(ctrl, "SAM controller version: %u.%u.%u\n", a, b, c);
  10307. + return 0;
  10308. +}
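+
+/*
+ * Example of the decoding above (hypothetical raw value, for illustration
+ * only): __version = 0x01020304 yields a = 0x01 = 1, b = 0x0203 = 515,
+ * c = 0x04 = 4, i.e. "SAM controller version: 1.515.4".
+ */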
  10309. +
  10310. +static int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl)
  10311. +{
  10312. + int status;
  10313. + u8 response;
  10314. +
  10315. + if (!ctrl->caps.notif_display)
  10316. + return 0;
  10317. +
  10318. + ssam_dbg(ctrl, "pm: notifying display off\n");
  10319. +
  10320. + status = ssam_ssh_notif_display_off(ctrl, &response);
  10321. + if (status)
  10322. + return status;
  10323. +
  10324. + if (response != 0) {
  10325. + ssam_err(ctrl, "unexpected response from display-off notification: "
  10326. + "0x%02x\n", response);
  10327. + return -EIO;
  10328. + }
  10329. +
  10330. + return 0;
  10331. +}
  10332. +
  10333. +static int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl)
  10334. +{
  10335. + int status;
  10336. + u8 response;
  10337. +
  10338. + if (!ctrl->caps.notif_display)
  10339. + return 0;
  10340. +
  10341. + ssam_dbg(ctrl, "pm: notifying display on\n");
  10342. +
  10343. + status = ssam_ssh_notif_display_on(ctrl, &response);
  10344. + if (status)
  10345. + return status;
  10346. +
  10347. + if (response != 0) {
  10348. + ssam_err(ctrl, "unexpected response from display-on notification: "
  10349. + "0x%02x\n", response);
  10350. + return -EIO;
  10351. + }
  10352. +
  10353. + return 0;
  10354. +}
  10355. +
  10356. +static int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl)
  10357. +{
  10358. + int status;
  10359. + u8 response;
  10360. +
  10361. + if (!ctrl->caps.notif_d0exit)
  10362. + return 0;
  10363. +
  10364. + ssam_dbg(ctrl, "pm: notifying D0 exit\n");
  10365. +
  10366. + status = ssam_ssh_notif_d0_exit(ctrl, &response);
  10367. + if (status)
  10368. + return status;
  10369. +
  10370. + if (response != 0) {
  10371. + ssam_err(ctrl, "unexpected response from D0-exit notification: "
  10372. + "0x%02x\n", response);
  10373. + return -EIO;
  10374. + }
  10375. +
  10376. + return 0;
  10377. +}
  10378. +
  10379. +static int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
  10380. +{
  10381. + int status;
  10382. + u8 response;
  10383. +
  10384. + if (!ctrl->caps.notif_d0exit)
  10385. + return 0;
  10386. +
  10387. + ssam_dbg(ctrl, "pm: notifying D0 entry\n");
  10388. +
  10389. + status = ssam_ssh_notif_d0_entry(ctrl, &response);
  10390. + if (status)
  10391. + return status;
  10392. +
  10393. + if (response != 0) {
  10394. + ssam_err(ctrl, "unexpected response from D0-entry notification: "
  10395. + "0x%02x\n", response);
  10396. + return -EIO;
  10397. + }
  10398. +
  10399. + return 0;
  10400. +}
  10401. +
  10402. +
  10403. +/* -- Top-level event registry interface. ----------------------------------- */
  10404. +
  10405. +int ssam_notifier_register(struct ssam_controller *ctrl,
  10406. + struct ssam_event_notifier *n)
  10407. +{
  10408. + u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
  10409. + struct ssam_nf_head *nf_head;
  10410. + struct ssam_nf *nf;
  10411. + int rc, status;
  10412. +
  10413. + if (!ssh_rqid_is_event(rqid))
  10414. + return -EINVAL;
  10415. +
  10416. + nf = &ctrl->cplt.event.notif;
  10417. + nf_head = &nf->head[ssh_rqid_to_event(rqid)];
  10418. +
  10419. + mutex_lock(&nf->lock);
  10420. +
  10421. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_STARTED) {
  10422. + mutex_unlock(&nf->lock);
  10423. + return -ENXIO;
  10424. + }
  10425. +
  10426. + rc = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
  10427. + if (rc < 0) {
  10428. + mutex_unlock(&nf->lock);
  10429. + return rc;
  10430. + }
  10431. +
  10432. + ssam_dbg(ctrl, "enabling event (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x, "
  10433. + "rc: %d)\n", n->event.reg.target_category,
  10434. + n->event.id.target_category, n->event.id.instance, rc);
  10435. +
  10436. + status = __ssam_nfblk_insert(nf_head, &n->base);
  10437. + if (status) {
  10438. + ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
  10439. + mutex_unlock(&nf->lock);
  10440. + return status;
  10441. + }
  10442. +
  10443. + if (rc == 1) {
  10444. + status = ssam_ssh_event_enable(ctrl, n->event.reg, n->event.id,
  10445. + n->event.flags);
  10446. + if (status) {
  10447. + __ssam_nfblk_remove(nf_head, &n->base);
  10448. + ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
  10449. + mutex_unlock(&nf->lock);
  10450. + synchronize_srcu(&nf_head->srcu);
  10451. + return status;
  10452. + }
  10453. + }
  10454. +
  10455. + mutex_unlock(&nf->lock);
  10456. + return 0;
  10458. +}
  10459. +EXPORT_SYMBOL_GPL(ssam_notifier_register);
  10460. +
  10461. +int ssam_notifier_unregister(struct ssam_controller *ctrl,
  10462. + struct ssam_event_notifier *n)
  10463. +{
  10464. + u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
  10465. + struct ssam_nf_head *nf_head;
  10466. + struct ssam_nf *nf;
  10467. + int rc, status = 0;
  10468. +
  10469. + if (!ssh_rqid_is_event(rqid))
  10470. + return -EINVAL;
  10471. +
  10472. + nf = &ctrl->cplt.event.notif;
  10473. + nf_head = &nf->head[ssh_rqid_to_event(rqid)];
  10474. +
  10475. + mutex_lock(&nf->lock);
  10476. +
  10477. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_STARTED) {
  10478. + mutex_unlock(&nf->lock);
  10479. + return -ENXIO;
  10480. + }
  10481. +
  10482. + rc = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
  10483. + if (rc < 0) {
  10484. + mutex_unlock(&nf->lock);
  10485. + return rc;
  10486. + }
  10487. +
  10488. + ssam_dbg(ctrl, "disabling event (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x, "
  10489. + "rc: %d)\n", n->event.reg.target_category,
  10490. + n->event.id.target_category, n->event.id.instance, rc);
  10491. +
  10492. + if (rc == 0) {
  10493. + status = ssam_ssh_event_disable(ctrl, n->event.reg, n->event.id,
  10494. + n->event.flags);
  10495. + }
  10496. +
  10497. + __ssam_nfblk_remove(nf_head, &n->base);
  10498. + mutex_unlock(&nf->lock);
  10499. + synchronize_srcu(&nf_head->srcu);
  10500. +
  10501. + return status;
  10502. +}
  10503. +EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
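+
+/*
+ * Usage sketch (illustration only; registry/event values are hypothetical):
+ * a client registering and later unregistering an event notifier. Setup of
+ * the embedded notifier block (n.base), i.e. the event callback, is omitted
+ * here as its members are defined elsewhere in this patch.
+ *
+ *	struct ssam_event_notifier n;
+ *
+ *	// ... set up n.base (event callback) ...
+ *	n.event.reg.target_category = SSAM_SSH_TC_SAM;	// hypothetical registry
+ *	n.event.reg.channel = 0x01;
+ *	n.event.reg.cid_enable = 0x0b;			// hypothetical command IDs
+ *	n.event.reg.cid_disable = 0x0c;
+ *	n.event.id.target_category = SSAM_SSH_TC_BAT;	// hypothetical event ID
+ *	n.event.id.instance = 0x00;
+ *	n.event.flags = SSAM_EVENT_SEQUENCED;
+ *
+ *	status = ssam_notifier_register(ctrl, &n);
+ *	// ... events are delivered via the callback ...
+ *	status = ssam_notifier_unregister(ctrl, &n);
+ */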
  10504. +
  10505. +static bool ssam_notifier_empty(struct ssam_controller *ctrl)
  10506. +{
  10507. + struct ssam_nf *nf = &ctrl->cplt.event.notif;
  10508. + bool result;
  10509. +
  10510. + mutex_lock(&nf->lock);
  10511. + result = ssam_nf_refcount_empty(nf);
  10512. + mutex_unlock(&nf->lock);
  10513. +
  10514. + return result;
  10515. +}
  10516. +
  10517. +static void ssam_notifier_unregister_all(struct ssam_controller *ctrl)
  10518. +{
  10519. + struct ssam_nf *nf = &ctrl->cplt.event.notif;
  10520. + struct ssam_nf_refcount_entry *pos, *n;
  10521. +
  10522. + mutex_lock(&nf->lock);
  10523. + rbtree_postorder_for_each_entry_safe(pos, n, &nf->refcount, node) {
  10524. + // ignore errors, will get logged in call
  10525. + ssam_ssh_event_disable(ctrl, pos->key.reg, pos->key.id, 0);
  10526. + kfree(pos);
  10527. + }
  10528. + nf->refcount = RB_ROOT;
  10529. + mutex_unlock(&nf->lock);
  10530. +}
  10531. +
  10532. +
  10533. +/* -- Wakeup IRQ. ----------------------------------------------------------- */
  10534. +
  10535. +static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false };
  10536. +static const struct acpi_gpio_params gpio_ssam_wakeup = { 1, 0, false };
  10537. +
  10538. +static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
  10539. + { "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 },
  10540. + { "ssam_wakeup-gpio", &gpio_ssam_wakeup, 1 },
  10541. + { },
  10542. +};
  10543. +
  10544. +static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
  10545. +{
  10546. + struct ssam_controller *ctrl = dev_id;
  10547. +
  10548. + ssam_dbg(ctrl, "pm: wake irq triggered\n");
  10549. +
  10550. + // Note: Proper wakeup detection is currently unimplemented.
  10551. + // When the EC is in display-off or any other non-D0 state, it
  10552. + // does not send events/notifications to the host. Instead it
  10553. + // signals that there are events available via the wakeup IRQ.
  10554. + // This driver is responsible for calling back to the EC to
  10555. + // release these events one-by-one.
  10556. + //
10557. + // This IRQ should not cause a full system resume on its own.
  10558. + // Instead, events should be handled by their respective subsystem
  10559. + // drivers, which in turn should signal whether a full system
  10560. + // resume should be performed.
  10561. + //
  10562. + // TODO: Send GPIO callback command repeatedly to EC until callback
  10563. + // returns 0x00. Return flag of callback is "has more events".
  10564. + // Each time the command is sent, one event is "released". Once
  10565. + // all events have been released (return = 0x00), the GPIO is
  10566. + // re-armed. Detect wakeup events during this process, go back to
  10567. + // sleep if no wakeup event has been received.
  10568. +
  10569. + return IRQ_HANDLED;
  10570. +}
  10571. +
  10572. +static int ssam_irq_setup(struct ssam_controller *ctrl)
  10573. +{
  10574. + struct device *dev = ssam_controller_device(ctrl);
  10575. + struct gpio_desc *gpiod;
  10576. + int irq;
  10577. + int status;
  10578. +
  10579. + /*
  10580. + * The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
  10581. + * However, the GPIO line only gets reset by sending the GPIO callback
  10582. + * command to SAM (or alternatively the display-on notification). As
  10583. + * proper handling for this interrupt is not implemented yet, leaving
  10584. + * the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
10585. + * never gets sent and thus the line never gets reset). To avoid
  10586. + * this, mark the IRQ as TRIGGER_RISING for now, only creating a single
  10587. + * interrupt, and let the SAM resume callback during the controller
  10588. + * resume process clear it.
  10589. + */
  10590. + const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
  10591. +
  10592. + gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
  10593. + if (IS_ERR(gpiod))
  10594. + return PTR_ERR(gpiod);
  10595. +
  10596. + irq = gpiod_to_irq(gpiod);
  10597. + gpiod_put(gpiod);
  10598. +
  10599. + if (irq < 0)
  10600. + return irq;
  10601. +
  10602. + status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
  10603. + "surface_sam_wakeup", ctrl);
  10604. + if (status)
  10605. + return status;
  10606. +
  10607. + ctrl->irq.num = irq;
  10608. + return 0;
  10609. +}
  10610. +
  10611. +static void ssam_irq_free(struct ssam_controller *ctrl)
  10612. +{
  10613. + free_irq(ctrl->irq.num, ctrl);
  10614. + ctrl->irq.num = -1;
  10615. +}
  10616. +
  10617. +
  10618. +/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
  10619. +
  10620. +static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
  10621. + size_t n)
  10622. +{
  10623. + struct ssam_controller *ctrl = serdev_device_get_drvdata(dev);
  10624. + return ssam_controller_receive_buf(ctrl, buf, n);
  10625. +}
  10626. +
  10627. +static void ssam_write_wakeup(struct serdev_device *dev)
  10628. +{
  10629. + struct ssam_controller *ctrl = serdev_device_get_drvdata(dev);
  10630. + ssam_controller_write_wakeup(ctrl);
  10631. +}
  10632. +
  10633. +static const struct serdev_device_ops ssam_serdev_ops = {
  10634. + .receive_buf = ssam_receive_buf,
  10635. + .write_wakeup = ssam_write_wakeup,
  10636. +};
  10637. +
  10638. +
  10639. +/* -- ACPI based device setup. ---------------------------------------------- */
  10640. +
  10641. +static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
  10642. + void *ctx)
  10643. +{
  10644. + struct serdev_device *serdev = ctx;
  10645. + struct acpi_resource_common_serialbus *serial;
  10646. + struct acpi_resource_uart_serialbus *uart;
  10647. + bool flow_control;
  10648. + int status = 0;
  10649. +
  10650. + if (rsc->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
  10651. + return AE_OK;
  10652. +
  10653. + serial = &rsc->data.common_serial_bus;
  10654. + if (serial->type != ACPI_RESOURCE_SERIAL_TYPE_UART)
  10655. + return AE_OK;
  10656. +
  10657. + uart = &rsc->data.uart_serial_bus;
  10658. +
  10659. + // set up serdev device
  10660. + serdev_device_set_baudrate(serdev, uart->default_baud_rate);
  10661. +
  10662. + // serdev currently only supports RTSCTS flow control
  10663. + if (uart->flow_control & (~((u8) ACPI_UART_FLOW_CONTROL_HW))) {
  10664. + dev_warn(&serdev->dev, "setup: unsupported flow control"
  10665. + " (value: 0x%02x)\n", uart->flow_control);
  10666. + }
  10667. +
  10668. + // set RTSCTS flow control
  10669. + flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW;
  10670. + serdev_device_set_flow_control(serdev, flow_control);
  10671. +
10672. + // serdev currently only supports none/even/odd parity
  10673. + switch (uart->parity) {
  10674. + case ACPI_UART_PARITY_NONE:
  10675. + status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
  10676. + break;
  10677. + case ACPI_UART_PARITY_EVEN:
  10678. + status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
  10679. + break;
  10680. + case ACPI_UART_PARITY_ODD:
  10681. + status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD);
  10682. + break;
  10683. + default:
  10684. + dev_warn(&serdev->dev, "setup: unsupported parity"
  10685. + " (value: 0x%02x)\n", uart->parity);
  10686. + break;
  10687. + }
  10688. +
  10689. + if (status) {
  10690. + dev_err(&serdev->dev, "setup: failed to set parity"
  10691. + " (value: 0x%02x)\n", uart->parity);
  10692. + return status;
  10693. + }
  10694. +
  10695. + return AE_CTRL_TERMINATE; // we've found the resource and are done
  10696. +}
  10697. +
  10698. +static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle,
  10699. + struct serdev_device *serdev)
  10700. +{
  10701. + return acpi_walk_resources(handle, METHOD_NAME__CRS,
  10702. + ssam_serdev_setup_via_acpi_crs, serdev);
  10703. +}
  10704. +
  10705. +
  10706. +/* -- Power management. ----------------------------------------------------- */
  10707. +
  10708. +static void surface_sam_ssh_shutdown(struct device *dev)
  10709. +{
  10710. + struct ssam_controller *c = dev_get_drvdata(dev);
  10711. + int status;
  10712. +
  10713. + /*
  10714. + * Try to signal display-off and D0-exit, ignore any errors.
  10715. + *
  10716. + * Note: It has not been established yet if this is actually
  10717. + * necessary/useful for shutdown.
  10718. + */
  10719. +
  10720. + status = ssam_ctrl_notif_display_off(c);
  10721. + if (status)
  10722. + ssam_err(c, "pm: display-off notification failed: %d\n", status);
  10723. +
  10724. + status = ssam_ctrl_notif_d0_exit(c);
  10725. + if (status)
  10726. + ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
  10727. +}
  10728. +
  10729. +static int surface_sam_ssh_suspend(struct device *dev)
  10730. +{
  10731. + struct ssam_controller *c = dev_get_drvdata(dev);
  10732. + int status;
  10733. +
  10734. + /*
  10735. + * Try to signal display-off and D0-exit, enable IRQ wakeup if
  10736. + * specified. Abort on error.
  10737. + *
  10738. + * Note: Signalling display-off/display-on should normally be done from
  10739. + * some sort of display state notifier. As that is not available, signal
  10740. + * it here.
  10741. + */
  10742. +
  10743. + status = ssam_ctrl_notif_display_off(c);
  10744. + if (status) {
  10745. + ssam_err(c, "pm: display-off notification failed: %d\n", status);
  10746. + return status;
  10747. + }
  10748. +
  10749. + status = ssam_ctrl_notif_d0_exit(c);
  10750. + if (status) {
  10751. + ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
  10752. + goto err_notif;
  10753. + }
  10754. +
  10755. + if (device_may_wakeup(dev)) {
  10756. + status = enable_irq_wake(c->irq.num);
  10757. + if (status) {
  10758. + ssam_err(c, "failed to disable wake IRQ: %d\n", status);
  10759. + goto err_irq;
  10760. + }
  10761. +
  10762. + c->irq.wakeup_enabled = true;
  10763. + } else {
  10764. + c->irq.wakeup_enabled = false;
  10765. + }
  10766. +
  10767. + WARN_ON(ssam_controller_suspend(c));
  10768. + return 0;
  10769. +
  10770. +err_irq:
  10771. + ssam_ctrl_notif_d0_entry(c);
  10772. +err_notif:
  10773. + ssam_ctrl_notif_display_on(c);
  10774. + return status;
  10775. +}
  10776. +
  10777. +static int surface_sam_ssh_resume(struct device *dev)
  10778. +{
  10779. + struct ssam_controller *c = dev_get_drvdata(dev);
  10780. + int status;
  10781. +
  10782. + WARN_ON(ssam_controller_resume(c));
  10783. +
  10784. + /*
  10785. + * Try to disable IRQ wakeup (if specified), signal display-on and
  10786. + * D0-entry. In case of errors, log them and try to restore normal
  10787. + * operation state as far as possible.
  10788. + *
  10789. + * Note: Signalling display-off/display-on should normally be done from
  10790. + * some sort of display state notifier. As that is not available, signal
  10791. + * it here.
  10792. + */
  10793. +
  10794. + if (c->irq.wakeup_enabled) {
  10795. + status = disable_irq_wake(c->irq.num);
  10796. + if (status)
  10797. + ssam_err(c, "failed to disable wake IRQ: %d\n", status);
  10798. +
  10799. + c->irq.wakeup_enabled = false;
  10800. + }
  10801. +
  10802. + status = ssam_ctrl_notif_d0_entry(c);
  10803. + if (status)
  10804. + ssam_err(c, "pm: display-on notification failed: %d\n", status);
  10805. +
  10806. + status = ssam_ctrl_notif_display_on(c);
  10807. + if (status)
  10808. + ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
  10809. +
  10810. + return 0;
  10811. +}
  10812. +
  10813. +static SIMPLE_DEV_PM_OPS(surface_sam_ssh_pm_ops, surface_sam_ssh_suspend,
  10814. + surface_sam_ssh_resume);
  10815. +
  10816. +
  10817. +/* -- Device/driver setup. -------------------------------------------------- */
  10818. +
  10819. +static struct ssam_controller ssam_controller = {
  10820. + .state = SSAM_CONTROLLER_UNINITIALIZED,
  10821. +};
  10822. +static DEFINE_MUTEX(ssam_controller_lock);
  10823. +
  10824. +static int __ssam_client_link(struct ssam_controller *c, struct device *client)
  10825. +{
  10826. + const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
  10827. + struct device_link *link;
  10828. + struct device *ctrldev;
  10829. +
  10830. + if (smp_load_acquire(&c->state) != SSAM_CONTROLLER_STARTED)
  10831. + return -ENXIO;
  10832. +
  10833. + if ((ctrldev = ssam_controller_device(c)) == NULL)
  10834. + return -ENXIO;
  10835. +
  10836. + if ((link = device_link_add(client, ctrldev, flags)) == NULL)
  10837. + return -ENOMEM;
  10838. +
  10839. + /*
  10840. + * Return -ENXIO if supplier driver is on its way to be removed. In this
  10841. + * case, the controller won't be around for much longer and the device
  10842. + * link is not going to save us any more, as unbinding is already in
  10843. + * progress.
  10844. + */
  10845. + if (link->status == DL_STATE_SUPPLIER_UNBIND)
  10846. + return -ENXIO;
  10847. +
  10848. + return 0;
  10849. +}
  10850. +
  10851. +int ssam_client_bind(struct device *client, struct ssam_controller **ctrl)
  10852. +{
  10853. + struct ssam_controller *c = &ssam_controller;
  10854. + int status;
  10855. +
  10856. + mutex_lock(&ssam_controller_lock);
  10857. + status = __ssam_client_link(c, client);
  10858. + mutex_unlock(&ssam_controller_lock);
  10859. +
  10860. + *ctrl = status == 0 ? c : NULL;
  10861. + return status;
  10862. +}
  10863. +EXPORT_SYMBOL_GPL(ssam_client_bind);
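+
+/*
+ * Usage sketch (illustration only, hypothetical client driver): binding to
+ * the controller during probe. A failure with -ENXIO indicates that the
+ * controller is not (yet) ready; a client may choose to translate this into
+ * -EPROBE_DEFER to retry later.
+ *
+ *	static int example_client_probe(struct platform_device *pdev)
+ *	{
+ *		struct ssam_controller *ctrl;
+ *		int status;
+ *
+ *		status = ssam_client_bind(&pdev->dev, &ctrl);
+ *		if (status)
+ *			return status == -ENXIO ? -EPROBE_DEFER : status;
+ *
+ *		// ... issue requests / register notifiers via ctrl ...
+ *		return 0;
+ *	}
+ */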
  10864. +
  10865. +
  10866. +static int surface_sam_ssh_probe(struct serdev_device *serdev)
  10867. +{
  10868. + struct ssam_controller *ctrl = &ssam_controller;
  10869. + acpi_handle *ssh = ACPI_HANDLE(&serdev->dev);
  10870. + int status;
  10871. +
  10872. + if (gpiod_count(&serdev->dev, NULL) < 0)
  10873. + return -ENODEV;
  10874. +
  10875. + status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios);
  10876. + if (status)
  10877. + return status;
  10878. +
  10879. + // set up EC
  10880. + mutex_lock(&ssam_controller_lock);
  10881. +
  10882. + // initialize controller
  10883. + status = ssam_controller_init(ctrl, serdev);
  10884. + if (status)
  10885. + goto err_ctrl_init;
  10886. +
  10887. + // set up serdev device
  10888. + serdev_device_set_drvdata(serdev, ctrl);
  10889. + serdev_device_set_client_ops(serdev, &ssam_serdev_ops);
  10890. + status = serdev_device_open(serdev);
  10891. + if (status)
  10892. + goto err_devopen;
  10893. +
  10894. + status = ssam_serdev_setup_via_acpi(ssh, serdev);
  10895. + if (ACPI_FAILURE(status))
  10896. + goto err_devinit;
  10897. +
  10898. + // start controller
  10899. + status = ssam_controller_start(ctrl);
  10900. + if (status)
  10901. + goto err_devinit;
  10902. +
  10903. + // initial SAM requests: log version, notify default/init power states
  10904. + status = ssam_log_firmware_version(ctrl);
  10905. + if (status)
  10906. + goto err_initrq;
  10907. +
  10908. + status = ssam_ctrl_notif_d0_entry(ctrl);
  10909. + if (status)
  10910. + goto err_initrq;
  10911. +
  10912. + status = ssam_ctrl_notif_display_on(ctrl);
  10913. + if (status)
  10914. + goto err_initrq;
  10915. +
  10916. + // setup IRQ
  10917. + status = ssam_irq_setup(ctrl);
  10918. + if (status)
  10919. + goto err_initrq;
  10920. +
  10921. + mutex_unlock(&ssam_controller_lock);
  10922. +
  10923. + /*
  10924. + * TODO: The EC can wake up the system via the associated GPIO interrupt
10925. + * in multiple situations, one of which is the remaining battery
10926. + * capacity falling below a certain threshold. Normally, we would use
10927. + * device_init_wakeup() here; however, the EC also wakes the system for
10928. + * other reasons, and Windows appears to perform additional checks to
10929. + * decide whether the system should actually resume. In short, enabling
10930. + * wakeup unconditionally causes spurious, unwanted wake-ups. For now,
10931. + * default the power/wakeup attribute to disabled.
  10932. + */
  10933. + device_set_wakeup_capable(&serdev->dev, true);
  10934. + acpi_walk_dep_device_list(ssh);
  10935. +
  10936. + return 0;
  10937. +
  10938. +err_initrq:
  10939. + ssam_controller_shutdown(ctrl);
  10940. +err_devinit:
  10941. + serdev_device_close(serdev);
  10942. +err_devopen:
  10943. + ssam_controller_destroy(ctrl);
  10944. +err_ctrl_init:
  10945. + serdev_device_set_drvdata(serdev, NULL);
  10946. + mutex_unlock(&ssam_controller_lock);
  10947. + return status;
  10948. +}
  10949. +
  10950. +static void surface_sam_ssh_remove(struct serdev_device *serdev)
  10951. +{
  10952. + struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev);
  10953. + int status;
  10954. +
  10955. + mutex_lock(&ssam_controller_lock);
  10956. + ssam_irq_free(ctrl);
  10957. +
  10958. + // suspend EC and disable events
  10959. + status = ssam_ctrl_notif_display_off(ctrl);
  10960. + if (status) {
  10961. + dev_err(&serdev->dev, "display-off notification failed: %d\n",
  10962. + status);
  10963. + }
  10964. +
  10965. + status = ssam_ctrl_notif_d0_exit(ctrl);
  10966. + if (status) {
  10967. + dev_err(&serdev->dev, "D0-exit notification failed: %d\n",
  10968. + status);
  10969. + }
  10970. +
  10971. + ssam_controller_shutdown(ctrl);
  10972. +
  10973. + // shut down actual transport
  10974. + serdev_device_wait_until_sent(serdev, 0);
  10975. + serdev_device_close(serdev);
  10976. +
  10977. + ssam_controller_destroy(ctrl);
  10978. +
  10979. + device_set_wakeup_capable(&serdev->dev, false);
  10980. + serdev_device_set_drvdata(serdev, NULL);
  10981. + mutex_unlock(&ssam_controller_lock);
  10982. +}
  10983. +
  10984. +
  10985. +static const struct acpi_device_id surface_sam_ssh_match[] = {
  10986. + { "MSHW0084", 0 },
  10987. + { },
  10988. +};
  10989. +MODULE_DEVICE_TABLE(acpi, surface_sam_ssh_match);
  10990. +
  10991. +static struct serdev_device_driver surface_sam_ssh = {
  10992. + .probe = surface_sam_ssh_probe,
  10993. + .remove = surface_sam_ssh_remove,
  10994. + .driver = {
  10995. + .name = "surface_sam_ssh",
  10996. + .acpi_match_table = surface_sam_ssh_match,
  10997. + .pm = &surface_sam_ssh_pm_ops,
  10998. + .shutdown = surface_sam_ssh_shutdown,
  10999. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  11000. + },
  11001. +};
  11002. +
  11003. +
  11004. +/* -- Module setup. --------------------------------------------------------- */
  11005. +
  11006. +static int __init surface_sam_ssh_init(void)
  11007. +{
  11008. + int status;
  11009. +
  11010. + status = ssh_ctrl_packet_cache_init();
  11011. + if (status)
  11012. + goto err_cpkg;
  11013. +
  11014. + status = ssam_event_item_cache_init();
  11015. + if (status)
  11016. + goto err_evitem;
  11017. +
  11018. + status = serdev_device_driver_register(&surface_sam_ssh);
  11019. + if (status)
  11020. + goto err_register;
  11021. +
  11022. + return 0;
  11023. +
  11024. +err_register:
  11025. + ssam_event_item_cache_destroy();
  11026. +err_evitem:
  11027. + ssh_ctrl_packet_cache_destroy();
  11028. +err_cpkg:
  11029. + return status;
  11030. +}
  11031. +
  11032. +static void __exit surface_sam_ssh_exit(void)
  11033. +{
  11034. + serdev_device_driver_unregister(&surface_sam_ssh);
  11035. + ssam_event_item_cache_destroy();
  11036. + ssh_ctrl_packet_cache_destroy();
  11037. +}
  11038. +
  11039. +/*
  11040. + * Ensure that the driver is loaded late due to some issues with the UART
  11041. + * communication. Specifically, we want to ensure that DMA is ready and being
  11042. + * used. Not using DMA can result in spurious communication failures,
  11043. + * especially during boot, which among other things will result in wrong
11044. + * battery information (via ACPI _BIX) being displayed. Using a late initcall
  11045. + * instead of the normal module_init gives the DMA subsystem time to
11046. + * initialize, which in turn results in more stable communication, avoiding
  11047. + * such failures.
  11048. + */
  11049. +late_initcall(surface_sam_ssh_init);
  11050. +module_exit(surface_sam_ssh_exit);
  11051. +
  11052. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  11053. +MODULE_DESCRIPTION("Surface Serial Hub Driver for 5th Generation Surface Devices");
  11054. +MODULE_LICENSE("GPL");
  11055. diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh.h b/drivers/platform/x86/surface_sam/surface_sam_ssh.h
  11056. new file mode 100644
  11057. index 0000000000000..ba57adb2a3c9d
  11058. --- /dev/null
  11059. +++ b/drivers/platform/x86/surface_sam/surface_sam_ssh.h
  11060. @@ -0,0 +1,717 @@
  11061. +/* SPDX-License-Identifier: GPL-2.0-or-later */
  11062. +/*
  11063. + * Interface for Surface Serial Hub (SSH).
  11064. + *
11065. + * The SSH is the main communication hub between the host and
  11066. + * the Surface/System Aggregator Module (SAM) on newer Microsoft Surface
  11067. + * devices (Book 2, Pro 5, Laptops, ...). Also referred to as SAM-over-SSH.
  11068. + * Older devices (Book 1, Pro 4) use SAM-over-HID (via I2C).
  11069. + */
  11070. +
  11071. +#ifndef _SURFACE_SAM_SSH_H
  11072. +#define _SURFACE_SAM_SSH_H
  11073. +
  11074. +#include <linux/types.h>
  11075. +#include <linux/device.h>
  11076. +
  11077. +
  11078. +/* -- Data structures for SAM-over-SSH communication. ----------------------- */
  11079. +
  11080. +/**
  11081. + * enum ssh_frame_type - Frame types for SSH frames.
  11082. + * @SSH_FRAME_TYPE_DATA_SEQ: Indicates a data frame, followed by a payload with
  11083. + * the length specified in the ssh_frame.len field. This
  11084. + * frame is sequenced, meaning that an ACK is required.
  11085. + * @SSH_FRAME_TYPE_DATA_NSQ: Same as SSH_FRAME_TYPE_DATA_SEQ, but unsequenced,
  11086. + * meaning that the message does not have to be ACKed.
  11087. + * @SSH_FRAME_TYPE_ACK: Indicates an ACK message.
11088. + * @SSH_FRAME_TYPE_NAK: Indicates an error response for a previously sent
  11089. + * frame. In general, this means that the frame and/or
  11090. + * payload is malformed, e.g. a CRC is wrong. For command-
  11091. + * type payloads, this can also mean that the command is
  11092. + * invalid.
  11093. + */
  11094. +enum ssh_frame_type {
  11095. + SSH_FRAME_TYPE_DATA_SEQ = 0x80,
  11096. + SSH_FRAME_TYPE_DATA_NSQ = 0x00,
  11097. + SSH_FRAME_TYPE_ACK = 0x40,
  11098. + SSH_FRAME_TYPE_NAK = 0x04,
  11099. +};
  11100. +
  11101. +/**
  11102. + * struct ssh_frame - SSH communication frame.
  11103. + * @type: The type of the frame. See &enum ssh_frame_type.
  11104. + * @len: The length of the frame payload directly following the CRC for this
  11105. + * frame. Does not include the final CRC for that payload.
  11106. + * @seq: The sequence number for this message/exchange.
  11107. + */
  11108. +struct ssh_frame {
  11109. + u8 type;
  11110. + __le16 len;
  11111. + u8 seq;
  11112. +} __packed;
  11113. +
  11114. +static_assert(sizeof(struct ssh_frame) == 4);
  11115. +
  11116. +/*
  11117. + * Maximum SSH frame payload length in bytes. This is the physical maximum
  11118. + * length of the protocol. Implementations may set a more constrained limit.
  11119. + */
  11120. +#define SSH_FRAME_MAX_PAYLOAD_SIZE U16_MAX
  11121. +
  11122. +/**
  11123. + * enum ssh_payload_type - Type indicator for the SSH payload.
  11124. + * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command
  11125. + * payload.
  11126. + */
  11127. +enum ssh_payload_type {
  11128. + SSH_PLD_TYPE_CMD = 0x80,
  11129. +};
  11130. +
  11131. +/**
  11132. + * struct ssh_command - Payload of a command-type frame.
  11133. + * @type: The type of the payload. See &enum ssh_payload_type. Should be
  11134. + * SSH_PLD_TYPE_CMD for this struct.
  11135. + * @tc: Command target category.
11136. + * @chn_out: Output channel. Should be zero if this is an incoming (EC to host)
11137. + * message.
11138. + * @chn_in: Input channel. Should be zero if this is an outgoing (host to EC)
  11139. + * message.
  11140. + * @iid: Instance ID.
  11141. + * @rqid: Request ID. Used to match requests with responses and differentiate
  11142. + * between responses and events.
  11143. + * @cid: Command ID.
  11144. + */
  11145. +struct ssh_command {
  11146. + u8 type;
  11147. + u8 tc;
  11148. + u8 chn_out;
  11149. + u8 chn_in;
  11150. + u8 iid;
  11151. + __le16 rqid;
  11152. + u8 cid;
  11153. +} __packed;
  11154. +
  11155. +static_assert(sizeof(struct ssh_command) == 8);
  11156. +
  11157. +/*
  11158. + * Maximum SSH command payload length in bytes. This is the physical maximum
  11159. + * length of the protocol. Implementations may set a more constrained limit.
  11160. + */
  11161. +#define SSH_COMMAND_MAX_PAYLOAD_SIZE \
  11162. + (SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command))
  11163. +
  11164. +/**
  11165. + * struct ssh_notification_params - Command payload to enable/disable SSH
  11166. + * notifications.
  11167. + * @target_category: The target category for which notifications should be
  11168. + * enabled/disabled.
  11169. + * @flags: Flags determining how notifications are being sent.
  11170. + * @request_id: The request ID that is used to send these notifications.
  11171. + * @instance_id: The specific instance in the given target category for
  11172. + * which notifications should be enabled.
  11173. + */
  11174. +struct ssh_notification_params {
  11175. + u8 target_category;
  11176. + u8 flags;
  11177. + __le16 request_id;
  11178. + u8 instance_id;
  11179. +} __packed;
  11180. +
  11181. +static_assert(sizeof(struct ssh_notification_params) == 5);
  11182. +
  11183. +/**
11184. + * SSH message synchronization (SYN) bytes.
  11185. + */
  11186. +#define SSH_MSG_SYN ((u16)0x55aa)
  11187. +
  11188. +/**
  11189. + * Base-length of a SSH message. This is the minimum number of bytes required
  11190. + * to form a message. The actual message length is SSH_MSG_LEN_BASE plus the
  11191. + * length of the frame payload.
  11192. + */
  11193. +#define SSH_MSG_LEN_BASE (sizeof(struct ssh_frame) + 3ull * sizeof(u16))
  11194. +
  11195. +/**
  11196. + * Length of a SSH control message.
  11197. + */
  11198. +#define SSH_MSG_LEN_CTRL SSH_MSG_LEN_BASE
  11199. +
  11200. +/**
  11201. + * Length of a SSH message with payload of specified size.
  11202. + */
  11203. +#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + payload_size)
  11204. +
  11205. +/**
  11206. + * Length of a SSH command message with command payload of specified size.
  11207. + */
  11208. +#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \
  11209. + SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + payload_size)
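+
+/*
+ * Worked example (derived from the definitions above): sizeof(struct
+ * ssh_frame) == 4 and sizeof(struct ssh_command) == 8, so SSH_MSG_LEN_BASE is
+ * 4 + 3 * 2 = 10 bytes (the three u16 values being the SYN bytes, frame CRC
+ * and payload CRC), and a command message with an n-byte command payload
+ * occupies SSH_COMMAND_MESSAGE_LENGTH(n) = 10 + 8 + n = 18 + n bytes.
+ */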
  11210. +
  11211. +/**
  11212. + * Offset of the specified struct ssh_frame field in the raw SSH message data.
  11213. + */
  11214. +#define SSH_MSGOFFSET_FRAME(field) \
  11215. + (sizeof(u16) + offsetof(struct ssh_frame, field))
  11216. +
  11217. +/**
  11218. + * Offset of the specified struct ssh_command field in the raw SSH message data.
  11219. + */
  11220. +#define SSH_MSGOFFSET_COMMAND(field) \
  11221. + (2ull * sizeof(u16) + sizeof(struct ssh_frame) \
  11222. + + offsetof(struct ssh_command, field))
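+
+/*
+ * For illustration, with the struct layouts above: SSH_MSGOFFSET_FRAME(seq)
+ * = 2 + 3 = 5 and SSH_MSGOFFSET_COMMAND(rqid) = 2 * 2 + 4 + 5 = 13, i.e. the
+ * request ID is located at byte offset 13 of the raw message.
+ */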
  11223. +
  11224. +/**
  11225. + * struct ssam_span - reference to a buffer region
  11226. + * @ptr: pointer to the buffer region
  11227. + * @len: length of the buffer region
  11228. + *
  11229. + * A reference to a (non-owned) buffer segment, consisting of pointer and
  11230. + * length. Use of this struct indicates non-owned data, i.e. data of which the
  11231. + * life-time is managed (i.e. it is allocated/freed) via another pointer.
  11232. + */
  11233. +struct ssam_span {
  11234. + u8 *ptr;
  11235. + size_t len;
  11236. +};
  11237. +
  11238. +
  11239. +/* -- Packet transport layer (ptl). ----------------------------------------- */
  11240. +
  11241. +enum ssh_packet_priority {
  11242. + SSH_PACKET_PRIORITY_FLUSH = 0,
  11243. + SSH_PACKET_PRIORITY_DATA = 0,
  11244. + SSH_PACKET_PRIORITY_NAK = 1 << 4,
  11245. + SSH_PACKET_PRIORITY_ACK = 2 << 4,
  11246. +};
  11247. +
  11248. +#define SSH_PACKET_PRIORITY(base, try) \
  11249. + ((SSH_PACKET_PRIORITY_##base) | ((try) & 0x0f))
  11250. +
  11251. +#define ssh_packet_priority_get_try(p) ((p) & 0x0f)
  11252. +
  11253. +
  11254. +enum ssh_packet_flags {
  11255. + SSH_PACKET_SF_LOCKED_BIT,
  11256. + SSH_PACKET_SF_QUEUED_BIT,
  11257. + SSH_PACKET_SF_PENDING_BIT,
  11258. + SSH_PACKET_SF_TRANSMITTING_BIT,
  11259. + SSH_PACKET_SF_TRANSMITTED_BIT,
  11260. + SSH_PACKET_SF_ACKED_BIT,
  11261. + SSH_PACKET_SF_CANCELED_BIT,
  11262. + SSH_PACKET_SF_COMPLETED_BIT,
  11263. +
  11264. + SSH_PACKET_TY_FLUSH_BIT,
  11265. + SSH_PACKET_TY_SEQUENCED_BIT,
  11266. + SSH_PACKET_TY_BLOCKING_BIT,
  11267. +
  11268. + SSH_PACKET_FLAGS_SF_MASK =
  11269. + BIT(SSH_PACKET_SF_LOCKED_BIT)
  11270. + | BIT(SSH_PACKET_SF_QUEUED_BIT)
  11271. + | BIT(SSH_PACKET_SF_PENDING_BIT)
  11272. + | BIT(SSH_PACKET_SF_TRANSMITTING_BIT)
  11273. + | BIT(SSH_PACKET_SF_TRANSMITTED_BIT)
  11274. + | BIT(SSH_PACKET_SF_ACKED_BIT)
  11275. + | BIT(SSH_PACKET_SF_CANCELED_BIT)
  11276. + | BIT(SSH_PACKET_SF_COMPLETED_BIT),
  11277. +
  11278. + SSH_PACKET_FLAGS_TY_MASK =
  11279. + BIT(SSH_PACKET_TY_FLUSH_BIT)
  11280. + | BIT(SSH_PACKET_TY_SEQUENCED_BIT)
  11281. + | BIT(SSH_PACKET_TY_BLOCKING_BIT),
  11282. +};
  11283. +
  11284. +
  11285. +struct ssh_ptl;
  11286. +struct ssh_packet;
  11287. +
  11288. +struct ssh_packet_ops {
  11289. + void (*release)(struct ssh_packet *p);
  11290. + void (*complete)(struct ssh_packet *p, int status);
  11291. +};
  11292. +
  11293. +struct ssh_packet {
  11294. + struct ssh_ptl *ptl;
  11295. + struct kref refcnt;
  11296. +
  11297. + u8 priority;
  11298. +
  11299. + struct {
  11300. + size_t len;
  11301. + u8 *ptr;
  11302. + } data;
  11303. +
  11304. + unsigned long state;
  11305. + ktime_t timestamp;
  11306. +
  11307. + struct list_head queue_node;
  11308. + struct list_head pending_node;
  11309. +
  11310. + const struct ssh_packet_ops *ops;
  11311. +};
  11312. +
  11313. +
  11314. +void ssh_packet_get(struct ssh_packet *p);
  11315. +void ssh_packet_put(struct ssh_packet *p);
  11316. +
  11317. +static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len)
  11318. +{
  11319. + p->data.ptr = ptr;
  11320. + p->data.len = len;
  11321. +}
  11322. +
  11323. +
  11324. +/* -- Request transport layer (rtl). ---------------------------------------- */
  11325. +
  11326. +enum ssh_request_flags {
  11327. + SSH_REQUEST_SF_LOCKED_BIT,
  11328. + SSH_REQUEST_SF_QUEUED_BIT,
  11329. + SSH_REQUEST_SF_PENDING_BIT,
  11330. + SSH_REQUEST_SF_TRANSMITTING_BIT,
  11331. + SSH_REQUEST_SF_TRANSMITTED_BIT,
  11332. + SSH_REQUEST_SF_RSPRCVD_BIT,
  11333. + SSH_REQUEST_SF_CANCELED_BIT,
  11334. + SSH_REQUEST_SF_COMPLETED_BIT,
  11335. +
  11336. + SSH_REQUEST_TY_FLUSH_BIT,
  11337. + SSH_REQUEST_TY_HAS_RESPONSE_BIT,
  11338. +
  11339. + SSH_REQUEST_FLAGS_SF_MASK =
  11340. + BIT(SSH_REQUEST_SF_LOCKED_BIT)
  11341. + | BIT(SSH_REQUEST_SF_QUEUED_BIT)
  11342. + | BIT(SSH_REQUEST_SF_PENDING_BIT)
  11343. + | BIT(SSH_REQUEST_SF_TRANSMITTING_BIT)
  11344. + | BIT(SSH_REQUEST_SF_TRANSMITTED_BIT)
  11345. + | BIT(SSH_REQUEST_SF_RSPRCVD_BIT)
  11346. + | BIT(SSH_REQUEST_SF_CANCELED_BIT)
  11347. + | BIT(SSH_REQUEST_SF_COMPLETED_BIT),
  11348. +
  11349. + SSH_REQUEST_FLAGS_TY_MASK =
  11350. + BIT(SSH_REQUEST_TY_FLUSH_BIT)
  11351. + | BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),
  11352. +};
  11353. +
  11354. +
  11355. +struct ssh_rtl;
  11356. +struct ssh_request;
  11357. +
  11358. +struct ssh_request_ops {
  11359. + void (*release)(struct ssh_request *rqst);
  11360. + void (*complete)(struct ssh_request *rqst,
  11361. + const struct ssh_command *cmd,
  11362. + const struct ssam_span *data, int status);
  11363. +};
  11364. +
  11365. +struct ssh_request {
  11366. + struct ssh_packet packet;
  11367. + struct list_head node;
  11368. +
  11369. + unsigned long state;
  11370. + ktime_t timestamp;
  11371. +
  11372. + const struct ssh_request_ops *ops;
  11373. +};
  11374. +
  11375. +
  11376. +static inline void ssh_request_get(struct ssh_request *r)
  11377. +{
  11378. + ssh_packet_get(&r->packet);
  11379. +}
  11380. +
  11381. +static inline void ssh_request_put(struct ssh_request *r)
  11382. +{
  11383. + ssh_packet_put(&r->packet);
  11384. +}
  11385. +
  11386. +static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len)
  11387. +{
  11388. + ssh_packet_set_data(&r->packet, ptr, len);
  11389. +}
  11390. +
  11391. +
  11392. +/* -- Main data types and definitions --------------------------------------- */
  11393. +
  11394. +enum ssam_ssh_tc {
  11395. + SSAM_SSH_TC_SAM = 0x01, // generic system functionality, real-time clock
  11396. + SSAM_SSH_TC_BAT = 0x02, // battery/power subsystem
  11397. + SSAM_SSH_TC_TMP = 0x03, // thermal subsystem
  11398. + SSAM_SSH_TC_PMC = 0x04,
  11399. + SSAM_SSH_TC_FAN = 0x05,
  11400. + SSAM_SSH_TC_PoM = 0x06,
  11401. + SSAM_SSH_TC_DBG = 0x07,
  11402. + SSAM_SSH_TC_KBD = 0x08, // legacy keyboard (Laptop 1/2)
  11403. + SSAM_SSH_TC_FWU = 0x09,
  11404. + SSAM_SSH_TC_UNI = 0x0a,
  11405. + SSAM_SSH_TC_LPC = 0x0b,
  11406. + SSAM_SSH_TC_TCL = 0x0c,
  11407. + SSAM_SSH_TC_SFL = 0x0d,
  11408. + SSAM_SSH_TC_KIP = 0x0e,
  11409. + SSAM_SSH_TC_EXT = 0x0f,
  11410. + SSAM_SSH_TC_BLD = 0x10,
  11411. + SSAM_SSH_TC_BAS = 0x11, // detachment system (Surface Book 2/3)
  11412. + SSAM_SSH_TC_SEN = 0x12,
  11413. + SSAM_SSH_TC_SRQ = 0x13,
  11414. + SSAM_SSH_TC_MCU = 0x14,
  11415. + SSAM_SSH_TC_HID = 0x15, // generic HID input subsystem
  11416. + SSAM_SSH_TC_TCH = 0x16,
  11417. + SSAM_SSH_TC_BKL = 0x17,
  11418. + SSAM_SSH_TC_TAM = 0x18,
  11419. + SSAM_SSH_TC_ACC = 0x19,
  11420. + SSAM_SSH_TC_UFI = 0x1a,
  11421. + SSAM_SSH_TC_USC = 0x1b,
  11422. + SSAM_SSH_TC_PEN = 0x1c,
  11423. + SSAM_SSH_TC_VID = 0x1d,
  11424. + SSAM_SSH_TC_AUD = 0x1e,
  11425. + SSAM_SSH_TC_SMC = 0x1f,
  11426. + SSAM_SSH_TC_KPD = 0x20,
  11427. + SSAM_SSH_TC_REG = 0x21,
  11428. +};
  11429. +
  11430. +struct ssam_controller;
  11431. +
  11432. +/**
11433. + * enum ssam_event_flags - Flags for enabling/disabling SAM-over-SSH events.
  11434. + * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame.
  11435. + */
  11436. +enum ssam_event_flags {
  11437. + SSAM_EVENT_SEQUENCED = BIT(0),
  11438. +};
  11439. +
  11440. +struct ssam_event {
  11441. + u8 target_category;
  11442. + u8 command_id;
  11443. + u8 instance_id;
  11444. + u8 channel;
  11445. + u16 length;
  11446. + u8 data[0];
  11447. +};
  11448. +
  11449. +enum ssam_request_flags {
  11450. + SSAM_REQUEST_HAS_RESPONSE = BIT(0),
  11451. + SSAM_REQUEST_UNSEQUENCED = BIT(1),
  11452. +};
  11453. +
  11454. +struct ssam_request {
  11455. + u8 target_category;
  11456. + u8 command_id;
  11457. + u8 instance_id;
  11458. + u8 channel;
  11459. + u16 flags;
  11460. + u16 length;
  11461. + const u8 *payload;
  11462. +};
  11463. +
  11464. +struct ssam_response {
  11465. + size_t capacity;
  11466. + size_t length;
  11467. + u8 *pointer;
  11468. +};
  11469. +
  11470. +
  11471. +int ssam_client_bind(struct device *client, struct ssam_controller **ctrl);
  11472. +
  11473. +struct device *ssam_controller_device(struct ssam_controller *c);
  11474. +
  11475. +ssize_t ssam_request_write_data(struct ssam_span *buf,
  11476. + struct ssam_controller *ctrl,
  11477. + struct ssam_request *spec);
  11478. +
  11479. +
  11480. +/* -- Synchronous request interface. ---------------------------------------- */
  11481. +
  11482. +struct ssam_request_sync {
  11483. + struct ssh_request base;
  11484. + struct completion comp;
  11485. + struct ssam_response *resp;
  11486. + int status;
  11487. +};
  11488. +
  11489. +int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
  11490. + struct ssam_request_sync **rqst,
  11491. + struct ssam_span *buffer);
  11492. +
  11493. +void ssam_request_sync_init(struct ssam_request_sync *rqst,
  11494. + enum ssam_request_flags flags);
  11495. +
  11496. +static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst,
  11497. + u8 *ptr, size_t len)
  11498. +{
  11499. + ssh_request_set_data(&rqst->base, ptr, len);
  11500. +}
  11501. +
  11502. +static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst,
  11503. + struct ssam_response *resp)
  11504. +{
  11505. + rqst->resp = resp;
  11506. +}
  11507. +
  11508. +int ssam_request_sync_submit(struct ssam_controller *ctrl,
  11509. + struct ssam_request_sync *rqst);
  11510. +
  11511. +static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst)
  11512. +{
  11513. + wait_for_completion(&rqst->comp);
  11514. + return rqst->status;
  11515. +}
  11516. +
  11517. +int ssam_request_sync(struct ssam_controller *ctrl, struct ssam_request *spec,
  11518. + struct ssam_response *rsp);
  11519. +
  11520. +int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
  11521. + struct ssam_request *spec,
  11522. + struct ssam_response *rsp,
  11523. + struct ssam_span *buf);
  11524. +
  11525. +
  11526. +#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len) \
  11527. + ({ \
  11528. + u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)]; \
  11529. + struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) }; \
  11530. + int __status; \
  11531. + \
  11532. + /* ensure input does not overflow buffer */ \
  11533. + if ((rqst)->length <= payload_len) { \
  11534. + __status = ssam_request_sync_with_buffer( \
  11535. + ctrl, rqst, rsp, &__buf); \
  11536. + } else { \
  11537. + __status = -EINVAL; \
  11538. + } \
  11539. + \
  11540. + __status; \
  11541. + })
  11542. +
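Editor's note: the request API above is easiest to follow end-to-end with a small client-side sketch. The following is illustrative only and not part of the patch; the target category, command ID, channel, and payload below are placeholder values, not real SAM commands.

	/*
	 * Hypothetical example: send a one-byte command synchronously and read
	 * a one-byte response. All IDs below are made up for illustration.
	 */
	static int example_set_mode(struct ssam_controller *ctrl, u8 mode)
	{
		struct ssam_request rqst;
		struct ssam_response rsp;
		u8 result;

		rqst.target_category = SSAM_SSH_TC_SAM;	/* placeholder */
		rqst.command_id = 0x42;				/* hypothetical CID */
		rqst.instance_id = 0x00;
		rqst.channel = 0x01;
		rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
		rqst.length = sizeof(mode);
		rqst.payload = &mode;

		rsp.capacity = sizeof(result);
		rsp.length = 0;
		rsp.pointer = &result;

		/* Message buffer lives on the stack, sized for a 1-byte payload. */
		return ssam_request_sync_onstack(ctrl, &rqst, &rsp, sizeof(mode));
	}

The onstack variant bounds its transmit buffer by the payload length given at the macro invocation and returns -EINVAL if rqst.length exceeds it.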
  11543. +
  11544. +struct ssam_request_spec {
  11545. + u8 target_category;
  11546. + u8 command_id;
  11547. + u8 instance_id;
  11548. + u8 channel;
  11549. + u8 flags;
  11550. +};
  11551. +
  11552. +struct ssam_request_spec_md {
  11553. + u8 target_category;
  11554. + u8 command_id;
  11555. + u8 flags;
  11556. +};
  11557. +
  11558. +#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \
  11559. + int name(struct ssam_controller *ctrl) \
  11560. + { \
  11561. + struct ssam_request_spec s = (struct ssam_request_spec)spec; \
  11562. + struct ssam_request rqst; \
  11563. + \
  11564. + rqst.target_category = s.target_category; \
  11565. + rqst.command_id = s.command_id; \
  11566. + rqst.instance_id = s.instance_id; \
  11567. + rqst.channel = s.channel; \
  11568. + rqst.flags = s.flags; \
  11569. + rqst.length = 0; \
  11570. + rqst.payload = NULL; \
  11571. + \
  11572. + return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
  11573. + }
  11574. +
  11575. +#define SSAM_DEFINE_SYNC_REQUEST_W(name, wtype, spec...) \
  11576. + int name(struct ssam_controller *ctrl, const wtype *in) \
  11577. + { \
  11578. + struct ssam_request_spec s = (struct ssam_request_spec)spec; \
  11579. + struct ssam_request rqst; \
  11580. + \
  11581. + rqst.target_category = s.target_category; \
  11582. + rqst.command_id = s.command_id; \
  11583. + rqst.instance_id = s.instance_id; \
  11584. + rqst.channel = s.channel; \
  11585. + rqst.flags = s.flags; \
  11586. + rqst.length = sizeof(wtype); \
  11587. + rqst.payload = (u8 *)in; \
  11588. + \
  11589. + return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
  11590. + sizeof(wtype)); \
  11591. + }
  11592. +
  11593. +#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \
  11594. + int name(struct ssam_controller *ctrl, rtype *out) \
  11595. + { \
  11596. + struct ssam_request_spec s = (struct ssam_request_spec)spec; \
  11597. + struct ssam_request rqst; \
  11598. + struct ssam_response rsp; \
  11599. + int status; \
  11600. + \
  11601. + rqst.target_category = s.target_category; \
  11602. + rqst.command_id = s.command_id; \
  11603. + rqst.instance_id = s.instance_id; \
  11604. + rqst.channel = s.channel; \
  11605. + rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
  11606. + rqst.length = 0; \
  11607. + rqst.payload = NULL; \
  11608. + \
  11609. + rsp.capacity = sizeof(rtype); \
  11610. + rsp.length = 0; \
  11611. + rsp.pointer = (u8 *)out; \
  11612. + \
  11613. + status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
  11614. + if (status) \
  11615. + return status; \
  11616. + \
  11617. + if (rsp.length != sizeof(rtype)) { \
  11618. + struct device *dev = ssam_controller_device(ctrl); \
  11619. + dev_err(dev, "rqst: invalid response length, expected %zu, got %zu" \
  11620. + " (tc: 0x%02x, cid: 0x%02x)", sizeof(rtype), \
  11621. + rsp.length, rqst.target_category, \
  11622. + rqst.command_id); \
  11623. + return -EIO; \
  11624. + } \
  11625. + \
  11626. + return 0; \
  11627. + }
  11628. +
  11629. +#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, wtype, spec...) \
  11630. + int name(struct ssam_controller *ctrl, u8 chn, u8 iid, const wtype *in) \
  11631. + { \
  11632. + struct ssam_request_spec_md s \
  11633. + = (struct ssam_request_spec_md)spec; \
  11634. + struct ssam_request rqst; \
  11635. + \
  11636. + rqst.target_category = s.target_category; \
  11637. + rqst.command_id = s.command_id; \
  11638. + rqst.instance_id = iid; \
  11639. + rqst.channel = chn; \
  11640. + rqst.flags = s.flags; \
  11641. + rqst.length = sizeof(wtype); \
  11642. + rqst.payload = (u8 *)in; \
  11643. + \
  11644. + return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
  11645. + sizeof(wtype)); \
  11646. + }
  11647. +
  11648. +#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) \
  11649. + int name(struct ssam_controller *ctrl, u8 chn, u8 iid, rtype *out) \
  11650. + { \
  11651. + struct ssam_request_spec_md s \
  11652. + = (struct ssam_request_spec_md)spec; \
  11653. + struct ssam_request rqst; \
  11654. + struct ssam_response rsp; \
  11655. + int status; \
  11656. + \
  11657. + rqst.target_category = s.target_category; \
  11658. + rqst.command_id = s.command_id; \
  11659. + rqst.instance_id = iid; \
  11660. + rqst.channel = chn; \
  11661. + rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
  11662. + rqst.length = 0; \
  11663. + rqst.payload = NULL; \
  11664. + \
  11665. + rsp.capacity = sizeof(rtype); \
  11666. + rsp.length = 0; \
  11667. + rsp.pointer = (u8 *)out; \
  11668. + \
  11669. + status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
  11670. + if (status) \
  11671. + return status; \
  11672. + \
  11673. + if (rsp.length != sizeof(rtype)) { \
  11674. + struct device *dev = ssam_controller_device(ctrl); \
  11675. + dev_err(dev, "rqst: invalid response length, expected %zu, got %zu" \
  11676. + " (tc: 0x%02x, cid: 0x%02x)", sizeof(rtype), \
  11677. + rsp.length, rqst.target_category, \
  11678. + rqst.command_id); \
  11679. + return -EIO; \
  11680. + } \
  11681. + \
  11682. + return 0; \
  11683. + }
  11684. +
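Editor's note: for fixed requests, the SSAM_DEFINE_SYNC_REQUEST_* generators above remove the per-call boilerplate. A minimal sketch with made-up IDs (the generated function wraps exactly the flow shown in the earlier example):

	/* Illustrative only: expands to `static int example_get_version(...)`. */
	static SSAM_DEFINE_SYNC_REQUEST_R(example_get_version, __le32, {
		.target_category = SSAM_SSH_TC_SAM,	/* placeholder */
		.command_id      = 0x13,		/* hypothetical CID */
		.instance_id     = 0x00,
		.channel         = 0x01,
	});

	/*
	 * Caller side:
	 *	__le32 raw;
	 *	int status = example_get_version(ctrl, &raw);
	 *	if (!status)
	 *		version = le32_to_cpu(raw);
	 */

The _MD_ variants instead take channel and instance ID at call time, which is why struct ssam_request_spec_md omits those two fields.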
  11685. +
  11686. +/* -- Event notifier/callbacks. --------------------------------------------- */
  11687. +
  11688. +#define SSAM_NOTIF_STATE_SHIFT 2
  11689. +#define SSAM_NOTIF_STATE_MASK ((1 << SSAM_NOTIF_STATE_SHIFT) - 1)
  11690. +
  11691. +#define SSAM_NOTIF_HANDLED BIT(0)
  11692. +#define SSAM_NOTIF_STOP BIT(1)
  11693. +
  11694. +
  11695. +struct ssam_notifier_block;
  11696. +
  11697. +typedef u32 (*ssam_notifier_fn_t)(struct ssam_notifier_block *nb,
  11698. + const struct ssam_event *event);
  11699. +
  11700. +struct ssam_notifier_block {
  11701. + struct ssam_notifier_block __rcu *next;
  11702. + ssam_notifier_fn_t fn;
  11703. + int priority;
  11704. +};
  11705. +
  11706. +
  11707. +static inline u32 ssam_notifier_from_errno(int err)
  11708. +{
  11709. + if (WARN_ON(err > 0) || err == 0)
  11710. + return 0;
  11711. + else
  11712. + return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP;
  11713. +}
  11714. +
  11715. +static inline int ssam_notifier_to_errno(u32 ret)
  11716. +{
  11717. + return -(ret >> SSAM_NOTIF_STATE_SHIFT);
  11718. +}
  11719. +
  11720. +
  11721. +/* -- Event/notification registry. ------------------------------------------ */
  11722. +
  11723. +struct ssam_event_registry {
  11724. + u8 target_category;
  11725. + u8 channel;
  11726. + u8 cid_enable;
  11727. + u8 cid_disable;
  11728. +};
  11729. +
  11730. +struct ssam_event_id {
  11731. + u8 target_category;
  11732. + u8 instance;
  11733. +};
  11734. +
  11735. +
  11736. +#define SSAM_EVENT_REGISTRY(tc, chn, cid_en, cid_dis) \
  11737. + ((struct ssam_event_registry) { \
  11738. + .target_category = (tc), \
  11739. + .channel = (chn), \
  11740. + .cid_enable = (cid_en), \
  11741. + .cid_disable = (cid_dis), \
  11742. + })
  11743. +
  11744. +#define SSAM_EVENT_ID(tc, iid) \
  11745. + ((struct ssam_event_id) { \
  11746. + .target_category = tc, \
  11747. + .instance = iid, \
  11748. + })
  11749. +
  11750. +
  11751. +#define SSAM_EVENT_REGISTRY_SAM \
  11752. + SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c)
  11753. +
  11754. +#define SSAM_EVENT_REGISTRY_KIP \
  11755. + SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
  11756. +
  11757. +#define SSAM_EVENT_REGISTRY_REG \
  11758. + SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
  11759. +
  11760. +
  11761. +struct ssam_event_notifier {
  11762. + struct ssam_notifier_block base;
  11763. +
  11764. + struct {
  11765. + struct ssam_event_registry reg;
  11766. + struct ssam_event_id id;
  11767. + u8 flags;
  11768. + } event;
  11769. +};
  11770. +
  11771. +int ssam_notifier_register(struct ssam_controller *ctrl,
  11772. + struct ssam_event_notifier *n);
  11773. +
  11774. +int ssam_notifier_unregister(struct ssam_controller *ctrl,
  11775. + struct ssam_event_notifier *n);
  11776. +
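Editor's note: event consumers hook into this interface via struct ssam_event_notifier. A brief sketch of the intended wiring, using placeholder IDs and a hypothetical setup helper (the VHF driver further down in this patch follows the same pattern at probe time):

	/* Illustrative callback: the command ID is a placeholder. */
	static u32 example_notify(struct ssam_notifier_block *nb,
				  const struct ssam_event *event)
	{
		if (event->command_id != 0x2a)	/* hypothetical event CID */
			return 0;		/* not handled, keep propagating */

		/* ... consume event->data[0 .. event->length - 1] ... */

		return SSAM_NOTIF_HANDLED;	/* or ssam_notifier_from_errno(err) | SSAM_NOTIF_HANDLED */
	}

	static int example_setup_events(struct ssam_controller *ctrl,
					struct ssam_event_notifier *notif)
	{
		notif->base.fn = example_notify;
		notif->base.priority = 1;
		notif->event.reg = SSAM_EVENT_REGISTRY_SAM;
		notif->event.id = SSAM_EVENT_ID(SSAM_SSH_TC_SAM, 0x00);
		notif->event.flags = SSAM_EVENT_SEQUENCED;

		return ssam_notifier_register(ctrl, notif);
	}

Registration installs the callback and, through the registry's cid_enable/cid_disable commands, is what turns the event source on; ssam_notifier_unregister() reverses both steps.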
  11777. +#endif /* _SURFACE_SAM_SSH_H */
  11778. diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h b/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h
  11779. new file mode 100644
  11780. index 0000000000000..8ea9a2fc99d7e
  11781. --- /dev/null
  11782. +++ b/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h
  11783. @@ -0,0 +1,587 @@
  11784. +#undef TRACE_SYSTEM
  11785. +#define TRACE_SYSTEM surface_sam_ssh
  11786. +
  11787. +#if !defined(_SURFACE_SAM_SSH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
  11788. +#define _SURFACE_SAM_SSH_TRACE_H
  11789. +
  11790. +#include <linux/tracepoint.h>
  11791. +
  11792. +#include "surface_sam_ssh.h"
  11793. +
  11794. +
  11795. +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ);
  11796. +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ);
  11797. +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK);
  11798. +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK);
  11799. +
  11800. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT);
  11801. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT);
  11802. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT);
  11803. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT);
  11804. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT);
  11805. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT);
  11806. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT);
  11807. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT);
  11808. +
  11809. +TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT);
  11810. +TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT);
  11811. +TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT);
  11812. +
  11813. +TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_SF_MASK);
  11814. +TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_TY_MASK);
  11815. +
  11816. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT);
  11817. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT);
  11818. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT);
  11819. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT);
  11820. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT);
  11821. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT);
  11822. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT);
  11823. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT);
  11824. +
  11825. +TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT);
  11826. +TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
  11827. +
  11828. +TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK);
  11829. +TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK);
  11830. +
  11831. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM);
  11832. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT);
  11833. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP);
  11834. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC);
  11835. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN);
  11836. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM);
  11837. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG);
  11838. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD);
  11839. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU);
  11840. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI);
  11841. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC);
  11842. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL);
  11843. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL);
  11844. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP);
  11845. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT);
  11846. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD);
  11847. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS);
  11848. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN);
  11849. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ);
  11850. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU);
  11851. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
  11852. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
  11853. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
  11854. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
  11855. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC);
  11856. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
  11857. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
  11858. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
  11859. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID);
  11860. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
  11861. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
  11862. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
  11863. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
  11864. +
  11865. +
  11866. +#define SSAM_PTR_UID_LEN 9
  11867. +#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1)
  11868. +#define SSAM_SEQ_NOT_APPLICABLE ((u16)-1)
  11869. +#define SSAM_RQID_NOT_APPLICABLE ((u32)-1)
  11870. +#define SSAM_SSH_TC_NOT_APPLICABLE 0
  11871. +
  11872. +
  11873. +#ifndef _SURFACE_SAM_SSH_TRACE_HELPERS
  11874. +#define _SURFACE_SAM_SSH_TRACE_HELPERS
  11875. +
  11876. +static inline void ssam_trace_ptr_uid(const void *ptr, char *uid_str)
  11877. +{
  11878. + char buf[2 * sizeof(void *) + 1];
  11879. +
  11880. + snprintf(buf, ARRAY_SIZE(buf), "%p", ptr);
  11881. + memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN],
  11882. + SSAM_PTR_UID_LEN);
  11883. +}
  11884. +
  11885. +static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p)
  11886. +{
  11887. + if (!p->data.ptr || p->data.len < SSH_MESSAGE_LENGTH(0))
  11888. + return SSAM_SEQ_NOT_APPLICABLE;
  11889. +
  11890. + return p->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
  11891. +}
  11892. +
  11893. +static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p)
  11894. +{
  11895. + if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
  11896. + return SSAM_RQID_NOT_APPLICABLE;
  11897. +
  11898. + return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(rqid)]);
  11899. +}
  11900. +
  11901. +static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
  11902. +{
  11903. + if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
  11904. + return SSAM_SSH_TC_NOT_APPLICABLE;
  11905. +
  11906. + return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tc)]);
  11907. +}
  11908. +
  11909. +#endif /* _SURFACE_SAM_SSH_TRACE_HELPERS */
  11910. +
  11911. +#define ssam_trace_get_command_field_u8(packet, field) \
  11912. + ((!(packet) || (packet)->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) \
  11913. + ? 0 : (packet)->data.ptr[SSH_MSGOFFSET_COMMAND(field)])
  11914. +
  11915. +#define ssam_show_generic_u8_field(value) \
  11916. + __print_symbolic(value, \
  11917. + { SSAM_U8_FIELD_NOT_APPLICABLE, "N/A" } \
  11918. + )
  11919. +
  11920. +
  11921. +#define ssam_show_frame_type(ty) \
  11922. + __print_symbolic(ty, \
  11923. + { SSH_FRAME_TYPE_DATA_SEQ, "DSEQ" }, \
  11924. + { SSH_FRAME_TYPE_DATA_NSQ, "DNSQ" }, \
  11925. + { SSH_FRAME_TYPE_ACK, "ACK" }, \
  11926. + { SSH_FRAME_TYPE_NAK, "NAK" } \
  11927. + )
  11928. +
  11929. +#define ssam_show_packet_type(type) \
  11930. + __print_flags((type) & SSH_PACKET_FLAGS_TY_MASK, "", \
  11931. + { BIT(SSH_PACKET_TY_FLUSH_BIT), "F" }, \
  11932. + { BIT(SSH_PACKET_TY_SEQUENCED_BIT), "S" }, \
  11933. + { BIT(SSH_PACKET_TY_BLOCKING_BIT), "B" } \
  11934. + )
  11935. +
  11936. +#define ssam_show_packet_state(state) \
  11937. + __print_flags((state) & SSH_PACKET_FLAGS_SF_MASK, "", \
  11938. + { BIT(SSH_PACKET_SF_LOCKED_BIT), "L" }, \
  11939. + { BIT(SSH_PACKET_SF_QUEUED_BIT), "Q" }, \
  11940. + { BIT(SSH_PACKET_SF_PENDING_BIT), "P" }, \
  11941. + { BIT(SSH_PACKET_SF_TRANSMITTING_BIT), "S" }, \
  11942. + { BIT(SSH_PACKET_SF_TRANSMITTED_BIT), "T" }, \
  11943. + { BIT(SSH_PACKET_SF_ACKED_BIT), "A" }, \
  11944. + { BIT(SSH_PACKET_SF_CANCELED_BIT), "C" }, \
  11945. + { BIT(SSH_PACKET_SF_COMPLETED_BIT), "F" } \
  11946. + )
  11947. +
  11948. +#define ssam_show_packet_seq(seq) \
  11949. + __print_symbolic(seq, \
  11950. + { SSAM_SEQ_NOT_APPLICABLE, "N/A" } \
  11951. + )
  11952. +
  11953. +
  11954. +#define ssam_show_request_type(flags) \
  11955. + __print_flags(flags & SSH_REQUEST_FLAGS_TY_MASK, "", \
  11956. + { BIT(SSH_REQUEST_TY_FLUSH_BIT), "F" }, \
  11957. + { BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT), "R" } \
  11958. + )
  11959. +
  11960. +#define ssam_show_request_state(flags) \
  11961. + __print_flags(flags & SSH_REQUEST_FLAGS_SF_MASK, "", \
  11962. + { BIT(SSH_REQUEST_SF_LOCKED_BIT), "L" }, \
  11963. + { BIT(SSH_REQUEST_SF_QUEUED_BIT), "Q" }, \
  11964. + { BIT(SSH_REQUEST_SF_PENDING_BIT), "P" }, \
  11965. + { BIT(SSH_REQUEST_SF_TRANSMITTING_BIT), "S" }, \
  11966. + { BIT(SSH_REQUEST_SF_TRANSMITTED_BIT), "T" }, \
  11967. + { BIT(SSH_REQUEST_SF_RSPRCVD_BIT), "A" }, \
  11968. + { BIT(SSH_REQUEST_SF_CANCELED_BIT), "C" }, \
  11969. + { BIT(SSH_REQUEST_SF_COMPLETED_BIT), "F" } \
  11970. + )
  11971. +
  11972. +#define ssam_show_request_id(rqid) \
  11973. + __print_symbolic(rqid, \
  11974. + { SSAM_RQID_NOT_APPLICABLE, "N/A" } \
  11975. + )
  11976. +
  11977. +#define ssam_show_ssh_tc(tc) \
  11978. + __print_symbolic(tc, \
  11979. + { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
  11980. + { SSAM_SSH_TC_SAM, "SAM" }, \
  11981. + { SSAM_SSH_TC_BAT, "BAT" }, \
  11982. + { SSAM_SSH_TC_TMP, "TMP" }, \
  11983. + { SSAM_SSH_TC_PMC, "PMC" }, \
  11984. + { SSAM_SSH_TC_FAN, "FAN" }, \
  11985. + { SSAM_SSH_TC_PoM, "PoM" }, \
  11986. + { SSAM_SSH_TC_DBG, "DBG" }, \
  11987. + { SSAM_SSH_TC_KBD, "KBD" }, \
  11988. + { SSAM_SSH_TC_FWU, "FWU" }, \
  11989. + { SSAM_SSH_TC_UNI, "UNI" }, \
  11990. + { SSAM_SSH_TC_LPC, "LPC" }, \
  11991. + { SSAM_SSH_TC_TCL, "TCL" }, \
  11992. + { SSAM_SSH_TC_SFL, "SFL" }, \
  11993. + { SSAM_SSH_TC_KIP, "KIP" }, \
  11994. + { SSAM_SSH_TC_EXT, "EXT" }, \
  11995. + { SSAM_SSH_TC_BLD, "BLD" }, \
  11996. + { SSAM_SSH_TC_BAS, "BAS" }, \
  11997. + { SSAM_SSH_TC_SEN, "SEN" }, \
  11998. + { SSAM_SSH_TC_SRQ, "SRQ" }, \
  11999. + { SSAM_SSH_TC_MCU, "MCU" }, \
  12000. + { SSAM_SSH_TC_HID, "HID" }, \
  12001. + { SSAM_SSH_TC_TCH, "TCH" }, \
  12002. + { SSAM_SSH_TC_BKL, "BKL" }, \
  12003. + { SSAM_SSH_TC_TAM, "TAM" }, \
  12004. + { SSAM_SSH_TC_ACC, "ACC" }, \
  12005. + { SSAM_SSH_TC_UFI, "UFI" }, \
  12006. + { SSAM_SSH_TC_USC, "USC" }, \
  12007. + { SSAM_SSH_TC_PEN, "PEN" }, \
  12008. + { SSAM_SSH_TC_VID, "VID" }, \
  12009. + { SSAM_SSH_TC_AUD, "AUD" }, \
  12010. + { SSAM_SSH_TC_SMC, "SMC" }, \
  12011. + { SSAM_SSH_TC_KPD, "KPD" }, \
  12012. + { SSAM_SSH_TC_REG, "REG" } \
  12013. + )
  12014. +
  12015. +
  12016. +DECLARE_EVENT_CLASS(ssam_frame_class,
  12017. + TP_PROTO(const struct ssh_frame *frame),
  12018. +
  12019. + TP_ARGS(frame),
  12020. +
  12021. + TP_STRUCT__entry(
  12022. + __field(u8, type)
  12023. + __field(u8, seq)
  12024. + __field(u16, len)
  12025. + ),
  12026. +
  12027. + TP_fast_assign(
  12028. + __entry->type = frame->type;
  12029. + __entry->seq = frame->seq;
  12030. + __entry->len = get_unaligned_le16(&frame->len);
  12031. + ),
  12032. +
  12033. + TP_printk("ty=%s, seq=0x%02x, len=%u",
  12034. + ssam_show_frame_type(__entry->type),
  12035. + __entry->seq,
  12036. + __entry->len
  12037. + )
  12038. +);
  12039. +
  12040. +#define DEFINE_SSAM_FRAME_EVENT(name) \
  12041. + DEFINE_EVENT(ssam_frame_class, ssam_##name, \
  12042. + TP_PROTO(const struct ssh_frame *frame), \
  12043. + TP_ARGS(frame) \
  12044. + )
  12045. +
  12046. +
  12047. +DECLARE_EVENT_CLASS(ssam_command_class,
  12048. + TP_PROTO(const struct ssh_command *cmd, u16 len),
  12049. +
  12050. + TP_ARGS(cmd, len),
  12051. +
  12052. + TP_STRUCT__entry(
  12053. + __field(u16, rqid)
  12054. + __field(u16, len)
  12055. + __field(u8, tc)
  12056. + __field(u8, cid)
  12057. + __field(u8, iid)
  12058. + ),
  12059. +
  12060. + TP_fast_assign(
  12061. + __entry->rqid = get_unaligned_le16(&cmd->rqid);
  12062. + __entry->tc = cmd->tc;
  12063. + __entry->cid = cmd->cid;
  12064. + __entry->iid = cmd->iid;
  12065. + __entry->len = len;
  12066. + ),
  12067. +
  12068. + TP_printk("rqid=0x%04x, tc=%s, cid=0x%02x, iid=0x%02x, len=%u",
  12069. + __entry->rqid,
  12070. + ssam_show_ssh_tc(__entry->tc),
  12071. + __entry->cid,
  12072. + __entry->iid,
  12073. + __entry->len
  12074. + )
  12075. +);
  12076. +
  12077. +#define DEFINE_SSAM_COMMAND_EVENT(name) \
  12078. + DEFINE_EVENT(ssam_command_class, ssam_##name, \
  12079. + TP_PROTO(const struct ssh_command *cmd, u16 len), \
  12080. + TP_ARGS(cmd, len) \
  12081. + )
  12082. +
  12083. +
  12084. +DECLARE_EVENT_CLASS(ssam_packet_class,
  12085. + TP_PROTO(const struct ssh_packet *packet),
  12086. +
  12087. + TP_ARGS(packet),
  12088. +
  12089. + TP_STRUCT__entry(
  12090. + __array(char, uid, SSAM_PTR_UID_LEN)
  12091. + __field(u8, priority)
  12092. + __field(u16, length)
  12093. + __field(unsigned long, state)
  12094. + __field(u16, seq)
  12095. + ),
  12096. +
  12097. + TP_fast_assign(
  12098. + ssam_trace_ptr_uid(packet, __entry->uid);
  12099. + __entry->priority = READ_ONCE(packet->priority);
  12100. + __entry->length = packet->data.len;
  12101. + __entry->state = READ_ONCE(packet->state);
  12102. + __entry->seq = ssam_trace_get_packet_seq(packet);
  12103. + ),
  12104. +
  12105. + TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s",
  12106. + __entry->uid,
  12107. + ssam_show_packet_seq(__entry->seq),
  12108. + ssam_show_packet_type(__entry->state),
  12109. + __entry->priority,
  12110. + __entry->length,
  12111. + ssam_show_packet_state(__entry->state)
  12112. + )
  12113. +);
  12114. +
  12115. +#define DEFINE_SSAM_PACKET_EVENT(name) \
  12116. + DEFINE_EVENT(ssam_packet_class, ssam_##name, \
  12117. + TP_PROTO(const struct ssh_packet *packet), \
  12118. + TP_ARGS(packet) \
  12119. + )
  12120. +
  12121. +
  12122. +DECLARE_EVENT_CLASS(ssam_packet_status_class,
  12123. + TP_PROTO(const struct ssh_packet *packet, int status),
  12124. +
  12125. + TP_ARGS(packet, status),
  12126. +
  12127. + TP_STRUCT__entry(
  12128. + __array(char, uid, SSAM_PTR_UID_LEN)
  12129. + __field(u8, priority)
  12130. + __field(u16, length)
  12131. + __field(unsigned long, state)
  12132. + __field(u16, seq)
  12133. + __field(int, status)
  12134. + ),
  12135. +
  12136. + TP_fast_assign(
  12137. + ssam_trace_ptr_uid(packet, __entry->uid);
  12138. + __entry->priority = READ_ONCE(packet->priority);
  12139. + __entry->length = packet->data.len;
  12140. + __entry->state = READ_ONCE(packet->state);
  12141. + __entry->seq = ssam_trace_get_packet_seq(packet);
  12142. + __entry->status = status;
  12143. + ),
  12144. +
  12145. + TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s, status=%d",
  12146. + __entry->uid,
  12147. + ssam_show_packet_seq(__entry->seq),
  12148. + ssam_show_packet_type(__entry->state),
  12149. + __entry->priority,
  12150. + __entry->length,
  12151. + ssam_show_packet_state(__entry->state),
  12152. + __entry->status
  12153. + )
  12154. +);
  12155. +
  12156. +#define DEFINE_SSAM_PACKET_STATUS_EVENT(name) \
  12157. + DEFINE_EVENT(ssam_packet_status_class, ssam_##name, \
  12158. + TP_PROTO(const struct ssh_packet *packet, int status), \
  12159. + TP_ARGS(packet, status) \
  12160. + )
  12161. +
  12162. +
  12163. +DECLARE_EVENT_CLASS(ssam_request_class,
  12164. + TP_PROTO(const struct ssh_request *request),
  12165. +
  12166. + TP_ARGS(request),
  12167. +
  12168. + TP_STRUCT__entry(
  12169. + __array(char, uid, SSAM_PTR_UID_LEN)
  12170. + __field(unsigned long, state)
  12171. + __field(u32, rqid)
  12172. + __field(u8, tc)
  12173. + __field(u16, cid)
  12174. + __field(u16, iid)
  12175. + ),
  12176. +
  12177. + TP_fast_assign(
  12178. + const struct ssh_packet *p = &request->packet;
  12179. +
  12180. + // use packet for UID so we can match requests to packets
  12181. + ssam_trace_ptr_uid(p, __entry->uid);
  12182. + __entry->state = READ_ONCE(request->state);
  12183. + __entry->rqid = ssam_trace_get_request_id(p);
  12184. + __entry->tc = ssam_trace_get_request_tc(p);
  12185. + __entry->cid = ssam_trace_get_command_field_u8(p, cid);
  12186. + __entry->iid = ssam_trace_get_command_field_u8(p, iid);
  12187. + ),
  12188. +
  12189. + TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s",
  12190. + __entry->uid,
  12191. + ssam_show_request_id(__entry->rqid),
  12192. + ssam_show_request_type(__entry->state),
  12193. + ssam_show_request_state(__entry->state),
  12194. + ssam_show_ssh_tc(__entry->tc),
  12195. + ssam_show_generic_u8_field(__entry->cid),
  12196. + ssam_show_generic_u8_field(__entry->iid)
  12197. + )
  12198. +);
  12199. +
  12200. +#define DEFINE_SSAM_REQUEST_EVENT(name) \
  12201. + DEFINE_EVENT(ssam_request_class, ssam_##name, \
  12202. + TP_PROTO(const struct ssh_request *request), \
  12203. + TP_ARGS(request) \
  12204. + )
  12205. +
  12206. +
  12207. +DECLARE_EVENT_CLASS(ssam_request_status_class,
  12208. + TP_PROTO(const struct ssh_request *request, int status),
  12209. +
  12210. + TP_ARGS(request, status),
  12211. +
  12212. + TP_STRUCT__entry(
  12213. + __array(char, uid, SSAM_PTR_UID_LEN)
  12214. + __field(unsigned long, state)
  12215. + __field(u32, rqid)
  12216. + __field(u8, tc)
  12217. + __field(u16, cid)
  12218. + __field(u16, iid)
  12219. + __field(int, status)
  12220. + ),
  12221. +
  12222. + TP_fast_assign(
  12223. + const struct ssh_packet *p = &request->packet;
  12224. +
  12225. + // use packet for UID so we can match requests to packets
  12226. + ssam_trace_ptr_uid(p, __entry->uid);
  12227. + __entry->state = READ_ONCE(request->state);
  12228. + __entry->rqid = ssam_trace_get_request_id(p);
  12229. + __entry->tc = ssam_trace_get_request_tc(p);
  12230. + __entry->cid = ssam_trace_get_command_field_u8(p, cid);
  12231. + __entry->iid = ssam_trace_get_command_field_u8(p, iid);
  12232. + __entry->status = status;
  12233. + ),
  12234. +
  12235. + TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d",
  12236. + __entry->uid,
  12237. + ssam_show_request_id(__entry->rqid),
  12238. + ssam_show_request_type(__entry->state),
  12239. + ssam_show_request_state(__entry->state),
  12240. + ssam_show_ssh_tc(__entry->tc),
  12241. + ssam_show_generic_u8_field(__entry->cid),
  12242. + ssam_show_generic_u8_field(__entry->iid),
  12243. + __entry->status
  12244. + )
  12245. +);
  12246. +
  12247. +#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name) \
  12248. + DEFINE_EVENT(ssam_request_status_class, ssam_##name, \
  12249. + TP_PROTO(const struct ssh_request *request, int status),\
  12250. + TP_ARGS(request, status) \
  12251. + )
  12252. +
  12253. +
  12254. +DECLARE_EVENT_CLASS(ssam_alloc_class,
  12255. + TP_PROTO(void *ptr, size_t len),
  12256. +
  12257. + TP_ARGS(ptr, len),
  12258. +
  12259. + TP_STRUCT__entry(
  12260. + __array(char, uid, SSAM_PTR_UID_LEN)
  12261. + __field(size_t, len)
  12262. + ),
  12263. +
  12264. + TP_fast_assign(
  12265. + ssam_trace_ptr_uid(ptr, __entry->uid);
  12266. + __entry->len = len;
  12267. + ),
  12268. +
  12269. + TP_printk("uid=%s, len=%zu", __entry->uid, __entry->len)
  12270. +);
  12271. +
  12272. +#define DEFINE_SSAM_ALLOC_EVENT(name) \
  12273. + DEFINE_EVENT(ssam_alloc_class, ssam_##name, \
  12274. + TP_PROTO(void *ptr, size_t len), \
  12275. + TP_ARGS(ptr, len) \
  12276. + )
  12277. +
  12278. +
  12279. +DECLARE_EVENT_CLASS(ssam_free_class,
  12280. + TP_PROTO(void *ptr),
  12281. +
  12282. + TP_ARGS(ptr),
  12283. +
  12284. + TP_STRUCT__entry(
  12285. + __array(char, uid, SSAM_PTR_UID_LEN)
  12287. + ),
  12288. +
  12289. + TP_fast_assign(
  12290. + ssam_trace_ptr_uid(ptr, __entry->uid);
  12291. + ),
  12292. +
  12293. + TP_printk("uid=%s", __entry->uid)
  12294. +);
  12295. +
  12296. +#define DEFINE_SSAM_FREE_EVENT(name) \
  12297. + DEFINE_EVENT(ssam_free_class, ssam_##name, \
  12298. + TP_PROTO(void *ptr), \
  12299. + TP_ARGS(ptr) \
  12300. + )
  12301. +
  12302. +
  12303. +DECLARE_EVENT_CLASS(ssam_generic_uint_class,
  12304. + TP_PROTO(const char *property, unsigned int value),
  12305. +
  12306. + TP_ARGS(property, value),
  12307. +
  12308. + TP_STRUCT__entry(
  12309. + __string(property, property)
  12310. + __field(unsigned int, value)
  12311. + ),
  12312. +
  12313. + TP_fast_assign(
  12314. + __assign_str(property, property);
  12315. + __entry->value = value;
  12316. + ),
  12317. +
  12318. + TP_printk("%s=%u", __get_str(property), __entry->value)
  12319. +);
  12320. +
  12321. +#define DEFINE_SSAM_GENERIC_UINT_EVENT(name) \
  12322. + DEFINE_EVENT(ssam_generic_uint_class, ssam_##name, \
  12323. + TP_PROTO(const char *property, unsigned int value), \
  12324. + TP_ARGS(property, value) \
  12325. + )
  12326. +
  12327. +
  12328. +DEFINE_SSAM_FRAME_EVENT(rx_frame_received);
  12329. +DEFINE_SSAM_COMMAND_EVENT(rx_response_received);
  12330. +DEFINE_SSAM_COMMAND_EVENT(rx_event_received);
  12331. +
  12332. +DEFINE_SSAM_PACKET_EVENT(packet_release);
  12333. +DEFINE_SSAM_PACKET_EVENT(packet_submit);
  12334. +DEFINE_SSAM_PACKET_EVENT(packet_resubmit);
  12335. +DEFINE_SSAM_PACKET_EVENT(packet_timeout);
  12336. +DEFINE_SSAM_PACKET_EVENT(packet_cancel);
  12337. +DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete);
  12338. +DEFINE_SSAM_GENERIC_UINT_EVENT(ptl_timeout_reap);
  12339. +
  12340. +DEFINE_SSAM_REQUEST_EVENT(request_submit);
  12341. +DEFINE_SSAM_REQUEST_EVENT(request_timeout);
  12342. +DEFINE_SSAM_REQUEST_EVENT(request_cancel);
  12343. +DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete);
  12344. +DEFINE_SSAM_GENERIC_UINT_EVENT(rtl_timeout_reap);
  12345. +
  12346. +DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet);
  12347. +DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet);
  12348. +DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet);
  12349. +DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write);
  12350. +DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data);
  12351. +DEFINE_SSAM_GENERIC_UINT_EVENT(ei_rx_corrupt_syn);
  12352. +DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data);
  12353. +DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response);
  12354. +
  12355. +DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc);
  12356. +DEFINE_SSAM_FREE_EVENT(ctrl_packet_free);
  12357. +
  12358. +DEFINE_SSAM_ALLOC_EVENT(event_item_alloc);
  12359. +DEFINE_SSAM_FREE_EVENT(event_item_free);
  12360. +
  12361. +#endif /* _SURFACE_SAM_SSH_TRACE_H */
  12362. +
  12363. +/* This part must be outside protection */
  12364. +#undef TRACE_INCLUDE_PATH
  12365. +#undef TRACE_INCLUDE_FILE
  12366. +
  12367. +#define TRACE_INCLUDE_PATH .
  12368. +#define TRACE_INCLUDE_FILE surface_sam_ssh_trace
  12369. +
  12370. +#include <trace/define_trace.h>
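Editor's note: as with other kernel tracepoint headers, the define_trace.h machinery above only produces declarations unless CREATE_TRACE_POINTS is defined. Exactly one compilation unit of the module has to emit the definitions (presumably the SSH core source file in this series); a minimal sketch:

	/* In exactly one .c file of the module: */
	#define CREATE_TRACE_POINTS
	#include "surface_sam_ssh_trace.h"

TRACE_INCLUDE_PATH is set to "." so the header can live next to the driver sources rather than under include/trace/events/.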
  12371. diff --git a/drivers/platform/x86/surface_sam/surface_sam_vhf.c b/drivers/platform/x86/surface_sam/surface_sam_vhf.c
  12372. new file mode 100644
  12373. index 0000000000000..8455f952c2724
  12374. --- /dev/null
  12375. +++ b/drivers/platform/x86/surface_sam/surface_sam_vhf.c
  12376. @@ -0,0 +1,266 @@
  12377. +// SPDX-License-Identifier: GPL-2.0-or-later
  12378. +/*
  12379. + * Virtual HID Framework (VHF) driver for input events via SAM.
  12380. + * Used for keyboard input events on the Surface Laptops.
  12381. + */
  12382. +
  12383. +#include <linux/acpi.h>
  12384. +#include <linux/hid.h>
  12385. +#include <linux/input.h>
+#include <linux/module.h>
  12386. +#include <linux/platform_device.h>
+#include <linux/slab.h>
  12387. +#include <linux/types.h>
  12388. +
  12389. +#include "surface_sam_ssh.h"
  12390. +
  12391. +
  12392. +#define USB_VENDOR_ID_MICROSOFT 0x045e
  12393. +#define USB_DEVICE_ID_MS_VHF 0xf001
  12394. +
  12395. +#define VHF_INPUT_NAME "Microsoft Virtual HID Framework Device"
  12396. +
  12397. +
  12398. +struct vhf_drvdata {
  12399. + struct platform_device *dev;
  12400. + struct ssam_controller *ctrl;
  12401. +
  12402. + struct ssam_event_notifier notif;
  12403. +
  12404. + struct hid_device *hid;
  12405. +};
  12406. +
  12407. +
  12408. +/*
  12409. + * These report descriptors have been extracted from a Surface Book 2.
  12410. + * They seem to be similar enough to be usable on the Surface Laptop.
  12411. + */
  12412. +static const u8 vhf_hid_desc[] = {
  12413. + // keyboard descriptor (event command ID 0x03)
  12414. + 0x05, 0x01, /* Usage Page (Desktop), */
  12415. + 0x09, 0x06, /* Usage (Keyboard), */
  12416. + 0xA1, 0x01, /* Collection (Application), */
  12417. + 0x85, 0x01, /* Report ID (1), */
  12418. + 0x15, 0x00, /* Logical Minimum (0), */
  12419. + 0x25, 0x01, /* Logical Maximum (1), */
  12420. + 0x75, 0x01, /* Report Size (1), */
  12421. + 0x95, 0x08, /* Report Count (8), */
  12422. + 0x05, 0x07, /* Usage Page (Keyboard), */
  12423. + 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */
  12424. + 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */
  12425. + 0x81, 0x02, /* Input (Variable), */
  12426. + 0x75, 0x08, /* Report Size (8), */
  12427. + 0x95, 0x0A, /* Report Count (10), */
  12428. + 0x19, 0x00, /* Usage Minimum (None), */
  12429. + 0x29, 0x91, /* Usage Maximum (KB LANG2), */
  12430. + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
  12431. + 0x81, 0x00, /* Input, */
  12432. + 0x05, 0x0C, /* Usage Page (Consumer), */
  12433. + 0x0A, 0xC0, 0x02, /* Usage (02C0h), */
  12434. + 0xA1, 0x02, /* Collection (Logical), */
  12435. + 0x1A, 0xC1, 0x02, /* Usage Minimum (02C1h), */
  12436. + 0x2A, 0xC6, 0x02, /* Usage Maximum (02C6h), */
  12437. + 0x95, 0x06, /* Report Count (6), */
  12438. + 0xB1, 0x03, /* Feature (Constant, Variable), */
  12439. + 0xC0, /* End Collection, */
  12440. + 0x05, 0x08, /* Usage Page (LED), */
  12441. + 0x19, 0x01, /* Usage Minimum (01h), */
  12442. + 0x29, 0x03, /* Usage Maximum (03h), */
  12443. + 0x75, 0x01, /* Report Size (1), */
  12444. + 0x95, 0x03, /* Report Count (3), */
  12445. + 0x25, 0x01, /* Logical Maximum (1), */
  12446. + 0x91, 0x02, /* Output (Variable), */
  12447. + 0x95, 0x05, /* Report Count (5), */
  12448. + 0x91, 0x01, /* Output (Constant), */
  12449. + 0xC0, /* End Collection, */
  12450. +
  12451. + // media key descriptor (event command ID 0x04)
  12452. + 0x05, 0x0C, /* Usage Page (Consumer), */
  12453. + 0x09, 0x01, /* Usage (Consumer Control), */
  12454. + 0xA1, 0x01, /* Collection (Application), */
  12455. + 0x85, 0x03, /* Report ID (3), */
  12456. + 0x75, 0x10, /* Report Size (16), */
  12457. + 0x15, 0x00, /* Logical Minimum (0), */
  12458. + 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
  12459. + 0x19, 0x00, /* Usage Minimum (00h), */
  12460. + 0x2A, 0xFF, 0x03, /* Usage Maximum (03FFh), */
  12461. + 0x81, 0x00, /* Input, */
  12462. + 0xC0, /* End Collection, */
  12463. +};
  12464. +
  12465. +
  12466. +static int vhf_hid_start(struct hid_device *hid)
  12467. +{
  12468. + hid_dbg(hid, "%s\n", __func__);
  12469. + return 0;
  12470. +}
  12471. +
  12472. +static void vhf_hid_stop(struct hid_device *hid)
  12473. +{
  12474. + hid_dbg(hid, "%s\n", __func__);
  12475. +}
  12476. +
  12477. +static int vhf_hid_open(struct hid_device *hid)
  12478. +{
  12479. + hid_dbg(hid, "%s\n", __func__);
  12480. + return 0;
  12481. +}
  12482. +
  12483. +static void vhf_hid_close(struct hid_device *hid)
  12484. +{
  12485. + hid_dbg(hid, "%s\n", __func__);
  12486. +}
  12487. +
  12488. +static int vhf_hid_parse(struct hid_device *hid)
  12489. +{
  12490. + return hid_parse_report(hid, (u8 *)vhf_hid_desc, ARRAY_SIZE(vhf_hid_desc));
  12491. +}
  12492. +
  12493. +static int vhf_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
  12494. + u8 *buf, size_t len, unsigned char rtype,
  12495. + int reqtype)
  12496. +{
  12497. + hid_dbg(hid, "%s\n", __func__);
  12498. + return 0;
  12499. +}
  12500. +
  12501. +static int vhf_hid_output_report(struct hid_device *hid, u8 *buf, size_t len)
  12502. +{
  12503. + hid_dbg(hid, "%s\n", __func__);
  12504. + print_hex_dump_debug("report:", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
  12505. +
  12506. + return len;
  12507. +}
  12508. +
  12509. +static struct hid_ll_driver vhf_hid_ll_driver = {
  12510. + .start = vhf_hid_start,
  12511. + .stop = vhf_hid_stop,
  12512. + .open = vhf_hid_open,
  12513. + .close = vhf_hid_close,
  12514. + .parse = vhf_hid_parse,
  12515. + .raw_request = vhf_hid_raw_request,
  12516. + .output_report = vhf_hid_output_report,
  12517. +};
  12518. +
  12519. +
  12520. +static struct hid_device *vhf_create_hid_device(struct platform_device *pdev)
  12521. +{
  12522. + struct hid_device *hid;
  12523. +
  12524. + hid = hid_allocate_device();
  12525. + if (IS_ERR(hid))
  12526. + return hid;
  12527. +
  12528. + hid->dev.parent = &pdev->dev;
  12529. +
  12530. + hid->bus = BUS_VIRTUAL;
  12531. + hid->vendor = USB_VENDOR_ID_MICROSOFT;
  12532. + hid->product = USB_DEVICE_ID_MS_VHF;
  12533. +
  12534. + hid->ll_driver = &vhf_hid_ll_driver;
  12535. +
  12536. + snprintf(hid->name, sizeof(hid->name), "%s", VHF_INPUT_NAME);
  12537. +
  12538. + return hid;
  12539. +}
  12540. +
  12541. +static u32 vhf_event_handler(struct ssam_notifier_block *nb, const struct ssam_event *event)
  12542. +{
  12543. + struct vhf_drvdata *drvdata = container_of(nb, struct vhf_drvdata, notif.base);
  12544. + int status;
  12545. +
  12546. + if (event->target_category != SSAM_SSH_TC_KBD)
  12547. + return 0;
  12548. +
  12549. + if (event->command_id == 0x03 || event->command_id == 0x04) {
  12550. + status = hid_input_report(drvdata->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 1);
  12551. + return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
  12552. + }
  12553. +
  12554. + return 0;
  12555. +}
  12556. +
  12557. +static int surface_sam_vhf_probe(struct platform_device *pdev)
  12558. +{
  12559. + struct ssam_controller *ctrl;
  12560. + struct vhf_drvdata *drvdata;
  12561. + struct hid_device *hid;
  12562. + int status;
  12563. +
  12564. + // add device link to EC
  12565. + status = ssam_client_bind(&pdev->dev, &ctrl);
  12566. + if (status)
  12567. + return status == -ENXIO ? -EPROBE_DEFER : status;
  12568. +
  12569. + drvdata = kzalloc(sizeof(struct vhf_drvdata), GFP_KERNEL);
  12570. + if (!drvdata)
  12571. + return -ENOMEM;
  12572. +
  12573. + hid = vhf_create_hid_device(pdev);
  12574. + if (IS_ERR(hid)) {
  12575. + status = PTR_ERR(hid);
  12576. + goto err_probe_hid;
  12577. + }
  12578. +
  12579. + status = hid_add_device(hid);
  12580. + if (status)
  12581. + goto err_add_hid;
  12582. +
  12583. + drvdata->dev = pdev;
  12584. + drvdata->ctrl = ctrl;
  12585. + drvdata->hid = hid;
  12586. +
  12587. + drvdata->notif.base.priority = 1;
  12588. + drvdata->notif.base.fn = vhf_event_handler;
  12589. + drvdata->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
  12590. + drvdata->notif.event.id.target_category = SSAM_SSH_TC_KBD;
  12591. + drvdata->notif.event.id.instance = 0;
  12592. + drvdata->notif.event.flags = 0;
  12593. +
  12594. + platform_set_drvdata(pdev, drvdata);
  12595. +
  12596. + status = ssam_notifier_register(ctrl, &drvdata->notif);
  12597. + if (status)
  12598. + goto err_add_hid;
  12599. +
  12600. + return 0;
  12601. +
  12602. +err_add_hid:
  12603. + hid_destroy_device(hid);
  12604. + platform_set_drvdata(pdev, NULL);
  12605. +err_probe_hid:
  12606. + kfree(drvdata);
  12607. + return status;
  12608. +}
  12609. +
  12610. +static int surface_sam_vhf_remove(struct platform_device *pdev)
  12611. +{
  12612. + struct vhf_drvdata *drvdata = platform_get_drvdata(pdev);
  12613. +
  12614. + ssam_notifier_unregister(drvdata->ctrl, &drvdata->notif);
  12615. + hid_destroy_device(drvdata->hid);
  12616. + kfree(drvdata);
  12617. +
  12618. + platform_set_drvdata(pdev, NULL);
  12619. + return 0;
  12620. +}
  12621. +
  12622. +
  12623. +static const struct acpi_device_id surface_sam_vhf_match[] = {
  12624. + { "MSHW0096" },
  12625. + { },
  12626. +};
  12627. +MODULE_DEVICE_TABLE(acpi, surface_sam_vhf_match);
  12628. +
  12629. +static struct platform_driver surface_sam_vhf = {
  12630. + .probe = surface_sam_vhf_probe,
  12631. + .remove = surface_sam_vhf_remove,
  12632. + .driver = {
  12633. + .name = "surface_sam_vhf",
  12634. + .acpi_match_table = surface_sam_vhf_match,
  12635. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  12636. + },
  12637. +};
  12638. +module_platform_driver(surface_sam_vhf);
  12639. +
  12640. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  12641. +MODULE_DESCRIPTION("Virtual HID Framework Driver for 5th Generation Surface Devices");
  12642. +MODULE_LICENSE("GPL");
  12643. diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
  12644. index a9719858c950b..ce5309d002805 100644
  12645. --- a/drivers/tty/serdev/core.c
  12646. +++ b/drivers/tty/serdev/core.c
  12647. @@ -552,16 +552,97 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
  12648. }
  12649. #ifdef CONFIG_ACPI
  12650. +
  12651. +#define SERDEV_ACPI_MAX_SCAN_DEPTH 32
  12652. +
  12653. +struct acpi_serdev_lookup {
  12654. + acpi_handle device_handle;
  12655. + acpi_handle controller_handle;
  12656. + int n;
  12657. + int index;
  12658. +};
  12659. +
  12660. +static int acpi_serdev_parse_resource(struct acpi_resource *ares, void *data)
  12661. +{
  12662. + struct acpi_serdev_lookup *lookup = data;
  12663. + struct acpi_resource_uart_serialbus *sb;
  12664. + acpi_status status;
  12665. +
  12666. + if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
  12667. + return 1;
  12668. +
  12669. + if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
  12670. + return 1;
  12671. +
  12672. + if (lookup->index != -1 && lookup->n++ != lookup->index)
  12673. + return 1;
  12674. +
  12675. + sb = &ares->data.uart_serial_bus;
  12676. +
  12677. + status = acpi_get_handle(lookup->device_handle,
  12678. + sb->resource_source.string_ptr,
  12679. + &lookup->controller_handle);
  12680. + if (ACPI_FAILURE(status))
  12681. + return 1;
  12682. +
  12683. + /*
  12684. + * NOTE: Ideally, we would also want to retrieve other properties here,
  12685. + * once setting them before opening the device is supported by serdev.
  12686. + */
  12687. +
  12688. + return 1;
  12689. +}
  12690. +
  12691. +static int acpi_serdev_do_lookup(struct acpi_device *adev,
  12692. + struct acpi_serdev_lookup *lookup)
  12693. +{
  12694. + struct list_head resource_list;
  12695. + int ret;
  12696. +
  12697. + lookup->device_handle = acpi_device_handle(adev);
  12698. + lookup->controller_handle = NULL;
  12699. + lookup->n = 0;
  12700. +
  12701. + INIT_LIST_HEAD(&resource_list);
  12702. + ret = acpi_dev_get_resources(adev, &resource_list,
  12703. + acpi_serdev_parse_resource, lookup);
  12704. + acpi_dev_free_resource_list(&resource_list);
  12705. +
  12706. + if (ret < 0)
  12707. + return -EINVAL;
  12708. +
  12709. + return 0;
  12710. +}
  12711. +
  12712. +static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
  12713. + struct acpi_device *adev)
  12714. +{
  12715. + struct acpi_serdev_lookup lookup;
  12716. + int ret;
  12717. +
  12718. + if (acpi_bus_get_status(adev) || !adev->status.present)
  12719. + return -EINVAL;
  12720. +
  12721. + /* Look for UARTSerialBusV2 resource */
  12722. + lookup.index = -1; // we only care for the last device
  12723. +
  12724. + ret = acpi_serdev_do_lookup(adev, &lookup);
  12725. + if (ret)
  12726. + return ret;
  12727. +
  12728. + /* Make sure controller and ResourceSource handle match */
  12729. + if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
  12730. + return -ENODEV;
  12731. +
  12732. + return 0;
  12733. +}
  12734. +
  12735. static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
  12736. - struct acpi_device *adev)
  12737. + struct acpi_device *adev)
  12738. {
  12739. - struct serdev_device *serdev = NULL;
  12740. + struct serdev_device *serdev;
  12741. int err;
  12742. - if (acpi_bus_get_status(adev) || !adev->status.present ||
  12743. - acpi_device_enumerated(adev))
  12744. - return AE_OK;
  12745. -
  12746. serdev = serdev_device_alloc(ctrl);
  12747. if (!serdev) {
  12748. dev_err(&ctrl->dev, "failed to allocate serdev device for %s\n",
  12749. @@ -589,7 +670,7 @@ static const struct acpi_device_id serdev_acpi_devices_blacklist[] = {
  12750. };
  12751. static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
  12752. - void *data, void **return_value)
  12753. + void *data, void **return_value)
  12754. {
  12755. struct serdev_controller *ctrl = data;
  12756. struct acpi_device *adev;
  12757. @@ -597,26 +678,32 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
  12758. if (acpi_bus_get_device(handle, &adev))
  12759. return AE_OK;
  12760. + if (acpi_device_enumerated(adev))
  12761. + return AE_OK;
  12762. +
  12763. /* Skip if black listed */
  12764. if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist))
  12765. return AE_OK;
  12766. + if (acpi_serdev_check_resources(ctrl, adev))
  12767. + return AE_OK;
  12768. +
  12769. return acpi_serdev_register_device(ctrl, adev);
  12770. }
  12771. +
  12772. static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
  12773. {
  12774. acpi_status status;
  12775. - acpi_handle handle;
  12776. - handle = ACPI_HANDLE(ctrl->dev.parent);
  12777. - if (!handle)
  12778. + if (!has_acpi_companion(ctrl->dev.parent))
  12779. return -ENODEV;
  12780. - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
  12781. + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
  12782. + SERDEV_ACPI_MAX_SCAN_DEPTH,
  12783. acpi_serdev_add_device, NULL, ctrl, NULL);
  12784. if (ACPI_FAILURE(status))
  12785. - dev_dbg(&ctrl->dev, "failed to enumerate serdev slaves\n");
  12786. + dev_warn(&ctrl->dev, "failed to enumerate serdev slaves\n");
  12787. if (!ctrl->serdev)
  12788. return -ENODEV;
  12789. --
  12790. 2.28.0