0002-surface-sam.patch 340 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
  1. From 0be98f240951d46cc8871a777d47a7d502d03305 Mon Sep 17 00:00:00 2001
  2. From: Maximilian Luz <luzmaximilian@gmail.com>
  3. Date: Fri, 6 Dec 2019 11:56:12 +0100
  4. Subject: [PATCH 2/5] surface-sam
  5. ---
  6. drivers/platform/x86/Kconfig | 2 +
  7. drivers/platform/x86/Makefile | 1 +
  8. drivers/platform/x86/surface_sam/Kconfig | 176 +
  9. drivers/platform/x86/surface_sam/Makefile | 16 +
  10. .../x86/surface_sam/surface_sam_debugfs.c | 270 +
  11. .../x86/surface_sam/surface_sam_dtx.c | 582 ++
  12. .../x86/surface_sam/surface_sam_hps.c | 1287 ++++
  13. .../x86/surface_sam/surface_sam_san.c | 930 +++
  14. .../x86/surface_sam/surface_sam_san.h | 30 +
  15. .../x86/surface_sam/surface_sam_sid.c | 283 +
  16. .../x86/surface_sam/surface_sam_sid_gpelid.c | 232 +
  17. .../surface_sam/surface_sam_sid_perfmode.c | 214 +
  18. .../x86/surface_sam/surface_sam_sid_power.c | 1054 ++++
  19. .../x86/surface_sam/surface_sam_sid_power.h | 16 +
  20. .../x86/surface_sam/surface_sam_sid_vhf.c | 429 ++
  21. .../x86/surface_sam/surface_sam_sid_vhf.h | 14 +
  22. .../x86/surface_sam/surface_sam_ssh.c | 5329 +++++++++++++++++
  23. .../x86/surface_sam/surface_sam_ssh.h | 717 +++
  24. .../x86/surface_sam/surface_sam_ssh_trace.h | 587 ++
  25. .../x86/surface_sam/surface_sam_vhf.c | 266 +
  26. 20 files changed, 12435 insertions(+)
  27. create mode 100644 drivers/platform/x86/surface_sam/Kconfig
  28. create mode 100644 drivers/platform/x86/surface_sam/Makefile
  29. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_debugfs.c
  30. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_dtx.c
  31. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_hps.c
  32. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_san.c
  33. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_san.h
  34. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid.c
  35. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c
  36. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c
  37. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_power.c
  38. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_power.h
  39. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c
  40. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h
  41. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh.c
  42. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh.h
  43. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h
  44. create mode 100644 drivers/platform/x86/surface_sam/surface_sam_vhf.c
  45. diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
  46. index 0581a54cf562f..998007444059b 100644
  47. --- a/drivers/platform/x86/Kconfig
  48. +++ b/drivers/platform/x86/Kconfig
  49. @@ -1376,6 +1376,8 @@ config INTEL_SCU_PLATFORM
  50. and SCU (sometimes called PMC as well). The driver currently
  51. supports Intel Elkhart Lake and compatible platforms.
  52. +source "drivers/platform/x86/surface_sam/Kconfig"
  53. +
  54. config INTEL_SCU_IPC_UTIL
  55. tristate "Intel SCU IPC utility driver"
  56. depends on INTEL_SCU
  57. diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
  58. index 2b85852a1a872..e154e679db453 100644
  59. --- a/drivers/platform/x86/Makefile
  60. +++ b/drivers/platform/x86/Makefile
  61. @@ -148,3 +148,4 @@ obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
  62. intel_telemetry_pltdrv.o \
  63. intel_telemetry_debugfs.o
  64. obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
  65. +obj-$(CONFIG_SURFACE_SAM) += surface_sam/
  66. diff --git a/drivers/platform/x86/surface_sam/Kconfig b/drivers/platform/x86/surface_sam/Kconfig
  67. new file mode 100644
  68. index 0000000000000..b5bb55248a5d5
  69. --- /dev/null
  70. +++ b/drivers/platform/x86/surface_sam/Kconfig
  71. @@ -0,0 +1,176 @@
  72. +menuconfig SURFACE_SAM
  73. + depends on ACPI
  74. + tristate "Microsoft Surface/System Aggregator Module and Platform Drivers"
  75. + help
  76. + Drivers for the Surface/System Aggregator Module (SAM) of Microsoft
  77. + Surface devices.
  78. +
  79. + SAM is an embedded controller that provides access to various
  80. + functionalities on these devices, including battery status, keyboard
  81. + events (on the Laptops) and many more.
  82. +
  83. + Say M/Y here if you have a Microsoft Surface device with a SAM device
  84. + (i.e. 5th generation or later).
  85. +
  86. +config SURFACE_SAM_SSH
  87. + tristate "Surface Serial Hub Driver"
  88. + depends on SURFACE_SAM
  89. + depends on SERIAL_DEV_CTRL_TTYPORT
  90. + select CRC_CCITT
  91. + default m
  92. + help
  93. + Surface Serial Hub driver for 5th generation (or later) Microsoft
  94. + Surface devices.
  95. +
  96. + This is the base driver for the embedded serial controller found on
  97. + 5th generation (and later) Microsoft Surface devices (e.g. Book 2,
  98. + Laptop, Laptop 2, Pro 2017, Pro 6, ...). This driver itself only
  99. + provides access to the embedded controller (SAM) and subsequent
  100. + drivers are required for the respective functionalities.
  101. +
  102. + If you have a 5th generation (or later) Microsoft Surface device, say
  103. + Y or M here.
  104. +
  105. +config SURFACE_SAM_SSH_ERROR_INJECTION
  106. + bool "Surface Serial Hub Error Injection Capabilities"
  107. + depends on SURFACE_SAM_SSH
  108. + depends on FUNCTION_ERROR_INJECTION
  109. + default n
  110. + help
  111. + Enable error injection capabilities for the Surface Serial Hub.
  112. + This is used to debug the driver, specifically the communication
  113. + interface. It is not required for normal use.
  114. +
  115. + If you are not sure, say N here.
  116. +
  117. +config SURFACE_SAM_DEBUGFS
  118. + tristate "Surface Serial Hub Debug Device"
  119. + depends on SURFACE_SAM_SSH
  120. + depends on DEBUG_FS
  121. + default n
  122. + help
  123. + Debug device for direct communication with the embedded controller
  124. + found on 5th generation (and later) Microsoft Surface devices (e.g.
  125. + Book 2, Laptop, Laptop 2, Pro 2017, Pro 6, ...) via debugfs.
  126. +
  127. + If you are not sure, say N here.
  128. +
  129. +config SURFACE_SAM_SAN
  130. + tristate "Surface ACPI Notify Driver"
  131. + depends on SURFACE_SAM_SSH
  132. + default m
  133. + help
  134. + Surface ACPI Notify driver for 5th generation (or later) Microsoft
  135. + Surface devices.
  136. +
  137. + This driver enables basic ACPI events and requests, such as battery
  138. + status requests/events, thermal events, lid status, and possibly more,
  139. + which would otherwise not work on these devices.
  140. +
  141. + If you are not sure, say M here.
  142. +
  143. +config SURFACE_SAM_VHF
  144. + tristate "Surface Virtual HID Framework Driver"
  145. + depends on SURFACE_SAM_SSH
  146. + depends on HID
  147. + default m
  148. + help
  149. + Surface Virtual HID Framework driver for 5th generation (or later)
  150. + Microsoft Surface devices.
  151. +
  152. + This driver provides support for the Microsoft Virtual HID framework,
  153. + which is required for keyboard support on the Surface Laptop 1 and 2.
  154. +
  155. + If you are not sure, say M here.
  156. +
  157. +config SURFACE_SAM_DTX
  158. + tristate "Surface Detachment System (DTX) Driver"
  159. + depends on SURFACE_SAM_SSH
  160. + depends on INPUT
  161. + default m
  162. + help
  163. + Surface Detachment System (DTX) driver for the Microsoft Surface Book
  164. + 2. This driver provides support for proper detachment handling in
  165. + user-space, status events relating to the base and support for
  166. + the safeguard keeping the base attached when the discrete GPU
  167. + contained in it is running, via the special /dev/surface-dtx device.
  168. +
  169. + Also provides a standard input device to provide SW_TABLET_MODE events
  170. + upon device mode change.
  171. +
  172. + If you are not sure, say M here.
  173. +
  174. +config SURFACE_SAM_HPS
  175. + tristate "Surface dGPU Hot-Plug System (dGPU-HPS) Driver"
  176. + depends on SURFACE_SAM_SSH
  177. + depends on SURFACE_SAM_SAN
  178. + depends on GPIO_SYSFS
  179. + default m
  180. + help
  181. + Driver to properly handle hot-plugging and explicit power-on/power-off
  182. + of the discrete GPU (dGPU) on the Surface Book 2 and 3.
  183. +
  184. + If you are not sure, say M here.
  185. +
  186. +config SURFACE_SAM_SID
  187. + tristate "Surface Platform Integration Driver"
  188. + depends on SURFACE_SAM_SSH
  189. + default m
  190. + help
  191. + Surface Platform Integration Driver for the Microsoft Surface Devices.
  192. + This driver loads various model-specific sub-drivers, including
  193. + battery and keyboard support on 7th generation Surface devices, proper
  194. + lid setup to enable device wakeup when the lid is opened on multiple
  195. + models, as well as performance mode setting support on the Surface
  196. + Book 2.
  197. +
  198. + If you are not sure, say M here.
  199. +
  200. +config SURFACE_SAM_SID_GPELID
  201. + tristate "Surface Lid Wakeup Driver"
  202. + depends on SURFACE_SAM_SID
  203. + default m
  204. + help
  205. + Driver to set up device wake-up via lid on Intel-based Microsoft
  206. + Surface devices. These devices do not wake up from sleep as their GPE
  207. + interrupt is not configured automatically. This driver solves that
  208. + problem.
  209. +
  210. + If you are not sure, say M here.
  211. +
  212. +config SURFACE_SAM_SID_PERFMODE
  213. + tristate "Surface Performance Mode Driver"
  214. + depends on SURFACE_SAM_SID
  215. + depends on SYSFS
  216. + default m
  217. + help
  218. + This driver provides support for setting performance-modes on Surface
  219. + devices via the perf_mode sysfs attribute. Currently only supports the
  220. + Surface Book 2. Performance-modes directly influence the fan-profile
  221. + of the device, allowing you to choose between higher performance or
  222. + quieter operation.
  223. +
  224. + If you are not sure, say M here.
  225. +
  226. +config SURFACE_SAM_SID_VHF
  227. + tristate "Surface SAM HID Driver"
  228. + depends on SURFACE_SAM_SID
  229. + depends on HID
  230. + default m
  231. + help
  232. + This driver provides support for HID devices connected via the Surface
  233. + SAM embedded controller. It provides support for keyboard and touchpad
  234. + on the Surface Laptop 3 models.
  235. +
  236. + If you are not sure, say M here.
  237. +
  238. +config SURFACE_SAM_SID_POWER
  239. + tristate "Surface SAM Battery/AC Driver"
  240. + depends on SURFACE_SAM_SID
  241. + select POWER_SUPPLY
  242. + default m
  243. + help
  244. + This driver provides support for the battery and AC on 7th generation
  245. + Surface devices.
  246. +
  247. + If you are not sure, say M here.
  248. diff --git a/drivers/platform/x86/surface_sam/Makefile b/drivers/platform/x86/surface_sam/Makefile
  249. new file mode 100644
  250. index 0000000000000..89bced46ebcdd
  251. --- /dev/null
  252. +++ b/drivers/platform/x86/surface_sam/Makefile
  253. @@ -0,0 +1,16 @@
  254. +# SPDX-License-Identifier: GPL-2.0-or-later
  255. +
  256. +# For include/trace/define_trace.h to include surface_sam_ssh_trace.h
  257. +CFLAGS_surface_sam_ssh.o = -I$(src)
  258. +
  259. +obj-$(CONFIG_SURFACE_SAM_SSH) += surface_sam_ssh.o
  260. +obj-$(CONFIG_SURFACE_SAM_SAN) += surface_sam_san.o
  261. +obj-$(CONFIG_SURFACE_SAM_DTX) += surface_sam_dtx.o
  262. +obj-$(CONFIG_SURFACE_SAM_HPS) += surface_sam_hps.o
  263. +obj-$(CONFIG_SURFACE_SAM_VHF) += surface_sam_vhf.o
  264. +obj-$(CONFIG_SURFACE_SAM_SID) += surface_sam_sid.o
  265. +obj-$(CONFIG_SURFACE_SAM_SID_GPELID) += surface_sam_sid_gpelid.o
  266. +obj-$(CONFIG_SURFACE_SAM_SID_PERFMODE) += surface_sam_sid_perfmode.o
  267. +obj-$(CONFIG_SURFACE_SAM_SID_POWER) += surface_sam_sid_power.o
  268. +obj-$(CONFIG_SURFACE_SAM_SID_VHF) += surface_sam_sid_vhf.o
  269. +obj-$(CONFIG_SURFACE_SAM_DEBUGFS) += surface_sam_debugfs.o
  270. diff --git a/drivers/platform/x86/surface_sam/surface_sam_debugfs.c b/drivers/platform/x86/surface_sam/surface_sam_debugfs.c
  271. new file mode 100644
  272. index 0000000000000..13e93404775c5
  273. --- /dev/null
  274. +++ b/drivers/platform/x86/surface_sam/surface_sam_debugfs.c
  275. @@ -0,0 +1,270 @@
  276. +// SPDX-License-Identifier: GPL-2.0-or-later
  277. +
  278. +#include <linux/debugfs.h>
  279. +#include <linux/fs.h>
  280. +#include <linux/kernel.h>
  281. +#include <linux/module.h>
  282. +#include <linux/platform_device.h>
  283. +#include <linux/slab.h>
  284. +#include <linux/uaccess.h>
  285. +
  286. +#include "surface_sam_ssh.h"
  287. +
  288. +#define SSAM_DBGDEV_NAME "surface_sam_dbgdev"
  289. +#define SSAM_DBGDEV_VERS 0x0100
  290. +
  291. +
  292. +struct ssam_dbgdev_request {
  293. + __u8 target_category;
  294. + __u8 command_id;
  295. + __u8 instance_id;
  296. + __u8 channel;
  297. + __u16 flags;
  298. + __s16 status;
  299. +
  300. + struct {
  301. + __u8 __pad[6];
  302. + __u16 length;
  303. + const __u8 __user *data;
  304. + } payload;
  305. +
  306. + struct {
  307. + __u8 __pad[6];
  308. + __u16 length;
  309. + __u8 __user *data;
  310. + } response;
  311. +};
  312. +
  313. +#define SSAM_DBGDEV_IOCTL_GETVERSION _IOR(0xA5, 0, __u32)
  314. +#define SSAM_DBGDEV_IOCTL_REQUEST _IOWR(0xA5, 1, struct ssam_dbgdev_request)
  315. +
  316. +
  317. +struct ssam_dbgdev {
  318. + struct ssam_controller *ctrl;
  319. + struct dentry *dentry_dir;
  320. + struct dentry *dentry_dev;
  321. +};
  322. +
  323. +
  324. +static int ssam_dbgdev_open(struct inode *inode, struct file *filp)
  325. +{
  326. + filp->private_data = inode->i_private;
  327. + return nonseekable_open(inode, filp);
  328. +}
  329. +
  330. +static long ssam_dbgdev_request(struct file *file, unsigned long arg)
  331. +{
  332. + struct ssam_dbgdev *ddev = file->private_data;
  333. + struct ssam_dbgdev_request __user *r;
  334. + struct ssam_dbgdev_request rqst;
  335. + struct ssam_request spec;
  336. + struct ssam_response rsp;
  337. + u8 *pldbuf = NULL;
  338. + u8 *rspbuf = NULL;
  339. + int status = 0, ret = 0, tmp;
  340. +
  341. + r = (struct ssam_dbgdev_request __user *)arg;
  342. + ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
  343. + if (ret)
  344. + goto out;
  345. +
  346. + // setup basic request fields
  347. + spec.target_category = rqst.target_category;
  348. + spec.command_id = rqst.command_id;
  349. + spec.instance_id = rqst.instance_id;
  350. + spec.channel = rqst.channel;
  351. + spec.flags = rqst.flags;
  352. + spec.length = rqst.payload.length;
  353. +
  354. + rsp.capacity = rqst.response.length;
  355. + rsp.length = 0;
  356. +
  357. + // get request payload from user-space
  358. + if (spec.length) {
  359. + if (!rqst.payload.data) {
  360. + ret = -EINVAL;
  361. + goto out;
  362. + }
  363. +
  364. + pldbuf = kzalloc(spec.length, GFP_KERNEL);
  365. + if (!pldbuf) {
  366. + status = -ENOMEM;
  367. + ret = -EFAULT;
  368. + goto out;
  369. + }
  370. +
  371. + if (copy_from_user(pldbuf, rqst.payload.data, spec.length)) {
  372. + ret = -EFAULT;
  373. + goto out;
  374. + }
  375. + }
  376. + spec.payload = pldbuf;
  377. +
  378. + // allocate response buffer
  379. + if (rsp.capacity) {
  380. + if (!rqst.response.data) {
  381. + ret = -EINVAL;
  382. + goto out;
  383. + }
  384. +
  385. + rspbuf = kzalloc(rsp.capacity, GFP_KERNEL);
  386. + if (!rspbuf) {
  387. + status = -ENOMEM;
  388. + ret = -EFAULT;
  389. + goto out;
  390. + }
  391. + }
  392. + rsp.pointer = rspbuf;
  393. +
  394. + // perform request
  395. + status = ssam_request_sync(ddev->ctrl, &spec, &rsp);
  396. + if (status)
  397. + goto out;
  398. +
  399. + // copy response to user-space
  400. + if (rsp.length) {
  401. + if (copy_to_user(rqst.response.data, rsp.pointer, rsp.length)) {
  402. + ret = -EFAULT;
  403. + goto out;
  404. + }
  405. + }
  406. +
  407. +out:
  408. + // always try to set response-length and status
  409. + tmp = put_user(rsp.length, &r->response.length);
  410. + if (!ret)
  411. + ret = tmp;
  412. +
  413. + tmp = put_user(status, &r->status);
  414. + if (!ret)
  415. + ret = tmp;
  416. +
  417. + // cleanup
  418. + if (pldbuf)
  419. + kfree(pldbuf);
  420. +
  421. + if (rspbuf)
  422. + kfree(rspbuf);
  423. +
  424. + return ret;
  425. +}
  426. +
  427. +static long ssam_dbgdev_getversion(struct file *file, unsigned long arg)
  428. +{
  429. + put_user(SSAM_DBGDEV_VERS, (u32 __user *)arg);
  430. + return 0;
  431. +}
  432. +
  433. +static long ssam_dbgdev_ioctl(struct file *file, unsigned int cmd,
  434. + unsigned long arg)
  435. +{
  436. + switch (cmd) {
  437. + case SSAM_DBGDEV_IOCTL_GETVERSION:
  438. + return ssam_dbgdev_getversion(file, arg);
  439. +
  440. + case SSAM_DBGDEV_IOCTL_REQUEST:
  441. + return ssam_dbgdev_request(file, arg);
  442. +
  443. + default:
  444. + return -EINVAL;
  445. + }
  446. +}
  447. +
  448. +const struct file_operations ssam_dbgdev_fops = {
  449. + .owner = THIS_MODULE,
  450. + .open = ssam_dbgdev_open,
  451. + .unlocked_ioctl = ssam_dbgdev_ioctl,
  452. + .compat_ioctl = ssam_dbgdev_ioctl,
  453. + .llseek = noop_llseek,
  454. +};
  455. +
  456. +static int ssam_dbgdev_probe(struct platform_device *pdev)
  457. +{
  458. + struct ssam_dbgdev *ddev;
  459. + struct ssam_controller *ctrl;
  460. + int status;
  461. +
  462. + status = ssam_client_bind(&pdev->dev, &ctrl);
  463. + if (status)
  464. + return status == -ENXIO ? -EPROBE_DEFER : status;
  465. +
  466. + ddev = devm_kzalloc(&pdev->dev, sizeof(struct ssam_dbgdev), GFP_KERNEL);
  467. + if (!ddev)
  468. + return -ENOMEM;
  469. +
  470. + ddev->ctrl = ctrl;
  471. +
  472. + ddev->dentry_dir = debugfs_create_dir("surface_sam", NULL);
  473. + if (IS_ERR(ddev->dentry_dir))
  474. + return PTR_ERR(ddev->dentry_dir);
  475. +
  476. + ddev->dentry_dev = debugfs_create_file("controller", 0600,
  477. + ddev->dentry_dir, ddev,
  478. + &ssam_dbgdev_fops);
  479. + if (IS_ERR(ddev->dentry_dev)) {
  480. + debugfs_remove(ddev->dentry_dir);
  481. + return PTR_ERR(ddev->dentry_dev);
  482. + }
  483. +
  484. + platform_set_drvdata(pdev, ddev);
  485. + return 0;
  486. +}
  487. +
  488. +static int ssam_dbgdev_remove(struct platform_device *pdev)
  489. +{
  490. + struct ssam_dbgdev *ddev = platform_get_drvdata(pdev);
  491. +
  492. + debugfs_remove(ddev->dentry_dev);
  493. + debugfs_remove(ddev->dentry_dir);
  494. +
  495. + platform_set_drvdata(pdev, NULL);
  496. + return 0;
  497. +}
  498. +
  499. +static void ssam_dbgdev_release(struct device *dev)
  500. +{
  501. + // nothing to do
  502. +}
  503. +
  504. +
  505. +static struct platform_device ssam_dbgdev_device = {
  506. + .name = SSAM_DBGDEV_NAME,
  507. + .id = PLATFORM_DEVID_NONE,
  508. + .dev.release = ssam_dbgdev_release,
  509. +};
  510. +
  511. +static struct platform_driver ssam_dbgdev_driver = {
  512. + .probe = ssam_dbgdev_probe,
  513. + .remove = ssam_dbgdev_remove,
  514. + .driver = {
  515. + .name = SSAM_DBGDEV_NAME,
  516. + },
  517. +};
  518. +
  519. +static int __init surface_sam_debugfs_init(void)
  520. +{
  521. + int status;
  522. +
  523. + status = platform_device_register(&ssam_dbgdev_device);
  524. + if (status)
  525. + return status;
  526. +
  527. + status = platform_driver_register(&ssam_dbgdev_driver);
  528. + if (status)
  529. + platform_device_unregister(&ssam_dbgdev_device);
  530. +
  531. + return status;
  532. +}
  533. +
  534. +static void __exit surface_sam_debugfs_exit(void)
  535. +{
  536. + platform_driver_unregister(&ssam_dbgdev_driver);
  537. + platform_device_unregister(&ssam_dbgdev_device);
  538. +}
  539. +
  540. +module_init(surface_sam_debugfs_init);
  541. +module_exit(surface_sam_debugfs_exit);
  542. +
  543. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  544. +MODULE_DESCRIPTION("DebugFS entries for Surface Aggregator Module");
  545. +MODULE_LICENSE("GPL");
  546. diff --git a/drivers/platform/x86/surface_sam/surface_sam_dtx.c b/drivers/platform/x86/surface_sam/surface_sam_dtx.c
  547. new file mode 100644
  548. index 0000000000000..9c844bb0f7739
  549. --- /dev/null
  550. +++ b/drivers/platform/x86/surface_sam/surface_sam_dtx.c
  551. @@ -0,0 +1,582 @@
  552. +// SPDX-License-Identifier: GPL-2.0-or-later
  553. +/*
  554. + * Detachment system (DTX) driver for Microsoft Surface Book 2.
  555. + */
  556. +
  557. +#include <linux/acpi.h>
  558. +#include <linux/delay.h>
  559. +#include <linux/fs.h>
  560. +#include <linux/input.h>
  561. +#include <linux/ioctl.h>
  562. +#include <linux/kernel.h>
  563. +#include <linux/miscdevice.h>
  564. +#include <linux/module.h>
  565. +#include <linux/poll.h>
  566. +#include <linux/rculist.h>
  567. +#include <linux/slab.h>
  568. +#include <linux/spinlock.h>
  569. +#include <linux/platform_device.h>
  570. +
  571. +#include "surface_sam_ssh.h"
  572. +
  573. +
  574. +#define USB_VENDOR_ID_MICROSOFT 0x045e
  575. +#define USB_DEVICE_ID_MS_SURFACE_BASE_2_INTEGRATION 0x0922
  576. +
  577. +// name copied from MS device manager
  578. +#define DTX_INPUT_NAME "Microsoft Surface Base 2 Integration Device"
  579. +
  580. +
  581. +#define DTX_CMD_LATCH_LOCK _IO(0x11, 0x01)
  582. +#define DTX_CMD_LATCH_UNLOCK _IO(0x11, 0x02)
  583. +#define DTX_CMD_LATCH_REQUEST _IO(0x11, 0x03)
  584. +#define DTX_CMD_LATCH_OPEN _IO(0x11, 0x04)
  585. +#define DTX_CMD_GET_OPMODE _IOR(0x11, 0x05, int)
  586. +
  587. +#define SAM_EVENT_DTX_CID_CONNECTION 0x0c
  588. +#define SAM_EVENT_DTX_CID_BUTTON 0x0e
  589. +#define SAM_EVENT_DTX_CID_ERROR 0x0f
  590. +#define SAM_EVENT_DTX_CID_LATCH_STATUS 0x11
  591. +
  592. +#define DTX_OPMODE_TABLET 0x00
  593. +#define DTX_OPMODE_LAPTOP 0x01
  594. +#define DTX_OPMODE_STUDIO 0x02
  595. +
  596. +#define DTX_LATCH_CLOSED 0x00
  597. +#define DTX_LATCH_OPENED 0x01
  598. +
  599. +
  600. +// Warning: This must always be a power of 2!
  601. +#define DTX_CLIENT_BUF_SIZE 16
  602. +
  603. +#define DTX_CONNECT_OPMODE_DELAY 1000
  604. +
  605. +#define DTX_ERR KERN_ERR "surface_sam_dtx: "
  606. +#define DTX_WARN KERN_WARNING "surface_sam_dtx: "
  607. +
  608. +
  609. +struct surface_dtx_event {
  610. + u8 type;
  611. + u8 code;
  612. + u8 arg0;
  613. + u8 arg1;
  614. +} __packed;
  615. +
  616. +struct surface_dtx_dev {
  617. + struct ssam_controller *ctrl;
  618. +
  619. + struct ssam_event_notifier notif;
  620. + struct delayed_work opmode_work;
  621. + wait_queue_head_t waitq;
  622. + struct miscdevice mdev;
  623. + spinlock_t client_lock;
  624. + struct list_head client_list;
  625. + struct mutex mutex;
  626. + bool active;
  627. + spinlock_t input_lock;
  628. + struct input_dev *input_dev;
  629. +};
  630. +
  631. +struct surface_dtx_client {
  632. + struct list_head node;
  633. + struct surface_dtx_dev *ddev;
  634. + struct fasync_struct *fasync;
  635. + spinlock_t buffer_lock;
  636. + unsigned int buffer_head;
  637. + unsigned int buffer_tail;
  638. + struct surface_dtx_event buffer[DTX_CLIENT_BUF_SIZE];
  639. +};
  640. +
  641. +
  642. +static struct surface_dtx_dev surface_dtx_dev;
  643. +
  644. +
  645. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
  646. + .target_category = SSAM_SSH_TC_BAS,
  647. + .command_id = 0x06,
  648. + .instance_id = 0x00,
  649. + .channel = 0x01,
  650. +});
  651. +
  652. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
  653. + .target_category = SSAM_SSH_TC_BAS,
  654. + .command_id = 0x07,
  655. + .instance_id = 0x00,
  656. + .channel = 0x01,
  657. +});
  658. +
  659. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
  660. + .target_category = SSAM_SSH_TC_BAS,
  661. + .command_id = 0x08,
  662. + .instance_id = 0x00,
  663. + .channel = 0x01,
  664. +});
  665. +
  666. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_open, {
  667. + .target_category = SSAM_SSH_TC_BAS,
  668. + .command_id = 0x09,
  669. + .instance_id = 0x00,
  670. + .channel = 0x01,
  671. +});
  672. +
  673. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
  674. + .target_category = SSAM_SSH_TC_BAS,
  675. + .command_id = 0x0d,
  676. + .instance_id = 0x00,
  677. + .channel = 0x01,
  678. +});
  679. +
  680. +
  681. +static int dtx_bas_get_opmode(struct ssam_controller *ctrl, int __user *buf)
  682. +{
  683. + u8 opmode;
  684. + int status;
  685. +
  686. + status = ssam_bas_query_opmode(ctrl, &opmode);
  687. + if (status < 0)
  688. + return status;
  689. +
  690. + if (put_user(opmode, buf))
  691. + return -EACCES;
  692. +
  693. + return 0;
  694. +}
  695. +
  696. +
  697. +static int surface_dtx_open(struct inode *inode, struct file *file)
  698. +{
  699. + struct surface_dtx_dev *ddev = container_of(file->private_data, struct surface_dtx_dev, mdev);
  700. + struct surface_dtx_client *client;
  701. +
  702. + // initialize client
  703. + client = kzalloc(sizeof(struct surface_dtx_client), GFP_KERNEL);
  704. + if (!client)
  705. + return -ENOMEM;
  706. +
  707. + spin_lock_init(&client->buffer_lock);
  708. + client->buffer_head = 0;
  709. + client->buffer_tail = 0;
  710. + client->ddev = ddev;
  711. +
  712. + // attach client
  713. + spin_lock(&ddev->client_lock);
  714. + list_add_tail_rcu(&client->node, &ddev->client_list);
  715. + spin_unlock(&ddev->client_lock);
  716. +
  717. + file->private_data = client;
  718. + nonseekable_open(inode, file);
  719. +
  720. + return 0;
  721. +}
  722. +
  723. +static int surface_dtx_release(struct inode *inode, struct file *file)
  724. +{
  725. + struct surface_dtx_client *client = file->private_data;
  726. +
  727. + // detach client
  728. + spin_lock(&client->ddev->client_lock);
  729. + list_del_rcu(&client->node);
  730. + spin_unlock(&client->ddev->client_lock);
  731. + synchronize_rcu();
  732. +
  733. + kfree(client);
  734. + file->private_data = NULL;
  735. +
  736. + return 0;
  737. +}
  738. +
  739. +static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
  740. +{
  741. + struct surface_dtx_client *client = file->private_data;
  742. + struct surface_dtx_dev *ddev = client->ddev;
  743. + struct surface_dtx_event event;
  744. + size_t read = 0;
  745. + int status = 0;
  746. +
  747. + if (count != 0 && count < sizeof(struct surface_dtx_event))
  748. + return -EINVAL;
  749. +
  750. + if (!ddev->active)
  751. + return -ENODEV;
  752. +
  753. + // check availability
  754. + if (client->buffer_head == client->buffer_tail) {
  755. + if (file->f_flags & O_NONBLOCK)
  756. + return -EAGAIN;
  757. +
  758. + status = wait_event_interruptible(ddev->waitq,
  759. + client->buffer_head != client->buffer_tail ||
  760. + !ddev->active);
  761. + if (status)
  762. + return status;
  763. +
  764. + if (!ddev->active)
  765. + return -ENODEV;
  766. + }
  767. +
  768. + // copy events one by one
  769. + while (read + sizeof(struct surface_dtx_event) <= count) {
  770. + spin_lock_irq(&client->buffer_lock);
  771. +
  772. + if (client->buffer_head == client->buffer_tail) {
  773. + spin_unlock_irq(&client->buffer_lock);
  774. + break;
  775. + }
  776. +
  777. + // get one event
  778. + event = client->buffer[client->buffer_tail];
  779. + client->buffer_tail = (client->buffer_tail + 1) & (DTX_CLIENT_BUF_SIZE - 1);
  780. + spin_unlock_irq(&client->buffer_lock);
  781. +
  782. + // copy to userspace
  783. + if (copy_to_user(buf, &event, sizeof(struct surface_dtx_event)))
  784. + return -EFAULT;
  785. +
  786. + read += sizeof(struct surface_dtx_event);
  787. + }
  788. +
  789. + return read;
  790. +}
  791. +
  792. +static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
  793. +{
  794. + struct surface_dtx_client *client = file->private_data;
  795. + int mask;
  796. +
  797. + poll_wait(file, &client->ddev->waitq, pt);
  798. +
  799. + if (client->ddev->active)
  800. + mask = EPOLLOUT | EPOLLWRNORM;
  801. + else
  802. + mask = EPOLLHUP | EPOLLERR;
  803. +
  804. + if (client->buffer_head != client->buffer_tail)
  805. + mask |= EPOLLIN | EPOLLRDNORM;
  806. +
  807. + return mask;
  808. +}
  809. +
  810. +static int surface_dtx_fasync(int fd, struct file *file, int on)
  811. +{
  812. + struct surface_dtx_client *client = file->private_data;
  813. +
  814. + return fasync_helper(fd, file, on, &client->fasync);
  815. +}
  816. +
  817. +static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  818. +{
  819. + struct surface_dtx_client *client = file->private_data;
  820. + struct surface_dtx_dev *ddev = client->ddev;
  821. + int status;
  822. +
  823. + status = mutex_lock_interruptible(&ddev->mutex);
  824. + if (status)
  825. + return status;
  826. +
  827. + if (!ddev->active) {
  828. + mutex_unlock(&ddev->mutex);
  829. + return -ENODEV;
  830. + }
  831. +
  832. + switch (cmd) {
  833. + case DTX_CMD_LATCH_LOCK:
  834. + status = ssam_bas_latch_lock(ddev->ctrl);
  835. + break;
  836. +
  837. + case DTX_CMD_LATCH_UNLOCK:
  838. + status = ssam_bas_latch_unlock(ddev->ctrl);
  839. + break;
  840. +
  841. + case DTX_CMD_LATCH_REQUEST:
  842. + status = ssam_bas_latch_request(ddev->ctrl);
  843. + break;
  844. +
  845. + case DTX_CMD_LATCH_OPEN:
  846. + status = ssam_bas_latch_open(ddev->ctrl);
  847. + break;
  848. +
  849. + case DTX_CMD_GET_OPMODE:
  850. + status = dtx_bas_get_opmode(ddev->ctrl, (int __user *)arg);
  851. + break;
  852. +
  853. + default:
  854. + status = -EINVAL;
  855. + break;
  856. + }
  857. +
  858. + mutex_unlock(&ddev->mutex);
  859. + return status;
  860. +}
  861. +
  862. +static const struct file_operations surface_dtx_fops = {
  863. + .owner = THIS_MODULE,
  864. + .open = surface_dtx_open,
  865. + .release = surface_dtx_release,
  866. + .read = surface_dtx_read,
  867. + .poll = surface_dtx_poll,
  868. + .fasync = surface_dtx_fasync,
  869. + .unlocked_ioctl = surface_dtx_ioctl,
  870. + .llseek = no_llseek,
  871. +};
  872. +
  873. +static struct surface_dtx_dev surface_dtx_dev = {
  874. + .mdev = {
  875. + .minor = MISC_DYNAMIC_MINOR,
  876. + .name = "surface_dtx",
  877. + .fops = &surface_dtx_fops,
  878. + },
  879. + .client_lock = __SPIN_LOCK_UNLOCKED(),
  880. + .input_lock = __SPIN_LOCK_UNLOCKED(),
  881. + .mutex = __MUTEX_INITIALIZER(surface_dtx_dev.mutex),
  882. + .active = false,
  883. +};
  884. +
  885. +
  886. +static void surface_dtx_push_event(struct surface_dtx_dev *ddev, struct surface_dtx_event *event)
  887. +{
  888. + struct surface_dtx_client *client;
  889. +
  890. + rcu_read_lock();
  891. + list_for_each_entry_rcu(client, &ddev->client_list, node) {
  892. + spin_lock(&client->buffer_lock);
  893. +
  894. + client->buffer[client->buffer_head++] = *event;
  895. + client->buffer_head &= DTX_CLIENT_BUF_SIZE - 1;
  896. +
  897. + if (unlikely(client->buffer_head == client->buffer_tail)) {
  898. + printk(DTX_WARN "event buffer overrun\n");
  899. + client->buffer_tail = (client->buffer_tail + 1) & (DTX_CLIENT_BUF_SIZE - 1);
  900. + }
  901. +
  902. + spin_unlock(&client->buffer_lock);
  903. +
  904. + kill_fasync(&client->fasync, SIGIO, POLL_IN);
  905. + }
  906. + rcu_read_unlock();
  907. +
  908. + wake_up_interruptible(&ddev->waitq);
  909. +}
  910. +
  911. +
  912. +static void surface_dtx_update_opmpde(struct surface_dtx_dev *ddev)
  913. +{
  914. + struct surface_dtx_event event;
  915. + u8 opmode;
  916. + int status;
  917. +
  918. + // get operation mode
  919. + status = ssam_bas_query_opmode(ddev->ctrl, &opmode);
  920. + if (status < 0) {
  921. + printk(DTX_ERR "EC request failed with error %d\n", status);
  922. + return;
  923. + }
  924. +
  925. + // send DTX event
  926. + event.type = 0x11;
  927. + event.code = 0x0D;
  928. + event.arg0 = opmode;
  929. + event.arg1 = 0x00;
  930. +
  931. + surface_dtx_push_event(ddev, &event);
  932. +
  933. + // send SW_TABLET_MODE event
  934. + spin_lock(&ddev->input_lock);
  935. + input_report_switch(ddev->input_dev, SW_TABLET_MODE, opmode != DTX_OPMODE_LAPTOP);
  936. + input_sync(ddev->input_dev);
  937. + spin_unlock(&ddev->input_lock);
  938. +}
  939. +
  940. +static void surface_dtx_opmode_workfn(struct work_struct *work)
  941. +{
  942. + struct surface_dtx_dev *ddev = container_of(work, struct surface_dtx_dev, opmode_work.work);
  943. +
  944. + surface_dtx_update_opmpde(ddev);
  945. +}
  946. +
  947. +static u32 surface_dtx_notification(struct ssam_notifier_block *nb, const struct ssam_event *in_event)
  948. +{
  949. + struct surface_dtx_dev *ddev = container_of(nb, struct surface_dtx_dev, notif.base);
  950. + struct surface_dtx_event event;
  951. + unsigned long delay;
  952. +
  953. + switch (in_event->command_id) {
  954. + case SAM_EVENT_DTX_CID_CONNECTION:
  955. + case SAM_EVENT_DTX_CID_BUTTON:
  956. + case SAM_EVENT_DTX_CID_ERROR:
  957. + case SAM_EVENT_DTX_CID_LATCH_STATUS:
  958. + if (in_event->length > 2) {
  959. + printk(DTX_ERR "unexpected payload size (cid: %x, len: %u)\n",
  960. + in_event->command_id, in_event->length);
  961. + return SSAM_NOTIF_HANDLED;
  962. + }
  963. +
  964. + event.type = in_event->target_category;
  965. + event.code = in_event->command_id;
  966. + event.arg0 = in_event->length >= 1 ? in_event->data[0] : 0x00;
  967. + event.arg1 = in_event->length >= 2 ? in_event->data[1] : 0x00;
  968. + surface_dtx_push_event(ddev, &event);
  969. + break;
  970. +
  971. + default:
  972. + return 0;
  973. + }
  974. +
  975. + // update device mode
  976. + if (in_event->command_id == SAM_EVENT_DTX_CID_CONNECTION) {
  977. + delay = event.arg0 ? DTX_CONNECT_OPMODE_DELAY : 0;
  978. + schedule_delayed_work(&ddev->opmode_work, delay);
  979. + }
  980. +
  981. + return SSAM_NOTIF_HANDLED;
  982. +}
  983. +
  984. +
  985. +static struct input_dev *surface_dtx_register_inputdev(
  986. + struct platform_device *pdev, struct ssam_controller *ctrl)
  987. +{
  988. + struct input_dev *input_dev;
  989. + u8 opmode;
  990. + int status;
  991. +
  992. + input_dev = input_allocate_device();
  993. + if (!input_dev)
  994. + return ERR_PTR(-ENOMEM);
  995. +
  996. + input_dev->name = DTX_INPUT_NAME;
  997. + input_dev->dev.parent = &pdev->dev;
  998. + input_dev->id.bustype = BUS_VIRTUAL;
  999. + input_dev->id.vendor = USB_VENDOR_ID_MICROSOFT;
  1000. + input_dev->id.product = USB_DEVICE_ID_MS_SURFACE_BASE_2_INTEGRATION;
  1001. +
  1002. + input_set_capability(input_dev, EV_SW, SW_TABLET_MODE);
  1003. +
  1004. + status = ssam_bas_query_opmode(ctrl, &opmode);
  1005. + if (status < 0) {
  1006. + input_free_device(input_dev);
  1007. + return ERR_PTR(status);
  1008. + }
  1009. +
  1010. + input_report_switch(input_dev, SW_TABLET_MODE, opmode != DTX_OPMODE_LAPTOP);
  1011. +
  1012. + status = input_register_device(input_dev);
  1013. + if (status) {
  1014. + input_unregister_device(input_dev);
  1015. + return ERR_PTR(status);
  1016. + }
  1017. +
  1018. + return input_dev;
  1019. +}
  1020. +
  1021. +
  1022. +static int surface_sam_dtx_probe(struct platform_device *pdev)
  1023. +{
  1024. + struct surface_dtx_dev *ddev = &surface_dtx_dev;
  1025. + struct ssam_controller *ctrl;
  1026. + struct input_dev *input_dev;
  1027. + int status;
  1028. +
  1029. + // link to ec
  1030. + status = ssam_client_bind(&pdev->dev, &ctrl);
  1031. + if (status)
  1032. + return status == -ENXIO ? -EPROBE_DEFER : status;
  1033. +
  1034. + input_dev = surface_dtx_register_inputdev(pdev, ctrl);
  1035. + if (IS_ERR(input_dev))
  1036. + return PTR_ERR(input_dev);
  1037. +
  1038. + // initialize device
  1039. + mutex_lock(&ddev->mutex);
  1040. + if (ddev->active) {
  1041. + mutex_unlock(&ddev->mutex);
  1042. + status = -ENODEV;
  1043. + goto err_register;
  1044. + }
  1045. +
  1046. + ddev->ctrl = ctrl;
  1047. + INIT_DELAYED_WORK(&ddev->opmode_work, surface_dtx_opmode_workfn);
  1048. + INIT_LIST_HEAD(&ddev->client_list);
  1049. + init_waitqueue_head(&ddev->waitq);
  1050. + ddev->active = true;
  1051. + ddev->input_dev = input_dev;
  1052. + mutex_unlock(&ddev->mutex);
  1053. +
  1054. + status = misc_register(&ddev->mdev);
  1055. + if (status)
  1056. + goto err_register;
  1057. +
  1058. + // set up events
  1059. + ddev->notif.base.priority = 1;
  1060. + ddev->notif.base.fn = surface_dtx_notification;
  1061. + ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
  1062. + ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
  1063. + ddev->notif.event.id.instance = 0;
  1064. + ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
  1065. +
  1066. + status = ssam_notifier_register(ctrl, &ddev->notif);
  1067. + if (status)
  1068. + goto err_events_setup;
  1069. +
  1070. + return 0;
  1071. +
  1072. +err_events_setup:
  1073. + misc_deregister(&ddev->mdev);
  1074. +err_register:
  1075. + input_unregister_device(ddev->input_dev);
  1076. + return status;
  1077. +}
  1078. +
  1079. +static int surface_sam_dtx_remove(struct platform_device *pdev)
  1080. +{
  1081. + struct surface_dtx_dev *ddev = &surface_dtx_dev;
  1082. + struct surface_dtx_client *client;
  1083. +
  1084. + mutex_lock(&ddev->mutex);
  1085. + if (!ddev->active) {
  1086. + mutex_unlock(&ddev->mutex);
  1087. + return 0;
  1088. + }
  1089. +
  1090. + // mark as inactive
  1091. + ddev->active = false;
  1092. + mutex_unlock(&ddev->mutex);
  1093. +
  1094. + // After this call we're guaranteed that no more input events will arrive
  1095. + ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
  1096. +
  1097. + // wake up clients
  1098. + spin_lock(&ddev->client_lock);
  1099. + list_for_each_entry(client, &ddev->client_list, node) {
  1100. + kill_fasync(&client->fasync, SIGIO, POLL_HUP);
  1101. + }
  1102. + spin_unlock(&ddev->client_lock);
  1103. +
  1104. + wake_up_interruptible(&ddev->waitq);
  1105. +
  1106. + // unregister user-space devices
  1107. + input_unregister_device(ddev->input_dev);
  1108. + misc_deregister(&ddev->mdev);
  1109. +
  1110. + return 0;
  1111. +}
  1112. +
  1113. +
  1114. +static const struct acpi_device_id surface_sam_dtx_match[] = {
  1115. + { "MSHW0133", 0 },
  1116. + { },
  1117. +};
  1118. +MODULE_DEVICE_TABLE(acpi, surface_sam_dtx_match);
  1119. +
  1120. +static struct platform_driver surface_sam_dtx = {
  1121. + .probe = surface_sam_dtx_probe,
  1122. + .remove = surface_sam_dtx_remove,
  1123. + .driver = {
  1124. + .name = "surface_sam_dtx",
  1125. + .acpi_match_table = surface_sam_dtx_match,
  1126. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  1127. + },
  1128. +};
  1129. +module_platform_driver(surface_sam_dtx);
  1130. +
  1131. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  1132. +MODULE_DESCRIPTION("Surface Detachment System (DTX) Driver for 5th Generation Surface Devices");
  1133. +MODULE_LICENSE("GPL");
  1134. diff --git a/drivers/platform/x86/surface_sam/surface_sam_hps.c b/drivers/platform/x86/surface_sam/surface_sam_hps.c
  1135. new file mode 100644
  1136. index 0000000000000..b11f9fa8095fb
  1137. --- /dev/null
  1138. +++ b/drivers/platform/x86/surface_sam/surface_sam_hps.c
  1139. @@ -0,0 +1,1287 @@
  1140. +// SPDX-License-Identifier: GPL-2.0-or-later
  1141. +/*
  1142. + * Surface dGPU hot-plug system driver.
  1143. + * Supports explicit setting of the dGPU power-state on the Surface Book 2 and
  1144. + * properly handles hot-plugging by detaching the base.
  1145. + */
  1146. +
  1147. +#include <linux/acpi.h>
  1148. +#include <linux/delay.h>
  1149. +#include <linux/gpio.h>
  1150. +#include <linux/kernel.h>
  1151. +#include <linux/module.h>
  1152. +#include <linux/mutex.h>
  1153. +#include <linux/pci.h>
  1154. +#include <linux/platform_device.h>
  1155. +#include <linux/sysfs.h>
  1156. +
  1157. +#include "surface_sam_ssh.h"
  1158. +#include "surface_sam_san.h"
  1159. +
  1160. +
  1161. +// TODO: vgaswitcheroo integration
  1162. +
  1163. +
  1164. +static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix);
  1165. +
  1166. +
  1167. +#define SHPS_DSM_REVISION 1
  1168. +#define SHPS_DSM_GPU_ADDRS 0x02
  1169. +#define SHPS_DSM_GPU_POWER 0x05
  1170. +static const guid_t SHPS_DSM_UUID =
  1171. + GUID_INIT(0x5515a847, 0xed55, 0x4b27, 0x83, 0x52, 0xcd,
  1172. + 0x32, 0x0e, 0x10, 0x36, 0x0a);
  1173. +
  1174. +
  1175. +#define SAM_DGPU_TC 0x13
  1176. +#define SAM_DGPU_CID_POWERON 0x02
  1177. +#define ACPI_SGCP_NOTIFY_POWER_ON 0x81
  1178. +
  1179. +#define SHPS_DSM_GPU_ADDRS_RP "RP5_PCIE"
  1180. +#define SHPS_DSM_GPU_ADDRS_DGPU "DGPU_PCIE"
  1181. +#define SHPS_PCI_GPU_ADDR_RP "\\_SB.PCI0.RP13._ADR"
  1182. +
  1183. +static const struct acpi_gpio_params gpio_base_presence_int = { 0, 0, false };
  1184. +static const struct acpi_gpio_params gpio_base_presence = { 1, 0, false };
  1185. +static const struct acpi_gpio_params gpio_dgpu_power_int = { 2, 0, false };
  1186. +static const struct acpi_gpio_params gpio_dgpu_power = { 3, 0, false };
  1187. +static const struct acpi_gpio_params gpio_dgpu_presence_int = { 4, 0, false };
  1188. +static const struct acpi_gpio_params gpio_dgpu_presence = { 5, 0, false };
  1189. +
  1190. +static const struct acpi_gpio_mapping shps_acpi_gpios[] = {
  1191. + { "base_presence-int-gpio", &gpio_base_presence_int, 1 },
  1192. + { "base_presence-gpio", &gpio_base_presence, 1 },
  1193. + { "dgpu_power-int-gpio", &gpio_dgpu_power_int, 1 },
  1194. + { "dgpu_power-gpio", &gpio_dgpu_power, 1 },
  1195. + { "dgpu_presence-int-gpio", &gpio_dgpu_presence_int, 1 },
  1196. + { "dgpu_presence-gpio", &gpio_dgpu_presence, 1 },
  1197. + { },
  1198. +};
  1199. +
  1200. +
  1201. +enum shps_dgpu_power {
  1202. + SHPS_DGPU_POWER_OFF = 0,
  1203. + SHPS_DGPU_POWER_ON = 1,
  1204. + SHPS_DGPU_POWER_UNKNOWN = 2,
  1205. +};
  1206. +
  1207. +static const char *shps_dgpu_power_str(enum shps_dgpu_power power)
  1208. +{
  1209. + if (power == SHPS_DGPU_POWER_OFF)
  1210. + return "off";
  1211. + else if (power == SHPS_DGPU_POWER_ON)
  1212. + return "on";
  1213. + else if (power == SHPS_DGPU_POWER_UNKNOWN)
  1214. + return "unknown";
  1215. + else
  1216. + return "<invalid>";
  1217. +}
  1218. +
  1219. +enum shps_notification_method {
  1220. + SHPS_NOTIFICATION_METHOD_SAN = 1, // dGPU events arrive via the SAN RQSG handler (gen1)
  1221. + SHPS_NOTIFICATION_METHOD_SGCP = 2 // dGPU events arrive via ACPI SGCP notifications (gen2)
  1222. +};
  1223. +
  1224. +struct shps_hardware_traits {
  1225. + enum shps_notification_method notification_method;
  1226. + const char *dgpu_rp_pci_address; // ACPI entry for root-port address; NULL: query _DSM table instead
  1227. +};
  1228. +
  1229. +struct shps_driver_data {
  1230. + struct ssam_controller *ctrl;
  1231. +
  1232. + struct mutex lock; // serializes power-state queries and transitions
  1233. + struct pci_dev *dgpu_root_port;
  1234. + struct pci_saved_state *dgpu_root_port_state; // config-space snapshot taken before power-off
  1235. + struct gpio_desc *gpio_dgpu_power;
  1236. + struct gpio_desc *gpio_dgpu_presence;
  1237. + struct gpio_desc *gpio_base_presence;
  1238. + unsigned int irq_dgpu_presence;
  1239. + unsigned int irq_base_presence;
  1240. + unsigned long state; // SHPS_STATE_BIT_* flags
  1241. + acpi_handle sgpc_handle; // SGCP notification handle (gen2); NOTE(review): field spelled "sgpc"
  1242. + struct shps_hardware_traits hardware_traits;
  1243. +};
  1244. +
  1245. +struct shps_hardware_probe {
  1246. + const char *hardware_id; // ACPI HID to match; NULL matches anything (default entry)
  1247. + int generation;
  1248. + struct shps_hardware_traits *hardware_traits;
  1249. +};
  1250. +
  1251. +static struct shps_hardware_traits shps_gen1_hwtraits = {
  1252. + .notification_method = SHPS_NOTIFICATION_METHOD_SAN
  1253. +};
  1254. +
  1255. +static struct shps_hardware_traits shps_gen2_hwtraits = {
  1256. + .notification_method = SHPS_NOTIFICATION_METHOD_SGCP,
  1257. + .dgpu_rp_pci_address = SHPS_PCI_GPU_ADDR_RP
  1258. +};
  1259. +
  1260. +static const struct shps_hardware_probe shps_hardware_probe_match[] = {
  1261. + /* Surface Book 3 */
  1262. + { "MSHW0117", 2, &shps_gen2_hwtraits },
  1263. +
  1264. + /* Surface Book 2 (default, must be last entry) */
  1265. + { NULL, 1, &shps_gen1_hwtraits }
  1266. +};
  1267. +
  1268. +#define SHPS_STATE_BIT_PWRTGT 0 /* desired power state: 1 for on, 0 for off */
  1269. +#define SHPS_STATE_BIT_RPPWRON_SYNC 1 /* synchronous/requested power-up in progress */
  1270. +#define SHPS_STATE_BIT_WAKE_ENABLED 2 /* wakeup via base-presence GPIO enabled */
  1271. +
  1272. +
  1273. +#define SHPS_DGPU_PARAM_PERM 0644 /* sysfs permissions for module parameters */
  1274. +
  1275. +enum shps_dgpu_power_mp {
  1276. + SHPS_DGPU_MP_POWER_OFF = SHPS_DGPU_POWER_OFF,
  1277. + SHPS_DGPU_MP_POWER_ON = SHPS_DGPU_POWER_ON,
  1278. + SHPS_DGPU_MP_POWER_ASIS = -1, /* -1: leave power state untouched */
  1279. +
  1280. + __SHPS_DGPU_MP_POWER_START = -1, /* lowest accepted module-param value */
  1281. + __SHPS_DGPU_MP_POWER_END = 1, /* highest accepted module-param value */
  1282. +};
  1283. +
  1284. +static int param_dgpu_power_set(const char *val, const struct kernel_param *kp) // validate and store a dGPU power param (-1..1)
  1285. +{
  1286. + int power = SHPS_DGPU_MP_POWER_OFF;
  1287. + int status;
  1288. +
  1289. + status = kstrtoint(val, 0, &power); // parse into a temporary first for range checking
  1290. + if (status)
  1291. + return status;
  1292. +
  1293. + if (power < __SHPS_DGPU_MP_POWER_START || power > __SHPS_DGPU_MP_POWER_END)
  1294. + return -EINVAL;
  1295. +
  1296. + return param_set_int(val, kp); // re-parses val; cannot fail since kstrtoint succeeded
  1297. +}
  1298. +
  1299. +static const struct kernel_param_ops param_dgpu_power_ops = {
  1300. + .set = param_dgpu_power_set,
  1301. + .get = param_get_int,
  1302. +};
  1303. +
  1304. +static int param_dgpu_power_init = SHPS_DGPU_MP_POWER_OFF;
  1305. +static int param_dgpu_power_exit = SHPS_DGPU_MP_POWER_ON;
  1306. +static int param_dgpu_power_susp = SHPS_DGPU_MP_POWER_ASIS;
  1307. +static bool param_dtx_latch = true;
  1308. +
  1309. +module_param_cb(dgpu_power_init, &param_dgpu_power_ops, &param_dgpu_power_init, SHPS_DGPU_PARAM_PERM);
  1310. +module_param_cb(dgpu_power_exit, &param_dgpu_power_ops, &param_dgpu_power_exit, SHPS_DGPU_PARAM_PERM);
  1311. +module_param_cb(dgpu_power_susp, &param_dgpu_power_ops, &param_dgpu_power_susp, SHPS_DGPU_PARAM_PERM);
  1312. +module_param_named(dtx_latch, param_dtx_latch, bool, SHPS_DGPU_PARAM_PERM);
  1313. +
  1314. +MODULE_PARM_DESC(dgpu_power_init, "dGPU power state to be set on init (0: off / 1: on / -1: as-is, default: off)");
  1315. +MODULE_PARM_DESC(dgpu_power_exit, "dGPU power state to be set on exit (0: off / 1: on / -1: as-is, default: on)");
  1316. +MODULE_PARM_DESC(dgpu_power_susp, "dGPU power state to be set on suspend (0: off / 1: on / -1: as-is, default: as-is)");
  1317. +MODULE_PARM_DESC(dtx_latch, "lock/unlock DTX base latch in accordance to power-state (Y/n)");
  1318. +
  1319. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, { // BAS request: lock the DTX base latch
  1320. + .target_category = SSAM_SSH_TC_BAS,
  1321. + .command_id = 0x06,
  1322. + .instance_id = 0x00,
  1323. + .channel = 0x01,
  1324. +});
  1325. +
  1326. +static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, { // BAS request: unlock the DTX base latch
  1327. + .target_category = SSAM_SSH_TC_BAS,
  1328. + .command_id = 0x07,
  1329. + .instance_id = 0x00,
  1330. + .channel = 0x01,
  1331. +});
  1332. +
  1333. +static int shps_dgpu_dsm_get_pci_addr_from_adr(struct platform_device *pdev, const char *entry)
  1334. +{
  1335. + acpi_handle handle = ACPI_HANDLE(&pdev->dev);
  1336. + acpi_status status;
  1337. + u64 device_addr = 0;
  1338. + u8 bus, dev, fun;
  1339. +
  1340. + /*
  1341. + * Evaluate the named ACPI entry (e.g. the root-port address object) to
  1342. + * a single integer. The object takes no arguments, so pass NULL rather
  1343. + * than a zero-sized (non-standard) acpi_object_list array.
  1344. + */
  1345. + status = acpi_evaluate_integer(handle, (acpi_string)entry, NULL, &device_addr);
  1346. + if (ACPI_FAILURE(status))
  1347. + return -ENODEV;
  1348. +
  1349. + // _ADR-style encoding: device in bits [23:16], function in bits [7:0], bus 0.
  1350. + bus = 0;
  1351. + dev = (device_addr & 0xFF0000) >> 16;
  1352. + fun = device_addr & 0xFF;
  1353. +
  1354. + dev_info(&pdev->dev, "found pci device at bus = %d, dev = %x, fun = %x\n",
  1355. + (u32)bus, (u32)dev, (u32)fun);
  1356. +
  1357. + return bus << 8 | PCI_DEVFN(dev, fun);
  1358. +}
  1359. +
  1360. +static int shps_dgpu_dsm_get_pci_addr_from_dsm(struct platform_device *pdev, const char *entry) // look up a named device address in the _DSM address table
  1361. +{
  1362. + acpi_handle handle = ACPI_HANDLE(&pdev->dev);
  1363. + union acpi_object *result;
  1364. + union acpi_object *e0;
  1365. + union acpi_object *e1;
  1366. + union acpi_object *e2;
  1367. + u64 device_addr = 0; // 0 = entry not found
  1368. + u8 bus, dev, fun;
  1369. + int i;
  1370. +
  1371. +
  1372. + result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION,
  1373. + SHPS_DSM_GPU_ADDRS, NULL, ACPI_TYPE_PACKAGE);
  1374. +
  1375. + if (IS_ERR_OR_NULL(result))
  1376. + return result ? PTR_ERR(result) : -EIO;
  1377. +
  1378. + // three entries per device: name, address, <integer>
  1379. + for (i = 0; i + 2 < result->package.count; i += 3) {
  1380. + e0 = &result->package.elements[i];
  1381. + e1 = &result->package.elements[i + 1];
  1382. + e2 = &result->package.elements[i + 2];
  1383. +
  1384. + if (e0->type != ACPI_TYPE_STRING) {
  1385. + ACPI_FREE(result);
  1386. + return -EIO;
  1387. + }
  1388. +
  1389. + if (e1->type != ACPI_TYPE_INTEGER) {
  1390. + ACPI_FREE(result);
  1391. + return -EIO;
  1392. + }
  1393. +
  1394. + if (e2->type != ACPI_TYPE_INTEGER) {
  1395. + ACPI_FREE(result);
  1396. + return -EIO;
  1397. + }
  1398. +
  1399. + if (strncmp(e0->string.pointer, entry, 64) == 0) // no break: last matching entry wins
  1400. + device_addr = e1->integer.value;
  1401. + }
  1402. +
  1403. + ACPI_FREE(result);
  1404. + if (device_addr == 0) // assumes valid addresses are non-zero
  1405. + return -ENODEV;
  1406. +
  1407. +
  1408. + // convert address
  1409. + bus = (device_addr & 0x0FF00000) >> 20;
  1410. + dev = (device_addr & 0x000F8000) >> 15;
  1411. + fun = (device_addr & 0x00007000) >> 12;
  1412. +
  1413. + return bus << 8 | PCI_DEVFN(dev, fun);
  1414. +}
  1415. +
  1416. +static struct pci_dev *shps_dgpu_dsm_get_pci_dev(struct platform_device *pdev) // resolve the dGPU root port to a pci_dev (caller must pci_dev_put)
  1417. +{
  1418. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1419. + struct pci_dev *dev;
  1420. + int addr;
  1421. +
  1422. +
  1423. + if (drvdata->hardware_traits.dgpu_rp_pci_address) { // gen2: direct ACPI address entry
  1424. + addr = shps_dgpu_dsm_get_pci_addr_from_adr(pdev, drvdata->hardware_traits.dgpu_rp_pci_address);
  1425. + } else { // gen1: look up via _DSM device table
  1426. + addr = shps_dgpu_dsm_get_pci_addr_from_dsm(pdev, SHPS_DSM_GPU_ADDRS_RP);
  1427. + }
  1428. +
  1429. + if (addr < 0)
  1430. + return ERR_PTR(addr);
  1431. +
  1432. + dev = pci_get_domain_bus_and_slot(0, (addr & 0xFF00) >> 8, addr & 0xFF); // addr = bus << 8 | devfn, domain 0
  1433. + return dev ? dev : ERR_PTR(-ENODEV);
  1434. +}
  1435. +
  1436. +
  1437. +static int shps_dgpu_dsm_get_power_unlocked(struct platform_device *pdev) // current dGPU power as seen via the power GPIO
  1438. +{
  1439. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1440. + struct gpio_desc *gpio = drvdata->gpio_dgpu_power; // NOTE(review): reads GPIO, not _DSM, despite the name
  1441. + int status;
  1442. +
  1443. + status = gpiod_get_value_cansleep(gpio);
  1444. + if (status < 0)
  1445. + return status;
  1446. +
  1447. + return status == 0 ? SHPS_DGPU_POWER_OFF : SHPS_DGPU_POWER_ON;
  1448. +}
  1449. +
  1450. +static int shps_dgpu_dsm_get_power(struct platform_device *pdev) // locked wrapper around the GPIO power query
  1451. +{
  1452. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1453. + int status;
  1454. +
  1455. + mutex_lock(&drvdata->lock);
  1456. + status = shps_dgpu_dsm_get_power_unlocked(pdev);
  1457. + mutex_unlock(&drvdata->lock);
  1458. +
  1459. + return status;
  1460. +}
  1461. +
  1462. +static int __shps_dgpu_dsm_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power) // unconditionally issue the power _DSM
  1463. +{
  1464. + acpi_handle handle = ACPI_HANDLE(&pdev->dev);
  1465. + union acpi_object *result;
  1466. + union acpi_object param;
  1467. +
  1468. + dev_info(&pdev->dev, "setting dGPU direct power to \'%s\'\n", shps_dgpu_power_str(power));
  1469. +
  1470. + param.type = ACPI_TYPE_INTEGER;
  1471. + param.integer.value = power == SHPS_DGPU_POWER_ON; // _DSM argument: 1 = on, 0 = off
  1472. +
  1473. + result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION,
  1474. + SHPS_DSM_GPU_POWER, &param, ACPI_TYPE_BUFFER);
  1475. +
  1476. + if (IS_ERR_OR_NULL(result))
  1477. + return result ? PTR_ERR(result) : -EIO;
  1478. +
  1479. + // check for the expected result
  1480. + if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) {
  1481. + ACPI_FREE(result);
  1482. + return -EIO;
  1483. + }
  1484. +
  1485. + ACPI_FREE(result);
  1486. + return 0;
  1487. +}
  1488. +
  1489. +static int shps_dgpu_dsm_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power) // validate target and skip no-op transitions
  1490. +{
  1491. + int status;
  1492. +
  1493. + if (power != SHPS_DGPU_POWER_ON && power != SHPS_DGPU_POWER_OFF)
  1494. + return -EINVAL;
  1495. +
  1496. + status = shps_dgpu_dsm_get_power_unlocked(pdev); // avoid redundant _DSM call when already in target state
  1497. + if (status < 0)
  1498. + return status;
  1499. + if (status == power)
  1500. + return 0;
  1501. +
  1502. + return __shps_dgpu_dsm_set_power_unlocked(pdev, power);
  1503. +}
  1504. +
  1505. +static int shps_dgpu_dsm_set_power(struct platform_device *pdev, enum shps_dgpu_power power) // locked wrapper
  1506. +{
  1507. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1508. + int status;
  1509. +
  1510. + mutex_lock(&drvdata->lock);
  1511. + status = shps_dgpu_dsm_set_power_unlocked(pdev, power);
  1512. + mutex_unlock(&drvdata->lock);
  1513. +
  1514. + return status;
  1515. +}
  1516. +
  1517. +
  1518. +static bool shps_rp_link_up(struct pci_dev *rp) // true if the root-port link is active or slot presence is detected
  1519. +{
  1520. + u16 lnksta = 0, sltsta = 0; // stay 0 if a capability read fails -> treated as link down
  1521. +
  1522. + pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
  1523. + pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta);
  1524. +
  1525. + return (lnksta & PCI_EXP_LNKSTA_DLLLA) || (sltsta & PCI_EXP_SLTSTA_PDS);
  1526. +}
  1527. +
  1528. +
  1529. +static int shps_dgpu_rp_get_power_unlocked(struct platform_device *pdev) // derive dGPU power from root-port PCI power state
  1530. +{
  1531. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1532. + struct pci_dev *rp = drvdata->dgpu_root_port;
  1533. +
  1534. + if (rp->current_state == PCI_D3hot || rp->current_state == PCI_D3cold) // D3hot/D3cold -> off
  1535. + return SHPS_DGPU_POWER_OFF;
  1536. + else if (rp->current_state == PCI_UNKNOWN || rp->current_state == PCI_POWER_ERROR) // unknown/error -> unknown
  1537. + return SHPS_DGPU_POWER_UNKNOWN;
  1538. + else
  1539. + return SHPS_DGPU_POWER_ON;
  1540. +}
  1541. +
  1542. +static int shps_dgpu_rp_get_power(struct platform_device *pdev) // locked wrapper
  1543. +{
  1544. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1545. + int status;
  1546. +
  1547. + mutex_lock(&drvdata->lock);
  1548. + status = shps_dgpu_rp_get_power_unlocked(pdev);
  1549. + mutex_unlock(&drvdata->lock);
  1550. +
  1551. + return status;
  1552. +}
  1553. +
  1554. +static int __shps_dgpu_rp_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power) // perform the actual root-port power transition
  1555. +{
  1556. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1557. + struct pci_dev *rp = drvdata->dgpu_root_port;
  1558. + int status, i;
  1559. +
  1560. + dev_info(&pdev->dev, "setting dGPU power state to \'%s\'\n", shps_dgpu_power_str(power));
  1561. +
  1562. + dbg_dump_drvsta(pdev, "__shps_dgpu_rp_set_power_unlocked.1");
  1563. + if (power == SHPS_DGPU_POWER_ON) {
  1564. + set_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state); // mark this power-on as expected for the notify handlers
  1565. + pci_set_power_state(rp, PCI_D0);
  1566. +
  1567. + if (drvdata->dgpu_root_port_state) // restore config space saved at the last power-off
  1568. + pci_load_and_free_saved_state(rp, &drvdata->dgpu_root_port_state);
  1569. +
  1570. + pci_restore_state(rp);
  1571. +
  1572. + if (!pci_is_enabled(rp))
  1573. + pci_enable_device(rp);
  1574. +
  1575. + pci_set_master(rp);
  1576. + clear_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state);
  1577. +
  1578. + set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1579. + } else {
  1580. + if (!drvdata->dgpu_root_port_state) {
  1581. + pci_save_state(rp);
  1582. + drvdata->dgpu_root_port_state = pci_store_saved_state(rp);
  1583. + }
  1584. +
  1585. + /*
  1586. + * To properly update the hot-plug system we need to "remove" the dGPU
  1587. + * before disabling it and sending it to D3cold. Following this, we
  1588. + * need to wait for the link and slot status to actually change.
  1589. + */
  1590. + status = shps_dgpu_dsm_set_power_unlocked(pdev, SHPS_DGPU_POWER_OFF);
  1591. + if (status)
  1592. + return status;
  1593. +
  1594. + for (i = 0; i < 20 && shps_rp_link_up(rp); i++) // poll up to ~1s for link/slot status to drop
  1595. + msleep(50);
  1596. +
  1597. + if (shps_rp_link_up(rp))
  1598. + dev_err(&pdev->dev, "dGPU removal via DSM timed out\n");
  1599. +
  1600. + pci_clear_master(rp);
  1601. +
  1602. + if (pci_is_enabled(rp))
  1603. + pci_disable_device(rp);
  1604. +
  1605. + pci_set_power_state(rp, PCI_D3cold);
  1606. +
  1607. + clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1608. + }
  1609. + dbg_dump_drvsta(pdev, "__shps_dgpu_rp_set_power_unlocked.2");
  1610. +
  1611. + return 0;
  1612. +}
  1613. +
  1614. +static int shps_dgpu_rp_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power) // validate target and skip no-op transitions
  1615. +{
  1616. + int status;
  1617. +
  1618. + if (power != SHPS_DGPU_POWER_ON && power != SHPS_DGPU_POWER_OFF)
  1619. + return -EINVAL;
  1620. +
  1621. + status = shps_dgpu_rp_get_power_unlocked(pdev);
  1622. + if (status < 0)
  1623. + return status;
  1624. + if (status == power)
  1625. + return 0;
  1626. +
  1627. + return __shps_dgpu_rp_set_power_unlocked(pdev, power);
  1628. +}
  1629. +
  1630. +static int shps_dgpu_rp_set_power(struct platform_device *pdev, enum shps_dgpu_power power) // locked wrapper
  1631. +{
  1632. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1633. + int status;
  1634. +
  1635. + mutex_lock(&drvdata->lock);
  1636. + status = shps_dgpu_rp_set_power_unlocked(pdev, power);
  1637. + mutex_unlock(&drvdata->lock);
  1638. +
  1639. + return status;
  1640. +}
  1641. +
  1642. +
  1643. +static int shps_dgpu_set_power(struct platform_device *pdev, enum shps_dgpu_power power) // switch dGPU power, keeping the DTX base latch in sync
  1644. +{
  1645. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1646. + int status;
  1647. +
  1648. + if (!param_dtx_latch) // latch handling disabled: just switch root-port power
  1649. + return shps_dgpu_rp_set_power(pdev, power);
  1650. +
  1651. + if (power == SHPS_DGPU_POWER_ON) {
  1652. + status = ssam_bas_latch_lock(drvdata->ctrl); // lock latch before power-on; unlock again if power-up fails
  1653. + if (status)
  1654. + return status;
  1655. +
  1656. + status = shps_dgpu_rp_set_power(pdev, power);
  1657. + if (status)
  1658. + ssam_bas_latch_unlock(drvdata->ctrl);
  1659. +
  1660. + } else {
  1661. + status = shps_dgpu_rp_set_power(pdev, power); // power off first, then release the latch
  1662. + if (status)
  1663. + return status;
  1664. +
  1665. + status = ssam_bas_latch_unlock(drvdata->ctrl);
  1666. + }
  1667. +
  1668. + return status;
  1669. +}
  1670. +
  1671. +
  1672. +static int shps_dgpu_is_present(struct platform_device *pdev) // 1: present, 0: absent, <0: errno (presence GPIO)
  1673. +{
  1674. + struct shps_driver_data *drvdata;
  1675. +
  1676. + drvdata = platform_get_drvdata(pdev);
  1677. + return gpiod_get_value_cansleep(drvdata->gpio_dgpu_presence);
  1678. +}
  1679. +
  1680. +
  1681. +static ssize_t dgpu_power_show(struct device *dev, struct device_attribute *attr, char *data) // sysfs: dGPU power derived from root-port state
  1682. +{
  1683. + struct platform_device *pdev = to_platform_device(dev);
  1684. + int power = shps_dgpu_rp_get_power(pdev);
  1685. +
  1686. + if (power < 0)
  1687. + return power;
  1688. +
  1689. + return sprintf(data, "%s\n", shps_dgpu_power_str(power));
  1690. +}
  1691. +
  1692. +static ssize_t dgpu_power_store(struct device *dev, struct device_attribute *attr, // sysfs: set dGPU power (boolean input)
  1693. + const char *data, size_t count)
  1694. +{
  1695. + struct platform_device *pdev = to_platform_device(dev);
  1696. + enum shps_dgpu_power power;
  1697. + bool b = false;
  1698. + int status;
  1699. +
  1700. + status = kstrtobool(data, &b);
  1701. + if (status)
  1702. + return status;
  1703. +
  1704. + status = shps_dgpu_is_present(pdev);
  1705. + if (status <= 0) // refuse power changes while the dGPU is physically absent
  1706. + return status < 0 ? status : -EPERM;
  1707. +
  1708. + power = b ? SHPS_DGPU_POWER_ON : SHPS_DGPU_POWER_OFF;
  1709. + status = shps_dgpu_set_power(pdev, power);
  1710. +
  1711. + return status < 0 ? status : count;
  1712. +}
  1713. +
  1714. +static ssize_t dgpu_power_dsm_show(struct device *dev, struct device_attribute *attr, char *data) // sysfs: dGPU power as seen via the power GPIO
  1715. +{
  1716. + struct platform_device *pdev = to_platform_device(dev);
  1717. + int power = shps_dgpu_dsm_get_power(pdev);
  1718. +
  1719. + if (power < 0)
  1720. + return power;
  1721. +
  1722. + return sprintf(data, "%s\n", shps_dgpu_power_str(power));
  1723. +}
  1724. +
  1725. +static ssize_t dgpu_power_dsm_store(struct device *dev, struct device_attribute *attr, // sysfs: set dGPU power via _DSM only (no root-port handling)
  1726. + const char *data, size_t count)
  1727. +{
  1728. + struct platform_device *pdev = to_platform_device(dev);
  1729. + enum shps_dgpu_power power;
  1730. + bool b = false;
  1731. + int status;
  1732. +
  1733. + status = kstrtobool(data, &b);
  1734. + if (status)
  1735. + return status;
  1736. +
  1737. + status = shps_dgpu_is_present(pdev);
  1738. + if (status <= 0) // refuse power changes while the dGPU is physically absent
  1739. + return status < 0 ? status : -EPERM;
  1740. +
  1741. + power = b ? SHPS_DGPU_POWER_ON : SHPS_DGPU_POWER_OFF;
  1742. + status = shps_dgpu_dsm_set_power(pdev, power);
  1743. +
  1744. + return status < 0 ? status : count;
  1745. +}
  1746. +
  1747. +static DEVICE_ATTR_RW(dgpu_power);
  1748. +static DEVICE_ATTR_RW(dgpu_power_dsm);
  1749. +
  1750. +static struct attribute *shps_power_attrs[] = {
  1751. + &dev_attr_dgpu_power.attr,
  1752. + &dev_attr_dgpu_power_dsm.attr,
  1753. + NULL,
  1754. +};
  1755. +ATTRIBUTE_GROUPS(shps_power);
  1756. +
  1757. +
  1758. +static void dbg_dump_power_states(struct platform_device *pdev, const char *prefix)
  1759. +{
  1760. + enum shps_dgpu_power power_dsm;
  1761. + enum shps_dgpu_power power_rp;
  1762. + int status;
  1763. +
  1764. + status = shps_dgpu_rp_get_power_unlocked(pdev); // debug helper; caller need not hold drvdata->lock
  1765. + if (status < 0)
  1766. + dev_err(&pdev->dev, "%s: failed to get root-port power state: %d\n", prefix, status);
  1767. + power_rp = status;
  1768. +
  1769. + status = shps_dgpu_dsm_get_power_unlocked(pdev); // was rp_get_power_unlocked: copy-paste bug, dsm state never read
  1770. + if (status < 0)
  1771. + dev_err(&pdev->dev, "%s: failed to get direct power state: %d\n", prefix, status);
  1772. + power_dsm = status;
  1773. +
  1774. + dev_dbg(&pdev->dev, "%s: root-port power state: %d\n", prefix, power_rp);
  1775. + dev_dbg(&pdev->dev, "%s: direct power state: %d\n", prefix, power_dsm);
  1776. +}
  1777. +
  1778. +static void dbg_dump_pciesta(struct platform_device *pdev, const char *prefix) // dump root-port PCIe link/slot status registers
  1779. +{
  1780. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1781. + struct pci_dev *rp = drvdata->dgpu_root_port;
  1782. + u16 lnksta, lnksta2, sltsta, sltsta2;
  1783. +
  1784. + pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
  1785. + pcie_capability_read_word(rp, PCI_EXP_LNKSTA2, &lnksta2);
  1786. + pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta);
  1787. + pcie_capability_read_word(rp, PCI_EXP_SLTSTA2, &sltsta2);
  1788. +
  1789. + dev_dbg(&pdev->dev, "%s: LNKSTA: 0x%04x\n", prefix, lnksta);
  1790. + dev_dbg(&pdev->dev, "%s: LNKSTA2: 0x%04x\n", prefix, lnksta2);
  1791. + dev_dbg(&pdev->dev, "%s: SLTSTA: 0x%04x\n", prefix, sltsta);
  1792. + dev_dbg(&pdev->dev, "%s: SLTSTA2: 0x%04x\n", prefix, sltsta2);
  1793. +}
  1794. +
  1795. +static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix) // dump driver-visible root-port bookkeeping
  1796. +{
  1797. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1798. + struct pci_dev *rp = drvdata->dgpu_root_port;
  1799. +
  1800. + dev_dbg(&pdev->dev, "%s: RP power: %d\n", prefix, rp->current_state);
  1801. + dev_dbg(&pdev->dev, "%s: RP state saved: %d\n", prefix, rp->state_saved);
  1802. + dev_dbg(&pdev->dev, "%s: RP state stored: %d\n", prefix, !!drvdata->dgpu_root_port_state);
  1803. + dev_dbg(&pdev->dev, "%s: RP enabled: %d\n", prefix, atomic_read(&rp->enable_cnt));
  1804. + dev_dbg(&pdev->dev, "%s: RP mastered: %d\n", prefix, rp->is_busmaster);
  1805. +}
  1806. +
  1807. +static int shps_pm_prepare(struct device *dev) // optionally force a dGPU power state before suspend
  1808. +{
  1809. + struct platform_device *pdev = to_platform_device(dev);
  1810. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1811. + bool pwrtgt;
  1812. + int status = 0;
  1813. +
  1814. + dbg_dump_power_states(pdev, "shps_pm_prepare");
  1815. +
  1816. + if (param_dgpu_power_susp != SHPS_DGPU_MP_POWER_ASIS) { // only act if a suspend power target is configured
  1817. + pwrtgt = test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state); // remember user target; set_power overwrites PWRTGT
  1818. +
  1819. + status = shps_dgpu_set_power(pdev, param_dgpu_power_susp);
  1820. + if (status) {
  1821. + dev_err(&pdev->dev, "failed to power %s dGPU: %d\n",
  1822. + param_dgpu_power_susp == SHPS_DGPU_MP_POWER_OFF ? "off" : "on",
  1823. + status);
  1824. + return status;
  1825. + }
  1826. +
  1827. + if (pwrtgt) // restore the pre-suspend power target
  1828. + set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1829. + else
  1830. + clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1831. + }
  1832. +
  1833. + return 0;
  1834. +}
  1835. +
  1836. +static void shps_pm_complete(struct device *dev) // re-sync dGPU power target after resume
  1837. +{
  1838. + struct platform_device *pdev = to_platform_device(dev);
  1839. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1840. + int status;
  1841. +
  1842. + dbg_dump_power_states(pdev, "shps_pm_complete");
  1843. + dbg_dump_pciesta(pdev, "shps_pm_complete");
  1844. + dbg_dump_drvsta(pdev, "shps_pm_complete.1");
  1845. +
  1846. + // update power target, dGPU may have been detached while suspended
  1847. + status = shps_dgpu_is_present(pdev);
  1848. + if (status < 0) {
  1849. + dev_err(&pdev->dev, "failed to get dGPU presence: %d\n", status);
  1850. + return;
  1851. + } else if (status == 0) {
  1852. + clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1853. + }
  1854. +
  1855. + /*
  1856. + * During resume, the PCIe core will power on the root-port, which in turn
  1857. + * will power on the dGPU. Most of the state synchronization is already
  1858. + * handled via the SAN RQSG handler, so it is in a fully consistent
  1859. + * on-state here. If requested, turn it off here.
  1860. + *
  1861. + * As there seem to be some synchronization issues turning off the dGPU
  1862. + * directly after the power-on SAN RQSG notification during the resume
  1863. + * process, let's do this here.
  1864. + *
  1865. + * TODO/FIXME:
  1866. + * This does not combat unhandled power-ons when the device is not fully
  1867. + * resumed, i.e. re-suspended before shps_pm_complete is called. Those
  1868. + * should normally not be an issue, but the dGPU does get hot even though
  1869. + * it is suspended, so ideally we want to keep it off.
  1870. + */
  1871. + if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) {
  1872. + status = shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_OFF);
  1873. + if (status)
  1874. + dev_err(&pdev->dev, "failed to power-off dGPU: %d\n", status);
  1875. + }
  1876. +
  1877. + dbg_dump_drvsta(pdev, "shps_pm_complete.2");
  1878. +}
  1879. +
  1880. +static int shps_pm_suspend(struct device *dev) // arm base-presence GPIO as a wake source
  1881. +{
  1882. + struct platform_device *pdev = to_platform_device(dev);
  1883. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1884. + int status;
  1885. +
  1886. + if (device_may_wakeup(dev)) {
  1887. + status = enable_irq_wake(drvdata->irq_base_presence);
  1888. + if (status)
  1889. + return status;
  1890. +
  1891. + set_bit(SHPS_STATE_BIT_WAKE_ENABLED, &drvdata->state); // remember to disarm on resume
  1892. + }
  1893. +
  1894. + return 0;
  1895. +}
  1896. +
  1897. +static int shps_pm_resume(struct device *dev) // balance enable_irq_wake from suspend
  1898. +{
  1899. + struct platform_device *pdev = to_platform_device(dev);
  1900. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1901. + int status = 0;
  1902. +
  1903. + if (test_and_clear_bit(SHPS_STATE_BIT_WAKE_ENABLED, &drvdata->state))
  1904. + status = disable_irq_wake(drvdata->irq_base_presence);
  1905. +
  1906. + return status;
  1907. +}
  1908. +
  1909. +static void shps_shutdown(struct platform_device *pdev)
  1910. +{
  1911. + int status;
  1912. +
  1913. + /*
  1914. + * Turn on dGPU before shutting down. This allows the core drivers to
  1915. + * properly shut down the device. If we don't do this, the pcieport driver
  1916. + * will complain that the device has already been disabled.
  1917. + */
  1918. + status = shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_ON);
  1919. + if (status)
  1920. + dev_err(&pdev->dev, "failed to turn on dGPU: %d\n", status);
  1921. +}
  1922. +
  1923. +static int shps_dgpu_detached(struct platform_device *pdev) // dGPU physically removed: cut power and release latch
  1924. +{
  1925. + dbg_dump_power_states(pdev, "shps_dgpu_detached");
  1926. + return shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_OFF);
  1927. +}
  1928. +
  1929. +static int shps_dgpu_attached(struct platform_device *pdev) // dGPU physically attached: currently no action needed
  1930. +{
  1931. + dbg_dump_power_states(pdev, "shps_dgpu_attached");
  1932. + return 0;
  1933. +}
  1934. +
  1935. +static int shps_dgpu_powered_on(struct platform_device *pdev)
  1936. +{
  1937. + /*
  1938. + * This function gets called directly after a power-state transition of
  1939. + * the dGPU root port out of D3cold state, indicating a power-on of the
  1940. + * dGPU. Specifically, this function is called from the RQSG handler of
  1941. + * SAN, invoked by the ACPI _ON method of the dGPU root port. This means
  1942. + * that this function is run inside `pci_set_power_state(rp, ...)`
  1943. + * syncrhonously and thus returns before the `pci_set_power_state` call
  1944. + * does.
  1945. + *
  1946. + * `pci_set_power_state` may either be called by us or when the PCI
  1947. + * subsystem decides to power up the root port (e.g. during resume). Thus
  1948. + * we should use this function to ensure that the dGPU and root port
  1949. + * states are consistent when an unexpected power-up is encountered.
  1950. + */
  1951. +
  1952. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  1953. + struct pci_dev *rp = drvdata->dgpu_root_port;
  1954. + int status;
  1955. +
  1956. + dbg_dump_drvsta(pdev, "shps_dgpu_powered_on.1");
  1957. +
  1958. + // if we caused the root port to power-on, return
  1959. + if (test_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state))
  1960. + return 0;
  1961. +
  1962. + // if dGPU is not present, force power-target to off and return
  1963. + status = shps_dgpu_is_present(pdev);
  1964. + if (status == 0)
  1965. + clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  1966. + if (status <= 0)
  1967. + return status;
  1968. +
  1969. + mutex_lock(&drvdata->lock);
  1970. +
  1971. + dbg_dump_power_states(pdev, "shps_dgpu_powered_on.1");
  1972. + dbg_dump_pciesta(pdev, "shps_dgpu_powered_on.1");
  1973. + if (drvdata->dgpu_root_port_state) // restore state saved at the last power-off
  1974. + pci_load_and_free_saved_state(rp, &drvdata->dgpu_root_port_state);
  1975. + pci_restore_state(rp);
  1976. + if (!pci_is_enabled(rp))
  1977. + pci_enable_device(rp); // NOTE(review): return value ignored — TODO check
  1978. + pci_set_master(rp);
  1979. + dbg_dump_drvsta(pdev, "shps_dgpu_powered_on.2")
  1980. + dbg_dump_power_states(pdev, "shps_dgpu_powered_on.2");
  1981. + dbg_dump_pciesta(pdev, "shps_dgpu_powered_on.2");
  1982. +
  1983. + mutex_unlock(&drvdata->lock);
  1984. +
  1985. + if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) {
  1986. + dev_warn(&pdev->dev, "unexpected dGPU power-on detected\n");
  1987. + // TODO: schedule state re-check and update
  1988. + }
  1989. +
  1990. + return 0;
  1991. +}
  1992. +
  1993. +static int shps_dgpu_handle_rqsg(struct surface_sam_san_rqsg *rqsg, void *data) // SAN RQSG callback (gen1 notification path)
  1994. +{
  1995. + struct platform_device *pdev = data;
  1996. +
  1997. + if (rqsg->tc == SAM_DGPU_TC && rqsg->cid == SAM_DGPU_CID_POWERON)
  1998. + return shps_dgpu_powered_on(pdev);
  1999. +
  2000. + dev_warn(&pdev->dev, "unimplemented dGPU request: RQSG(0x%02x, 0x%02x, 0x%02x)\n",
  2001. + rqsg->tc, rqsg->cid, rqsg->iid);
  2002. + return 0;
  2003. +}
  2004. +
  2005. +static irqreturn_t shps_dgpu_presence_irq(int irq, void *data) // threaded IRQ: react to dGPU attach/detach
  2006. +{
  2007. + struct platform_device *pdev = data;
  2008. + bool dgpu_present;
  2009. + int status;
  2010. +
  2011. + status = shps_dgpu_is_present(pdev); // GPIO read; <0 = errno
  2012. + if (status < 0) {
  2013. + dev_err(&pdev->dev, "failed to check physical dGPU presence: %d\n", status);
  2014. + return IRQ_HANDLED;
  2015. + }
  2016. +
  2017. + dgpu_present = status != 0;
  2018. + dev_info(&pdev->dev, "dGPU physically %s\n", dgpu_present ? "attached" : "detached");
  2019. +
  2020. + if (dgpu_present)
  2021. + status = shps_dgpu_attached(pdev);
  2022. + else
  2023. + status = shps_dgpu_detached(pdev);
  2024. +
  2025. + if (status)
  2026. + dev_err(&pdev->dev, "error handling dGPU interrupt: %d\n", status);
  2027. +
  2028. + return IRQ_HANDLED;
  2029. +}
  2030. +
  2031. +static irqreturn_t shps_base_presence_irq(int irq, void *data)
  2032. +{
  2033. + return IRQ_HANDLED; // nothing to do, just wake
  2034. +}
  2035. +
  2036. +
  2037. +static int shps_gpios_setup(struct platform_device *pdev) // acquire, export and link the three driver GPIOs
  2038. +{
  2039. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2040. + struct gpio_desc *gpio_dgpu_power;
  2041. + struct gpio_desc *gpio_dgpu_presence;
  2042. + struct gpio_desc *gpio_base_presence;
  2043. + int status;
  2044. +
  2045. + // get GPIOs
  2046. + gpio_dgpu_power = devm_gpiod_get(&pdev->dev, "dgpu_power", GPIOD_IN); // devm-managed; only exports/links need manual cleanup
  2047. + if (IS_ERR(gpio_dgpu_power)) {
  2048. + status = PTR_ERR(gpio_dgpu_power);
  2049. + goto err_out;
  2050. + }
  2051. +
  2052. + gpio_dgpu_presence = devm_gpiod_get(&pdev->dev, "dgpu_presence", GPIOD_IN);
  2053. + if (IS_ERR(gpio_dgpu_presence)) {
  2054. + status = PTR_ERR(gpio_dgpu_presence);
  2055. + goto err_out;
  2056. + }
  2057. +
  2058. + gpio_base_presence = devm_gpiod_get(&pdev->dev, "base_presence", GPIOD_IN);
  2059. + if (IS_ERR(gpio_base_presence)) {
  2060. + status = PTR_ERR(gpio_base_presence);
  2061. + goto err_out;
  2062. + }
  2063. +
  2064. + // export GPIOs
  2065. + status = gpiod_export(gpio_dgpu_power, false);
  2066. + if (status)
  2067. + goto err_out;
  2068. +
  2069. + status = gpiod_export(gpio_dgpu_presence, false);
  2070. + if (status)
  2071. + goto err_export_dgpu_presence;
  2072. +
  2073. + status = gpiod_export(gpio_base_presence, false);
  2074. + if (status)
  2075. + goto err_export_base_presence;
  2076. +
  2077. + // create sysfs links
  2078. + status = gpiod_export_link(&pdev->dev, "gpio-dgpu_power", gpio_dgpu_power);
  2079. + if (status)
  2080. + goto err_link_dgpu_power;
  2081. +
  2082. + status = gpiod_export_link(&pdev->dev, "gpio-dgpu_presence", gpio_dgpu_presence);
  2083. + if (status)
  2084. + goto err_link_dgpu_presence;
  2085. +
  2086. + status = gpiod_export_link(&pdev->dev, "gpio-base_presence", gpio_base_presence);
  2087. + if (status)
  2088. + goto err_link_base_presence;
  2089. +
  2090. + drvdata->gpio_dgpu_power = gpio_dgpu_power;
  2091. + drvdata->gpio_dgpu_presence = gpio_dgpu_presence;
  2092. + drvdata->gpio_base_presence = gpio_base_presence;
  2093. + return 0;
  2094. +
  2095. +err_link_base_presence: // unwind links/exports in reverse order of creation
  2096. + sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_presence");
  2097. +err_link_dgpu_presence:
  2098. + sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_power");
  2099. +err_link_dgpu_power:
  2100. + gpiod_unexport(gpio_base_presence);
  2101. +err_export_base_presence:
  2102. + gpiod_unexport(gpio_dgpu_presence);
  2103. +err_export_dgpu_presence:
  2104. + gpiod_unexport(gpio_dgpu_power);
  2105. +err_out:
  2106. + return status;
  2107. +}
  2108. +
  2109. +static void shps_gpios_remove(struct platform_device *pdev) // tear down sysfs links and exports (GPIOs themselves are devm-managed)
  2110. +{
  2111. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2112. +
  2113. + sysfs_remove_link(&pdev->dev.kobj, "gpio-base_presence");
  2114. + sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_presence");
  2115. + sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_power");
  2116. + gpiod_unexport(drvdata->gpio_base_presence);
  2117. + gpiod_unexport(drvdata->gpio_dgpu_presence);
  2118. + gpiod_unexport(drvdata->gpio_dgpu_power);
  2119. +}
  2120. +
  2121. +static int shps_gpios_setup_irq(struct platform_device *pdev) // map presence GPIOs to IRQs and install handlers
  2122. +{
  2123. + const int irqf_dgpu = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; // fire on both attach and detach edges
  2124. + const int irqf_base = IRQF_SHARED;
  2125. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2126. + int status;
  2127. +
  2128. + status = gpiod_to_irq(drvdata->gpio_base_presence);
  2129. + if (status < 0)
  2130. + return status;
  2131. + drvdata->irq_base_presence = status;
  2132. +
  2133. + status = gpiod_to_irq(drvdata->gpio_dgpu_presence);
  2134. + if (status < 0)
  2135. + return status;
  2136. + drvdata->irq_dgpu_presence = status;
  2137. +
  2138. + status = request_irq(drvdata->irq_base_presence, // base IRQ only serves as a wake source
  2139. + shps_base_presence_irq, irqf_base,
  2140. + "shps_base_presence_irq", pdev);
  2141. + if (status) {
  2142. + dev_err(&pdev->dev, "base irq failed: %d\n", status);
  2143. + return status;
  2144. + }
  2145. +
  2146. + status = request_threaded_irq(drvdata->irq_dgpu_presence, // threaded: handler uses sleeping GPIO/SSAM calls
  2147. + NULL, shps_dgpu_presence_irq, irqf_dgpu,
  2148. + "shps_dgpu_presence_irq", pdev);
  2149. + if (status) {
  2150. + free_irq(drvdata->irq_base_presence, pdev);
  2151. + return status;
  2152. + }
  2153. +
  2154. + return 0;
  2155. +}
  2156. +
  2157. +static void shps_gpios_remove_irq(struct platform_device *pdev) // release both presence IRQs
  2158. +{
  2159. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2160. +
  2161. + free_irq(drvdata->irq_base_presence, pdev);
  2162. + free_irq(drvdata->irq_dgpu_presence, pdev);
  2163. +}
  2164. +
  2165. +static void shps_sgcp_notify(acpi_handle device, u32 value, void *context) {
  2166. + struct platform_device *pdev = context;
  2167. + switch (value) {
  2168. + case ACPI_SGCP_NOTIFY_POWER_ON:
  2169. + shps_dgpu_powered_on(pdev);
  2170. + }
  2171. +}
  2172. +
  2173. +static int shps_start_sgcp_notification(struct platform_device *pdev, acpi_handle *sgpc_handle) {
  2174. + acpi_handle handle;
  2175. + int status;
  2176. +
  2177. + status = acpi_get_handle(NULL, "\\_SB.SGPC", &handle);
  2178. + if (status) {
  2179. + dev_err(&pdev->dev, "error in get_handle %d\n", status);
  2180. + return status;
  2181. + }
  2182. +
  2183. + status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify, pdev);
  2184. + if (status) {
  2185. + dev_err(&pdev->dev, "error in install notify %d\n", status);
  2186. + *sgpc_handle = NULL;
  2187. + return status;
  2188. + }
  2189. +
  2190. + *sgpc_handle = handle;
  2191. + return 0;
  2192. +}
  2193. +
  2194. +static void shps_remove_sgcp_notification(struct platform_device *pdev) {
  2195. + int status;
  2196. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2197. +
  2198. + if (drvdata->sgpc_handle) {
  2199. + status = acpi_remove_notify_handler(drvdata->sgpc_handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify);
  2200. + if (status) {
  2201. + dev_err(&pdev->dev, "failed to remove notify handler: %d\n", status);
  2202. + }
  2203. + }
  2204. +}
  2205. +
  2206. +static struct shps_hardware_traits shps_detect_hardware_traits(struct platform_device *pdev) {
  2207. + const struct shps_hardware_probe *p;
  2208. +
  2209. + for (p = shps_hardware_probe_match; p->hardware_id; ++p) {
  2210. + if (acpi_dev_present(p->hardware_id, NULL, -1)) {
  2211. + break;
  2212. + }
  2213. + }
  2214. +
  2215. + dev_info(&pdev->dev,
  2216. + "shps_detect_hardware_traits found device %s, generation %d\n",
  2217. + p->hardware_id ? p->hardware_id : "SAN (default)",
  2218. + p->generation);
  2219. +
  2220. + return *p->hardware_traits;
  2221. +}
  2222. +
  2223. +static int shps_probe(struct platform_device *pdev)
  2224. +{
  2225. + struct acpi_device *shps_dev = ACPI_COMPANION(&pdev->dev);
  2226. + struct shps_driver_data *drvdata;
  2227. + struct ssam_controller *ctrl;
  2228. + struct device_link *link;
  2229. + int power, status;
  2230. + struct shps_hardware_traits detected_traits;
  2231. +
  2232. + if (gpiod_count(&pdev->dev, NULL) < 0) {
  2233. + dev_err(&pdev->dev, "gpiod_count returned < 0\n");
  2234. + return -ENODEV;
  2235. + }
  2236. +
  2237. + // link to SSH
  2238. + status = ssam_client_bind(&pdev->dev, &ctrl);
  2239. + if (status) {
  2240. + return status == -ENXIO ? -EPROBE_DEFER : status;
  2241. + }
  2242. +
  2243. + // detect what kind of hardware we're running
  2244. + detected_traits = shps_detect_hardware_traits(pdev);
  2245. +
  2246. + if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
  2247. + // link to SAN
  2248. + status = surface_sam_san_consumer_register(&pdev->dev, 0);
  2249. + if (status) {
  2250. + dev_err(&pdev->dev, "failed to register with san consumer: %d\n", status);
  2251. + return status == -ENXIO ? -EPROBE_DEFER : status;
  2252. + }
  2253. + }
  2254. +
  2255. + status = acpi_dev_add_driver_gpios(shps_dev, shps_acpi_gpios);
  2256. + if (status) {
  2257. + dev_err(&pdev->dev, "failed to add gpios: %d\n", status);
  2258. + return status;
  2259. + }
  2260. +
  2261. + drvdata = kzalloc(sizeof(struct shps_driver_data), GFP_KERNEL);
  2262. + if (!drvdata) {
  2263. + status = -ENOMEM;
  2264. + goto err_drvdata;
  2265. + }
  2266. + mutex_init(&drvdata->lock);
  2267. + platform_set_drvdata(pdev, drvdata);
  2268. +
  2269. + drvdata->ctrl = ctrl;
  2270. + drvdata->hardware_traits = detected_traits;
  2271. +
  2272. + drvdata->dgpu_root_port = shps_dgpu_dsm_get_pci_dev(pdev);
  2273. + if (IS_ERR(drvdata->dgpu_root_port)) {
  2274. + status = PTR_ERR(drvdata->dgpu_root_port);
  2275. + dev_err(&pdev->dev, "failed to get pci dev: %d\n", status);
  2276. + goto err_rp_lookup;
  2277. + }
  2278. +
  2279. + status = shps_gpios_setup(pdev);
  2280. + if (status) {
  2281. + dev_err(&pdev->dev, "unable to set up gpios, %d\n", status);
  2282. + goto err_gpio;
  2283. + }
  2284. +
  2285. + status = shps_gpios_setup_irq(pdev);
  2286. + if (status) {
  2287. + dev_err(&pdev->dev, "unable to set up irqs %d\n", status);
  2288. + goto err_gpio_irqs;
  2289. + }
  2290. +
  2291. + status = device_add_groups(&pdev->dev, shps_power_groups);
  2292. + if (status)
  2293. + goto err_devattr;
  2294. +
  2295. + link = device_link_add(&pdev->dev, &drvdata->dgpu_root_port->dev,
  2296. + DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
  2297. + if (!link)
  2298. + goto err_devlink;
  2299. +
  2300. + if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
  2301. + status = surface_sam_san_set_rqsg_handler(shps_dgpu_handle_rqsg, pdev);
  2302. + if (status) {
  2303. + dev_err(&pdev->dev, "unable to set SAN notification handler (%d)\n", status);
  2304. + goto err_devlink;
  2305. + }
  2306. + } else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
  2307. + status = shps_start_sgcp_notification(pdev, &drvdata->sgpc_handle);
  2308. + if (status) {
  2309. + dev_err(&pdev->dev, "unable to install SGCP notification handler (%d)\n", status);
  2310. + goto err_devlink;
  2311. + }
  2312. + }
  2313. +
  2314. + // if dGPU is not present turn-off root-port, else obey module param
  2315. + status = shps_dgpu_is_present(pdev);
  2316. + if (status < 0)
  2317. + goto err_post_notification;
  2318. +
  2319. + power = status == 0 ? SHPS_DGPU_POWER_OFF : param_dgpu_power_init;
  2320. + if (power != SHPS_DGPU_MP_POWER_ASIS) {
  2321. + status = shps_dgpu_set_power(pdev, power);
  2322. + if (status)
  2323. + goto err_post_notification;
  2324. + }
  2325. +
  2326. + // initialize power target
  2327. + status = shps_dgpu_rp_get_power(pdev);
  2328. + if (status < 0)
  2329. + goto err_pwrtgt;
  2330. +
  2331. + if (status)
  2332. + set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  2333. + else
  2334. + clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
  2335. +
  2336. + device_init_wakeup(&pdev->dev, true);
  2337. + return 0;
  2338. +
  2339. +err_pwrtgt:
  2340. + if (param_dgpu_power_exit != SHPS_DGPU_MP_POWER_ASIS) {
  2341. + status = shps_dgpu_set_power(pdev, param_dgpu_power_exit);
  2342. + if (status)
  2343. + dev_err(&pdev->dev, "failed to set dGPU power state: %d\n", status);
  2344. + }
  2345. +err_post_notification:
  2346. + if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
  2347. + shps_remove_sgcp_notification(pdev);
  2348. + } else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
  2349. + surface_sam_san_set_rqsg_handler(NULL, NULL);
  2350. + }
  2351. +err_devlink:
  2352. + device_remove_groups(&pdev->dev, shps_power_groups);
  2353. +err_devattr:
  2354. + shps_gpios_remove_irq(pdev);
  2355. +err_gpio_irqs:
  2356. + shps_gpios_remove(pdev);
  2357. +err_gpio:
  2358. + pci_dev_put(drvdata->dgpu_root_port);
  2359. +err_rp_lookup:
  2360. + platform_set_drvdata(pdev, NULL);
  2361. + kfree(drvdata);
  2362. +err_drvdata:
  2363. + acpi_dev_remove_driver_gpios(shps_dev);
  2364. + return status;
  2365. +}
  2366. +
  2367. +static int shps_remove(struct platform_device *pdev)
  2368. +{
  2369. + struct acpi_device *shps_dev = ACPI_COMPANION(&pdev->dev);
  2370. + struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
  2371. + int status;
  2372. +
  2373. + if (param_dgpu_power_exit != SHPS_DGPU_MP_POWER_ASIS) {
  2374. + status = shps_dgpu_set_power(pdev, param_dgpu_power_exit);
  2375. + if (status)
  2376. + dev_err(&pdev->dev, "failed to set dGPU power state: %d\n", status);
  2377. + }
  2378. +
  2379. + device_set_wakeup_capable(&pdev->dev, false);
  2380. +
  2381. + if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
  2382. + shps_remove_sgcp_notification(pdev);
  2383. + } else if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
  2384. + surface_sam_san_set_rqsg_handler(NULL, NULL);
  2385. + }
  2386. + device_remove_groups(&pdev->dev, shps_power_groups);
  2387. + shps_gpios_remove_irq(pdev);
  2388. + shps_gpios_remove(pdev);
  2389. + pci_dev_put(drvdata->dgpu_root_port);
  2390. + platform_set_drvdata(pdev, NULL);
  2391. + kfree(drvdata);
  2392. +
  2393. + acpi_dev_remove_driver_gpios(shps_dev);
  2394. + return 0;
  2395. +}
  2396. +
  2397. +
  2398. +static const struct dev_pm_ops shps_pm_ops = {
  2399. + .prepare = shps_pm_prepare,
  2400. + .complete = shps_pm_complete,
  2401. + .suspend = shps_pm_suspend,
  2402. + .resume = shps_pm_resume,
  2403. +};
  2404. +
  2405. +static const struct acpi_device_id shps_acpi_match[] = {
  2406. + { "MSHW0153", 0 },
  2407. + { },
  2408. +};
  2409. +MODULE_DEVICE_TABLE(acpi, shps_acpi_match);
  2410. +
  2411. +static struct platform_driver surface_sam_hps = {
  2412. + .probe = shps_probe,
  2413. + .remove = shps_remove,
  2414. + .shutdown = shps_shutdown,
  2415. + .driver = {
  2416. + .name = "surface_dgpu_hps",
  2417. + .acpi_match_table = shps_acpi_match,
  2418. + .pm = &shps_pm_ops,
  2419. + },
  2420. +};
  2421. +
  2422. +module_platform_driver(surface_sam_hps);
  2423. +
  2424. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  2425. +MODULE_DESCRIPTION("Surface Hot-Plug System (HPS) and dGPU power-state Driver for Surface Book 2");
  2426. +MODULE_LICENSE("GPL");
  2427. diff --git a/drivers/platform/x86/surface_sam/surface_sam_san.c b/drivers/platform/x86/surface_sam/surface_sam_san.c
  2428. new file mode 100644
  2429. index 0000000000000..eab4e178a8450
  2430. --- /dev/null
  2431. +++ b/drivers/platform/x86/surface_sam/surface_sam_san.c
  2432. @@ -0,0 +1,930 @@
  2433. +// SPDX-License-Identifier: GPL-2.0-or-later
  2434. +/*
  2435. + * Surface ACPI Notify (SAN) and ACPI integration driver for SAM.
  2436. + * Translates communication from ACPI to SSH and back.
  2437. + */
  2438. +
  2439. +#include <asm/unaligned.h>
  2440. +#include <linux/acpi.h>
  2441. +#include <linux/delay.h>
  2442. +#include <linux/jiffies.h>
  2443. +#include <linux/kernel.h>
  2444. +#include <linux/platform_device.h>
  2445. +
  2446. +#include "surface_sam_ssh.h"
  2447. +#include "surface_sam_san.h"
  2448. +
  2449. +
  2450. +#define SAN_RQST_RETRY 5
  2451. +
  2452. +#define SAN_DSM_REVISION 0
  2453. +#define SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT 0x09
  2454. +
  2455. +static const guid_t SAN_DSM_UUID =
  2456. + GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d,
  2457. + 0x48, 0x7c, 0x91, 0xab, 0x3c);
  2458. +
  2459. +#define SAM_EVENT_DELAY_PWR_ADAPTER msecs_to_jiffies(5000)
  2460. +#define SAM_EVENT_DELAY_PWR_BST msecs_to_jiffies(2500)
  2461. +
  2462. +#define SAM_EVENT_PWR_CID_BIX 0x15
  2463. +#define SAM_EVENT_PWR_CID_BST 0x16
  2464. +#define SAM_EVENT_PWR_CID_ADAPTER 0x17
  2465. +#define SAM_EVENT_PWR_CID_DPTF 0x4f
  2466. +
  2467. +#define SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT 0x0b
  2468. +
  2469. +
  2470. +struct san_acpi_consumer {
  2471. + char *path;
  2472. + bool required;
  2473. + u32 flags;
  2474. +};
  2475. +
  2476. +struct san_handler_data {
  2477. + struct acpi_connection_info info; // must be first
  2478. +};
  2479. +
  2480. +struct san_consumer_link {
  2481. + const struct san_acpi_consumer *properties;
  2482. + struct device_link *link;
  2483. +};
  2484. +
  2485. +struct san_consumers {
  2486. + u32 num;
  2487. + struct san_consumer_link *links;
  2488. +};
  2489. +
  2490. +struct san_data {
  2491. + struct device *dev;
  2492. + struct ssam_controller *ctrl;
  2493. +
  2494. + struct san_handler_data context;
  2495. + struct san_consumers consumers;
  2496. +
  2497. + struct ssam_event_notifier nf_bat;
  2498. + struct ssam_event_notifier nf_tmp;
  2499. +};
  2500. +
  2501. +#define to_san_data(ptr, member) \
  2502. + container_of(ptr, struct san_data, member)
  2503. +
  2504. +struct san_event_work {
  2505. + struct delayed_work work;
  2506. + struct device *dev;
  2507. + struct ssam_event event; // must be last
  2508. +};
  2509. +
  2510. +struct gsb_data_in {
  2511. + u8 cv;
  2512. +} __packed;
  2513. +
  2514. +struct gsb_data_rqsx {
  2515. + u8 cv; // command value (should be 0x01 or 0x03)
  2516. + u8 tc; // target controller
  2517. + u8 tid; // transport channel ID
  2518. + u8 iid; // target sub-controller (e.g. primary vs. secondary battery)
  2519. + u8 snc; // expect-response-flag
  2520. + u8 cid; // command ID
  2521. + u16 cdl; // payload length
  2522. + u8 pld[0]; // payload
  2523. +} __packed;
  2524. +
  2525. +struct gsb_data_etwl {
  2526. + u8 cv; // command value (should be 0x02)
  2527. + u8 etw3; // ?
  2528. + u8 etw4; // ?
  2529. + u8 msg[0]; // error message (ASCIIZ)
  2530. +} __packed;
  2531. +
  2532. +struct gsb_data_out {
  2533. + u8 status; // _SSH communication status
  2534. + u8 len; // _SSH payload length
  2535. + u8 pld[0]; // _SSH payload
  2536. +} __packed;
  2537. +
  2538. +union gsb_buffer_data {
  2539. + struct gsb_data_in in; // common input
  2540. + struct gsb_data_rqsx rqsx; // RQSX input
  2541. + struct gsb_data_etwl etwl; // ETWL input
  2542. + struct gsb_data_out out; // output
  2543. +};
  2544. +
  2545. +struct gsb_buffer {
  2546. + u8 status; // GSB AttribRawProcess status
  2547. + u8 len; // GSB AttribRawProcess length
  2548. + union gsb_buffer_data data;
  2549. +} __packed;
  2550. +
  2551. +#define SAN_GSB_MAX_RQSX_PAYLOAD (U8_MAX - 2 - sizeof(struct gsb_data_rqsx))
  2552. +#define SAN_GSB_MAX_RESPONSE (U8_MAX - 2 - sizeof(struct gsb_data_out))
  2553. +
  2554. +#define san_request_sync_onstack(ctrl, rqst, rsp) \
  2555. + ssam_request_sync_onstack(ctrl, rqst, rsp, SAN_GSB_MAX_RQSX_PAYLOAD)
  2556. +
  2557. +
  2558. +enum san_pwr_event {
  2559. + SAN_PWR_EVENT_BAT1_STAT = 0x03,
  2560. + SAN_PWR_EVENT_BAT1_INFO = 0x04,
  2561. + SAN_PWR_EVENT_ADP1_STAT = 0x05,
  2562. + SAN_PWR_EVENT_ADP1_INFO = 0x06,
  2563. + SAN_PWR_EVENT_BAT2_STAT = 0x07,
  2564. + SAN_PWR_EVENT_BAT2_INFO = 0x08,
  2565. + SAN_PWR_EVENT_DPTF = 0x0A,
  2566. +};
  2567. +
  2568. +
  2569. +static int sam_san_default_rqsg_handler(struct surface_sam_san_rqsg *rqsg, void *data);
  2570. +
  2571. +struct sam_san_rqsg_if {
  2572. + struct mutex lock;
  2573. + struct device *san_dev;
  2574. + surface_sam_san_rqsg_handler_fn handler;
  2575. + void *handler_data;
  2576. +};
  2577. +
  2578. +static struct sam_san_rqsg_if rqsg_if = {
  2579. + .lock = __MUTEX_INITIALIZER(rqsg_if.lock),
  2580. + .san_dev = NULL,
  2581. + .handler = sam_san_default_rqsg_handler,
  2582. + .handler_data = NULL,
  2583. +};
  2584. +
  2585. +int surface_sam_san_consumer_register(struct device *consumer, u32 flags)
  2586. +{
  2587. + const u32 valid = DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE;
  2588. + int status;
  2589. +
  2590. + if ((flags | valid) != valid)
  2591. + return -EINVAL;
  2592. +
  2593. + flags |= DL_FLAG_AUTOREMOVE_CONSUMER;
  2594. +
  2595. + mutex_lock(&rqsg_if.lock);
  2596. + if (rqsg_if.san_dev)
  2597. + status = device_link_add(consumer, rqsg_if.san_dev, flags) ? 0 : -EINVAL;
  2598. + else
  2599. + status = -ENXIO;
  2600. + mutex_unlock(&rqsg_if.lock);
  2601. + return status;
  2602. +}
  2603. +EXPORT_SYMBOL_GPL(surface_sam_san_consumer_register);
  2604. +
  2605. +int surface_sam_san_set_rqsg_handler(surface_sam_san_rqsg_handler_fn fn, void *data)
  2606. +{
  2607. + int status = -EBUSY;
  2608. +
  2609. + mutex_lock(&rqsg_if.lock);
  2610. +
  2611. + if (rqsg_if.handler == sam_san_default_rqsg_handler || !fn) {
  2612. + rqsg_if.handler = fn ? fn : sam_san_default_rqsg_handler;
  2613. + rqsg_if.handler_data = fn ? data : NULL;
  2614. + status = 0;
  2615. + }
  2616. +
  2617. + mutex_unlock(&rqsg_if.lock);
  2618. + return status;
  2619. +}
  2620. +EXPORT_SYMBOL_GPL(surface_sam_san_set_rqsg_handler);
  2621. +
  2622. +int san_call_rqsg_handler(struct surface_sam_san_rqsg *rqsg)
  2623. +{
  2624. + int status;
  2625. +
  2626. + mutex_lock(&rqsg_if.lock);
  2627. + status = rqsg_if.handler(rqsg, rqsg_if.handler_data);
  2628. + mutex_unlock(&rqsg_if.lock);
  2629. +
  2630. + return status;
  2631. +}
  2632. +
  2633. +static int sam_san_default_rqsg_handler(struct surface_sam_san_rqsg *rqsg, void *data)
  2634. +{
  2635. + struct device *dev = rqsg_if.san_dev;
  2636. +
  2637. + dev_warn(dev, "unhandled request: RQSG(0x%02x, 0x%02x, 0x%02x)\n",
  2638. + rqsg->tc, rqsg->cid, rqsg->iid);
  2639. +
  2640. + return 0;
  2641. +}
  2642. +
  2643. +
  2644. +static bool san_acpi_can_notify(struct device *dev, u64 func)
  2645. +{
  2646. + acpi_handle san = ACPI_HANDLE(dev);
  2647. + return acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, 1 << func);
  2648. +}
  2649. +
  2650. +static int san_acpi_notify_power_event(struct device *dev, enum san_pwr_event event)
  2651. +{
  2652. + acpi_handle san = ACPI_HANDLE(dev);
  2653. + union acpi_object *obj;
  2654. +
  2655. + if (!san_acpi_can_notify(dev, event))
  2656. + return 0;
  2657. +
  2658. + dev_dbg(dev, "notify power event 0x%02x\n", event);
  2659. + obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
  2660. + event, NULL, ACPI_TYPE_BUFFER);
  2661. +
  2662. + if (IS_ERR_OR_NULL(obj))
  2663. + return obj ? PTR_ERR(obj) : -ENXIO;
  2664. +
  2665. + if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
  2666. + dev_err(dev, "got unexpected result from _DSM\n");
  2667. + return -EFAULT;
  2668. + }
  2669. +
  2670. + ACPI_FREE(obj);
  2671. + return 0;
  2672. +}
  2673. +
  2674. +static int san_acpi_notify_sensor_trip_point(struct device *dev, u8 iid)
  2675. +{
  2676. + acpi_handle san = ACPI_HANDLE(dev);
  2677. + union acpi_object *obj;
  2678. + union acpi_object param;
  2679. +
  2680. + if (!san_acpi_can_notify(dev, SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT))
  2681. + return 0;
  2682. +
  2683. + param.type = ACPI_TYPE_INTEGER;
  2684. + param.integer.value = iid;
  2685. +
  2686. + obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
  2687. + SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT,
  2688. + &param, ACPI_TYPE_BUFFER);
  2689. +
  2690. + if (IS_ERR_OR_NULL(obj))
  2691. + return obj ? PTR_ERR(obj) : -ENXIO;
  2692. +
  2693. + if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
  2694. + dev_err(dev, "got unexpected result from _DSM\n");
  2695. + return -EFAULT;
  2696. + }
  2697. +
  2698. + ACPI_FREE(obj);
  2699. + return 0;
  2700. +}
  2701. +
  2702. +
  2703. +static inline int san_evt_power_adapter(struct device *dev, const struct ssam_event *event)
  2704. +{
  2705. + int status;
  2706. +
  2707. + status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_ADP1_STAT);
  2708. + if (status)
  2709. + return status;
  2710. +
  2711. + /*
  2712. + * Enusre that the battery states get updated correctly.
  2713. + * When the battery is fully charged and an adapter is plugged in, it
  2714. + * sometimes is not updated correctly, instead showing it as charging.
  2715. + * Explicitly trigger battery updates to fix this.
  2716. + */
  2717. +
  2718. + status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT1_STAT);
  2719. + if (status)
  2720. + return status;
  2721. +
  2722. + return san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT2_STAT);
  2723. +}
  2724. +
  2725. +static inline int san_evt_power_bix(struct device *dev, const struct ssam_event *event)
  2726. +{
  2727. + enum san_pwr_event evcode;
  2728. +
  2729. + if (event->instance_id == 0x02)
  2730. + evcode = SAN_PWR_EVENT_BAT2_INFO;
  2731. + else
  2732. + evcode = SAN_PWR_EVENT_BAT1_INFO;
  2733. +
  2734. + return san_acpi_notify_power_event(dev, evcode);
  2735. +}
  2736. +
  2737. +static inline int san_evt_power_bst(struct device *dev, const struct ssam_event *event)
  2738. +{
  2739. + enum san_pwr_event evcode;
  2740. +
  2741. + if (event->instance_id == 0x02)
  2742. + evcode = SAN_PWR_EVENT_BAT2_STAT;
  2743. + else
  2744. + evcode = SAN_PWR_EVENT_BAT1_STAT;
  2745. +
  2746. + return san_acpi_notify_power_event(dev, evcode);
  2747. +}
  2748. +
  2749. +static inline int san_evt_power_dptf(struct device *dev, const struct ssam_event *event)
  2750. +{
  2751. + union acpi_object payload;
  2752. + acpi_handle san = ACPI_HANDLE(dev);
  2753. + union acpi_object *obj;
  2754. +
  2755. + if (!san_acpi_can_notify(dev, SAN_PWR_EVENT_DPTF))
  2756. + return 0;
  2757. +
  2758. + /*
  2759. + * The Surface ACPI expects a buffer and not a package. It specifically
  2760. + * checks for ObjectType (Arg3) == 0x03. This will cause a warning in
  2761. + * acpica/nsarguments.c, but this can safely be ignored.
  2762. + */
  2763. + payload.type = ACPI_TYPE_BUFFER;
  2764. + payload.buffer.length = event->length;
  2765. + payload.buffer.pointer = (u8 *)&event->data[0];
  2766. +
  2767. + dev_dbg(dev, "notify power event 0x%02x\n", event->command_id);
  2768. + obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
  2769. + SAN_PWR_EVENT_DPTF, &payload,
  2770. + ACPI_TYPE_BUFFER);
  2771. +
  2772. + if (IS_ERR_OR_NULL(obj))
  2773. + return obj ? PTR_ERR(obj) : -ENXIO;
  2774. +
  2775. + if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
  2776. + dev_err(dev, "got unexpected result from _DSM\n");
  2777. + return -EFAULT;
  2778. + }
  2779. +
  2780. + ACPI_FREE(obj);
  2781. + return 0;
  2782. +}
  2783. +
  2784. +static unsigned long san_evt_power_delay(u8 cid)
  2785. +{
  2786. + switch (cid) {
  2787. + case SAM_EVENT_PWR_CID_ADAPTER:
  2788. + /*
  2789. + * Wait for battery state to update before signalling adapter change.
  2790. + */
  2791. + return SAM_EVENT_DELAY_PWR_ADAPTER;
  2792. +
  2793. + case SAM_EVENT_PWR_CID_BST:
  2794. + /*
  2795. + * Ensure we do not miss anything important due to caching.
  2796. + */
  2797. + return SAM_EVENT_DELAY_PWR_BST;
  2798. +
  2799. + case SAM_EVENT_PWR_CID_BIX:
  2800. + case SAM_EVENT_PWR_CID_DPTF:
  2801. + default:
  2802. + return 0;
  2803. + }
  2804. +}
  2805. +
  2806. +static bool san_evt_power(const struct ssam_event *event, struct device *dev)
  2807. +{
  2808. + int status;
  2809. +
  2810. + switch (event->command_id) {
  2811. + case SAM_EVENT_PWR_CID_BIX:
  2812. + status = san_evt_power_bix(dev, event);
  2813. + break;
  2814. +
  2815. + case SAM_EVENT_PWR_CID_BST:
  2816. + status = san_evt_power_bst(dev, event);
  2817. + break;
  2818. +
  2819. + case SAM_EVENT_PWR_CID_ADAPTER:
  2820. + status = san_evt_power_adapter(dev, event);
  2821. + break;
  2822. +
  2823. + case SAM_EVENT_PWR_CID_DPTF:
  2824. + status = san_evt_power_dptf(dev, event);
  2825. + break;
  2826. +
  2827. + default:
  2828. + return false;
  2829. + }
  2830. +
  2831. + if (status)
  2832. + dev_err(dev, "error handling power event (cid = %x)\n",
  2833. + event->command_id);
  2834. +
  2835. + return true;
  2836. +}
  2837. +
  2838. +static void san_evt_power_workfn(struct work_struct *work)
  2839. +{
  2840. + struct san_event_work *ev = container_of(work, struct san_event_work, work.work);
  2841. +
  2842. + san_evt_power(&ev->event, ev->dev);
  2843. + kfree(ev);
  2844. +}
  2845. +
  2846. +
  2847. +static u32 san_evt_power_nb(struct ssam_notifier_block *nb, const struct ssam_event *event)
  2848. +{
  2849. + struct san_data *d = to_san_data(nb, nf_bat.base);
  2850. + struct san_event_work *work;
  2851. + unsigned long delay = san_evt_power_delay(event->command_id);
  2852. +
  2853. + if (delay == 0) {
  2854. + if (san_evt_power(event, d->dev))
  2855. + return SSAM_NOTIF_HANDLED;
  2856. + else
  2857. + return 0;
  2858. + }
  2859. +
  2860. + work = kzalloc(sizeof(struct san_event_work) + event->length, GFP_KERNEL);
  2861. + if (!work)
  2862. + return ssam_notifier_from_errno(-ENOMEM);
  2863. +
  2864. + INIT_DELAYED_WORK(&work->work, san_evt_power_workfn);
  2865. + work->dev = d->dev;
  2866. +
  2867. + memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
  2868. +
  2869. + schedule_delayed_work(&work->work, delay);
  2870. + return SSAM_NOTIF_HANDLED;
  2871. +}
  2872. +
  2873. +
  2874. +static inline int san_evt_thermal_notify(struct device *dev, const struct ssam_event *event)
  2875. +{
  2876. + return san_acpi_notify_sensor_trip_point(dev, event->instance_id);
  2877. +}
  2878. +
  2879. +static bool san_evt_thermal(const struct ssam_event *event, struct device *dev)
  2880. +{
  2881. + int status;
  2882. +
  2883. + switch (event->command_id) {
  2884. + case SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT:
  2885. + status = san_evt_thermal_notify(dev, event);
  2886. + break;
  2887. +
  2888. + default:
  2889. + return false;
  2890. + }
  2891. +
  2892. + if (status) {
  2893. + dev_err(dev, "error handling thermal event (cid = %x)\n",
  2894. + event->command_id);
  2895. + }
  2896. +
  2897. + return true;
  2898. +}
  2899. +
  2900. +static u32 san_evt_thermal_nb(struct ssam_notifier_block *nb, const struct ssam_event *event)
  2901. +{
  2902. + if (san_evt_thermal(event, to_san_data(nb, nf_tmp.base)->dev))
  2903. + return SSAM_NOTIF_HANDLED;
  2904. + else
  2905. + return 0;
  2906. +}
  2907. +
  2908. +
  2909. +static struct gsb_data_rqsx
  2910. +*san_validate_rqsx(struct device *dev, const char *type, struct gsb_buffer *buffer)
  2911. +{
  2912. + struct gsb_data_rqsx *rqsx = &buffer->data.rqsx;
  2913. +
  2914. + if (buffer->len < 8) {
  2915. + dev_err(dev, "invalid %s package (len = %d)\n",
  2916. + type, buffer->len);
  2917. + return NULL;
  2918. + }
  2919. +
  2920. + if (get_unaligned(&rqsx->cdl) != buffer->len - sizeof(struct gsb_data_rqsx)) {
  2921. + dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n",
  2922. + type, buffer->len, get_unaligned(&rqsx->cdl));
  2923. + return NULL;
  2924. + }
  2925. +
  2926. + if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) {
  2927. + dev_err(dev, "payload for %s package too large (cdl = %d)\n",
  2928. + type, get_unaligned(&rqsx->cdl));
  2929. + return NULL;
  2930. + }
  2931. +
  2932. + if (rqsx->tid != 0x01) {
  2933. + dev_warn(dev, "unsupported %s package (tid = 0x%02x)\n",
  2934. + type, rqsx->tid);
  2935. + return NULL;
  2936. + }
  2937. +
  2938. + return rqsx;
  2939. +}
  2940. +
  2941. +static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *buffer)
  2942. +{
  2943. + struct gsb_data_etwl *etwl = &buffer->data.etwl;
  2944. +
  2945. + if (buffer->len < 3) {
  2946. + dev_err(d->dev, "invalid ETWL package (len = %d)\n", buffer->len);
  2947. + return AE_OK;
  2948. + }
  2949. +
  2950. + dev_err(d->dev, "ETWL(0x%02x, 0x%02x): %.*s\n",
  2951. + etwl->etw3, etwl->etw4,
  2952. + buffer->len - 3, (char *)etwl->msg);
  2953. +
  2954. + // indicate success
  2955. + buffer->status = 0x00;
  2956. + buffer->len = 0x00;
  2957. +
  2958. + return AE_OK;
  2959. +}
  2960. +
  2961. +static void gsb_response_error(struct gsb_buffer *gsb, int status)
  2962. +{
  2963. + gsb->status = 0x00;
  2964. + gsb->len = 0x02;
  2965. + gsb->data.out.status = (u8)(-status);
  2966. + gsb->data.out.len = 0x00;
  2967. +}
  2968. +
  2969. +static void gsb_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len)
  2970. +{
  2971. + gsb->status = 0x00;
  2972. + gsb->len = len + 2;
  2973. + gsb->data.out.status = 0x00;
  2974. + gsb->data.out.len = len;
  2975. +
  2976. + if (len)
  2977. + memcpy(&gsb->data.out.pld[0], ptr, len);
  2978. +}
  2979. +
  2980. +static acpi_status san_rqst_fixup_suspended(struct ssam_request *rqst,
  2981. + struct gsb_buffer *gsb)
  2982. +{
  2983. + if (rqst->target_category == 0x11 && rqst->command_id == 0x0D) {
  2984. + /* Base state quirk:
  2985. + * The base state may be queried from ACPI when the EC is still
  2986. + * suspended. In this case it will return '-EPERM'. This query
  2987. + * will only be triggered from the ACPI lid GPE interrupt, thus
  2988. + * we are either in laptop or studio mode (base status 0x01 or
  2989. + * 0x02). Furthermore, we will only get here if the device (and
  2990. + * EC) have been suspended.
  2991. + *
  2992. + * We now assume that the device is in laptop mode (0x01). This
  2993. + * has the drawback that it will wake the device when unfolding
  2994. + * it in studio mode, but it also allows us to avoid actively
  2995. + * waiting for the EC to wake up, which may incur a notable
  2996. + * delay.
  2997. + */
  2998. +
  2999. + u8 base_state = 1;
  3000. + gsb_response_success(gsb, &base_state, 1);
  3001. + return AE_OK;
  3002. + }
  3003. +
  3004. + gsb_response_error(gsb, -ENXIO);
  3005. + return AE_OK;
  3006. +}
  3007. +
  3008. +static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer)
  3009. +{
  3010. + u8 rspbuf[SAN_GSB_MAX_RESPONSE];
  3011. + struct gsb_data_rqsx *gsb_rqst;
  3012. + struct ssam_request rqst;
  3013. + struct ssam_response rsp;
  3014. + int status = 0;
  3015. + int try;
  3016. +
  3017. + gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer);
  3018. + if (!gsb_rqst)
  3019. + return AE_OK;
  3020. +
  3021. + rqst.target_category = gsb_rqst->tc;
  3022. + rqst.command_id = gsb_rqst->cid;
  3023. + rqst.instance_id = gsb_rqst->iid;
  3024. + rqst.channel = gsb_rqst->tid;
  3025. + rqst.flags = gsb_rqst->snc ? SSAM_REQUEST_HAS_RESPONSE : 0;
  3026. + rqst.length = get_unaligned(&gsb_rqst->cdl);
  3027. + rqst.payload = &gsb_rqst->pld[0];
  3028. +
  3029. + rsp.capacity = ARRAY_SIZE(rspbuf);
  3030. + rsp.length = 0;
  3031. + rsp.pointer = &rspbuf[0];
  3032. +
  3033. + // handle suspended device
  3034. + if (d->dev->power.is_suspended) {
  3035. + dev_warn(d->dev, "rqst: device is suspended, not executing\n");
  3036. + return san_rqst_fixup_suspended(&rqst, buffer);
  3037. + }
  3038. +
  3039. + for (try = 0; try < SAN_RQST_RETRY; try++) {
  3040. + if (try)
  3041. + dev_warn(d->dev, "rqst: IO error, trying again\n");
  3042. +
  3043. + status = san_request_sync_onstack(d->ctrl, &rqst, &rsp);
  3044. + if (status != -ETIMEDOUT && status != -EREMOTEIO)
  3045. + break;
  3046. + }
  3047. +
  3048. + if (!status) {
  3049. + gsb_response_success(buffer, rsp.pointer, rsp.length);
  3050. + } else {
  3051. + dev_err(d->dev, "rqst: failed with error %d\n", status);
  3052. + gsb_response_error(buffer, status);
  3053. + }
  3054. +
  3055. + return AE_OK;
  3056. +}
  3057. +
  3058. +static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer)
  3059. +{
  3060. + struct gsb_data_rqsx *gsb_rqsg;
  3061. + struct surface_sam_san_rqsg rqsg;
  3062. + int status;
  3063. +
  3064. + gsb_rqsg = san_validate_rqsx(d->dev, "RQSG", buffer);
  3065. + if (!gsb_rqsg)
  3066. + return AE_OK;
  3067. +
  3068. + rqsg.tc = gsb_rqsg->tc;
  3069. + rqsg.cid = gsb_rqsg->cid;
  3070. + rqsg.iid = gsb_rqsg->iid;
  3071. + rqsg.cdl = get_unaligned(&gsb_rqsg->cdl);
  3072. + rqsg.pld = &gsb_rqsg->pld[0];
  3073. +
  3074. + status = san_call_rqsg_handler(&rqsg);
  3075. + if (!status) {
  3076. + gsb_response_success(buffer, NULL, 0);
  3077. + } else {
  3078. + dev_err(d->dev, "rqsg: failed with error %d\n", status);
  3079. + gsb_response_error(buffer, status);
  3080. + }
  3081. +
  3082. + return AE_OK;
  3083. +}
  3084. +
  3085. +
  3086. +static acpi_status
  3087. +san_opreg_handler(u32 function, acpi_physical_address command,
  3088. + u32 bits, u64 *value64,
  3089. + void *opreg_context, void *region_context)
  3090. +{
  3091. + struct san_data *d = to_san_data(opreg_context, context);
  3092. + struct gsb_buffer *buffer = (struct gsb_buffer *)value64;
  3093. + int accessor_type = (0xFFFF0000 & function) >> 16;
  3094. +
  3095. + if (command != 0) {
  3096. + dev_warn(d->dev, "unsupported command: 0x%02llx\n", command);
  3097. + return AE_OK;
  3098. + }
  3099. +
  3100. + if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
  3101. + dev_err(d->dev, "invalid access type: 0x%02x\n", accessor_type);
  3102. + return AE_OK;
  3103. + }
  3104. +
  3105. + // buffer must at least contain the command-value
  3106. + if (buffer->len == 0) {
  3107. + dev_err(d->dev, "request-package too small\n");
  3108. + return AE_OK;
  3109. + }
  3110. +
  3111. + switch (buffer->data.in.cv) {
  3112. + case 0x01: return san_rqst(d, buffer);
  3113. + case 0x02: return san_etwl(d, buffer);
  3114. + case 0x03: return san_rqsg(d, buffer);
  3115. + }
  3116. +
  3117. + dev_warn(d->dev, "unsupported SAN0 request (cv: 0x%02x)\n", buffer->data.in.cv);
  3118. + return AE_OK;
  3119. +}
  3120. +
  3121. +static int san_events_register(struct platform_device *pdev)
  3122. +{
  3123. + struct san_data *d = platform_get_drvdata(pdev);
  3124. + int status;
  3125. +
  3126. + d->nf_bat.base.priority = 1;
  3127. + d->nf_bat.base.fn = san_evt_power_nb;
  3128. + d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;
  3129. + d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;
  3130. + d->nf_bat.event.id.instance = 0;
  3131. + d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;
  3132. +
  3133. + d->nf_tmp.base.priority = 1;
  3134. + d->nf_tmp.base.fn = san_evt_thermal_nb;
  3135. + d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;
  3136. + d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;
  3137. + d->nf_tmp.event.id.instance = 0;
  3138. + d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;
  3139. +
  3140. + status = ssam_notifier_register(d->ctrl, &d->nf_bat);
  3141. + if (status)
  3142. + return status;
  3143. +
  3144. + status = ssam_notifier_register(d->ctrl, &d->nf_tmp);
  3145. + if (status)
  3146. + ssam_notifier_unregister(d->ctrl, &d->nf_bat);
  3147. +
  3148. + return status;
  3149. +}
  3150. +
  3151. +static void san_events_unregister(struct platform_device *pdev)
  3152. +{
  3153. + struct san_data *d = platform_get_drvdata(pdev);
  3154. +
  3155. + ssam_notifier_unregister(d->ctrl, &d->nf_bat);
  3156. + ssam_notifier_unregister(d->ctrl, &d->nf_tmp);
  3157. +}
  3158. +
  3159. +
  3160. +static int san_consumers_link(struct platform_device *pdev,
  3161. + const struct san_acpi_consumer *cons,
  3162. + struct san_consumers *out)
  3163. +{
  3164. + const struct san_acpi_consumer *con;
  3165. + struct san_consumer_link *links, *link;
  3166. + struct acpi_device *adev;
  3167. + acpi_handle handle;
  3168. + u32 max_links = 0;
  3169. + int status;
  3170. +
  3171. + if (!cons)
  3172. + return 0;
  3173. +
  3174. + // count links
  3175. + for (con = cons; con->path; ++con)
  3176. + max_links += 1;
  3177. +
  3178. + // allocate
  3179. + links = kcalloc(max_links, sizeof(struct san_consumer_link), GFP_KERNEL);
  3180. + if (!links)
  3181. + return -ENOMEM;
  3182. +
  3183. + link = &links[0];
  3184. +
  3185. + // create links
  3186. + for (con = cons; con->path; ++con) {
  3187. + status = acpi_get_handle(NULL, con->path, &handle);
  3188. + if (status) {
  3189. + if (con->required || status != AE_NOT_FOUND) {
  3190. + status = -ENXIO;
  3191. + goto cleanup;
  3192. + } else {
  3193. + continue;
  3194. + }
  3195. + }
  3196. +
  3197. + status = acpi_bus_get_device(handle, &adev);
  3198. + if (status)
  3199. + goto cleanup;
  3200. +
  3201. + link->link = device_link_add(&adev->dev, &pdev->dev, con->flags);
  3202. + if (!(link->link)) {
  3203. + status = -EFAULT;
  3204. + goto cleanup;
  3205. + }
  3206. + link->properties = con;
  3207. +
  3208. + link += 1;
  3209. + }
  3210. +
  3211. + out->num = link - links;
  3212. + out->links = links;
  3213. +
  3214. + return 0;
  3215. +
  3216. +cleanup:
  3217. + for (link = link - 1; link >= links; --link) {
  3218. + if (link->properties->flags & DL_FLAG_STATELESS)
  3219. + device_link_del(link->link);
  3220. + }
  3221. +
  3222. + return status;
  3223. +}
  3224. +
  3225. +static void san_consumers_unlink(struct san_consumers *consumers)
  3226. +{
  3227. + u32 i;
  3228. +
  3229. + if (!consumers)
  3230. + return;
  3231. +
  3232. + for (i = 0; i < consumers->num; ++i) {
  3233. + if (consumers->links[i].properties->flags & DL_FLAG_STATELESS)
  3234. + device_link_del(consumers->links[i].link);
  3235. + }
  3236. +
  3237. + kfree(consumers->links);
  3238. +
  3239. + consumers->num = 0;
  3240. + consumers->links = NULL;
  3241. +}
  3242. +
  3243. +static int surface_sam_san_probe(struct platform_device *pdev)
  3244. +{
  3245. + const struct san_acpi_consumer *cons;
  3246. + acpi_handle san = ACPI_HANDLE(&pdev->dev); // _SAN device node
  3247. + struct ssam_controller *ctrl;
  3248. + struct san_data *data;
  3249. + int status;
  3250. +
  3251. + status = ssam_client_bind(&pdev->dev, &ctrl);
  3252. + if (status)
  3253. + return status == -ENXIO ? -EPROBE_DEFER : status;
  3254. +
  3255. + data = kzalloc(sizeof(struct san_data), GFP_KERNEL);
  3256. + if (!data)
  3257. + return -ENOMEM;
  3258. +
  3259. + data->dev = &pdev->dev;
  3260. + data->ctrl = ctrl;
  3261. +
  3262. + cons = acpi_device_get_match_data(&pdev->dev);
  3263. + status = san_consumers_link(pdev, cons, &data->consumers);
  3264. + if (status)
  3265. + goto err_consumers;
  3266. +
  3267. + platform_set_drvdata(pdev, data);
  3268. +
  3269. + status = acpi_install_address_space_handler(san,
  3270. + ACPI_ADR_SPACE_GSBUS,
  3271. + &san_opreg_handler,
  3272. + NULL, &data->context);
  3273. +
  3274. + if (ACPI_FAILURE(status)) {
  3275. + status = -ENODEV;
  3276. + goto err_install_handler;
  3277. + }
  3278. +
  3279. + status = san_events_register(pdev);
  3280. + if (status)
  3281. + goto err_enable_events;
  3282. +
  3283. + mutex_lock(&rqsg_if.lock);
  3284. + if (!rqsg_if.san_dev)
  3285. + rqsg_if.san_dev = &pdev->dev;
  3286. + else
  3287. + status = -EBUSY;
  3288. + mutex_unlock(&rqsg_if.lock);
  3289. +
  3290. + if (status)
  3291. + goto err_install_dev;
  3292. +
  3293. + acpi_walk_dep_device_list(san);
  3294. + return 0;
  3295. +
  3296. +err_install_dev:
  3297. + san_events_unregister(pdev);
  3298. +err_enable_events:
  3299. + acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS, &san_opreg_handler);
  3300. +err_install_handler:
  3301. + platform_set_drvdata(pdev, NULL);
  3302. + san_consumers_unlink(&data->consumers);
  3303. +err_consumers:
  3304. + kfree(data);
  3305. + return status;
  3306. +}
  3307. +
  3308. +static int surface_sam_san_remove(struct platform_device *pdev)
  3309. +{
  3310. + struct san_data *data = platform_get_drvdata(pdev);
  3311. + acpi_handle san = ACPI_HANDLE(&pdev->dev); // _SAN device node
  3312. + acpi_status status = AE_OK;
  3313. +
  3314. + mutex_lock(&rqsg_if.lock);
  3315. + rqsg_if.san_dev = NULL;
  3316. + mutex_unlock(&rqsg_if.lock);
  3317. +
  3318. + acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS, &san_opreg_handler);
  3319. + san_events_unregister(pdev);
  3320. +
  3321. + /*
  3322. + * We have unregistered our event sources. Now we need to ensure that
  3323. + * all delayed works they may have spawned are run to completion.
  3324. + */
  3325. + flush_scheduled_work();
  3326. +
  3327. + san_consumers_unlink(&data->consumers);
  3328. + kfree(data);
  3329. +
  3330. + platform_set_drvdata(pdev, NULL);
  3331. + return status;
  3332. +}
  3333. +
  3334. +
  3335. +static const struct san_acpi_consumer san_mshw0091_consumers[] = {
  3336. + { "\\_SB.SRTC", true, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
  3337. + { "\\ADP1", true, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
  3338. + { "\\_SB.BAT1", true, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
  3339. + { "\\_SB.BAT2", false, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
  3340. + { },
  3341. +};
  3342. +
  3343. +static const struct acpi_device_id surface_sam_san_match[] = {
  3344. + { "MSHW0091", (unsigned long) san_mshw0091_consumers },
  3345. + { },
  3346. +};
  3347. +MODULE_DEVICE_TABLE(acpi, surface_sam_san_match);
  3348. +
  3349. +static struct platform_driver surface_sam_san = {
  3350. + .probe = surface_sam_san_probe,
  3351. + .remove = surface_sam_san_remove,
  3352. + .driver = {
  3353. + .name = "surface_sam_san",
  3354. + .acpi_match_table = surface_sam_san_match,
  3355. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  3356. + },
  3357. +};
  3358. +module_platform_driver(surface_sam_san);
  3359. +
  3360. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  3361. +MODULE_DESCRIPTION("Surface ACPI Notify Driver for 5th Generation Surface Devices");
  3362. +MODULE_LICENSE("GPL");
  3363. diff --git a/drivers/platform/x86/surface_sam/surface_sam_san.h b/drivers/platform/x86/surface_sam/surface_sam_san.h
  3364. new file mode 100644
  3365. index 0000000000000..3408dde964b3c
  3366. --- /dev/null
  3367. +++ b/drivers/platform/x86/surface_sam/surface_sam_san.h
  3368. @@ -0,0 +1,30 @@
  3369. +/* SPDX-License-Identifier: GPL-2.0-or-later */
  3370. +/*
  3371. + * Interface for Surface ACPI/Notify (SAN).
  3372. + *
  3373. + * The SAN is the main interface between the Surface Serial Hub (SSH) and the
  3374. + * Surface/System Aggregator Module (SAM). It allows requests to be translated
  3375. + * from ACPI to SSH/SAM. It also interfaces with the discrete GPU hot-plug
  3376. + * driver.
  3377. + */
  3378. +
  3379. +#ifndef _SURFACE_SAM_SAN_H
  3380. +#define _SURFACE_SAM_SAN_H
  3381. +
  3382. +#include <linux/types.h>
  3383. +
  3384. +
  3385. +struct surface_sam_san_rqsg {
  3386. + u8 tc; // target category
  3387. + u8 cid; // command ID
  3388. + u8 iid; // instance ID
  3389. + u16 cdl; // command data length (length of payload)
  3390. + u8 *pld; // pointer to payload of length cdl
  3391. +};
  3392. +
  3393. +typedef int (*surface_sam_san_rqsg_handler_fn)(struct surface_sam_san_rqsg *rqsg, void *data);
  3394. +
  3395. +int surface_sam_san_consumer_register(struct device *consumer, u32 flags);
  3396. +int surface_sam_san_set_rqsg_handler(surface_sam_san_rqsg_handler_fn fn, void *data);
  3397. +
  3398. +#endif /* _SURFACE_SAM_SAN_H */
  3399. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid.c b/drivers/platform/x86/surface_sam/surface_sam_sid.c
  3400. new file mode 100644
  3401. index 0000000000000..bcf9a569ee719
  3402. --- /dev/null
  3403. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid.c
  3404. @@ -0,0 +1,283 @@
  3405. +// SPDX-License-Identifier: GPL-2.0-or-later
  3406. +/*
  3407. + * Surface Integration Driver.
  3408. + * MFD driver to provide device/model dependent functionality.
  3409. + */
  3410. +
  3411. +#include <linux/acpi.h>
  3412. +#include <linux/kernel.h>
  3413. +#include <linux/module.h>
  3414. +#include <linux/platform_device.h>
  3415. +#include <linux/mfd/core.h>
  3416. +
  3417. +#include "surface_sam_sid_power.h"
  3418. +#include "surface_sam_sid_vhf.h"
  3419. +
  3420. +
  3421. +static const struct ssam_battery_properties ssam_battery_props_bat1 = {
  3422. + .registry = SSAM_EVENT_REGISTRY_SAM,
  3423. + .num = 0,
  3424. + .channel = 1,
  3425. + .instance = 1,
  3426. +};
  3427. +
  3428. +static const struct ssam_battery_properties ssam_battery_props_bat2_sb3 = {
  3429. + .registry = SSAM_EVENT_REGISTRY_KIP,
  3430. + .num = 1,
  3431. + .channel = 2,
  3432. + .instance = 1,
  3433. +};
  3434. +
  3435. +
  3436. +static const struct ssam_hid_properties ssam_hid_props_keyboard = {
  3437. + .registry = SSAM_EVENT_REGISTRY_REG,
  3438. + .instance = 1,
  3439. +};
  3440. +
  3441. +static const struct ssam_hid_properties ssam_hid_props_touchpad = {
  3442. + .registry = SSAM_EVENT_REGISTRY_REG,
  3443. + .instance = 3,
  3444. +};
  3445. +
  3446. +static const struct ssam_hid_properties ssam_hid_props_iid5 = {
  3447. + .registry = SSAM_EVENT_REGISTRY_REG,
  3448. + .instance = 5,
  3449. +};
  3450. +
  3451. +static const struct ssam_hid_properties ssam_hid_props_iid6 = {
  3452. + .registry = SSAM_EVENT_REGISTRY_REG,
  3453. + .instance = 6,
  3454. +};
  3455. +
  3456. +
  3457. +static const struct mfd_cell sid_devs_sp4[] = {
  3458. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3459. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3460. + { },
  3461. +};
  3462. +
  3463. +static const struct mfd_cell sid_devs_sp6[] = {
  3464. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3465. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3466. + { },
  3467. +};
  3468. +
  3469. +static const struct mfd_cell sid_devs_sp7[] = {
  3470. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3471. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3472. + { .name = "surface_sam_sid_ac", .id = -1 },
  3473. + {
  3474. + .name = "surface_sam_sid_battery",
  3475. + .id = -1,
  3476. + .platform_data = (void *)&ssam_battery_props_bat1,
  3477. + .pdata_size = sizeof(struct ssam_battery_properties),
  3478. + },
  3479. + { },
  3480. +};
  3481. +
  3482. +static const struct mfd_cell sid_devs_sb1[] = {
  3483. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3484. + { },
  3485. +};
  3486. +
  3487. +static const struct mfd_cell sid_devs_sb2[] = {
  3488. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3489. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3490. + { },
  3491. +};
  3492. +
  3493. +static const struct mfd_cell sid_devs_sb3[] = {
  3494. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3495. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3496. + { .name = "surface_sam_sid_ac", .id = -1 },
  3497. + {
  3498. + .name = "surface_sam_sid_battery",
  3499. + .id = 1,
  3500. + .platform_data = (void *)&ssam_battery_props_bat1,
  3501. + .pdata_size = sizeof(struct ssam_battery_properties),
  3502. + },
  3503. + {
  3504. + .name = "surface_sam_sid_battery",
  3505. + .id = 2,
  3506. + .platform_data = (void *)&ssam_battery_props_bat2_sb3,
  3507. + .pdata_size = sizeof(struct ssam_battery_properties),
  3508. + },
  3509. + {
  3510. + .name = "surface_sam_sid_vhf",
  3511. + .id = 1,
  3512. + .platform_data = (void *)&ssam_hid_props_keyboard,
  3513. + .pdata_size = sizeof(struct ssam_hid_properties),
  3514. + },
  3515. + {
  3516. + .name = "surface_sam_sid_vhf",
  3517. + .id = 3,
  3518. + .platform_data = (void *)&ssam_hid_props_touchpad,
  3519. + .pdata_size = sizeof(struct ssam_hid_properties),
  3520. + },
  3521. + {
  3522. + .name = "surface_sam_sid_vhf",
  3523. + .id = 5,
  3524. + .platform_data = (void *)&ssam_hid_props_iid5,
  3525. + .pdata_size = sizeof(struct ssam_hid_properties),
  3526. + },
  3527. + {
  3528. + .name = "surface_sam_sid_vhf",
  3529. + .id = 6,
  3530. + .platform_data = (void *)&ssam_hid_props_iid6,
  3531. + .pdata_size = sizeof(struct ssam_hid_properties),
  3532. + },
  3533. + { },
  3534. +};
  3535. +
  3536. +static const struct mfd_cell sid_devs_sl1[] = {
  3537. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3538. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3539. + { },
  3540. +};
  3541. +
  3542. +static const struct mfd_cell sid_devs_sl2[] = {
  3543. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3544. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3545. + { },
  3546. +};
  3547. +
  3548. +static const struct mfd_cell sid_devs_sl3_13[] = {
  3549. + { .name = "surface_sam_sid_gpelid", .id = -1 },
  3550. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3551. + { .name = "surface_sam_sid_ac", .id = -1 },
  3552. + {
  3553. + .name = "surface_sam_sid_battery",
  3554. + .id = -1,
  3555. + .platform_data = (void *)&ssam_battery_props_bat1,
  3556. + .pdata_size = sizeof(struct ssam_battery_properties),
  3557. + },
  3558. + {
  3559. + .name = "surface_sam_sid_vhf",
  3560. + .id = 1,
  3561. + .platform_data = (void *)&ssam_hid_props_keyboard,
  3562. + .pdata_size = sizeof(struct ssam_hid_properties),
  3563. + },
  3564. + {
  3565. + .name = "surface_sam_sid_vhf",
  3566. + .id = 3,
  3567. + .platform_data = (void *)&ssam_hid_props_touchpad,
  3568. + .pdata_size = sizeof(struct ssam_hid_properties),
  3569. + },
  3570. + {
  3571. + .name = "surface_sam_sid_vhf",
  3572. + .id = 5,
  3573. + .platform_data = (void *)&ssam_hid_props_iid5,
  3574. + .pdata_size = sizeof(struct ssam_hid_properties),
  3575. + },
  3576. + { },
  3577. +};
  3578. +
  3579. +static const struct mfd_cell sid_devs_sl3_15[] = {
  3580. + { .name = "surface_sam_sid_perfmode", .id = -1 },
  3581. + { .name = "surface_sam_sid_ac", .id = -1 },
  3582. + {
  3583. + .name = "surface_sam_sid_battery",
  3584. + .id = -1,
  3585. + .platform_data = (void *)&ssam_battery_props_bat1,
  3586. + .pdata_size = sizeof(struct ssam_battery_properties),
  3587. + },
  3588. + {
  3589. + .name = "surface_sam_sid_vhf",
  3590. + .id = 1,
  3591. + .platform_data = (void *)&ssam_hid_props_keyboard,
  3592. + .pdata_size = sizeof(struct ssam_hid_properties),
  3593. + },
  3594. + {
  3595. + .name = "surface_sam_sid_vhf",
  3596. + .id = 3,
  3597. + .platform_data = (void *)&ssam_hid_props_touchpad,
  3598. + .pdata_size = sizeof(struct ssam_hid_properties),
  3599. + },
  3600. + {
  3601. + .name = "surface_sam_sid_vhf",
  3602. + .id = 5,
  3603. + .platform_data = (void *)&ssam_hid_props_iid5,
  3604. + .pdata_size = sizeof(struct ssam_hid_properties),
  3605. + },
  3606. + { },
  3607. +};
  3608. +
  3609. +static const struct acpi_device_id surface_sam_sid_match[] = {
  3610. + /* Surface Pro 4, 5, and 6 */
  3611. + { "MSHW0081", (unsigned long)sid_devs_sp4 },
  3612. +
  3613. + /* Surface Pro 6 (OMBR >= 0x10) */
  3614. + { "MSHW0111", (unsigned long)sid_devs_sp6 },
  3615. +
  3616. + /* Surface Pro 7 */
  3617. + { "MSHW0116", (unsigned long)sid_devs_sp7 },
  3618. +
  3619. + /* Surface Book 1 */
  3620. + { "MSHW0080", (unsigned long)sid_devs_sb1 },
  3621. +
  3622. + /* Surface Book 2 */
  3623. + { "MSHW0107", (unsigned long)sid_devs_sb2 },
  3624. +
  3625. + /* Surface Book 3 */
  3626. + { "MSHW0117", (unsigned long)sid_devs_sb3 },
  3627. +
  3628. + /* Surface Laptop 1 */
  3629. + { "MSHW0086", (unsigned long)sid_devs_sl1 },
  3630. +
  3631. + /* Surface Laptop 2 */
  3632. + { "MSHW0112", (unsigned long)sid_devs_sl2 },
  3633. +
  3634. + /* Surface Laptop 3 (13") */
  3635. + { "MSHW0114", (unsigned long)sid_devs_sl3_13 },
  3636. +
  3637. + /* Surface Laptop 3 (15") */
  3638. + { "MSHW0110", (unsigned long)sid_devs_sl3_15 },
  3639. +
  3640. + { },
  3641. +};
  3642. +MODULE_DEVICE_TABLE(acpi, surface_sam_sid_match);
  3643. +
  3644. +
  3645. +static int surface_sam_sid_probe(struct platform_device *pdev)
  3646. +{
  3647. + const struct acpi_device_id *match;
  3648. + const struct mfd_cell *cells, *p;
  3649. +
  3650. + match = acpi_match_device(surface_sam_sid_match, &pdev->dev);
  3651. + if (!match)
  3652. + return -ENODEV;
  3653. +
  3654. + cells = (struct mfd_cell *)match->driver_data;
  3655. + if (!cells)
  3656. + return -ENODEV;
  3657. +
  3658. + for (p = cells; p->name; ++p) {
  3659. + /* just count */
  3660. + }
  3661. +
  3662. + if (p == cells)
  3663. + return -ENODEV;
  3664. +
  3665. + return mfd_add_devices(&pdev->dev, 0, cells, p - cells, NULL, 0, NULL);
  3666. +}
  3667. +
  3668. +static int surface_sam_sid_remove(struct platform_device *pdev)
  3669. +{
  3670. + mfd_remove_devices(&pdev->dev);
  3671. + return 0;
  3672. +}
  3673. +
  3674. +static struct platform_driver surface_sam_sid = {
  3675. + .probe = surface_sam_sid_probe,
  3676. + .remove = surface_sam_sid_remove,
  3677. + .driver = {
  3678. + .name = "surface_sam_sid",
  3679. + .acpi_match_table = surface_sam_sid_match,
  3680. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  3681. + },
  3682. +};
  3683. +module_platform_driver(surface_sam_sid);
  3684. +
  3685. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  3686. +MODULE_DESCRIPTION("Surface Integration Driver for 5th Generation Surface Devices");
  3687. +MODULE_LICENSE("GPL");
  3688. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c b/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c
  3689. new file mode 100644
  3690. index 0000000000000..f0cee43c859b4
  3691. --- /dev/null
  3692. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c
  3693. @@ -0,0 +1,232 @@
  3694. +// SPDX-License-Identifier: GPL-2.0-or-later
  3695. +/*
  3696. + * Surface Lid driver to enable wakeup from suspend via the lid.
  3697. + */
  3698. +
  3699. +#include <linux/acpi.h>
  3700. +#include <linux/dmi.h>
  3701. +#include <linux/kernel.h>
  3702. +#include <linux/module.h>
  3703. +#include <linux/platform_device.h>
  3704. +
  3705. +
  3706. +struct sid_lid_device {
  3707. + const char *acpi_path;
  3708. + const u32 gpe_number;
  3709. +};
  3710. +
  3711. +
  3712. +static const struct sid_lid_device lid_device_l17 = {
  3713. + .acpi_path = "\\_SB.LID0",
  3714. + .gpe_number = 0x17,
  3715. +};
  3716. +
  3717. +static const struct sid_lid_device lid_device_l4D = {
  3718. + .acpi_path = "\\_SB.LID0",
  3719. + .gpe_number = 0x4D,
  3720. +};
  3721. +
  3722. +static const struct sid_lid_device lid_device_l4F = {
  3723. + .acpi_path = "\\_SB.LID0",
  3724. + .gpe_number = 0x4F,
  3725. +};
  3726. +
  3727. +static const struct sid_lid_device lid_device_l57 = {
  3728. + .acpi_path = "\\_SB.LID0",
  3729. + .gpe_number = 0x57,
  3730. +};
  3731. +
  3732. +
  3733. +static const struct dmi_system_id dmi_lid_device_table[] = {
  3734. + {
  3735. + .ident = "Surface Pro 4",
  3736. + .matches = {
  3737. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3738. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
  3739. + },
  3740. + .driver_data = (void *)&lid_device_l17,
  3741. + },
  3742. + {
  3743. + .ident = "Surface Pro 5",
  3744. + .matches = {
  3745. + /* match for SKU here due to generic product name "Surface Pro" */
  3746. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3747. + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
  3748. + },
  3749. + .driver_data = (void *)&lid_device_l4F,
  3750. + },
  3751. + {
  3752. + .ident = "Surface Pro 5 (LTE)",
  3753. + .matches = {
  3754. + /* match for SKU here due to generic product name "Surface Pro" */
  3755. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3756. + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
  3757. + },
  3758. + .driver_data = (void *)&lid_device_l4F,
  3759. + },
  3760. + {
  3761. + .ident = "Surface Pro 6",
  3762. + .matches = {
  3763. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3764. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
  3765. + },
  3766. + .driver_data = (void *)&lid_device_l4F,
  3767. + },
  3768. + {
  3769. + .ident = "Surface Pro 7",
  3770. + .matches = {
  3771. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3772. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 7"),
  3773. + },
  3774. + .driver_data = (void *)&lid_device_l4D,
  3775. + },
  3776. + {
  3777. + .ident = "Surface Book 1",
  3778. + .matches = {
  3779. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3780. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
  3781. + },
  3782. + .driver_data = (void *)&lid_device_l17,
  3783. + },
  3784. + {
  3785. + .ident = "Surface Book 2",
  3786. + .matches = {
  3787. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3788. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
  3789. + },
  3790. + .driver_data = (void *)&lid_device_l17,
  3791. + },
  3792. + {
  3793. + .ident = "Surface Book 3",
  3794. + .matches = {
  3795. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3796. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 3"),
  3797. + },
  3798. + .driver_data = (void *)&lid_device_l4D,
  3799. + },
  3800. + {
  3801. + .ident = "Surface Laptop 1",
  3802. + .matches = {
  3803. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3804. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
  3805. + },
  3806. + .driver_data = (void *)&lid_device_l57,
  3807. + },
  3808. + {
  3809. + .ident = "Surface Laptop 2",
  3810. + .matches = {
  3811. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3812. + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
  3813. + },
  3814. + .driver_data = (void *)&lid_device_l57,
  3815. + },
  3816. + {
  3817. + .ident = "Surface Laptop 3 (13\")",
  3818. + .matches = {
  3819. + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
  3820. + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_3_1867:1868"),
  3821. + },
  3822. + .driver_data = (void *)&lid_device_l4D,
  3823. + },
  3824. + { }
  3825. +};
  3826. +
  3827. +
  3828. +static int sid_lid_enable_wakeup(const struct sid_lid_device *dev, bool enable)
  3829. +{
  3830. + int action = enable ? ACPI_GPE_ENABLE : ACPI_GPE_DISABLE;
  3831. + int status;
  3832. +
  3833. + status = acpi_set_gpe_wake_mask(NULL, dev->gpe_number, action);
  3834. + if (status)
  3835. + return -EFAULT;
  3836. +
  3837. + return 0;
  3838. +}
  3839. +
  3840. +
  3841. +static int surface_sam_sid_gpelid_suspend(struct device *dev)
  3842. +{
  3843. + const struct sid_lid_device *ldev;
  3844. +
  3845. + ldev = dev_get_drvdata(dev);
  3846. + return sid_lid_enable_wakeup(ldev, true);
  3847. +}
  3848. +
  3849. +static int surface_sam_sid_gpelid_resume(struct device *dev)
  3850. +{
  3851. + const struct sid_lid_device *ldev;
  3852. +
  3853. + ldev = dev_get_drvdata(dev);
  3854. + return sid_lid_enable_wakeup(ldev, false);
  3855. +}
  3856. +
  3857. +static SIMPLE_DEV_PM_OPS(surface_sam_sid_gpelid_pm,
  3858. + surface_sam_sid_gpelid_suspend,
  3859. + surface_sam_sid_gpelid_resume);
  3860. +
  3861. +
  3862. +static int surface_sam_sid_gpelid_probe(struct platform_device *pdev)
  3863. +{
  3864. + const struct dmi_system_id *match;
  3865. + struct sid_lid_device *dev;
  3866. + acpi_handle lid_handle;
  3867. + int status;
  3868. +
  3869. + match = dmi_first_match(dmi_lid_device_table);
  3870. + if (!match)
  3871. + return -ENODEV;
  3872. +
  3873. + dev = match->driver_data;
  3874. + if (!dev)
  3875. + return -ENODEV;
  3876. +
  3877. + status = acpi_get_handle(NULL, (acpi_string)dev->acpi_path, &lid_handle);
  3878. + if (status)
  3879. + return -EFAULT;
  3880. +
  3881. + status = acpi_setup_gpe_for_wake(lid_handle, NULL, dev->gpe_number);
  3882. + if (status)
  3883. + return -EFAULT;
  3884. +
  3885. + status = acpi_enable_gpe(NULL, dev->gpe_number);
  3886. + if (status)
  3887. + return -EFAULT;
  3888. +
  3889. + status = sid_lid_enable_wakeup(dev, false);
  3890. + if (status) {
  3891. + acpi_disable_gpe(NULL, dev->gpe_number);
  3892. + return status;
  3893. + }
  3894. +
  3895. + platform_set_drvdata(pdev, dev);
  3896. + return 0;
  3897. +}
  3898. +
  3899. +static int surface_sam_sid_gpelid_remove(struct platform_device *pdev)
  3900. +{
  3901. + struct sid_lid_device *dev = platform_get_drvdata(pdev);
  3902. +
  3903. + /* restore default behavior without this module */
  3904. + sid_lid_enable_wakeup(dev, false);
  3905. + acpi_disable_gpe(NULL, dev->gpe_number);
  3906. +
  3907. + platform_set_drvdata(pdev, NULL);
  3908. + return 0;
  3909. +}
  3910. +
  3911. +static struct platform_driver surface_sam_sid_gpelid = {
  3912. + .probe = surface_sam_sid_gpelid_probe,
  3913. + .remove = surface_sam_sid_gpelid_remove,
  3914. + .driver = {
  3915. + .name = "surface_sam_sid_gpelid",
  3916. + .pm = &surface_sam_sid_gpelid_pm,
  3917. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  3918. + },
  3919. +};
  3920. +module_platform_driver(surface_sam_sid_gpelid);
  3921. +
  3922. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  3923. +MODULE_DESCRIPTION("Surface Lid Driver for 5th Generation Surface Devices");
  3924. +MODULE_LICENSE("GPL");
  3925. +MODULE_ALIAS("platform:surface_sam_sid_gpelid");
  3926. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c b/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c
  3927. new file mode 100644
  3928. index 0000000000000..e0b1e42c2087f
  3929. --- /dev/null
  3930. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c
  3931. @@ -0,0 +1,214 @@
  3932. +// SPDX-License-Identifier: GPL-2.0-or-later
  3933. +/*
  3934. + * Surface Performance Mode Driver.
  3935. + * Allows to change cooling capabilities based on user preference.
  3936. + */
  3937. +
  3938. +#include <asm/unaligned.h>
  3939. +#include <linux/kernel.h>
  3940. +#include <linux/module.h>
  3941. +#include <linux/platform_device.h>
  3942. +
  3943. +#include "surface_sam_ssh.h"
  3944. +
  3945. +
  3946. +#define SID_PARAM_PERM 0644
  3947. +
  3948. +enum sam_perf_mode {
  3949. + SAM_PERF_MODE_NORMAL = 1,
  3950. + SAM_PERF_MODE_BATTERY = 2,
  3951. + SAM_PERF_MODE_PERF1 = 3,
  3952. + SAM_PERF_MODE_PERF2 = 4,
  3953. +
  3954. + __SAM_PERF_MODE__START = 1,
  3955. + __SAM_PERF_MODE__END = 4,
  3956. +};
  3957. +
  3958. +enum sid_param_perf_mode {
  3959. + SID_PARAM_PERF_MODE_AS_IS = 0,
  3960. + SID_PARAM_PERF_MODE_NORMAL = SAM_PERF_MODE_NORMAL,
  3961. + SID_PARAM_PERF_MODE_BATTERY = SAM_PERF_MODE_BATTERY,
  3962. + SID_PARAM_PERF_MODE_PERF1 = SAM_PERF_MODE_PERF1,
  3963. + SID_PARAM_PERF_MODE_PERF2 = SAM_PERF_MODE_PERF2,
  3964. +
  3965. + __SID_PARAM_PERF_MODE__START = 0,
  3966. + __SID_PARAM_PERF_MODE__END = 4,
  3967. +};
  3968. +
  3969. +struct spm_data {
  3970. + struct ssam_controller *ctrl;
  3971. +};
  3972. +
  3973. +
  3974. +struct ssam_perf_info {
  3975. + __le32 mode;
  3976. + __le16 unknown1;
  3977. + __le16 unknown2;
  3978. +} __packed;
  3979. +
  3980. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_tmp_perf_mode_get, struct ssam_perf_info, {
  3981. + .target_category = SSAM_SSH_TC_TMP,
  3982. + .command_id = 0x02,
  3983. + .instance_id = 0x00,
  3984. + .channel = 0x01,
  3985. +});
  3986. +
  3987. +static SSAM_DEFINE_SYNC_REQUEST_W(__ssam_tmp_perf_mode_set, __le32, {
  3988. + .target_category = SSAM_SSH_TC_TMP,
  3989. + .command_id = 0x03,
  3990. + .instance_id = 0x00,
  3991. + .channel = 0x01,
  3992. +});
  3993. +
  3994. +static int ssam_tmp_perf_mode_set(struct ssam_controller *ctrl, u32 mode)
  3995. +{
  3996. + __le32 mode_le = cpu_to_le32(mode);
  3997. +
  3998. + if (mode < __SAM_PERF_MODE__START || mode > __SAM_PERF_MODE__END)
  3999. + return -EINVAL;
  4000. +
  4001. + return __ssam_tmp_perf_mode_set(ctrl, &mode_le);
  4002. +}
  4003. +
  4004. +
  4005. +static int param_perf_mode_set(const char *val, const struct kernel_param *kp)
  4006. +{
  4007. + int perf_mode;
  4008. + int status;
  4009. +
  4010. + status = kstrtoint(val, 0, &perf_mode);
  4011. + if (status)
  4012. + return status;
  4013. +
  4014. + if (perf_mode < __SID_PARAM_PERF_MODE__START || perf_mode > __SID_PARAM_PERF_MODE__END)
  4015. + return -EINVAL;
  4016. +
  4017. + return param_set_int(val, kp);
  4018. +}
  4019. +
  4020. +static const struct kernel_param_ops param_perf_mode_ops = {
  4021. + .set = param_perf_mode_set,
  4022. + .get = param_get_int,
  4023. +};
  4024. +
  4025. +static int param_perf_mode_init = SID_PARAM_PERF_MODE_AS_IS;
  4026. +static int param_perf_mode_exit = SID_PARAM_PERF_MODE_AS_IS;
  4027. +
  4028. +module_param_cb(perf_mode_init, &param_perf_mode_ops, &param_perf_mode_init, SID_PARAM_PERM);
  4029. +module_param_cb(perf_mode_exit, &param_perf_mode_ops, &param_perf_mode_exit, SID_PARAM_PERM);
  4030. +
  4031. +MODULE_PARM_DESC(perf_mode_init, "Performance-mode to be set on module initialization");
  4032. +MODULE_PARM_DESC(perf_mode_exit, "Performance-mode to be set on module exit");
  4033. +
  4034. +
  4035. +static ssize_t perf_mode_show(struct device *dev, struct device_attribute *attr, char *data)
  4036. +{
  4037. + struct spm_data *d = dev_get_drvdata(dev);
  4038. + struct ssam_perf_info info;
  4039. + int status;
  4040. +
  4041. + status = ssam_tmp_perf_mode_get(d->ctrl, &info);
  4042. + if (status) {
  4043. + dev_err(dev, "failed to get current performance mode: %d\n", status);
  4044. + return -EIO;
  4045. + }
  4046. +
  4047. + return sprintf(data, "%d\n", le32_to_cpu(info.mode));
  4048. +}
  4049. +
  4050. +static ssize_t perf_mode_store(struct device *dev, struct device_attribute *attr,
  4051. + const char *data, size_t count)
  4052. +{
  4053. + struct spm_data *d = dev_get_drvdata(dev);
  4054. + int perf_mode;
  4055. + int status;
  4056. +
  4057. + status = kstrtoint(data, 0, &perf_mode);
  4058. + if (status)
  4059. + return status;
  4060. +
  4061. + status = ssam_tmp_perf_mode_set(d->ctrl, perf_mode);
  4062. + if (status)
  4063. + return status;
  4064. +
  4065. + // TODO: Should we notify ACPI here?
  4066. + //
  4067. + // There is a _DSM call described as
  4068. + // WSID._DSM: Notify DPTF on Slider State change
  4069. + // which calls
  4070. + // ODV3 = ToInteger (Arg3)
  4071. + // Notify(IETM, 0x88)
  4072. + // IETM is an INT3400 Intel Dynamic Power Performance Management
  4073. + // device, part of the DPTF framework. From the corresponding
  4074. + // kernel driver, it looks like event 0x88 is being ignored. Also
  4075. + // it is currently unknown what the consequences of setting ODV3
  4076. + // are.
  4077. +
  4078. + return count;
  4079. +}
  4080. +
  4081. +static const DEVICE_ATTR_RW(perf_mode);
  4082. +
  4083. +
  4084. +static int surface_sam_sid_perfmode_probe(struct platform_device *pdev)
  4085. +{
  4086. + struct ssam_controller *ctrl;
  4087. + struct spm_data *data;
  4088. + int status;
  4089. +
  4090. + // link to ec
  4091. + status = ssam_client_bind(&pdev->dev, &ctrl);
  4092. + if (status)
  4093. + return status == -ENXIO ? -EPROBE_DEFER : status;
  4094. +
  4095. + data = devm_kzalloc(&pdev->dev, sizeof(struct spm_data), GFP_KERNEL);
  4096. + if (!data)
  4097. + return -ENOMEM;
  4098. +
  4099. + data->ctrl = ctrl;
  4100. + platform_set_drvdata(pdev, data);
  4101. +
  4102. + // set initial perf_mode
  4103. + if (param_perf_mode_init != SID_PARAM_PERF_MODE_AS_IS) {
  4104. + status = ssam_tmp_perf_mode_set(ctrl, param_perf_mode_init);
  4105. + if (status)
  4106. + return status;
  4107. + }
  4108. +
  4109. + // register perf_mode attribute
  4110. + status = sysfs_create_file(&pdev->dev.kobj, &dev_attr_perf_mode.attr);
  4111. + if (status)
  4112. + goto err_sysfs;
  4113. +
  4114. + return 0;
  4115. +
  4116. +err_sysfs:
  4117. + ssam_tmp_perf_mode_set(ctrl, param_perf_mode_exit);
  4118. + return status;
  4119. +}
  4120. +
  4121. +static int surface_sam_sid_perfmode_remove(struct platform_device *pdev)
  4122. +{
  4123. + struct spm_data *data = platform_get_drvdata(pdev);
  4124. +
  4125. + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_perf_mode.attr);
  4126. + ssam_tmp_perf_mode_set(data->ctrl, param_perf_mode_exit);
  4127. +
  4128. + platform_set_drvdata(pdev, NULL);
  4129. + return 0;
  4130. +}
  4131. +
  4132. +static struct platform_driver surface_sam_sid_perfmode = {
  4133. + .probe = surface_sam_sid_perfmode_probe,
  4134. + .remove = surface_sam_sid_perfmode_remove,
  4135. + .driver = {
  4136. + .name = "surface_sam_sid_perfmode",
  4137. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  4138. + },
  4139. +};
  4140. +module_platform_driver(surface_sam_sid_perfmode);
  4141. +
  4142. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  4143. +MODULE_DESCRIPTION("Surface Performance Mode Driver for 5th Generation Surface Devices");
  4144. +MODULE_LICENSE("GPL");
  4145. +MODULE_ALIAS("platform:surface_sam_sid_perfmode");
  4146. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_power.c b/drivers/platform/x86/surface_sam/surface_sam_sid_power.c
  4147. new file mode 100644
  4148. index 0000000000000..64a3d46a128cc
  4149. --- /dev/null
  4150. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_power.c
  4151. @@ -0,0 +1,1054 @@
  4152. +// SPDX-License-Identifier: GPL-2.0-or-later
  4153. +/*
  4154. + * Surface SID Battery/AC Driver.
  4155. + * Provides support for the battery and AC on 7th generation Surface devices.
  4156. + */
  4157. +
  4158. +#include <asm/unaligned.h>
  4159. +#include <linux/kernel.h>
  4160. +#include <linux/delay.h>
  4161. +#include <linux/jiffies.h>
  4162. +#include <linux/module.h>
  4163. +#include <linux/platform_device.h>
  4164. +#include <linux/power_supply.h>
  4165. +#include <linux/workqueue.h>
  4166. +
  4167. +#include "surface_sam_ssh.h"
  4168. +#include "surface_sam_sid_power.h"
  4169. +
  4170. +#define SPWR_WARN KERN_WARNING KBUILD_MODNAME ": "
  4171. +#define SPWR_DEBUG KERN_DEBUG KBUILD_MODNAME ": "
  4172. +
  4173. +
  4174. +// TODO: check BIX/BST for unknown/unsupported 0xffffffff entries
  4175. +// TODO: DPTF (/SAN notifications)?
  4176. +// TODO: other properties?
  4177. +
  4178. +
  4179. +static unsigned int cache_time = 1000;
  4180. +module_param(cache_time, uint, 0644);
  4181. +MODULE_PARM_DESC(cache_time, "battery state caching time in milliseconds [default: 1000]");
  4182. +
  4183. +#define SPWR_AC_BAT_UPDATE_DELAY msecs_to_jiffies(5000)
  4184. +
  4185. +
  4186. +/*
  4187. + * SAM Interface.
  4188. + */
  4189. +
  4190. +#define SAM_EVENT_PWR_CID_BIX 0x15
  4191. +#define SAM_EVENT_PWR_CID_BST 0x16
  4192. +#define SAM_EVENT_PWR_CID_ADAPTER 0x17
  4193. +
  4194. +#define SAM_BATTERY_STA_OK 0x0f
  4195. +#define SAM_BATTERY_STA_PRESENT 0x10
  4196. +
  4197. +#define SAM_BATTERY_STATE_DISCHARGING 0x01
  4198. +#define SAM_BATTERY_STATE_CHARGING 0x02
  4199. +#define SAM_BATTERY_STATE_CRITICAL 0x04
  4200. +
  4201. +#define SAM_BATTERY_POWER_UNIT_MA 1
  4202. +
  4203. +
  4204. +/* Equivalent to data returned in ACPI _BIX method */
  4205. +struct spwr_bix {
  4206. + u8 revision;
  4207. + __le32 power_unit;
  4208. + __le32 design_cap;
  4209. + __le32 last_full_charge_cap;
  4210. + __le32 technology;
  4211. + __le32 design_voltage;
  4212. + __le32 design_cap_warn;
  4213. + __le32 design_cap_low;
  4214. + __le32 cycle_count;
  4215. + __le32 measurement_accuracy;
  4216. + __le32 max_sampling_time;
  4217. + __le32 min_sampling_time;
  4218. + __le32 max_avg_interval;
  4219. + __le32 min_avg_interval;
  4220. + __le32 bat_cap_granularity_1;
  4221. + __le32 bat_cap_granularity_2;
  4222. + u8 model[21];
  4223. + u8 serial[11];
  4224. + u8 type[5];
  4225. + u8 oem_info[21];
  4226. +} __packed;
  4227. +
  4228. +/* Equivalent to data returned in ACPI _BST method */
  4229. +struct spwr_bst {
  4230. + __le32 state;
  4231. + __le32 present_rate;
  4232. + __le32 remaining_cap;
  4233. + __le32 present_voltage;
  4234. +} __packed;
  4235. +
  4236. +/* DPTF event payload */
  4237. +struct spwr_event_dptf {
  4238. + __le32 pmax;
  4239. + __le32 _1; /* currently unknown */
  4240. + __le32 _2; /* currently unknown */
  4241. +} __packed;
  4242. +
  4243. +
  4244. +/* Get battery status (_STA) */
  4245. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_sta, __le32, {
  4246. + .target_category = SSAM_SSH_TC_BAT,
  4247. + .command_id = 0x01,
  4248. +});
  4249. +
  4250. +/* Get battery static information (_BIX) */
  4251. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_bix, struct spwr_bix, {
  4252. + .target_category = SSAM_SSH_TC_BAT,
  4253. + .command_id = 0x02,
  4254. +});
  4255. +
  4256. +/* Get battery dynamic information (_BST) */
  4257. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_bst, struct spwr_bst, {
  4258. + .target_category = SSAM_SSH_TC_BAT,
  4259. + .command_id = 0x03,
  4260. +});
  4261. +
  4262. +/* Set battery trip point (_BTP) */
  4263. +static SSAM_DEFINE_SYNC_REQUEST_MD_W(ssam_bat_set_btp, __le32, {
  4264. + .target_category = SSAM_SSH_TC_BAT,
  4265. + .command_id = 0x04,
  4266. +});
  4267. +
  4268. +/* Get platform power source for battery (DPTF PSRC) */
  4269. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_psrc, __le32, {
  4270. + .target_category = SSAM_SSH_TC_BAT,
  4271. + .command_id = 0x0d,
  4272. +});
  4273. +
  4274. +/* Get maximum platform power for battery (DPTF PMAX) */
  4275. +__always_unused
  4276. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_pmax, __le32, {
  4277. + .target_category = SSAM_SSH_TC_BAT,
  4278. + .command_id = 0x0b,
  4279. +});
  4280. +
  4281. +/* Get adapter rating (DPTF ARTG) */
  4282. +__always_unused
  4283. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_artg, __le32, {
  4284. + .target_category = SSAM_SSH_TC_BAT,
  4285. + .command_id = 0x0f,
  4286. +});
  4287. +
  4288. +/* Unknown (DPTF PSOC) */
  4289. +__always_unused
  4290. +static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_psoc, __le32, {
  4291. + .target_category = SSAM_SSH_TC_BAT,
  4292. + .command_id = 0x0c,
  4293. +});
  4294. +
  4295. +/* Unknown (DPTF CHGI/ INT3403 SPPC) */
  4296. +__always_unused
  4297. +static SSAM_DEFINE_SYNC_REQUEST_MD_W(ssam_bat_set_chgi, __le32, {
  4298. + .target_category = SSAM_SSH_TC_BAT,
  4299. + .command_id = 0x0e,
  4300. +});
  4301. +
  4302. +
  4303. +/*
  4304. + * Common Power-Subsystem Interface.
  4305. + */
  4306. +
  4307. +struct spwr_battery_device {
  4308. + struct platform_device *pdev;
  4309. + struct ssam_controller *ctrl;
  4310. + const struct ssam_battery_properties *p;
  4311. +
  4312. + char name[32];
  4313. + struct power_supply *psy;
  4314. + struct power_supply_desc psy_desc;
  4315. +
  4316. + struct delayed_work update_work;
  4317. +
  4318. + struct ssam_event_notifier notif;
  4319. +
  4320. + struct mutex lock;
  4321. + unsigned long timestamp;
  4322. +
  4323. + __le32 sta;
  4324. + struct spwr_bix bix;
  4325. + struct spwr_bst bst;
  4326. + u32 alarm;
  4327. +};
  4328. +
  4329. +struct spwr_ac_device {
  4330. + struct platform_device *pdev;
  4331. + struct ssam_controller *ctrl;
  4332. +
  4333. + char name[32];
  4334. + struct power_supply *psy;
  4335. + struct power_supply_desc psy_desc;
  4336. +
  4337. + struct ssam_event_notifier notif;
  4338. +
  4339. + struct mutex lock;
  4340. +
  4341. + __le32 state;
  4342. +};
  4343. +
  4344. +static enum power_supply_property spwr_ac_props[] = {
  4345. + POWER_SUPPLY_PROP_ONLINE,
  4346. +};
  4347. +
  4348. +static enum power_supply_property spwr_battery_props_chg[] = {
  4349. + POWER_SUPPLY_PROP_STATUS,
  4350. + POWER_SUPPLY_PROP_PRESENT,
  4351. + POWER_SUPPLY_PROP_TECHNOLOGY,
  4352. + POWER_SUPPLY_PROP_CYCLE_COUNT,
  4353. + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
  4354. + POWER_SUPPLY_PROP_VOLTAGE_NOW,
  4355. + POWER_SUPPLY_PROP_CURRENT_NOW,
  4356. + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
  4357. + POWER_SUPPLY_PROP_CHARGE_FULL,
  4358. + POWER_SUPPLY_PROP_CHARGE_NOW,
  4359. + POWER_SUPPLY_PROP_CAPACITY,
  4360. + POWER_SUPPLY_PROP_CAPACITY_LEVEL,
  4361. + POWER_SUPPLY_PROP_MODEL_NAME,
  4362. + POWER_SUPPLY_PROP_MANUFACTURER,
  4363. + POWER_SUPPLY_PROP_SERIAL_NUMBER,
  4364. +};
  4365. +
  4366. +static enum power_supply_property spwr_battery_props_eng[] = {
  4367. + POWER_SUPPLY_PROP_STATUS,
  4368. + POWER_SUPPLY_PROP_PRESENT,
  4369. + POWER_SUPPLY_PROP_TECHNOLOGY,
  4370. + POWER_SUPPLY_PROP_CYCLE_COUNT,
  4371. + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
  4372. + POWER_SUPPLY_PROP_VOLTAGE_NOW,
  4373. + POWER_SUPPLY_PROP_POWER_NOW,
  4374. + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
  4375. + POWER_SUPPLY_PROP_ENERGY_FULL,
  4376. + POWER_SUPPLY_PROP_ENERGY_NOW,
  4377. + POWER_SUPPLY_PROP_CAPACITY,
  4378. + POWER_SUPPLY_PROP_CAPACITY_LEVEL,
  4379. + POWER_SUPPLY_PROP_MODEL_NAME,
  4380. + POWER_SUPPLY_PROP_MANUFACTURER,
  4381. + POWER_SUPPLY_PROP_SERIAL_NUMBER,
  4382. +};
  4383. +
  4384. +
  4385. +static int spwr_battery_register(struct spwr_battery_device *bat,
  4386. + struct platform_device *pdev,
  4387. + struct ssam_controller *ctrl,
  4388. + const struct ssam_battery_properties *p);
  4389. +
  4390. +static void spwr_battery_unregister(struct spwr_battery_device *bat);
  4391. +
  4392. +
  4393. +static inline bool spwr_battery_present(struct spwr_battery_device *bat)
  4394. +{
  4395. + return le32_to_cpu(bat->sta) & SAM_BATTERY_STA_PRESENT;
  4396. +}
  4397. +
  4398. +
  4399. +static inline int spwr_battery_load_sta(struct spwr_battery_device *bat)
  4400. +{
  4401. + return ssam_bat_get_sta(bat->ctrl, bat->p->channel, bat->p->instance,
  4402. + &bat->sta);
  4403. +}
  4404. +
  4405. +static inline int spwr_battery_load_bix(struct spwr_battery_device *bat)
  4406. +{
  4407. + if (!spwr_battery_present(bat))
  4408. + return 0;
  4409. +
  4410. + return ssam_bat_get_bix(bat->ctrl, bat->p->channel, bat->p->instance,
  4411. + &bat->bix);
  4412. +}
  4413. +
  4414. +static inline int spwr_battery_load_bst(struct spwr_battery_device *bat)
  4415. +{
  4416. + if (!spwr_battery_present(bat))
  4417. + return 0;
  4418. +
  4419. + return ssam_bat_get_bst(bat->ctrl, bat->p->channel, bat->p->instance,
  4420. + &bat->bst);
  4421. +}
  4422. +
  4423. +
  4424. +static inline int spwr_battery_set_alarm_unlocked(struct spwr_battery_device *bat, u32 value)
  4425. +{
  4426. + __le32 alarm = cpu_to_le32(value);
  4427. +
  4428. + bat->alarm = value;
  4429. + return ssam_bat_set_btp(bat->ctrl, bat->p->channel, bat->p->instance,
  4430. + &alarm);
  4431. +}
  4432. +
  4433. +static inline int spwr_battery_set_alarm(struct spwr_battery_device *bat, u32 value)
  4434. +{
  4435. + int status;
  4436. +
  4437. + mutex_lock(&bat->lock);
  4438. + status = spwr_battery_set_alarm_unlocked(bat, value);
  4439. + mutex_unlock(&bat->lock);
  4440. +
  4441. + return status;
  4442. +}
  4443. +
  4444. +static inline int spwr_battery_update_bst_unlocked(struct spwr_battery_device *bat, bool cached)
  4445. +{
  4446. + unsigned long cache_deadline = bat->timestamp + msecs_to_jiffies(cache_time);
  4447. + int status;
  4448. +
  4449. + if (cached && bat->timestamp && time_is_after_jiffies(cache_deadline))
  4450. + return 0;
  4451. +
  4452. + status = spwr_battery_load_sta(bat);
  4453. + if (status)
  4454. + return status;
  4455. +
  4456. + status = spwr_battery_load_bst(bat);
  4457. + if (status)
  4458. + return status;
  4459. +
  4460. + bat->timestamp = jiffies;
  4461. + return 0;
  4462. +}
  4463. +
  4464. +static int spwr_battery_update_bst(struct spwr_battery_device *bat, bool cached)
  4465. +{
  4466. + int status;
  4467. +
  4468. + mutex_lock(&bat->lock);
  4469. + status = spwr_battery_update_bst_unlocked(bat, cached);
  4470. + mutex_unlock(&bat->lock);
  4471. +
  4472. + return status;
  4473. +}
  4474. +
  4475. +static inline int spwr_battery_update_bix_unlocked(struct spwr_battery_device *bat)
  4476. +{
  4477. + int status;
  4478. +
  4479. + status = spwr_battery_load_sta(bat);
  4480. + if (status)
  4481. + return status;
  4482. +
  4483. + status = spwr_battery_load_bix(bat);
  4484. + if (status)
  4485. + return status;
  4486. +
  4487. + status = spwr_battery_load_bst(bat);
  4488. + if (status)
  4489. + return status;
  4490. +
  4491. + bat->timestamp = jiffies;
  4492. + return 0;
  4493. +}
  4494. +
  4495. +static int spwr_battery_update_bix(struct spwr_battery_device *bat)
  4496. +{
  4497. + int status;
  4498. +
  4499. + mutex_lock(&bat->lock);
  4500. + status = spwr_battery_update_bix_unlocked(bat);
  4501. + mutex_unlock(&bat->lock);
  4502. +
  4503. + return status;
  4504. +}
  4505. +
  4506. +static inline int spwr_ac_update_unlocked(struct spwr_ac_device *ac)
  4507. +{
  4508. + return ssam_bat_get_psrc(ac->ctrl, 0x01, 0x01, &ac->state);
  4509. +}
  4510. +
  4511. +static int spwr_ac_update(struct spwr_ac_device *ac)
  4512. +{
  4513. + int status;
  4514. +
  4515. + mutex_lock(&ac->lock);
  4516. + status = spwr_ac_update_unlocked(ac);
  4517. + mutex_unlock(&ac->lock);
  4518. +
  4519. + return status;
  4520. +}
  4521. +
  4522. +
  4523. +static int spwr_battery_recheck(struct spwr_battery_device *bat)
  4524. +{
  4525. + bool present = spwr_battery_present(bat);
  4526. + u32 unit = get_unaligned_le32(&bat->bix.power_unit);
  4527. + int status;
  4528. +
  4529. + status = spwr_battery_update_bix(bat);
  4530. + if (status)
  4531. + return status;
  4532. +
  4533. + // if battery has been attached, (re-)initialize alarm
  4534. + if (!present && spwr_battery_present(bat)) {
  4535. + u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
  4536. + status = spwr_battery_set_alarm(bat, cap_warn);
  4537. + if (status)
  4538. + return status;
  4539. + }
  4540. +
  4541. + // if the unit has changed, re-add the battery
  4542. + if (unit != get_unaligned_le32(&bat->bix.power_unit)) {
  4543. + spwr_battery_unregister(bat);
  4544. + status = spwr_battery_register(bat, bat->pdev, bat->ctrl, bat->p);
  4545. + }
  4546. +
  4547. + return status;
  4548. +}
  4549. +
  4550. +
  4551. +static inline int spwr_notify_bix(struct spwr_battery_device *bat)
  4552. +{
  4553. + int status;
  4554. +
  4555. + status = spwr_battery_recheck(bat);
  4556. + if (!status)
  4557. + power_supply_changed(bat->psy);
  4558. +
  4559. + return status;
  4560. +}
  4561. +
  4562. +static inline int spwr_notify_bst(struct spwr_battery_device *bat)
  4563. +{
  4564. + int status;
  4565. +
  4566. + status = spwr_battery_update_bst(bat, false);
  4567. + if (!status)
  4568. + power_supply_changed(bat->psy);
  4569. +
  4570. + return status;
  4571. +}
  4572. +
  4573. +static inline int spwr_notify_adapter_bat(struct spwr_battery_device *bat)
  4574. +{
  4575. + u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
  4576. + u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
  4577. +
  4578. + /*
  4579. + * Handle battery update quirk:
  4580. + * When the battery is fully charged and the adapter is plugged in or
  4581. + * removed, the EC does not send a separate event for the state
  4582. + * (charging/discharging) change. Furthermore it may take some time until
  4583. + * the state is updated on the battery. Schedule an update to solve this.
  4584. + */
  4585. +
  4586. + if (remaining_cap >= last_full_cap)
  4587. + schedule_delayed_work(&bat->update_work, SPWR_AC_BAT_UPDATE_DELAY);
  4588. +
  4589. + return 0;
  4590. +}
  4591. +
  4592. +static inline int spwr_notify_adapter_ac(struct spwr_ac_device *ac)
  4593. +{
  4594. + int status;
  4595. +
  4596. + status = spwr_ac_update(ac);
  4597. + if (!status)
  4598. + power_supply_changed(ac->psy);
  4599. +
  4600. + return status;
  4601. +}
  4602. +
  4603. +static u32 spwr_notify_bat(struct ssam_notifier_block *nb, const struct ssam_event *event)
  4604. +{
  4605. + struct spwr_battery_device *bat = container_of(nb, struct spwr_battery_device, notif.base);
  4606. + int status;
  4607. +
  4608. + dev_dbg(&bat->pdev->dev, "power event (cid = 0x%02x, iid = %d, chn = %d)\n",
  4609. + event->command_id, event->instance_id, event->channel);
  4610. +
  4611. + // handled here, needs to be handled for all channels/instances
  4612. + if (event->command_id == SAM_EVENT_PWR_CID_ADAPTER) {
  4613. + status = spwr_notify_adapter_bat(bat);
  4614. + return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
  4615. + }
  4616. +
  4617. + // check for the correct channel and instance ID
  4618. + if (event->channel != bat->p->channel)
  4619. + return 0;
  4620. +
  4621. + if (event->instance_id != bat->p->instance)
  4622. + return 0;
  4623. +
  4624. + switch (event->command_id) {
  4625. + case SAM_EVENT_PWR_CID_BIX:
  4626. + status = spwr_notify_bix(bat);
  4627. + break;
  4628. +
  4629. + case SAM_EVENT_PWR_CID_BST:
  4630. + status = spwr_notify_bst(bat);
  4631. + break;
  4632. +
  4633. + default:
  4634. + return 0;
  4635. + }
  4636. +
  4637. + return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
  4638. +}
  4639. +
  4640. +static u32 spwr_notify_ac(struct ssam_notifier_block *nb, const struct ssam_event *event)
  4641. +{
  4642. + struct spwr_ac_device *ac = container_of(nb, struct spwr_ac_device, notif.base);
  4643. + int status;
  4644. +
  4645. + dev_dbg(&ac->pdev->dev, "power event (cid = 0x%02x, iid = %d, chn = %d)\n",
  4646. + event->command_id, event->instance_id, event->channel);
  4647. +
  4648. + // AC has IID = 0
  4649. + if (event->instance_id != 0)
  4650. + return 0;
  4651. +
  4652. + switch (event->command_id) {
  4653. + case SAM_EVENT_PWR_CID_ADAPTER:
  4654. + status = spwr_notify_adapter_ac(ac);
  4655. + return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
  4656. +
  4657. + default:
  4658. + return 0;
  4659. + }
  4660. +}
  4661. +
  4662. +static void spwr_battery_update_bst_workfn(struct work_struct *work)
  4663. +{
  4664. + struct delayed_work *dwork = to_delayed_work(work);
  4665. + struct spwr_battery_device *bat = container_of(dwork, struct spwr_battery_device, update_work);
  4666. + int status;
  4667. +
  4668. + status = spwr_battery_update_bst(bat, false);
  4669. + if (!status)
  4670. + power_supply_changed(bat->psy);
  4671. +
  4672. + if (status)
  4673. + dev_err(&bat->pdev->dev, "failed to update battery state: %d\n", status);
  4674. +}
  4675. +
  4676. +
  4677. +static inline int spwr_battery_prop_status(struct spwr_battery_device *bat)
  4678. +{
  4679. + u32 state = get_unaligned_le32(&bat->bst.state);
  4680. + u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
  4681. + u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
  4682. + u32 present_rate = get_unaligned_le32(&bat->bst.present_rate);
  4683. +
  4684. + if (state & SAM_BATTERY_STATE_DISCHARGING)
  4685. + return POWER_SUPPLY_STATUS_DISCHARGING;
  4686. +
  4687. + if (state & SAM_BATTERY_STATE_CHARGING)
  4688. + return POWER_SUPPLY_STATUS_CHARGING;
  4689. +
  4690. + if (last_full_cap == remaining_cap)
  4691. + return POWER_SUPPLY_STATUS_FULL;
  4692. +
  4693. + if (present_rate == 0)
  4694. + return POWER_SUPPLY_STATUS_NOT_CHARGING;
  4695. +
  4696. + return POWER_SUPPLY_STATUS_UNKNOWN;
  4697. +}
  4698. +
  4699. +static inline int spwr_battery_prop_technology(struct spwr_battery_device *bat)
  4700. +{
  4701. + if (!strcasecmp("NiCd", bat->bix.type))
  4702. + return POWER_SUPPLY_TECHNOLOGY_NiCd;
  4703. +
  4704. + if (!strcasecmp("NiMH", bat->bix.type))
  4705. + return POWER_SUPPLY_TECHNOLOGY_NiMH;
  4706. +
  4707. + if (!strcasecmp("LION", bat->bix.type))
  4708. + return POWER_SUPPLY_TECHNOLOGY_LION;
  4709. +
  4710. + if (!strncasecmp("LI-ION", bat->bix.type, 6))
  4711. + return POWER_SUPPLY_TECHNOLOGY_LION;
  4712. +
  4713. + if (!strcasecmp("LiP", bat->bix.type))
  4714. + return POWER_SUPPLY_TECHNOLOGY_LIPO;
  4715. +
  4716. + return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
  4717. +}
  4718. +
  4719. +static inline int spwr_battery_prop_capacity(struct spwr_battery_device *bat)
  4720. +{
  4721. + u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
  4722. + u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
  4723. +
  4724. + if (remaining_cap && last_full_cap)
  4725. + return remaining_cap * 100 / last_full_cap;
  4726. + else
  4727. + return 0;
  4728. +}
  4729. +
  4730. +static inline int spwr_battery_prop_capacity_level(struct spwr_battery_device *bat)
  4731. +{
  4732. + u32 state = get_unaligned_le32(&bat->bst.state);
  4733. + u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
  4734. + u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
  4735. +
  4736. + if (state & SAM_BATTERY_STATE_CRITICAL)
  4737. + return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
  4738. +
  4739. + if (remaining_cap >= last_full_cap)
  4740. + return POWER_SUPPLY_CAPACITY_LEVEL_FULL;
  4741. +
  4742. + if (remaining_cap <= bat->alarm)
  4743. + return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
  4744. +
  4745. + return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
  4746. +}
  4747. +
  4748. +static int spwr_ac_get_property(struct power_supply *psy,
  4749. + enum power_supply_property psp,
  4750. + union power_supply_propval *val)
  4751. +{
  4752. + struct spwr_ac_device *ac = power_supply_get_drvdata(psy);
  4753. + int status;
  4754. +
  4755. + mutex_lock(&ac->lock);
  4756. +
  4757. + status = spwr_ac_update_unlocked(ac);
  4758. + if (status)
  4759. + goto out;
  4760. +
  4761. + switch (psp) {
  4762. + case POWER_SUPPLY_PROP_ONLINE:
  4763. + val->intval = le32_to_cpu(ac->state) == 1;
  4764. + break;
  4765. +
  4766. + default:
  4767. + status = -EINVAL;
  4768. + goto out;
  4769. + }
  4770. +
  4771. +out:
  4772. + mutex_unlock(&ac->lock);
  4773. + return status;
  4774. +}
  4775. +
  4776. +static int spwr_battery_get_property(struct power_supply *psy,
  4777. + enum power_supply_property psp,
  4778. + union power_supply_propval *val)
  4779. +{
  4780. + struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
  4781. + int status;
  4782. +
  4783. + mutex_lock(&bat->lock);
  4784. +
  4785. + status = spwr_battery_update_bst_unlocked(bat, true);
  4786. + if (status)
  4787. + goto out;
  4788. +
  4789. + // abort if battery is not present
  4790. + if (!spwr_battery_present(bat) && psp != POWER_SUPPLY_PROP_PRESENT) {
  4791. + status = -ENODEV;
  4792. + goto out;
  4793. + }
  4794. +
  4795. + switch (psp) {
  4796. + case POWER_SUPPLY_PROP_STATUS:
  4797. + val->intval = spwr_battery_prop_status(bat);
  4798. + break;
  4799. +
  4800. + case POWER_SUPPLY_PROP_PRESENT:
  4801. + val->intval = spwr_battery_present(bat);
  4802. + break;
  4803. +
  4804. + case POWER_SUPPLY_PROP_TECHNOLOGY:
  4805. + val->intval = spwr_battery_prop_technology(bat);
  4806. + break;
  4807. +
  4808. + case POWER_SUPPLY_PROP_CYCLE_COUNT:
  4809. + val->intval = get_unaligned_le32(&bat->bix.cycle_count);
  4810. + break;
  4811. +
  4812. + case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
  4813. + val->intval = get_unaligned_le32(&bat->bix.design_voltage)
  4814. + * 1000;
  4815. + break;
  4816. +
  4817. + case POWER_SUPPLY_PROP_VOLTAGE_NOW:
  4818. + val->intval = get_unaligned_le32(&bat->bst.present_voltage)
  4819. + * 1000;
  4820. + break;
  4821. +
  4822. + case POWER_SUPPLY_PROP_CURRENT_NOW:
  4823. + case POWER_SUPPLY_PROP_POWER_NOW:
  4824. + val->intval = get_unaligned_le32(&bat->bst.present_rate) * 1000;
  4825. + break;
  4826. +
  4827. + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
  4828. + case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
  4829. + val->intval = get_unaligned_le32(&bat->bix.design_cap) * 1000;
  4830. + break;
  4831. +
  4832. + case POWER_SUPPLY_PROP_CHARGE_FULL:
  4833. + case POWER_SUPPLY_PROP_ENERGY_FULL:
  4834. + val->intval = get_unaligned_le32(&bat->bix.last_full_charge_cap)
  4835. + * 1000;
  4836. + break;
  4837. +
  4838. + case POWER_SUPPLY_PROP_CHARGE_NOW:
  4839. + case POWER_SUPPLY_PROP_ENERGY_NOW:
  4840. + val->intval = get_unaligned_le32(&bat->bst.remaining_cap)
  4841. + * 1000;
  4842. + break;
  4843. +
  4844. + case POWER_SUPPLY_PROP_CAPACITY:
  4845. + val->intval = spwr_battery_prop_capacity(bat);
  4846. + break;
  4847. +
  4848. + case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
  4849. + val->intval = spwr_battery_prop_capacity_level(bat);
  4850. + break;
  4851. +
  4852. + case POWER_SUPPLY_PROP_MODEL_NAME:
  4853. + val->strval = bat->bix.model;
  4854. + break;
  4855. +
  4856. + case POWER_SUPPLY_PROP_MANUFACTURER:
  4857. + val->strval = bat->bix.oem_info;
  4858. + break;
  4859. +
  4860. + case POWER_SUPPLY_PROP_SERIAL_NUMBER:
  4861. + val->strval = bat->bix.serial;
  4862. + break;
  4863. +
  4864. + default:
  4865. + status = -EINVAL;
  4866. + goto out;
  4867. + }
  4868. +
  4869. +out:
  4870. + mutex_unlock(&bat->lock);
  4871. + return status;
  4872. +}
  4873. +
  4874. +
  4875. +static ssize_t spwr_battery_alarm_show(struct device *dev,
  4876. + struct device_attribute *attr,
  4877. + char *buf)
  4878. +{
  4879. + struct power_supply *psy = dev_get_drvdata(dev);
  4880. + struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
  4881. +
  4882. + return sprintf(buf, "%d\n", bat->alarm * 1000);
  4883. +}
  4884. +
  4885. +static ssize_t spwr_battery_alarm_store(struct device *dev,
  4886. + struct device_attribute *attr,
  4887. + const char *buf, size_t count)
  4888. +{
  4889. + struct power_supply *psy = dev_get_drvdata(dev);
  4890. + struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
  4891. + unsigned long value;
  4892. + int status;
  4893. +
  4894. + status = kstrtoul(buf, 0, &value);
  4895. + if (status)
  4896. + return status;
  4897. +
  4898. + if (!spwr_battery_present(bat))
  4899. + return -ENODEV;
  4900. +
  4901. + status = spwr_battery_set_alarm(bat, value / 1000);
  4902. + if (status)
  4903. + return status;
  4904. +
  4905. + return count;
  4906. +}
  4907. +
  4908. +static const struct device_attribute alarm_attr = {
  4909. + .attr = {.name = "alarm", .mode = 0644},
  4910. + .show = spwr_battery_alarm_show,
  4911. + .store = spwr_battery_alarm_store,
  4912. +};
  4913. +
  4914. +
  4915. +static int spwr_ac_register(struct spwr_ac_device *ac,
  4916. + struct platform_device *pdev,
  4917. + struct ssam_controller *ctrl)
  4918. +{
  4919. + struct power_supply_config psy_cfg = {};
  4920. + __le32 sta;
  4921. + int status;
  4922. +
  4923. + // make sure the device is there and functioning properly
  4924. + status = ssam_bat_get_sta(ctrl, 0x01, 0x01, &sta);
  4925. + if (status)
  4926. + return status;
  4927. +
  4928. + if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
  4929. + return -ENODEV;
  4930. +
  4931. + psy_cfg.drv_data = ac;
  4932. +
  4933. + ac->pdev = pdev;
  4934. + ac->ctrl = ctrl;
  4935. + mutex_init(&ac->lock);
  4936. +
  4937. + snprintf(ac->name, ARRAY_SIZE(ac->name), "ADP0");
  4938. +
  4939. + ac->psy_desc.name = ac->name;
  4940. + ac->psy_desc.type = POWER_SUPPLY_TYPE_MAINS;
  4941. + ac->psy_desc.properties = spwr_ac_props;
  4942. + ac->psy_desc.num_properties = ARRAY_SIZE(spwr_ac_props);
  4943. + ac->psy_desc.get_property = spwr_ac_get_property;
  4944. +
  4945. + ac->psy = power_supply_register(&ac->pdev->dev, &ac->psy_desc, &psy_cfg);
  4946. + if (IS_ERR(ac->psy)) {
  4947. + status = PTR_ERR(ac->psy);
  4948. + goto err_psy;
  4949. + }
  4950. +
  4951. + ac->notif.base.priority = 1;
  4952. + ac->notif.base.fn = spwr_notify_ac;
  4953. + ac->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
  4954. + ac->notif.event.id.target_category = SSAM_SSH_TC_BAT;
  4955. + ac->notif.event.id.instance = 0;
  4956. + ac->notif.event.flags = SSAM_EVENT_SEQUENCED;
  4957. +
  4958. + status = ssam_notifier_register(ctrl, &ac->notif);
  4959. + if (status)
  4960. + goto err_notif;
  4961. +
  4962. + return 0;
  4963. +
  4964. +err_notif:
  4965. + power_supply_unregister(ac->psy);
  4966. +err_psy:
  4967. + mutex_destroy(&ac->lock);
  4968. + return status;
  4969. +}
  4970. +
  4971. +static int spwr_ac_unregister(struct spwr_ac_device *ac)
  4972. +{
  4973. + ssam_notifier_unregister(ac->ctrl, &ac->notif);
  4974. + power_supply_unregister(ac->psy);
  4975. + mutex_destroy(&ac->lock);
  4976. + return 0;
  4977. +}
  4978. +
  4979. +static int spwr_battery_register(struct spwr_battery_device *bat,
  4980. + struct platform_device *pdev,
  4981. + struct ssam_controller *ctrl,
  4982. + const struct ssam_battery_properties *p)
  4983. +{
  4984. + struct power_supply_config psy_cfg = {};
  4985. + __le32 sta;
  4986. + int status;
  4987. +
  4988. + bat->pdev = pdev;
  4989. + bat->ctrl = ctrl;
  4990. + bat->p = p;
  4991. +
  4992. + // make sure the device is there and functioning properly
  4993. + status = ssam_bat_get_sta(ctrl, bat->p->channel, bat->p->instance, &sta);
  4994. + if (status)
  4995. + return status;
  4996. +
  4997. + if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
  4998. + return -ENODEV;
  4999. +
  5000. + status = spwr_battery_update_bix_unlocked(bat);
  5001. + if (status)
  5002. + return status;
  5003. +
  5004. + if (spwr_battery_present(bat)) {
  5005. + u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
  5006. + status = spwr_battery_set_alarm_unlocked(bat, cap_warn);
  5007. + if (status)
  5008. + return status;
  5009. + }
  5010. +
  5011. + snprintf(bat->name, ARRAY_SIZE(bat->name), "BAT%d", bat->p->num);
  5012. + bat->psy_desc.name = bat->name;
  5013. + bat->psy_desc.type = POWER_SUPPLY_TYPE_BATTERY;
  5014. +
  5015. + if (get_unaligned_le32(&bat->bix.power_unit) == SAM_BATTERY_POWER_UNIT_MA) {
  5016. + bat->psy_desc.properties = spwr_battery_props_chg;
  5017. + bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_chg);
  5018. + } else {
  5019. + bat->psy_desc.properties = spwr_battery_props_eng;
  5020. + bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_eng);
  5021. + }
  5022. +
  5023. + bat->psy_desc.get_property = spwr_battery_get_property;
  5024. +
  5025. + mutex_init(&bat->lock);
  5026. + psy_cfg.drv_data = bat;
  5027. +
  5028. + INIT_DELAYED_WORK(&bat->update_work, spwr_battery_update_bst_workfn);
  5029. +
  5030. + bat->psy = power_supply_register(&bat->pdev->dev, &bat->psy_desc, &psy_cfg);
  5031. + if (IS_ERR(bat->psy)) {
  5032. + status = PTR_ERR(bat->psy);
  5033. + goto err_psy;
  5034. + }
  5035. +
  5036. + bat->notif.base.priority = 1;
  5037. + bat->notif.base.fn = spwr_notify_bat;
  5038. + bat->notif.event.reg = p->registry;
  5039. + bat->notif.event.id.target_category = SSAM_SSH_TC_BAT;
  5040. + bat->notif.event.id.instance = 0;
  5041. + bat->notif.event.flags = SSAM_EVENT_SEQUENCED;
  5042. +
  5043. + status = ssam_notifier_register(ctrl, &bat->notif);
  5044. + if (status)
  5045. + goto err_notif;
  5046. +
  5047. + status = device_create_file(&bat->psy->dev, &alarm_attr);
  5048. + if (status)
  5049. + goto err_file;
  5050. +
  5051. + return 0;
  5052. +
  5053. +err_file:
  5054. + ssam_notifier_unregister(ctrl, &bat->notif);
  5055. +err_notif:
  5056. + power_supply_unregister(bat->psy);
  5057. +err_psy:
  5058. + mutex_destroy(&bat->lock);
  5059. + return status;
  5060. +}
  5061. +
  5062. +static void spwr_battery_unregister(struct spwr_battery_device *bat)
  5063. +{
  5064. + ssam_notifier_unregister(bat->ctrl, &bat->notif);
  5065. + cancel_delayed_work_sync(&bat->update_work);
  5066. + device_remove_file(&bat->psy->dev, &alarm_attr);
  5067. + power_supply_unregister(bat->psy);
  5068. + mutex_destroy(&bat->lock);
  5069. +}
  5070. +
  5071. +
  5072. +/*
  5073. + * Battery Driver.
  5074. + */
  5075. +
  5076. +#ifdef CONFIG_PM_SLEEP
  5077. +static int surface_sam_sid_battery_resume(struct device *dev)
  5078. +{
  5079. + struct spwr_battery_device *bat;
  5080. +
  5081. + bat = dev_get_drvdata(dev);
  5082. + return spwr_battery_recheck(bat);
  5083. +}
  5084. +#else
  5085. +#define surface_sam_sid_battery_resume NULL
  5086. +#endif
  5087. +
  5088. +SIMPLE_DEV_PM_OPS(surface_sam_sid_battery_pm, NULL, surface_sam_sid_battery_resume);
  5089. +
  5090. +static int surface_sam_sid_battery_probe(struct platform_device *pdev)
  5091. +{
  5092. + struct spwr_battery_device *bat;
  5093. + struct ssam_controller *ctrl;
  5094. + int status;
  5095. +
  5096. + // link to ec
  5097. + status = ssam_client_bind(&pdev->dev, &ctrl);
  5098. + if (status)
  5099. + return status == -ENXIO ? -EPROBE_DEFER : status;
  5100. +
  5101. + bat = devm_kzalloc(&pdev->dev, sizeof(struct spwr_battery_device), GFP_KERNEL);
  5102. + if (!bat)
  5103. + return -ENOMEM;
  5104. +
  5105. + platform_set_drvdata(pdev, bat);
  5106. + return spwr_battery_register(bat, pdev, ctrl, pdev->dev.platform_data);
  5107. +}
  5108. +
  5109. +static int surface_sam_sid_battery_remove(struct platform_device *pdev)
  5110. +{
  5111. + struct spwr_battery_device *bat;
  5112. +
  5113. + bat = platform_get_drvdata(pdev);
  5114. + spwr_battery_unregister(bat);
  5115. +
  5116. + return 0;
  5117. +}
  5118. +
  5119. +static struct platform_driver surface_sam_sid_battery = {
  5120. + .probe = surface_sam_sid_battery_probe,
  5121. + .remove = surface_sam_sid_battery_remove,
  5122. + .driver = {
  5123. + .name = "surface_sam_sid_battery",
  5124. + .pm = &surface_sam_sid_battery_pm,
  5125. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  5126. + },
  5127. +};
  5128. +
  5129. +
  5130. +/*
  5131. + * AC Driver.
  5132. + */
  5133. +
  5134. +static int surface_sam_sid_ac_probe(struct platform_device *pdev)
  5135. +{
  5136. + struct spwr_ac_device *ac;
  5137. + struct ssam_controller *ctrl;
  5138. + int status;
  5139. +
  5140. + // link to ec
  5141. + status = ssam_client_bind(&pdev->dev, &ctrl);
  5142. + if (status)
  5143. + return status == -ENXIO ? -EPROBE_DEFER : status;
  5144. +
  5145. + ac = devm_kzalloc(&pdev->dev, sizeof(struct spwr_ac_device), GFP_KERNEL);
  5146. + if (!ac)
  5147. + return -ENOMEM;
  5148. +
  5149. + status = spwr_ac_register(ac, pdev, ctrl);
  5150. + if (status)
  5151. + return status;
  5152. +
  5153. + platform_set_drvdata(pdev, ac);
  5154. + return 0;
  5155. +}
  5156. +
  5157. +static int surface_sam_sid_ac_remove(struct platform_device *pdev)
  5158. +{
  5159. + struct spwr_ac_device *ac;
  5160. +
  5161. + ac = platform_get_drvdata(pdev);
  5162. + return spwr_ac_unregister(ac);
  5163. +}
  5164. +
  5165. +static struct platform_driver surface_sam_sid_ac = {
  5166. + .probe = surface_sam_sid_ac_probe,
  5167. + .remove = surface_sam_sid_ac_remove,
  5168. + .driver = {
  5169. + .name = "surface_sam_sid_ac",
  5170. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  5171. + },
  5172. +};
  5173. +
  5174. +
  5175. +static int __init surface_sam_sid_power_init(void)
  5176. +{
  5177. + int status;
  5178. +
  5179. + status = platform_driver_register(&surface_sam_sid_battery);
  5180. + if (status)
  5181. + return status;
  5182. +
  5183. + status = platform_driver_register(&surface_sam_sid_ac);
  5184. + if (status) {
  5185. + platform_driver_unregister(&surface_sam_sid_battery);
  5186. + return status;
  5187. + }
  5188. +
  5189. + return 0;
  5190. +}
  5191. +
  5192. +static void __exit surface_sam_sid_power_exit(void)
  5193. +{
  5194. + platform_driver_unregister(&surface_sam_sid_battery);
  5195. + platform_driver_unregister(&surface_sam_sid_ac);
  5196. +}
  5197. +
  5198. +module_init(surface_sam_sid_power_init);
  5199. +module_exit(surface_sam_sid_power_exit);
  5200. +
  5201. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  5202. +MODULE_DESCRIPTION("Surface Battery/AC Driver for 7th Generation Surface Devices");
  5203. +MODULE_LICENSE("GPL");
  5204. +MODULE_ALIAS("platform:surface_sam_sid_ac");
  5205. +MODULE_ALIAS("platform:surface_sam_sid_battery");
  5206. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_power.h b/drivers/platform/x86/surface_sam/surface_sam_sid_power.h
  5207. new file mode 100644
  5208. index 0000000000000..d8d9509b7d122
  5209. --- /dev/null
  5210. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_power.h
  5211. @@ -0,0 +1,16 @@
  5212. +
  5213. +#ifndef _SURFACE_SAM_SID_POWER_H
  5214. +#define _SURFACE_SAM_SID_POWER_H
  5215. +
  5216. +#include <linux/types.h>
  5217. +#include "surface_sam_ssh.h"
  5218. +
  5219. +
  5220. +struct ssam_battery_properties {
  5221. + struct ssam_event_registry registry;
  5222. + u8 num;
  5223. + u8 channel;
  5224. + u8 instance;
  5225. +};
  5226. +
  5227. +#endif /* _SURFACE_SAM_SID_POWER_H */
  5228. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c
  5229. new file mode 100644
  5230. index 0000000000000..a6059d6796619
  5231. --- /dev/null
  5232. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c
  5233. @@ -0,0 +1,429 @@
  5234. +// SPDX-License-Identifier: GPL-2.0-or-later
  5235. +/*
   5236. + * Microsoft Surface HID (VHF) driver for HID input events via SAM.
  5237. + * Used for keyboard input events on the 7th generation Surface Laptops.
  5238. + */
  5239. +
  5240. +#include <linux/acpi.h>
  5241. +#include <linux/hid.h>
  5242. +#include <linux/input.h>
  5243. +#include <linux/platform_device.h>
  5244. +#include <linux/types.h>
  5245. +
  5246. +#include "surface_sam_ssh.h"
  5247. +#include "surface_sam_sid_vhf.h"
  5248. +
  5249. +#define SID_VHF_INPUT_NAME "Microsoft Surface HID"
  5250. +
  5251. +#define SAM_EVENT_SID_VHF_TC 0x15
  5252. +
  5253. +#define VHF_HID_STARTED 0
  5254. +
  5255. +struct sid_vhf {
  5256. + struct platform_device *dev;
  5257. + struct ssam_controller *ctrl;
  5258. + const struct ssam_hid_properties *p;
  5259. +
  5260. + struct ssam_event_notifier notif;
  5261. +
  5262. + struct hid_device *hid;
  5263. + unsigned long state;
  5264. +};
  5265. +
  5266. +
  5267. +static int sid_vhf_hid_start(struct hid_device *hid)
  5268. +{
  5269. + hid_dbg(hid, "%s\n", __func__);
  5270. + return 0;
  5271. +}
  5272. +
  5273. +static void sid_vhf_hid_stop(struct hid_device *hid)
  5274. +{
  5275. + hid_dbg(hid, "%s\n", __func__);
  5276. +}
  5277. +
  5278. +static int sid_vhf_hid_open(struct hid_device *hid)
  5279. +{
  5280. + struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
  5281. +
  5282. + hid_dbg(hid, "%s\n", __func__);
  5283. +
  5284. + set_bit(VHF_HID_STARTED, &vhf->state);
  5285. + return 0;
  5286. +}
  5287. +
  5288. +static void sid_vhf_hid_close(struct hid_device *hid)
  5289. +{
  5290. +
  5291. + struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
  5292. +
  5293. + hid_dbg(hid, "%s\n", __func__);
  5294. +
  5295. + clear_bit(VHF_HID_STARTED, &vhf->state);
  5296. +}
  5297. +
  5298. +struct surface_sam_sid_vhf_meta_rqst {
  5299. + u8 id;
  5300. + u32 offset;
  5301. + u32 length; // buffer limit on send, length of data received on receive
  5302. + u8 end; // 0x01 if end was reached
  5303. +} __packed;
  5304. +
  5305. +struct vhf_device_metadata_info {
  5306. + u8 len;
  5307. + u8 _2;
  5308. + u8 _3;
  5309. + u8 _4;
  5310. + u8 _5;
  5311. + u8 _6;
  5312. + u8 _7;
  5313. + u16 hid_len; // hid descriptor length
  5314. +} __packed;
  5315. +
  5316. +struct vhf_device_metadata {
  5317. + u32 len;
  5318. + u16 vendor_id;
  5319. + u16 product_id;
  5320. + u8 _1[24];
  5321. +} __packed;
  5322. +
  5323. +union vhf_buffer_data {
  5324. + struct vhf_device_metadata_info info;
  5325. + u8 pld[0x76];
  5326. + struct vhf_device_metadata meta;
  5327. +};
  5328. +
  5329. +struct surface_sam_sid_vhf_meta_resp {
  5330. + struct surface_sam_sid_vhf_meta_rqst rqst;
  5331. + union vhf_buffer_data data;
  5332. +} __packed;
  5333. +
  5334. +
  5335. +static int vhf_get_metadata(struct ssam_controller *ctrl, u8 iid,
  5336. + struct vhf_device_metadata *meta)
  5337. +{
  5338. + struct surface_sam_sid_vhf_meta_resp data = {};
  5339. + struct ssam_request rqst;
  5340. + struct ssam_response rsp;
  5341. + int status;
  5342. +
  5343. + data.rqst.id = 2;
  5344. + data.rqst.offset = 0;
  5345. + data.rqst.length = 0x76;
  5346. + data.rqst.end = 0;
  5347. +
  5348. + rqst.target_category = SSAM_SSH_TC_HID;;
  5349. + rqst.command_id = 0x04;
  5350. + rqst.instance_id = iid;
  5351. + rqst.channel = 0x02;
  5352. + rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
  5353. + rqst.length = sizeof(struct surface_sam_sid_vhf_meta_rqst);
  5354. + rqst.payload = (u8 *)&data.rqst;
  5355. +
  5356. + rsp.capacity = sizeof(struct surface_sam_sid_vhf_meta_resp);
  5357. + rsp.length = 0;
  5358. + rsp.pointer = (u8 *)&data;
  5359. +
  5360. + status = ssam_request_sync(ctrl, &rqst, &rsp);
  5361. + if (status)
  5362. + return status;
  5363. +
  5364. + *meta = data.data.meta;
  5365. +
  5366. + return 0;
  5367. +}
  5368. +
  5369. +static int vhf_get_hid_descriptor(struct hid_device *hid, u8 iid, u8 **desc, int *size)
  5370. +{
  5371. + struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
  5372. + struct surface_sam_sid_vhf_meta_resp data = {};
  5373. + struct ssam_request rqst;
  5374. + struct ssam_response rsp;
  5375. + int status, len;
  5376. + u8 *buf;
  5377. +
  5378. + data.rqst.id = 0;
  5379. + data.rqst.offset = 0;
  5380. + data.rqst.length = 0x76;
  5381. + data.rqst.end = 0;
  5382. +
  5383. + rqst.target_category = SSAM_SSH_TC_HID;
  5384. + rqst.command_id = 0x04;
  5385. + rqst.instance_id = iid;
  5386. + rqst.channel = 0x02;
  5387. + rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
  5388. + rqst.length = sizeof(struct surface_sam_sid_vhf_meta_rqst);
  5389. + rqst.payload = (u8 *)&data.rqst;
  5390. +
  5391. + rsp.capacity = sizeof(struct surface_sam_sid_vhf_meta_resp);
  5392. + rsp.length = 0;
  5393. + rsp.pointer = (u8 *)&data;
  5394. +
  5395. + // first fetch 00 to get the total length
  5396. + status = ssam_request_sync(vhf->ctrl, &rqst, &rsp);
  5397. + if (status)
  5398. + return status;
  5399. +
  5400. + len = data.data.info.hid_len;
  5401. +
  5402. + // allocate a buffer for the descriptor
  5403. + buf = kzalloc(len, GFP_KERNEL);
  5404. +
  5405. + // then, iterate and write into buffer, copying out bytes
  5406. + data.rqst.id = 1;
  5407. + data.rqst.offset = 0;
  5408. + data.rqst.length = 0x76;
  5409. + data.rqst.end = 0;
  5410. +
  5411. + while (!data.rqst.end && data.rqst.offset < len) {
  5412. + status = ssam_request_sync(vhf->ctrl, &rqst, &rsp);
  5413. + if (status) {
  5414. + kfree(buf);
  5415. + return status;
  5416. + }
  5417. + memcpy(buf + data.rqst.offset, data.data.pld, data.rqst.length);
  5418. +
  5419. + data.rqst.offset += data.rqst.length;
  5420. + }
  5421. +
  5422. + *desc = buf;
  5423. + *size = len;
  5424. +
  5425. + return 0;
  5426. +}
  5427. +
  5428. +static int sid_vhf_hid_parse(struct hid_device *hid)
  5429. +{
  5430. + struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
  5431. + int ret = 0, size;
  5432. + u8 *buf;
  5433. +
  5434. + ret = vhf_get_hid_descriptor(hid, vhf->p->instance, &buf, &size);
  5435. + if (ret != 0) {
  5436. + hid_err(hid, "Failed to read HID descriptor from device: %d\n", ret);
  5437. + return -EIO;
  5438. + }
  5439. + hid_dbg(hid, "HID descriptor of device:");
  5440. + print_hex_dump_debug("descriptor:", DUMP_PREFIX_OFFSET, 16, 1, buf, size, false);
  5441. +
  5442. + ret = hid_parse_report(hid, buf, size);
  5443. + kfree(buf);
  5444. + return ret;
  5445. +
  5446. +}
  5447. +
  5448. +static int sid_vhf_hid_raw_request(struct hid_device *hid, unsigned char
  5449. + reportnum, u8 *buf, size_t len, unsigned char rtype, int
  5450. + reqtype)
  5451. +{
  5452. + struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
  5453. + struct ssam_request rqst;
  5454. + struct ssam_response rsp;
  5455. + int status;
  5456. + u8 cid;
  5457. +
  5458. + hid_dbg(hid, "%s: reportnum=%#04x rtype=%i reqtype=%i\n", __func__, reportnum, rtype, reqtype);
  5459. + print_hex_dump_debug("report:", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
  5460. +
  5461. + // Byte 0 is the report number. Report data starts at byte 1.
  5462. + buf[0] = reportnum;
  5463. +
  5464. + switch (rtype) {
  5465. + case HID_OUTPUT_REPORT:
  5466. + cid = 0x01;
  5467. + break;
  5468. + case HID_FEATURE_REPORT:
  5469. + switch (reqtype) {
  5470. + case HID_REQ_GET_REPORT:
  5471. + // The EC doesn't respond to GET FEATURE for these touchpad reports
  5472. + // we immediately discard to avoid waiting for a timeout.
  5473. + if (reportnum == 6 || reportnum == 7 || reportnum == 8 || reportnum == 9 || reportnum == 0x0b) {
  5474. + hid_dbg(hid, "%s: skipping get feature report for 0x%02x\n", __func__, reportnum);
  5475. + return 0;
  5476. + }
  5477. +
  5478. + cid = 0x02;
  5479. + break;
  5480. + case HID_REQ_SET_REPORT:
  5481. + cid = 0x03;
  5482. + break;
  5483. + default:
  5484. + hid_err(hid, "%s: unknown req type 0x%02x\n", __func__, rtype);
  5485. + return -EIO;
  5486. + }
  5487. + break;
  5488. + default:
  5489. + hid_err(hid, "%s: unknown report type 0x%02x\n", __func__, reportnum);
  5490. + return -EIO;
  5491. + }
  5492. +
  5493. + rqst.target_category = SSAM_SSH_TC_HID;
  5494. + rqst.channel = 0x02;
  5495. + rqst.instance_id = vhf->p->instance;
  5496. + rqst.command_id = cid;
  5497. + rqst.flags = reqtype == HID_REQ_GET_REPORT ? SSAM_REQUEST_HAS_RESPONSE : 0;
  5498. + rqst.length = reqtype == HID_REQ_GET_REPORT ? 1 : len;
  5499. + rqst.payload = buf;
  5500. +
  5501. + rsp.capacity = len;
  5502. + rsp.length = 0;
  5503. + rsp.pointer = buf;
  5504. +
  5505. + hid_dbg(hid, "%s: sending to cid=%#04x snc=%#04x\n", __func__, cid, HID_REQ_GET_REPORT == reqtype);
  5506. +
  5507. + status = ssam_request_sync(vhf->ctrl, &rqst, &rsp);
  5508. + hid_dbg(hid, "%s: status %i\n", __func__, status);
  5509. +
  5510. + if (status)
  5511. + return status;
  5512. +
  5513. + if (rsp.length > 0)
  5514. + print_hex_dump_debug("response:", DUMP_PREFIX_OFFSET, 16, 1, rsp.pointer, rsp.length, false);
  5515. +
  5516. + return rsp.length;
  5517. +}
  5518. +
  5519. +static struct hid_ll_driver sid_vhf_hid_ll_driver = {
  5520. + .start = sid_vhf_hid_start,
  5521. + .stop = sid_vhf_hid_stop,
  5522. + .open = sid_vhf_hid_open,
  5523. + .close = sid_vhf_hid_close,
  5524. + .parse = sid_vhf_hid_parse,
  5525. + .raw_request = sid_vhf_hid_raw_request,
  5526. +};
  5527. +
  5528. +
  5529. +static struct hid_device *sid_vhf_create_hid_device(struct platform_device *pdev, struct vhf_device_metadata *meta)
  5530. +{
  5531. + struct hid_device *hid;
  5532. +
  5533. + hid = hid_allocate_device();
  5534. + if (IS_ERR(hid))
  5535. + return hid;
  5536. +
  5537. + hid->dev.parent = &pdev->dev;
  5538. +
  5539. + hid->bus = BUS_VIRTUAL;
  5540. + hid->vendor = meta->vendor_id;
  5541. + hid->product = meta->product_id;
  5542. +
  5543. + hid->ll_driver = &sid_vhf_hid_ll_driver;
  5544. +
  5545. + sprintf(hid->name, "%s", SID_VHF_INPUT_NAME);
  5546. +
  5547. + return hid;
  5548. +}
  5549. +
  5550. +static u32 sid_vhf_event_handler(struct ssam_notifier_block *nb, const struct ssam_event *event)
  5551. +{
  5552. + struct sid_vhf *vhf = container_of(nb, struct sid_vhf, notif.base);
  5553. + int status;
  5554. +
  5555. + if (event->target_category != SSAM_SSH_TC_HID)
  5556. + return 0;
  5557. +
  5558. + if (event->channel != 0x02)
  5559. + return 0;
  5560. +
  5561. + if (event->instance_id != vhf->p->instance)
  5562. + return 0;
  5563. +
  5564. + if (event->command_id != 0x00 && event->command_id != 0x03 && event->command_id != 0x04)
  5565. + return 0;
  5566. +
  5567. + // skip if HID hasn't started yet
  5568. + if (!test_bit(VHF_HID_STARTED, &vhf->state))
  5569. + return SSAM_NOTIF_HANDLED;
  5570. +
  5571. + status = hid_input_report(vhf->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0);
  5572. + return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
  5573. +}
  5574. +
  5575. +static int surface_sam_sid_vhf_probe(struct platform_device *pdev)
  5576. +{
  5577. + const struct ssam_hid_properties *p = pdev->dev.platform_data;
  5578. + struct ssam_controller *ctrl;
  5579. + struct sid_vhf *vhf;
  5580. + struct vhf_device_metadata meta = {};
  5581. + struct hid_device *hid;
  5582. + int status;
  5583. +
  5584. + // add device link to EC
  5585. + status = ssam_client_bind(&pdev->dev, &ctrl);
  5586. + if (status)
  5587. + return status == -ENXIO ? -EPROBE_DEFER : status;
  5588. +
  5589. + vhf = kzalloc(sizeof(struct sid_vhf), GFP_KERNEL);
  5590. + if (!vhf)
  5591. + return -ENOMEM;
  5592. +
  5593. + status = vhf_get_metadata(ctrl, p->instance, &meta);
  5594. + if (status)
  5595. + goto err_create_hid;
  5596. +
  5597. + hid = sid_vhf_create_hid_device(pdev, &meta);
  5598. + if (IS_ERR(hid)) {
  5599. + status = PTR_ERR(hid);
  5600. + goto err_create_hid;
  5601. + }
  5602. +
  5603. + vhf->dev = pdev;
  5604. + vhf->ctrl = ctrl;
  5605. + vhf->p = pdev->dev.platform_data;
  5606. + vhf->hid = hid;
  5607. +
  5608. + vhf->notif.base.priority = 1;
  5609. + vhf->notif.base.fn = sid_vhf_event_handler;
  5610. + vhf->notif.event.reg = p->registry;
  5611. + vhf->notif.event.id.target_category = SSAM_SSH_TC_HID;
  5612. + vhf->notif.event.id.instance = p->instance;
  5613. + vhf->notif.event.flags = 0;
  5614. +
  5615. + platform_set_drvdata(pdev, vhf);
  5616. +
  5617. + status = ssam_notifier_register(ctrl, &vhf->notif);
  5618. + if (status)
  5619. + goto err_notif;
  5620. +
  5621. + status = hid_add_device(hid);
  5622. + if (status)
  5623. + goto err_add_hid;
  5624. +
  5625. + return 0;
  5626. +
  5627. +err_add_hid:
  5628. + ssam_notifier_unregister(ctrl, &vhf->notif);
  5629. +err_notif:
  5630. + hid_destroy_device(hid);
  5631. + platform_set_drvdata(pdev, NULL);
  5632. +err_create_hid:
  5633. + kfree(vhf);
  5634. + return status;
  5635. +}
  5636. +
  5637. +static int surface_sam_sid_vhf_remove(struct platform_device *pdev)
  5638. +{
  5639. + struct sid_vhf *vhf = platform_get_drvdata(pdev);
  5640. +
  5641. + ssam_notifier_unregister(vhf->ctrl, &vhf->notif);
  5642. + hid_destroy_device(vhf->hid);
  5643. + kfree(vhf);
  5644. +
  5645. + platform_set_drvdata(pdev, NULL);
  5646. + return 0;
  5647. +}
  5648. +
  5649. +static struct platform_driver surface_sam_sid_vhf = {
  5650. + .probe = surface_sam_sid_vhf_probe,
  5651. + .remove = surface_sam_sid_vhf_remove,
  5652. + .driver = {
  5653. + .name = "surface_sam_sid_vhf",
  5654. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  5655. + },
  5656. +};
  5657. +module_platform_driver(surface_sam_sid_vhf);
  5658. +
  5659. +MODULE_AUTHOR("Blaž Hrastnik <blaz@mxxn.io>");
  5660. +MODULE_DESCRIPTION("Driver for HID devices connected via Surface SAM");
  5661. +MODULE_LICENSE("GPL");
  5662. +MODULE_ALIAS("platform:surface_sam_sid_vhf");
  5663. diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h
  5664. new file mode 100644
  5665. index 0000000000000..d956de5cf877a
  5666. --- /dev/null
  5667. +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h
  5668. @@ -0,0 +1,14 @@
  5669. +
  5670. +#ifndef _SURFACE_SAM_SID_VHF_H
  5671. +#define _SURFACE_SAM_SID_VHF_H
  5672. +
  5673. +#include <linux/types.h>
  5674. +#include "surface_sam_ssh.h"
  5675. +
  5676. +
  5677. +struct ssam_hid_properties {
  5678. + struct ssam_event_registry registry;
  5679. + u8 instance;
  5680. +};
  5681. +
  5682. +#endif /* _SURFACE_SAM_SID_VHF_H */
  5683. diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh.c b/drivers/platform/x86/surface_sam/surface_sam_ssh.c
  5684. new file mode 100644
  5685. index 0000000000000..4551b75570f22
  5686. --- /dev/null
  5687. +++ b/drivers/platform/x86/surface_sam/surface_sam_ssh.c
  5688. @@ -0,0 +1,5329 @@
  5689. +// SPDX-License-Identifier: GPL-2.0-or-later
  5690. +/*
  5691. + * Surface Serial Hub (SSH) driver for communication with the Surface/System
  5692. + * Aggregator Module.
  5693. + */
  5694. +
  5695. +#include <asm/unaligned.h>
  5696. +#include <linux/acpi.h>
  5697. +#include <linux/atomic.h>
  5698. +#include <linux/completion.h>
  5699. +#include <linux/crc-ccitt.h>
  5700. +#include <linux/dmaengine.h>
  5701. +#include <linux/gpio/consumer.h>
  5702. +#include <linux/interrupt.h>
  5703. +#include <linux/jiffies.h>
  5704. +#include <linux/kernel.h>
  5705. +#include <linux/kfifo.h>
  5706. +#include <linux/kref.h>
  5707. +#include <linux/kthread.h>
  5708. +#include <linux/ktime.h>
  5709. +#include <linux/list.h>
  5710. +#include <linux/mutex.h>
  5711. +#include <linux/pm.h>
  5712. +#include <linux/refcount.h>
  5713. +#include <linux/serdev.h>
  5714. +#include <linux/spinlock.h>
  5715. +#include <linux/sysfs.h>
  5716. +#include <linux/workqueue.h>
  5717. +
  5718. +#include "surface_sam_ssh.h"
  5719. +
  5720. +#define CREATE_TRACE_POINTS
  5721. +#include "surface_sam_ssh_trace.h"
  5722. +
  5723. +
  5724. +/* -- Error injection helpers. ---------------------------------------------- */
  5725. +
  5726. +#ifdef CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION
  5727. +#define noinline_if_inject noinline
  5728. +#else /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */
  5729. +#define noinline_if_inject inline
  5730. +#endif /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */
  5731. +
  5732. +
  5733. +/* -- SSH protocol utility functions and definitions. ----------------------- */
  5734. +
  5735. +/*
  5736. + * The number of reserved event IDs, used for registering an SSH event
  5737. + * handler. Valid event IDs are numbers below or equal to this value, with
  5738. + * exception of zero, which is not an event ID. Thus, this is also the
  5739. + * absolute maximum number of event handlers that can be registered.
  5740. + */
  5741. +#define SSH_NUM_EVENTS 34
  5742. +
  5743. +/*
  5744. + * The number of communication channels used in the protocol.
  5745. + */
  5746. +#define SSH_NUM_CHANNELS 2
  5747. +
  5748. +
  5749. +static inline u16 ssh_crc(const u8 *buf, size_t len)
  5750. +{
  5751. + return crc_ccitt_false(0xffff, buf, len);
  5752. +}
  5753. +
  5754. +static inline u16 ssh_rqid_next_valid(u16 rqid)
  5755. +{
  5756. + return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u;
  5757. +}
  5758. +
  5759. +static inline u16 ssh_rqid_to_event(u16 rqid)
  5760. +{
  5761. + return rqid - 1u;
  5762. +}
  5763. +
  5764. +static inline bool ssh_rqid_is_event(u16 rqid)
  5765. +{
  5766. + return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS;
  5767. +}
  5768. +
  5769. +static inline int ssh_tc_to_rqid(u8 tc)
  5770. +{
  5771. + return tc;
  5772. +}
  5773. +
  5774. +static inline u8 ssh_channel_to_index(u8 channel)
  5775. +{
  5776. + return channel - 1u;
  5777. +}
  5778. +
  5779. +static inline bool ssh_channel_is_valid(u8 channel)
  5780. +{
  5781. + return ssh_channel_to_index(channel) < SSH_NUM_CHANNELS;
  5782. +}
  5783. +
  5784. +
  5785. +/* -- Safe counters. -------------------------------------------------------- */
  5786. +
  5787. +struct ssh_seq_counter {
  5788. + u8 value;
  5789. +};
  5790. +
  5791. +struct ssh_rqid_counter {
  5792. + u16 value;
  5793. +};
  5794. +
  5795. +static inline void ssh_seq_reset(struct ssh_seq_counter *c)
  5796. +{
  5797. + WRITE_ONCE(c->value, 0);
  5798. +}
  5799. +
  5800. +static inline u8 ssh_seq_next(struct ssh_seq_counter *c)
  5801. +{
  5802. + u8 old = READ_ONCE(c->value);
  5803. + u8 new = old + 1;
  5804. + u8 ret;
  5805. +
  5806. + while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
  5807. + old = ret;
  5808. + new = old + 1;
  5809. + }
  5810. +
  5811. + return old;
  5812. +}
  5813. +
  5814. +static inline void ssh_rqid_reset(struct ssh_rqid_counter *c)
  5815. +{
  5816. + WRITE_ONCE(c->value, 0);
  5817. +}
  5818. +
  5819. +static inline u16 ssh_rqid_next(struct ssh_rqid_counter *c)
  5820. +{
  5821. + u16 old = READ_ONCE(c->value);
  5822. + u16 new = ssh_rqid_next_valid(old);
  5823. + u16 ret;
  5824. +
  5825. + while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
  5826. + old = ret;
  5827. + new = ssh_rqid_next_valid(old);
  5828. + }
  5829. +
  5830. + return old;
  5831. +}
  5832. +
  5833. +
  5834. +/* -- Builder functions for SAM-over-SSH messages. -------------------------- */
  5835. +
  5836. +struct msgbuf {
  5837. + u8 *begin;
  5838. + u8 *end;
  5839. + u8 *ptr;
  5840. +};
  5841. +
  5842. +static inline void msgb_init(struct msgbuf *msgb, u8 *ptr, size_t cap)
  5843. +{
  5844. + msgb->begin = ptr;
  5845. + msgb->end = ptr + cap;
  5846. + msgb->ptr = ptr;
  5847. +}
  5848. +
  5849. +static inline size_t msgb_bytes_used(const struct msgbuf *msgb)
  5850. +{
  5851. + return msgb->ptr - msgb->begin;
  5852. +}
  5853. +
  5854. +static inline void msgb_push_u16(struct msgbuf *msgb, u16 value)
  5855. +{
  5856. + if (WARN_ON(msgb->ptr + sizeof(u16) > msgb->end))
  5857. + return;
  5858. +
  5859. + put_unaligned_le16(value, msgb->ptr);
  5860. + msgb->ptr += sizeof(u16);
  5861. +}
  5862. +
  5863. +static inline void msgb_push_syn(struct msgbuf *msgb)
  5864. +{
  5865. + msgb_push_u16(msgb, SSH_MSG_SYN);
  5866. +}
  5867. +
  5868. +static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len)
  5869. +{
  5870. + msgb->ptr = memcpy(msgb->ptr, buf, len) + len;
  5871. +}
  5872. +
  5873. +static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len)
  5874. +{
  5875. + msgb_push_u16(msgb, ssh_crc(buf, len));
  5876. +}
  5877. +
  5878. +static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq)
  5879. +{
  5880. + struct ssh_frame *frame = (struct ssh_frame *)msgb->ptr;
  5881. + const u8 *const begin = msgb->ptr;
  5882. +
  5883. + if (WARN_ON(msgb->ptr + sizeof(*frame) > msgb->end))
  5884. + return;
  5885. +
  5886. + frame->type = ty;
  5887. + put_unaligned_le16(len, &frame->len);
  5888. + frame->seq = seq;
  5889. +
  5890. + msgb->ptr += sizeof(*frame);
  5891. + msgb_push_crc(msgb, begin, msgb->ptr - begin);
  5892. +}
  5893. +
  5894. +static inline void msgb_push_ack(struct msgbuf *msgb, u8 seq)
  5895. +{
  5896. + // SYN
  5897. + msgb_push_syn(msgb);
  5898. +
  5899. + // ACK-type frame + CRC
  5900. + msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq);
  5901. +
  5902. + // payload CRC (ACK-type frames do not have a payload)
  5903. + msgb_push_crc(msgb, msgb->ptr, 0);
  5904. +}
  5905. +
   5906. +static inline void msgb_push_nak(struct msgbuf *msgb)
   5907. +{
   5908. + // SYN
   5909. + msgb_push_syn(msgb);
   5910. +
   5911. + // NAK-type frame + CRC
   5912. + msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00);
   5913. +
   5914. + // payload CRC (NAK-type frames do not have a payload)
   5915. + msgb_push_crc(msgb, msgb->ptr, 0);
   5916. +}
  5917. +
  5918. +static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid,
  5919. + const struct ssam_request *rqst)
  5920. +{
  5921. + struct ssh_command *cmd;
  5922. + const u8 *cmd_begin;
  5923. + const u8 type = SSH_FRAME_TYPE_DATA_SEQ;
  5924. +
  5925. + // SYN
  5926. + msgb_push_syn(msgb);
  5927. +
  5928. + // command frame + crc
  5929. + msgb_push_frame(msgb, type, sizeof(*cmd) + rqst->length, seq);
  5930. +
  5931. + // frame payload: command struct + payload
  5932. + if (WARN_ON(msgb->ptr + sizeof(*cmd) > msgb->end))
  5933. + return;
  5934. +
  5935. + cmd_begin = msgb->ptr;
  5936. + cmd = (struct ssh_command *)msgb->ptr;
  5937. +
  5938. + cmd->type = SSH_PLD_TYPE_CMD;
  5939. + cmd->tc = rqst->target_category;
  5940. + cmd->chn_out = rqst->channel;
  5941. + cmd->chn_in = 0x00;
  5942. + cmd->iid = rqst->instance_id;
  5943. + put_unaligned_le16(rqid, &cmd->rqid);
  5944. + cmd->cid = rqst->command_id;
  5945. +
  5946. + msgb->ptr += sizeof(*cmd);
  5947. +
  5948. + // command payload
  5949. + msgb_push_buf(msgb, rqst->payload, rqst->length);
  5950. +
  5951. + // crc for command struct + payload
  5952. + msgb_push_crc(msgb, cmd_begin, msgb->ptr - cmd_begin);
  5953. +}
  5954. +
  5955. +
  5956. +/* -- Parser functions and utilities for SAM-over-SSH messages. ------------- */
  5957. +
  5958. +struct sshp_buf {
  5959. + u8 *ptr;
  5960. + size_t len;
  5961. + size_t cap;
  5962. +};
  5963. +
  5964. +
  5965. +static inline bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc)
  5966. +{
  5967. + u16 actual = ssh_crc(src->ptr, src->len);
  5968. + u16 expected = get_unaligned_le16(crc);
  5969. +
  5970. + return actual == expected;
  5971. +}
  5972. +
  5973. +static bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem)
  5974. +{
  5975. + size_t i;
  5976. +
  5977. + for (i = 0; i < src->len - 1; i++) {
  5978. + if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) {
  5979. + rem->ptr = src->ptr + i;
  5980. + rem->len = src->len - i;
  5981. + return true;
  5982. + }
  5983. + }
  5984. +
  5985. + if (unlikely(src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) {
  5986. + rem->ptr = src->ptr + src->len - 1;
  5987. + rem->len = 1;
  5988. + return false;
  5989. + } else {
  5990. + rem->ptr = src->ptr + src->len;
  5991. + rem->len = 0;
  5992. + return false;
  5993. + }
  5994. +}
  5995. +
  5996. +static bool sshp_starts_with_syn(const struct ssam_span *src)
  5997. +{
  5998. + return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN;
  5999. +}
  6000. +
  6001. +static int sshp_parse_frame(const struct device *dev,
  6002. + const struct ssam_span *source,
  6003. + struct ssh_frame **frame,
  6004. + struct ssam_span *payload,
  6005. + size_t maxlen)
  6006. +{
  6007. + struct ssam_span sf;
  6008. + struct ssam_span sp;
  6009. +
  6010. + // initialize output
  6011. + *frame = NULL;
  6012. + payload->ptr = NULL;
  6013. + payload->len = 0;
  6014. +
  6015. + if (!sshp_starts_with_syn(source)) {
  6016. + dev_warn(dev, "rx: parser: invalid start of frame\n");
  6017. + return -ENOMSG;
  6018. + }
  6019. +
  6020. + // check for minimum packet length
  6021. + if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) {
  6022. + dev_dbg(dev, "rx: parser: not enough data for frame\n");
  6023. + return 0;
  6024. + }
  6025. +
  6026. + // pin down frame
  6027. + sf.ptr = source->ptr + sizeof(u16);
  6028. + sf.len = sizeof(struct ssh_frame);
  6029. +
  6030. + // validate frame CRC
  6031. + if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) {
  6032. + dev_warn(dev, "rx: parser: invalid frame CRC\n");
  6033. + return -EBADMSG;
  6034. + }
  6035. +
  6036. + // ensure packet does not exceed maximum length
  6037. + if (unlikely(((struct ssh_frame *)sf.ptr)->len > maxlen)) {
  6038. + dev_warn(dev, "rx: parser: frame too large: %u bytes\n",
  6039. + ((struct ssh_frame *)sf.ptr)->len);
  6040. + return -EMSGSIZE;
  6041. + }
  6042. +
  6043. + // pin down payload
  6044. + sp.ptr = sf.ptr + sf.len + sizeof(u16);
  6045. + sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len);
  6046. +
  6047. + // check for frame + payload length
  6048. + if (source->len < SSH_MESSAGE_LENGTH(sp.len)) {
  6049. + dev_dbg(dev, "rx: parser: not enough data for payload\n");
  6050. + return 0;
  6051. + }
  6052. +
  6053. + // validate payload crc
  6054. + if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) {
  6055. + dev_warn(dev, "rx: parser: invalid payload CRC\n");
  6056. + return -EBADMSG;
  6057. + }
  6058. +
  6059. + *frame = (struct ssh_frame *)sf.ptr;
  6060. + *payload = sp;
  6061. +
  6062. + dev_dbg(dev, "rx: parser: valid frame found (type: 0x%02x, len: %u)\n",
  6063. + (*frame)->type, (*frame)->len);
  6064. +
  6065. + return 0;
  6066. +}
  6067. +
  6068. +static int sshp_parse_command(const struct device *dev,
  6069. + const struct ssam_span *source,
  6070. + struct ssh_command **command,
  6071. + struct ssam_span *command_data)
  6072. +{
  6073. + // check for minimum length
  6074. + if (unlikely(source->len < sizeof(struct ssh_command))) {
  6075. + *command = NULL;
  6076. + command_data->ptr = NULL;
  6077. + command_data->len = 0;
  6078. +
  6079. + dev_err(dev, "rx: parser: command payload is too short\n");
  6080. + return -ENOMSG;
  6081. + }
  6082. +
  6083. + *command = (struct ssh_command *)source->ptr;
  6084. + command_data->ptr = source->ptr + sizeof(struct ssh_command);
  6085. + command_data->len = source->len - sizeof(struct ssh_command);
  6086. +
  6087. + dev_dbg(dev, "rx: parser: valid command found (tc: 0x%02x,"
  6088. + " cid: 0x%02x)\n", (*command)->tc, (*command)->cid);
  6089. +
  6090. + return 0;
  6091. +}
  6092. +
  6093. +
  6094. +static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap)
  6095. +{
  6096. + buf->ptr = ptr;
  6097. + buf->len = 0;
  6098. + buf->cap = cap;
  6099. +}
  6100. +
  6101. +static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags)
  6102. +{
  6103. + u8 *ptr;
  6104. +
  6105. + ptr = kzalloc(cap, flags);
  6106. + if (!ptr)
  6107. + return -ENOMEM;
  6108. +
  6109. + sshp_buf_init(buf, ptr, cap);
  6110. + return 0;
  6111. +
  6112. +}
  6113. +
  6114. +static inline void sshp_buf_free(struct sshp_buf *buf)
  6115. +{
  6116. + kfree(buf->ptr);
  6117. + buf->ptr = NULL;
  6118. + buf->len = 0;
  6119. + buf->cap = 0;
  6120. +}
  6121. +
  6122. +static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n)
  6123. +{
  6124. + memmove(buf->ptr, buf->ptr + n, buf->len - n);
  6125. + buf->len -= n;
  6126. +}
  6127. +
  6128. +static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf,
  6129. + struct kfifo *fifo)
  6130. +{
  6131. + size_t n;
  6132. +
  6133. + n = kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len);
  6134. + buf->len += n;
  6135. +
  6136. + return n;
  6137. +}
  6138. +
  6139. +static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset,
  6140. + struct ssam_span *span)
  6141. +{
  6142. + span->ptr = buf->ptr + offset;
  6143. + span->len = buf->len - offset;
  6144. +}
  6145. +
  6146. +
  6147. +/* -- Packet transport layer (ptl). ----------------------------------------- */
  6148. +/*
  6149. + * To simplify reasoning about the code below, we define a few concepts. The
  6150. + * system below is similar to a state-machine for packets, however, there are
  6151. + * too many states to explicitly write them down. To (somewhat) manage the
  6152. + * states and packets we rely on flags, reference counting, and some simple
  6153. + * concepts. State transitions are triggered by actions.
  6154. + *
  6155. + * >> Actions <<
  6156. + *
  6157. + * - submit
  6158. + * - transmission start (process next item in queue)
  6159. + * - transmission finished (guaranteed to never be parallel to transmission
  6160. + * start)
  6161. + * - ACK received
  6162. + * - NAK received (this is equivalent to issuing re-submit for all pending
  6163. + * packets)
  6164. + * - timeout (this is equivalent to re-issuing a submit or canceling)
  6165. + * - cancel (non-pending and pending)
  6166. + *
  6167. + * >> Data Structures, Packet Ownership, General Overview <<
  6168. + *
  6169. + * The code below employs two main data structures: The packet queue, containing
  6170. + * all packets scheduled for transmission, and the set of pending packets,
  6171. + * containing all packets awaiting an ACK.
  6172. + *
  6173. + * Shared ownership of a packet is controlled via reference counting. Inside the
  6174. + * transmission system are a total of five packet owners:
  6175. + *
  6176. + * - the packet queue,
  6177. + * - the pending set,
  6178. + * - the transmitter thread,
  6179. + * - the receiver thread (via ACKing), and
  6180. + * - the timeout work item.
  6181. + *
  6182. + * Normal operation is as follows: The initial reference of the packet is
  6183. + * obtained by submitting the packet and queueing it. The receiver thread
  6184. + * takes packets from the queue. By doing this, it does not increment the
  6185. + * refcount but takes over the reference (removing it from the queue).
  6186. + * If the packet is sequenced (i.e. needs to be ACKed by the client), the
  6187. + * transmitter thread sets-up the timeout and adds the packet to the pending set
  6188. + * before starting to transmit it. As the timeout is handled by a reaper task,
  6189. + * no additional reference for it is needed. After the transmit is done, the
  6190. + * reference held by the transmitter thread is dropped. If the packet is
  6191. + * unsequenced (i.e. does not need an ACK), the packet is completed by the
  6192. + * transmitter thread before dropping that reference.
  6193. + *
  6194. + * On receipt of an ACK, the receiver thread removes and obtains the reference to
  6195. + * the packet from the pending set. On success, the receiver thread will then
  6196. + * complete the packet and drop its reference.
  6197. + *
  6198. + * On error, the completion callback is immediately run on the thread on which
  6199. + * the error was detected.
  6200. + *
  6201. + * To ensure that a packet eventually leaves the system it is marked as "locked"
  6202. + * directly before it is going to be completed or when it is canceled. Marking a
  6203. + * packet as "locked" has the effect that passing and creating new references
  6204. + * of the packet will be blocked. This means that the packet cannot be added
  6205. + * to the queue, the pending set, and the timeout, or be picked up by the
  6206. + * transmitter thread or receiver thread. To remove a packet from the system it
  6207. + * has to be marked as locked and subsequently all references from the data
  6208. + * structures (queue, pending) have to be removed. References held by threads
  6209. + * will eventually be dropped automatically as their execution progresses.
  6210. + *
  6211. + * Note that the packet completion callback is, in case of success and for a
  6212. + * sequenced packet, guaranteed to run on the receiver thread, thus providing a
  6213. + * way to reliably identify responses to the packet. The packet completion
  6214. + * callback is only run once and it does not indicate that the packet has fully
  6215. + * left the system. In case of re-submission (and with somewhat unlikely
  6216. + * timing), it may be possible that the packet is being re-transmitted while the
  6217. + * completion callback runs. Completion will occur both on success and internal
  6218. + * error, as well as when the packet is canceled.
  6219. + *
  6220. + * >> Flags <<
  6221. + *
  6222. + * Flags are used to indicate the state and progression of a packet. Some flags
  6223. + * have stricter guarantees than other:
  6224. + *
  6225. + * - locked
  6226. + * Indicates if the packet is locked. If the packet is locked, passing and/or
  6227. + * creating additional references to the packet is forbidden. The packet thus
  6228. + * may not be queued, dequeued, or removed or added to the pending set. Note
  6229. + * that the packet state flags may still change (e.g. it may be marked as
  6230. + * ACKed, transmitted, ...).
  6231. + *
  6232. + * - completed
  6233. + * Indicates if the packet completion has been run or is about to be run. This
  6234. + * flag is used to ensure that the packet completion callback is only run
  6235. + * once.
  6236. + *
  6237. + * - queued
  6238. + * Indicates if a packet is present in the submission queue or not. This flag
  6239. + * must only be modified with the queue lock held, and must be coherent
  6240. + * with the presence of the packet in the queue.
  6241. + *
  6242. + * - pending
  6243. + * Indicates if a packet is present in the set of pending packets or not.
  6244. + * This flag must only be modified with the pending lock held, and must be
  6245. + * coherent with the presence of the packet in the pending set.
  6246. + *
  6247. + * - transmitting
  6248. + * Indicates if the packet is currently transmitting. In case of
  6249. + * re-transmissions, it is only safe to wait on the "transmitted" completion
  6250. + * after this flag has been set. The completion will be set both in success
  6251. + * and error case.
  6252. + *
  6253. + * - transmitted
  6254. + * Indicates if the packet has been transmitted. This flag is not cleared by
  6255. + * the system, thus it indicates the first transmission only.
  6256. + *
  6257. + * - acked
  6258. + * Indicates if the packet has been acknowledged by the client. There are no
  6259. + * other guarantees given. For example, the packet may still be canceled
  6260. + * and/or the completion may be triggered an error even though this bit is
  6261. + * set. Rely on the status provided by completion instead.
  6262. + *
  6263. + * - canceled
  6264. + * Indicates if the packet has been canceled from the outside. There are no
  6265. + * other guarantees given. Specifically, the packet may be completed by
  6266. + * another part of the system before the cancellation attempts to complete it.
  6267. + *
  6268. + * >> General Notes <<
  6269. + *
  6270. + * To avoid deadlocks, if both queue and pending locks are required, the pending
  6271. + * lock must be acquired before the queue lock.
  6272. + */
  6273. +
  6274. +/**
  6275. + * Maximum number of transmission attempts per sequenced packet in case of
  6276. + * time-outs. Must be smaller than 16.
  6277. + */
  6278. +#define SSH_PTL_MAX_PACKET_TRIES 3
  6279. +
  6280. +/**
  6281. + * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
  6282. + * time-frame after starting transmission, the packet will be re-submitted.
  6283. + */
  6284. +#define SSH_PTL_PACKET_TIMEOUT ms_to_ktime(1000)
  6285. +
  6286. +/**
  6287. + * Maximum time resolution for timeouts. Currently set to max(2 jiffies, 50ms).
  6288. + * Should be larger than one jiffy to avoid direct re-scheduling of reaper
  6289. + * work_struct.
  6290. + */
  6291. +#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
  6292. +
  6293. +/**
  6294. + * Maximum number of sequenced packets concurrently waiting for an ACK.
  6295. + * Packets marked as blocking will not be transmitted while this limit is
  6296. + * reached.
  6297. + */
  6298. +#define SSH_PTL_MAX_PENDING 1
  6299. +
  6300. +#define SSH_PTL_RX_BUF_LEN 4096
  6301. +
  6302. +#define SSH_PTL_RX_FIFO_LEN 4096
  6303. +
  6304. +
  6305. +enum ssh_ptl_state_flags {
  6306. + SSH_PTL_SF_SHUTDOWN_BIT,
  6307. +};
  6308. +
  6309. +struct ssh_ptl_ops {
  6310. + void (*data_received)(struct ssh_ptl *p, const struct ssam_span *data);
  6311. +};
  6312. +
  6313. +struct ssh_ptl {
  6314. + struct serdev_device *serdev;
  6315. + unsigned long state;
  6316. +
  6317. + struct {
  6318. + spinlock_t lock;
  6319. + struct list_head head;
  6320. + } queue;
  6321. +
  6322. + struct {
  6323. + spinlock_t lock;
  6324. + struct list_head head;
  6325. + atomic_t count;
  6326. + } pending;
  6327. +
  6328. + struct {
  6329. + bool thread_signal;
  6330. + struct task_struct *thread;
  6331. + struct wait_queue_head thread_wq;
  6332. + struct wait_queue_head packet_wq;
  6333. + struct ssh_packet *packet;
  6334. + size_t offset;
  6335. + } tx;
  6336. +
  6337. + struct {
  6338. + struct task_struct *thread;
  6339. + struct wait_queue_head wq;
  6340. + struct kfifo fifo;
  6341. + struct sshp_buf buf;
  6342. +
  6343. + struct {
  6344. + u16 seqs[8];
  6345. + u16 offset;
  6346. + } blocked;
  6347. + } rx;
  6348. +
  6349. + struct {
  6350. + ktime_t timeout;
  6351. + ktime_t expires;
  6352. + struct delayed_work reaper;
  6353. + } rtx_timeout;
  6354. +
  6355. + struct ssh_ptl_ops ops;
  6356. +};
  6357. +
  6358. +
  6359. +#define __ssam_prcond(func, p, fmt, ...) \
  6360. + do { \
  6361. + if ((p)) \
  6362. + func((p), fmt, ##__VA_ARGS__); \
  6363. + } while (0);
  6364. +
  6365. +#define ptl_dbg(p, fmt, ...) dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
  6366. +#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
  6367. +#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
  6368. +#define ptl_err(p, fmt, ...) dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
  6369. +#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__)
  6370. +
  6371. +#define to_ssh_packet(ptr, member) \
  6372. + container_of(ptr, struct ssh_packet, member)
  6373. +
  6374. +#define to_ssh_ptl(ptr, member) \
  6375. + container_of(ptr, struct ssh_ptl, member)
  6376. +
  6377. +
  6378. +#ifdef CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION
  6379. +
  6380. +/**
  6381. + * ssh_ptl_should_drop_ack_packet - error injection hook to drop ACK packets
  6382. + *
  6383. + * Useful to test detection and handling of automated re-transmits by the EC.
  6384. + * Specifically of packets that the EC considers not-ACKed but the driver
  6385. + * already considers ACKed (due to dropped ACK). In this case, the EC
  6386. + * re-transmits the packet-to-be-ACKed and the driver should detect it as
  6387. + * duplicate/already handled. Note that the driver should still send an ACK
  6388. + * for the re-transmitted packet.
  6389. + */
  6390. +static noinline bool ssh_ptl_should_drop_ack_packet(void)
  6391. +{
  6392. + return false;
  6393. +}
  6394. +ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);
  6395. +
  6396. +/**
  6397. + * ssh_ptl_should_drop_nak_packet - error injection hook to drop NAK packets
  6398. + *
  6399. + * Useful to test/force automated (timeout-based) re-transmit by the EC.
  6400. + * Specifically, packets that have not reached the driver completely/with valid
  6401. + * checksums. Only useful in combination with reception of (injected) bad data.
  6402. + */
  6403. +static noinline bool ssh_ptl_should_drop_nak_packet(void)
  6404. +{
  6405. + return false;
  6406. +}
  6407. +ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);
  6408. +
  6409. +/**
  6410. + * ssh_ptl_should_drop_dsq_packet - error injection hook to drop sequenced data
  6411. + * packet
  6412. + *
  6413. + * Useful to test re-transmit timeout of the driver. If the data packet has not
  6414. + * been ACKed after a certain time, the driver should re-transmit the packet up
  6415. + * to limited number of times defined in SSH_PTL_MAX_PACKET_TRIES.
  6416. + */
  6417. +static noinline bool ssh_ptl_should_drop_dsq_packet(void)
  6418. +{
  6419. + return false;
  6420. +}
  6421. +ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);
  6422. +
  6423. +/**
  6424. + * ssh_ptl_should_fail_write - error injection hook to make serdev_device_write
  6425. + * fail
  6426. + *
  6427. + * Hook to simulate errors in serdev_device_write when transmitting packets.
  6428. + */
  6429. +static noinline int ssh_ptl_should_fail_write(void)
  6430. +{
  6431. + return 0;
  6432. +}
  6433. +ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);
  6434. +
  6435. +/**
  6436. + * ssh_ptl_should_corrupt_tx_data - error injection hook to simulate invalid
  6437. + * data being sent to the EC
  6438. + *
  6439. + * Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
  6440. + * Causes the packet data to be actively corrupted by overwriting it with
  6441. + * pre-defined values, such that it becomes invalid, causing the EC to respond
  6442. + * with a NAK packet. Useful to test handling of NAK packets received by the
  6443. + * driver.
  6444. + */
  6445. +static noinline bool ssh_ptl_should_corrupt_tx_data(void)
  6446. +{
  6447. + return false;
  6448. +}
  6449. +ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);
  6450. +
  6451. +/**
  6452. + * ssh_ptl_should_corrupt_rx_syn - error injection hook to simulate invalid
  6453. + * data being sent by the EC
  6454. + *
  6455. + * Hook to simulate invalid SYN bytes, i.e. an invalid start of messages and
  6456. + * test handling thereof in the driver.
  6457. + */
  6458. +static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
  6459. +{
  6460. + return false;
  6461. +}
  6462. +ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);
  6463. +
  6464. +/**
  6465. + * ssh_ptl_should_corrupt_rx_data - error injection hook to simulate invalid
  6466. + * data being sent by the EC
  6467. + *
  6468. + * Hook to simulate invalid data/checksum of the message frame and test handling
  6469. + * thereof in the driver.
  6470. + */
  6471. +static noinline bool ssh_ptl_should_corrupt_rx_data(void)
  6472. +{
  6473. + return false;
  6474. +}
  6475. +ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE);
  6476. +
  6477. +
  6478. +static inline bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet)
  6479. +{
  6480. + if (likely(!ssh_ptl_should_drop_ack_packet()))
  6481. + return false;
  6482. +
  6483. + trace_ssam_ei_tx_drop_ack_packet(packet);
  6484. + ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n",
  6485. + packet);
  6486. +
  6487. + return true;
  6488. +}
  6489. +
  6490. +static inline bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet)
  6491. +{
  6492. + if (likely(!ssh_ptl_should_drop_nak_packet()))
  6493. + return false;
  6494. +
  6495. + trace_ssam_ei_tx_drop_nak_packet(packet);
  6496. + ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n",
  6497. + packet);
  6498. +
  6499. + return true;
  6500. +}
  6501. +
  6502. +static inline bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet)
  6503. +{
  6504. + if (likely(!ssh_ptl_should_drop_dsq_packet()))
  6505. + return false;
  6506. +
  6507. + trace_ssam_ei_tx_drop_dsq_packet(packet);
  6508. + ptl_info(packet->ptl,
  6509. + "packet error injection: dropping sequenced data packet %p\n",
  6510. + packet);
  6511. +
  6512. + return true;
  6513. +}
  6514. +
  6515. +static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
  6516. +{
  6517. + // ignore packets that don't carry any data (i.e. flush)
  6518. + if (!packet->data.ptr || !packet->data.len)
  6519. + return false;
  6520. +
  6521. + switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) {
  6522. + case SSH_FRAME_TYPE_ACK:
  6523. + return __ssh_ptl_should_drop_ack_packet(packet);
  6524. +
  6525. + case SSH_FRAME_TYPE_NAK:
  6526. + return __ssh_ptl_should_drop_nak_packet(packet);
  6527. +
  6528. + case SSH_FRAME_TYPE_DATA_SEQ:
  6529. + return __ssh_ptl_should_drop_dsq_packet(packet);
  6530. +
  6531. + default:
  6532. + return false;
  6533. + }
  6534. +}
  6535. +
  6536. +static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet,
  6537. + const unsigned char *buf, size_t count)
  6538. +{
  6539. + int status;
  6540. +
  6541. + status = ssh_ptl_should_fail_write();
  6542. + if (unlikely(status)) {
  6543. + trace_ssam_ei_tx_fail_write(packet, status);
  6544. + ptl_info(packet->ptl,
  6545. + "packet error injection: simulating transmit error %d, packet %p\n",
  6546. + status, packet);
  6547. +
  6548. + return status;
  6549. + }
  6550. +
  6551. + return serdev_device_write_buf(ptl->serdev, buf, count);
  6552. +}
  6553. +
  6554. +static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
  6555. +{
  6556. + // ignore packets that don't carry any data (i.e. flush)
  6557. + if (!packet->data.ptr || !packet->data.len)
  6558. + return;
  6559. +
  6560. + // only allow sequenced data packets to be modified
  6561. + if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ)
  6562. + return;
  6563. +
  6564. + if (likely(!ssh_ptl_should_corrupt_tx_data()))
  6565. + return;
  6566. +
  6567. + trace_ssam_ei_tx_corrupt_data(packet);
  6568. + ptl_info(packet->ptl,
  6569. + "packet error injection: simulating invalid transmit data on packet %p\n",
  6570. + packet);
  6571. +
  6572. + /*
  6573. + * NB: The value 0xb3 has been chosen more or less randomly so that it
  6574. + * doesn't have any (major) overlap with the SYN bytes (aa 55) and is
  6575. + * non-trivial (i.e. non-zero, non-0xff).
  6576. + */
  6577. + memset(packet->data.ptr, 0xb3, packet->data.len);
  6578. +}
  6579. +
  6580. +static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
  6581. + struct ssam_span *data)
  6582. +{
  6583. + struct ssam_span frame;
  6584. +
  6585. + // check if there actually is something to corrupt
  6586. + if (!sshp_find_syn(data, &frame))
  6587. + return;
  6588. +
  6589. + if (likely(!ssh_ptl_should_corrupt_rx_syn()))
  6590. + return;
  6591. +
  6592. + trace_ssam_ei_rx_corrupt_syn("data_length", data->len);
  6593. +
  6594. + data->ptr[1] = 0xb3; // set second byte of SYN to "random" value
  6595. +}
  6596. +
  6597. +static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
  6598. + struct ssam_span *frame)
  6599. +{
  6600. + size_t payload_len, message_len;
  6601. + struct ssh_frame *sshf;
  6602. +
  6603. + // ignore incomplete messages, will get handled once it's complete
  6604. + if (frame->len < SSH_MESSAGE_LENGTH(0))
  6605. + return;
  6606. +
  6607. + // ignore incomplete messages, part 2
  6608. + payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]);
  6609. + message_len = SSH_MESSAGE_LENGTH(payload_len);
  6610. + if (frame->len < message_len)
  6611. + return;
  6612. +
  6613. + if (likely(!ssh_ptl_should_corrupt_rx_data()))
  6614. + return;
  6615. +
  6616. + sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)];
  6617. + trace_ssam_ei_rx_corrupt_data(sshf);
  6618. +
  6619. + /*
  6620. + * Flip bits in first byte of payload checksum. This is basically
  6621. + * equivalent to a payload/frame data error without us having to worry
  6622. + * about (the, arguably pretty small, probability of) accidental
  6623. + * checksum collisions.
  6624. + */
  6625. + frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2];
  6626. +}
  6627. +
  6628. +#else /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */
  6629. +
  6630. +static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
  6631. +{
  6632. + return false;
  6633. +}
  6634. +
  6635. +static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl,
  6636. + struct ssh_packet *packet,
  6637. + const unsigned char *buf,
  6638. + size_t count)
  6639. +{
  6640. + return serdev_device_write_buf(ptl->serdev, buf, count);
  6641. +}
  6642. +
  6643. +static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
  6644. +{
  6645. +}
  6646. +
  6647. +static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
  6648. + struct ssam_span *data)
  6649. +{
  6650. +}
  6651. +
  6652. +static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
  6653. + struct ssam_span *frame)
  6654. +{
  6655. +}
  6656. +
  6657. +#endif /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */
  6658. +
  6659. +
  6660. +static void __ssh_ptl_packet_release(struct kref *kref)
  6661. +{
  6662. + struct ssh_packet *p = to_ssh_packet(kref, refcnt);
  6663. +
  6664. + trace_ssam_packet_release(p);
  6665. +
  6666. + ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
  6667. + p->ops->release(p);
  6668. +}
  6669. +
  6670. +void ssh_packet_get(struct ssh_packet *packet)
  6671. +{
  6672. + kref_get(&packet->refcnt);
  6673. +}
  6674. +EXPORT_SYMBOL_GPL(ssh_packet_get);
  6675. +
  6676. +void ssh_packet_put(struct ssh_packet *packet)
  6677. +{
  6678. + kref_put(&packet->refcnt, __ssh_ptl_packet_release);
  6679. +}
  6680. +EXPORT_SYMBOL_GPL(ssh_packet_put);
  6681. +
  6682. +static inline u8 ssh_packet_get_seq(struct ssh_packet *packet)
  6683. +{
  6684. + return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
  6685. +}
  6686. +
  6687. +
  6688. +struct ssh_packet_args {
  6689. + unsigned long type;
  6690. + u8 priority;
  6691. + const struct ssh_packet_ops *ops;
  6692. +};
  6693. +
  6694. +static void ssh_packet_init(struct ssh_packet *packet,
  6695. + const struct ssh_packet_args *args)
  6696. +{
  6697. + kref_init(&packet->refcnt);
  6698. +
  6699. + packet->ptl = NULL;
  6700. + INIT_LIST_HEAD(&packet->queue_node);
  6701. + INIT_LIST_HEAD(&packet->pending_node);
  6702. +
  6703. + packet->state = args->type & SSH_PACKET_FLAGS_TY_MASK;
  6704. + packet->priority = args->priority;
  6705. + packet->timestamp = KTIME_MAX;
  6706. +
  6707. + packet->data.ptr = NULL;
  6708. + packet->data.len = 0;
  6709. +
  6710. + packet->ops = args->ops;
  6711. +}
  6712. +
  6713. +
  6714. +static struct kmem_cache *ssh_ctrl_packet_cache;
  6715. +
  6716. +static int __init ssh_ctrl_packet_cache_init(void)
  6717. +{
  6718. + const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
  6719. + const unsigned int align = __alignof__(struct ssh_packet);
  6720. + struct kmem_cache *cache;
  6721. +
  6722. + cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
  6723. + if (!cache)
  6724. + return -ENOMEM;
  6725. +
  6726. + ssh_ctrl_packet_cache = cache;
  6727. + return 0;
  6728. +}
  6729. +
  6730. +static void __exit ssh_ctrl_packet_cache_destroy(void)
  6731. +{
  6732. + kmem_cache_destroy(ssh_ctrl_packet_cache);
  6733. + ssh_ctrl_packet_cache = NULL;
  6734. +}
  6735. +
  6736. +static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
  6737. + struct ssam_span *buffer, gfp_t flags)
  6738. +{
  6739. + *packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
  6740. + if (!*packet)
  6741. + return -ENOMEM;
  6742. +
  6743. + buffer->ptr = (u8 *)(*packet + 1);
  6744. + buffer->len = SSH_MSG_LEN_CTRL;
  6745. +
  6746. + trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
  6747. + return 0;
  6748. +}
  6749. +
  6750. +static void ssh_ctrl_packet_free(struct ssh_packet *p)
  6751. +{
  6752. + trace_ssam_ctrl_packet_free(p);
  6753. + kmem_cache_free(ssh_ctrl_packet_cache, p);
  6754. +}
  6755. +
  6756. +static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
  6757. + .complete = NULL,
  6758. + .release = ssh_ctrl_packet_free,
  6759. +};
  6760. +
  6761. +
  6762. +static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
  6763. + ktime_t expires)
  6764. +{
  6765. + unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
  6766. + ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);
  6767. + ktime_t old;
  6768. +
  6769. + // re-adjust / schedule reaper if it is above resolution delta
  6770. + old = READ_ONCE(ptl->rtx_timeout.expires);
  6771. + while (ktime_before(aexp, old))
  6772. + old = cmpxchg64(&ptl->rtx_timeout.expires, old, expires);
  6773. +
  6774. + // if we updated the reaper expiration, modify work timeout
  6775. + if (old == expires)
  6776. + mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
  6777. +}
  6778. +
/* Arm the (re-)transmission timeout for @packet, unless it is locked. */
static void ssh_ptl_timeout_start(struct ssh_packet *packet)
{
	struct ssh_ptl *ptl = packet->ptl;
	ktime_t timestamp = ktime_get_coarse_boottime();
	ktime_t timeout = ptl->rtx_timeout.timeout;

	// locked packets are being cancelled/completed; do not time them
	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
		return;

	// publish the timestamp before (re-)scheduling the reaper
	WRITE_ONCE(packet->timestamp, timestamp);
	smp_mb__after_atomic();

	ssh_ptl_timeout_reaper_mod(packet->ptl, timestamp, timestamp + timeout);
}
  6793. +
  6794. +
/*
 * Find the queue position for packet @p based on its priority. Returns the
 * list node that @p should be inserted before (via list_add_tail), i.e. the
 * first node with strictly lower priority, or the queue head itself if no
 * such node exists. Caller must hold the queue lock.
 */
static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
{
	struct list_head *head;
	u8 priority = READ_ONCE(p->priority);

	/*
	 * We generally assume that there are fewer control (ACK/NAK) packets
	 * and re-submitted data packets than there are normal data packets (at
	 * least in situations in which many packets are queued; if there
	 * aren't many packets queued the decision on how to iterate should be
	 * basically irrelevant; the number of control/data packets is more or
	 * less limited via the maximum number of pending packets). Thus, when
	 * inserting a control or re-submitted data packet, (determined by
	 * their priority), we search from front to back. Normal data packets
	 * are usually queued directly at the tail of the queue, so for those
	 * search from back to front.
	 */

	if (priority > SSH_PACKET_PRIORITY_DATA) {
		// front-to-back: stop at the first lower-priority node
		list_for_each(head, &p->ptl->queue.head) {
			p = list_entry(head, struct ssh_packet, queue_node);

			if (READ_ONCE(p->priority) < priority)
				break;
		}
	} else {
		// back-to-front: stop just after the last >=-priority node
		list_for_each_prev(head, &p->ptl->queue.head) {
			p = list_entry(head, struct ssh_packet, queue_node);

			if (READ_ONCE(p->priority) >= priority) {
				head = head->next;
				break;
			}
		}
	}


	return head;
}
  6834. +
  6835. +static int ssh_ptl_queue_push(struct ssh_packet *packet)
  6836. +{
  6837. + struct ssh_ptl *ptl = packet->ptl;
  6838. + struct list_head *head;
  6839. +
  6840. + spin_lock(&ptl->queue.lock);
  6841. +
  6842. + if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state)) {
  6843. + spin_unlock(&ptl->queue.lock);
  6844. + return -ESHUTDOWN;
  6845. + }
  6846. +
  6847. + // avoid further transitions when cancelling/completing
  6848. + if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) {
  6849. + spin_unlock(&ptl->queue.lock);
  6850. + return -EINVAL;
  6851. + }
  6852. +
  6853. + // if this packet has already been queued, do not add it
  6854. + if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
  6855. + spin_unlock(&ptl->queue.lock);
  6856. + return -EALREADY;
  6857. + }
  6858. +
  6859. + head = __ssh_ptl_queue_find_entrypoint(packet);
  6860. +
  6861. + ssh_packet_get(packet);
  6862. + list_add_tail(&packet->queue_node, &ptl->queue.head);
  6863. +
  6864. + spin_unlock(&ptl->queue.lock);
  6865. + return 0;
  6866. +}
  6867. +
  6868. +static void ssh_ptl_queue_remove(struct ssh_packet *packet)
  6869. +{
  6870. + struct ssh_ptl *ptl = packet->ptl;
  6871. + bool remove;
  6872. +
  6873. + spin_lock(&ptl->queue.lock);
  6874. +
  6875. + remove = test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state);
  6876. + if (remove)
  6877. + list_del(&packet->queue_node);
  6878. +
  6879. + spin_unlock(&ptl->queue.lock);
  6880. +
  6881. + if (remove)
  6882. + ssh_packet_put(packet);
  6883. +}
  6884. +
  6885. +
  6886. +static void ssh_ptl_pending_push(struct ssh_packet *packet)
  6887. +{
  6888. + struct ssh_ptl *ptl = packet->ptl;
  6889. +
  6890. + spin_lock(&ptl->pending.lock);
  6891. +
  6892. + // if we are cancelling/completing this packet, do not add it
  6893. + if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) {
  6894. + spin_unlock(&ptl->pending.lock);
  6895. + return;
  6896. + }
  6897. +
  6898. + // in case it is already pending (e.g. re-submission), do not add it
  6899. + if (test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
  6900. + spin_unlock(&ptl->pending.lock);
  6901. + return;
  6902. + }
  6903. +
  6904. + atomic_inc(&ptl->pending.count);
  6905. + ssh_packet_get(packet);
  6906. + list_add_tail(&packet->pending_node, &ptl->pending.head);
  6907. +
  6908. + spin_unlock(&ptl->pending.lock);
  6909. +}
  6910. +
  6911. +static void ssh_ptl_pending_remove(struct ssh_packet *packet)
  6912. +{
  6913. + struct ssh_ptl *ptl = packet->ptl;
  6914. + bool remove;
  6915. +
  6916. + spin_lock(&ptl->pending.lock);
  6917. +
  6918. + remove = test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state);
  6919. + if (remove) {
  6920. + list_del(&packet->pending_node);
  6921. + atomic_dec(&ptl->pending.count);
  6922. + }
  6923. +
  6924. + spin_unlock(&ptl->pending.lock);
  6925. +
  6926. + if (remove)
  6927. + ssh_packet_put(packet);
  6928. +}
  6929. +
  6930. +
/*
 * Invoke the completion callback of @p with @status. Logs packet errors
 * (except cancellation). Does not remove the packet from any list; callers
 * are responsible for that and for completed-bit bookkeeping.
 */
static void __ssh_ptl_complete(struct ssh_packet *p, int status)
{
	struct ssh_ptl *ptl = READ_ONCE(p->ptl);

	trace_ssam_packet_complete(p, status);

	ptl_dbg_cond(ptl, "ptl: completing packet %p\n", p);
	if (status && status != -ECANCELED)
		ptl_dbg_cond(ptl, "ptl: packet error: %d\n", status);

	if (p->ops->complete)
		p->ops->complete(p, status);
}
  6944. +
/*
 * Remove @p from queue and pending set and complete it with @status, unless
 * it has already been completed by someone else.
 */
static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
{
	/*
	 * A call to this function should in general be preceded by
	 * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state) to avoid re-adding the
	 * packet to the structures it's going to be removed from.
	 *
	 * The set_bit call does not need explicit memory barriers as the
	 * implicit barrier of the test_and_set_bit call below ensures that the
	 * flag is visible before we actually attempt to remove the packet.
	 */

	if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
		return;

	ssh_ptl_queue_remove(p);
	ssh_ptl_pending_remove(p);

	__ssh_ptl_complete(p, status);
}
  6965. +
  6966. +
  6967. +static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
  6968. +{
  6969. + struct ssh_ptl *ptl = packet->ptl;
  6970. +
  6971. + if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
  6972. + return !atomic_read(&ptl->pending.count);
  6973. +
  6974. + // we can alwas process non-blocking packets
  6975. + if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
  6976. + return true;
  6977. +
  6978. + // if we are already waiting for this packet, send it again
  6979. + if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
  6980. + return true;
  6981. +
  6982. + // otherwise: check if we have the capacity to send
  6983. + return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
  6984. +}
  6985. +
/*
 * Pop the next transmittable packet off the queue and mark it as
 * transmitting. Returns ERR_PTR(-ENOENT) if the queue has no usable packet
 * and ERR_PTR(-EBUSY) if the head packet cannot be processed yet.
 */
static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
{
	struct ssh_packet *packet = ERR_PTR(-ENOENT);
	struct ssh_packet *p, *n;

	spin_lock(&ptl->queue.lock);
	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
		/*
		 * If we are cancelling or completing this packet, ignore it.
		 * It's going to be removed from this queue shortly.
		 */
		if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
			continue;

		/*
		 * Packets should be ordered non-blocking/to-be-resent first.
		 * If we cannot process this packet, assume that we can't
		 * process any following packet either and abort.
		 */
		if (!ssh_ptl_tx_can_process(p)) {
			packet = ERR_PTR(-EBUSY);
			break;
		}

		/*
		 * We are allowed to change the state now. Remove it from the
		 * queue and mark it as being transmitted. Note that we cannot
		 * add it to the set of pending packets yet, as queue locks must
		 * always be acquired before packet locks (otherwise we might
		 * run into a deadlock).
		 */

		list_del(&p->queue_node);

		/*
		 * Ensure that the "queued" bit gets cleared after setting the
		 * "transmitting" bit to guarantee non-zero flags.
		 */
		set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
		smp_mb__before_atomic();
		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);

		packet = p;
		break;
	}
	spin_unlock(&ptl->queue.lock);

	return packet;
}
  7035. +
/*
 * Get the next packet to transmit: pop it from the queue and, for sequenced
 * packets, add it to the pending set and start its timeout. Also bumps the
 * packet's try counter. Returns an ERR_PTR if no packet is available (see
 * ssh_ptl_tx_pop()).
 */
static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
{
	struct ssh_packet *p;

	p = ssh_ptl_tx_pop(ptl);
	if (IS_ERR(p))
		return p;

	if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
		// sequenced packets await an ACK: track them and arm timeout
		ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
		ssh_ptl_pending_push(p);
		ssh_ptl_timeout_start(p);
	} else {
		ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
	}

	/*
	 * Update number of tries. This directly influences the priority in case
	 * the packet is re-submitted (e.g. via timeout/NAK). Note that this is
	 * the only place where we update the priority in-flight. As this runs
	 * only on the tx-thread, this read-modify-write procedure is safe.
	 */
	WRITE_ONCE(p->priority, READ_ONCE(p->priority) + 1);

	return p;
}
  7062. +
/*
 * Handle successful transmission of @packet: transition its state to
 * "transmitted", complete it immediately if it is unsequenced (sequenced
 * packets are completed on ACK), and wake up waiters.
 */
static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
{
	struct ssh_ptl *ptl = packet->ptl;

	ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);

	/*
	 * Transition to state to "transmitted". Ensure that the flags never get
	 * zero with barrier.
	 */
	set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state);
	smp_mb__before_atomic();
	clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);

	// if the packet is unsequenced, we're done: lock and complete
	if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) {
		set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
		ssh_ptl_remove_and_complete(packet, 0);
	}

	/*
	 * Notify that a packet transmission has finished. In general we're only
	 * waiting for one packet (if any), so wake_up_all should be fine.
	 */
	wake_up_all(&ptl->tx.packet_wq);
}
  7089. +
/*
 * Handle failed transmission of @packet: lock it against further state
 * transitions, complete it with @status, and wake up waiters.
 */
static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status)
{
	/*
	 * Transmission failure: Lock the packet and try to complete it. Ensure
	 * that the flags never get zero with barrier.
	 */
	set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
	smp_mb__before_atomic();
	clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);

	ptl_err(packet->ptl, "ptl: transmission error: %d\n", status);
	ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet);

	ssh_ptl_remove_and_complete(packet, status);

	/*
	 * Notify that a packet transmission has finished. In general we're only
	 * waiting for one packet (if any), so wake_up_all should be fine.
	 */
	wake_up_all(&packet->ptl->tx.packet_wq);
}
  7111. +
/*
 * Sleep until the transmitter is signalled (new work or more buffer space
 * available) or the thread is asked to stop. Clears the signal flag before
 * returning so the next wait doesn't fall through spuriously.
 */
static void ssh_ptl_tx_threadfn_wait(struct ssh_ptl *ptl)
{
	wait_event_interruptible(ptl->tx.thread_wq,
		READ_ONCE(ptl->tx.thread_signal) || kthread_should_stop());
	WRITE_ONCE(ptl->tx.thread_signal, false);
}
  7118. +
/*
 * Packet transmitter thread: pulls packets off the queue, writes their data
 * to the lower layer (possibly in multiple chunks) and completes them on
 * success or error. Parks in ssh_ptl_tx_threadfn_wait() when there is no
 * work or no buffer space. On stop, fails the in-flight packet with
 * -ESHUTDOWN.
 */
static int ssh_ptl_tx_threadfn(void *data)
{
	struct ssh_ptl *ptl = data;

	while (!kthread_should_stop()) {
		unsigned char *buf;
		bool drop = false;
		size_t len = 0;
		int status = 0;

		// if we don't have a packet, get the next and add it to pending
		if (IS_ERR_OR_NULL(ptl->tx.packet)) {
			ptl->tx.packet = ssh_ptl_tx_next(ptl);
			ptl->tx.offset = 0;

			// if no packet is available, we are done
			if (IS_ERR(ptl->tx.packet)) {
				ssh_ptl_tx_threadfn_wait(ptl);
				continue;
			}
		}

		// error injection: drop packet to simulate transmission problem
		if (ptl->tx.offset == 0)
			drop = ssh_ptl_should_drop_packet(ptl->tx.packet);

		// error injection: simulate invalid packet data
		if (ptl->tx.offset == 0 && !drop)
			ssh_ptl_tx_inject_invalid_data(ptl->tx.packet);

		// flush-packets don't have any data
		if (likely(ptl->tx.packet->data.ptr && !drop)) {
			buf = ptl->tx.packet->data.ptr + ptl->tx.offset;
			len = ptl->tx.packet->data.len - ptl->tx.offset;

			ptl_dbg(ptl, "tx: sending data (length: %zu)\n", len);
			print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
					     buf, len, false);

			status = ssh_ptl_write_buf(ptl, ptl->tx.packet, buf, len);
		}

		if (status < 0) {
			// complete packet with error
			ssh_ptl_tx_compl_error(ptl->tx.packet, status);
			ssh_packet_put(ptl->tx.packet);
			ptl->tx.packet = NULL;

		// status >= 0 here, so the int vs. size_t comparison is safe
		} else if (status == len) {
			// complete packet and/or mark as transmitted
			ssh_ptl_tx_compl_success(ptl->tx.packet);
			ssh_packet_put(ptl->tx.packet);
			ptl->tx.packet = NULL;

		} else { // need more buffer space
			ptl->tx.offset += status;
			ssh_ptl_tx_threadfn_wait(ptl);
		}
	}

	// cancel active packet before we actually stop
	if (!IS_ERR_OR_NULL(ptl->tx.packet)) {
		ssh_ptl_tx_compl_error(ptl->tx.packet, -ESHUTDOWN);
		ssh_packet_put(ptl->tx.packet);
		ptl->tx.packet = NULL;
	}

	return 0;
}
  7188. +
  7189. +static inline void ssh_ptl_tx_wakeup(struct ssh_ptl *ptl, bool force)
  7190. +{
  7191. + if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
  7192. + return;
  7193. +
  7194. + if (force || atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING) {
  7195. + WRITE_ONCE(ptl->tx.thread_signal, true);
  7196. + smp_mb__after_atomic();
  7197. + wake_up(&ptl->tx.thread_wq);
  7198. + }
  7199. +}
  7200. +
  7201. +static int ssh_ptl_tx_start(struct ssh_ptl *ptl)
  7202. +{
  7203. + ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "surface-sh-tx");
  7204. + if (IS_ERR(ptl->tx.thread))
  7205. + return PTR_ERR(ptl->tx.thread);
  7206. +
  7207. + return 0;
  7208. +}
  7209. +
  7210. +static int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
  7211. +{
  7212. + int status = 0;
  7213. +
  7214. + if (ptl->tx.thread) {
  7215. + status = kthread_stop(ptl->tx.thread);
  7216. + ptl->tx.thread = NULL;
  7217. + }
  7218. +
  7219. + return status;
  7220. +}
  7221. +
  7222. +
/*
 * Find the pending packet with sequence ID @seq_id, mark it as ACKed and
 * remove it from the pending set. Returns the packet (with the pending set's
 * reference transferred to the caller), ERR_PTR(-ENOENT) if no such packet
 * is pending, or ERR_PTR(-EPERM) if the packet is locked.
 */
static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
{
	struct ssh_packet *packet = ERR_PTR(-ENOENT);
	struct ssh_packet *p, *n;

	spin_lock(&ptl->pending.lock);
	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
		/*
		 * We generally expect packets to be in order, so first packet
		 * to be added to pending is first to be sent, is first to be
		 * ACKed.
		 */
		if (unlikely(ssh_packet_get_seq(p) != seq_id))
			continue;

		/*
		 * In case we receive an ACK while handling a transmission error
		 * completion. The packet will be removed shortly.
		 */
		if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
			packet = ERR_PTR(-EPERM);
			break;
		}

		/*
		 * Mark packet as ACKed and remove it from pending. Ensure that
		 * the flags never get zero with barrier.
		 */
		set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
		smp_mb__before_atomic();
		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);

		atomic_dec(&ptl->pending.count);
		list_del(&p->pending_node);
		packet = p;

		break;
	}
	spin_unlock(&ptl->pending.lock);

	return packet;
}
  7265. +
/*
 * Wait until @packet has either been fully transmitted or been locked for
 * cancellation/completion. Woken via tx.packet_wq from the transmitter's
 * completion paths.
 */
static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
{
	wait_event(packet->ptl->tx.packet_wq,
		   test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state)
		   || test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
}
  7272. +
/*
 * Handle an incoming ACK for sequence ID @seq: find the matching pending
 * packet, wait (if necessary) for its transmission to settle, and complete
 * it — with -EREMOTEIO if the ACK arrived before it was fully transmitted.
 */
static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
{
	struct ssh_packet *p;
	int status = 0;

	p = ssh_ptl_ack_pop(ptl, seq);
	if (IS_ERR(p)) {
		if (PTR_ERR(p) == -ENOENT) {
			/*
			 * The packet has not been found in the set of pending
			 * packets.
			 */
			ptl_warn(ptl, "ptl: received ACK for non-pending"
				 " packet\n");
		} else {
			/*
			 * The packet is pending, but we are not allowed to take
			 * it because it has been locked.
			 */
		}
		return;
	}

	ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);

	/*
	 * It is possible that the packet has been transmitted, but the state
	 * has not been updated from "transmitting" to "transmitted" yet.
	 * In that case, we need to wait for this transition to occur in order
	 * to determine between success or failure.
	 */
	if (test_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state))
		ssh_ptl_wait_until_transmitted(p);

	/*
	 * The packet will already be locked in case of a transmission error or
	 * cancellation. Let the transmitter or cancellation issuer complete the
	 * packet.
	 */
	if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
		ssh_packet_put(p);
		return;
	}

	if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state))) {
		ptl_err(ptl, "ptl: received ACK before packet had been fully"
			" transmitted\n");
		status = -EREMOTEIO;
	}

	ssh_ptl_remove_and_complete(p, status);
	ssh_packet_put(p);

	// a pending slot has freed up: let the transmitter continue
	ssh_ptl_tx_wakeup(ptl, false);
}
  7328. +
  7329. +
/*
 * Submit packet @p for transmission on @ptl. Validates the packet (flush
 * packets must be unsequenced and carry no data; all other packets must
 * carry data), binds the packet to this transport layer on first submission,
 * queues it, and wakes the transmitter. Returns -EINVAL on invalid packet
 * fields, -EALREADY if the packet belongs to a different transport layer or
 * is already queued, -ESHUTDOWN during shutdown, 0 on success.
 */
static int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
{
	struct ssh_ptl *ptl_old;
	int status;

	trace_ssam_packet_submit(p);

	// validate packet fields
	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
		if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
			return -EINVAL;
	} else if (!p->data.ptr) {
		return -EINVAL;
	}

	/*
	 * The ptl reference only gets set on or before the first submission.
	 * After the first submission, it has to be read-only.
	 */
	ptl_old = READ_ONCE(p->ptl);
	if (ptl_old == NULL)
		WRITE_ONCE(p->ptl, ptl);
	else if (ptl_old != ptl)
		return -EALREADY;

	status = ssh_ptl_queue_push(p);
	if (status)
		return status;

	// force the wakeup for non-blocking packets (not subject to the limit)
	ssh_ptl_tx_wakeup(ptl, !test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state));
	return 0;
}
  7362. +
/*
 * Re-queue @packet at its priority-determined position and reset its timeout
 * timestamp. No-op if the packet is already queued. Both visible callers
 * invoke this with the pending lock held; the queue lock is taken here, so
 * queue-before-pending lock ordering is not used on this path — NOTE(review):
 * confirm this nesting is consistent with the rest of the driver.
 */
static void __ssh_ptl_resubmit(struct ssh_packet *packet)
{
	struct list_head *head;

	trace_ssam_packet_resubmit(packet);

	spin_lock(&packet->ptl->queue.lock);

	// if this packet has already been queued, do not add it
	if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
		spin_unlock(&packet->ptl->queue.lock);
		return;
	}

	// find first node with lower priority
	head = __ssh_ptl_queue_find_entrypoint(packet);

	// KTIME_MAX disarms the timeout until the packet is sent again
	WRITE_ONCE(packet->timestamp, KTIME_MAX);
	smp_mb__after_atomic();

	// add packet
	ssh_packet_get(packet);
	list_add_tail(&packet->queue_node, head);

	spin_unlock(&packet->ptl->queue.lock);
}
  7389. +
/*
 * Re-queue all pending packets that still have tries left, e.g. in response
 * to a NAK. Wakes the transmitter (forced if anything was re-submitted).
 */
static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
{
	struct ssh_packet *p;
	bool resub = false;
	u8 try;

	/*
	 * Note: We deliberately do not remove/attempt to cancel and complete
	 * packets that are out of tries in this function. The packet will be
	 * eventually canceled and completed by the timeout. Removing the packet
	 * here could lead to overly eager cancellation if the packet has not
	 * been re-transmitted yet but the tries-counter already updated (i.e.
	 * ssh_ptl_tx_next removed the packet from the queue and updated the
	 * counter, but re-transmission for the last try has not actually
	 * started yet).
	 */

	spin_lock(&ptl->pending.lock);

	// re-queue all pending packets
	list_for_each_entry(p, &ptl->pending.head, pending_node) {
		// avoid further transitions if locked
		if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
			continue;

		// do not re-schedule if packet is out of tries
		try = ssh_packet_priority_get_try(READ_ONCE(p->priority));
		if (try >= SSH_PTL_MAX_PACKET_TRIES)
			continue;

		resub = true;
		__ssh_ptl_resubmit(p);
	}

	spin_unlock(&ptl->pending.lock);

	ssh_ptl_tx_wakeup(ptl, resub);
}
  7428. +
/*
 * Cancel packet @p: lock it against further state transitions and complete
 * it with -ECANCELED, unless another party (transmitter error path, timeout)
 * has already claimed it. Safe to call on packets that were never submitted.
 */
static void ssh_ptl_cancel(struct ssh_packet *p)
{
	if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
		return;

	trace_ssam_packet_cancel(p);

	/*
	 * Lock packet and commit with memory barrier. If this packet has
	 * already been locked, it's going to be removed and completed by
	 * another party, which should have precedence.
	 */
	if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
		return;

	/*
	 * By marking the packet as locked and employing the implicit memory
	 * barrier of test_and_set_bit, we have guaranteed that, at this point,
	 * the packet cannot be added to the queue any more.
	 *
	 * In case the packet has never been submitted, packet->ptl is NULL. If
	 * the packet is currently being submitted, packet->ptl may be NULL or
	 * non-NULL. Due to marking the packet as locked above and committing
	 * with the memory barrier, we have guaranteed that, if packet->ptl is
	 * NULL, the packet will never be added to the queue. If packet->ptl is
	 * non-NULL, we don't have any guarantees.
	 */

	if (READ_ONCE(p->ptl)) {
		ssh_ptl_remove_and_complete(p, -ECANCELED);
		ssh_ptl_tx_wakeup(p->ptl, false);
	} else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
		__ssh_ptl_complete(p, -ECANCELED);
	}
}
  7464. +
  7465. +
  7466. +static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
  7467. +{
  7468. + ktime_t timestamp = READ_ONCE(p->timestamp);
  7469. +
  7470. + if (timestamp != KTIME_MAX)
  7471. + return ktime_add(timestamp, timeout);
  7472. + else
  7473. + return KTIME_MAX;
  7474. +}
  7475. +
/*
 * Timeout reaper work function: scan the pending set for expired packets,
 * re-submit those that still have tries left and cancel/complete the rest
 * with -ETIMEDOUT. Re-arms itself for the next expiration and wakes the
 * transmitter if anything was re-submitted.
 */
static void ssh_ptl_timeout_reap(struct work_struct *work)
{
	struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
	struct ssh_packet *p, *n;
	LIST_HEAD(claimed);
	ktime_t now = ktime_get_coarse_boottime();
	ktime_t timeout = ptl->rtx_timeout.timeout;
	ktime_t next = KTIME_MAX;
	bool resub = false;

	trace_ssam_ptl_timeout_reap("pending", atomic_read(&ptl->pending.count));

	/*
	 * Mark reaper as "not pending". This is done before checking any
	 * packets to avoid lost-update type problems.
	 */
	WRITE_ONCE(ptl->rtx_timeout.expires, KTIME_MAX);
	smp_mb__after_atomic();

	spin_lock(&ptl->pending.lock);

	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
		ktime_t expires = ssh_packet_get_expiration(p, timeout);
		u8 try;

		/*
		 * Check if the timeout hasn't expired yet. Find out next
		 * expiration date to be handled after this run.
		 */
		if (ktime_after(expires, now)) {
			next = ktime_before(expires, next) ? expires : next;
			continue;
		}

		// avoid further transitions if locked
		if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
			continue;

		trace_ssam_packet_timeout(p);

		// check if we still have some tries left
		try = ssh_packet_priority_get_try(READ_ONCE(p->priority));
		if (likely(try < SSH_PTL_MAX_PACKET_TRIES)) {
			resub = true;
			__ssh_ptl_resubmit(p);
			continue;
		}

		// no more tries left: cancel the packet

		// if someone else has locked the packet already, don't use it
		if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
			continue;

		/*
		 * We have now marked the packet as locked. Thus it cannot be
		 * added to the pending list again after we've removed it here.
		 * We can therefore re-use the pending_node of this packet
		 * temporarily.
		 */

		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);

		atomic_dec(&ptl->pending.count);
		list_del(&p->pending_node);

		list_add_tail(&p->pending_node, &claimed);
	}

	spin_unlock(&ptl->pending.lock);

	// cancel and complete the packet
	list_for_each_entry_safe(p, n, &claimed, pending_node) {
		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
			ssh_ptl_queue_remove(p);
			__ssh_ptl_complete(p, -ETIMEDOUT);
		}

		// drop the reference we've obtained by removing it from pending
		list_del(&p->pending_node);
		ssh_packet_put(p);
	}

	// ensure that reaper doesn't run again immediately
	next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
	if (next != KTIME_MAX)
		ssh_ptl_timeout_reaper_mod(ptl, now, next);

	// force-wakeup to properly handle re-transmits if we've re-submitted
	ssh_ptl_tx_wakeup(ptl, resub);
}
  7567. +
  7568. +
  7569. +static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
  7570. +{
  7571. + int i;
  7572. +
  7573. + // check if SEQ has been seen recently (i.e. packet was re-transmitted)
  7574. + for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
  7575. + if (likely(ptl->rx.blocked.seqs[i] != seq))
  7576. + continue;
  7577. +
  7578. + ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
  7579. + return true;
  7580. + }
  7581. +
  7582. + // update list of blocked seuence IDs
  7583. + ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq;
  7584. + ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
  7585. + % ARRAY_SIZE(ptl->rx.blocked.seqs);
  7586. +
  7587. + return false;
  7588. +}
  7589. +
/*
 * Handle a received data frame: drop it if its sequence ID marks it as an
 * already-seen re-transmission, otherwise forward the payload upwards.
 */
static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
				 const struct ssh_frame *frame,
				 const struct ssam_span *payload)
{
	if (ssh_ptl_rx_retransmit_check(ptl, frame->seq))
		return;

	ptl->ops.data_received(ptl, payload);
}
  7599. +
  7600. +static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
  7601. +{
  7602. + struct ssh_packet_args args;
  7603. + struct ssh_packet *packet;
  7604. + struct ssam_span buf;
  7605. + struct msgbuf msgb;
  7606. + int status;
  7607. +
  7608. + status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
  7609. + if (status) {
  7610. + ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
  7611. + return;
  7612. + }
  7613. +
  7614. + args.type = 0;
  7615. + args.priority = SSH_PACKET_PRIORITY(ACK, 0);
  7616. + args.ops = &ssh_ptl_ctrl_packet_ops;
  7617. + ssh_packet_init(packet, &args);
  7618. +
  7619. + msgb_init(&msgb, buf.ptr, buf.len);
  7620. + msgb_push_ack(&msgb, seq);
  7621. + ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
  7622. +
  7623. + ssh_ptl_submit(ptl, packet);
  7624. + ssh_packet_put(packet);
  7625. +}
  7626. +
  7627. +static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
  7628. +{
  7629. + struct ssh_packet_args args;
  7630. + struct ssh_packet *packet;
  7631. + struct ssam_span buf;
  7632. + struct msgbuf msgb;
  7633. + int status;
  7634. +
  7635. + status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
  7636. + if (status) {
  7637. + ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
  7638. + return;
  7639. + }
  7640. +
  7641. + args.type = 0;
  7642. + args.priority = SSH_PACKET_PRIORITY(NAK, 0);
  7643. + args.ops = &ssh_ptl_ctrl_packet_ops;
  7644. + ssh_packet_init(packet, &args);
  7645. +
  7646. + msgb_init(&msgb, buf.ptr, buf.len);
  7647. + msgb_push_nak(&msgb);
  7648. + ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
  7649. +
  7650. + ssh_ptl_submit(ptl, packet);
  7651. + ssh_packet_put(packet);
  7652. +}
  7653. +
  7654. +static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
  7655. +{
  7656. + struct ssh_frame *frame;
  7657. + struct ssam_span payload;
  7658. + struct ssam_span aligned;
  7659. + bool syn_found;
  7660. + int status;
  7661. +
  7662. + // error injection: modify data to simulate corrupt SYN bytes
  7663. + ssh_ptl_rx_inject_invalid_syn(ptl, source);
  7664. +
  7665. + // find SYN
  7666. + syn_found = sshp_find_syn(source, &aligned);
  7667. +
  7668. + if (unlikely(aligned.ptr - source->ptr) > 0) {
  7669. + ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
  7670. +
  7671. + /*
  7672. + * Notes:
  7673. + * - This might send multiple NAKs in case the communication
  7674. + * starts with an invalid SYN and is broken down into multiple
  7675. + * pieces. This should generally be handled fine, we just
  7676. + * might receive duplicate data in this case, which is
  7677. + * detected when handling data frames.
  7678. + * - This path will also be executed on invalid CRCs: When an
  7679. + * invalid CRC is encountered, the code below will skip data
  7680. + * until direclty after the SYN. This causes the search for
  7681. + * the next SYN, which is generally not placed directly after
  7682. + * the last one.
  7683. + */
  7684. + ssh_ptl_send_nak(ptl);
  7685. + }
  7686. +
  7687. + if (unlikely(!syn_found))
  7688. + return aligned.ptr - source->ptr;
  7689. +
  7690. + // error injection: modify data to simulate corruption
  7691. + ssh_ptl_rx_inject_invalid_data(ptl, &aligned);
  7692. +
  7693. + // parse and validate frame
  7694. + status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
  7695. + SSH_PTL_RX_BUF_LEN);
  7696. + if (status) // invalid frame: skip to next syn
  7697. + return aligned.ptr - source->ptr + sizeof(u16);
  7698. + if (!frame) // not enough data
  7699. + return aligned.ptr - source->ptr;
  7700. +
  7701. + trace_ssam_rx_frame_received(frame);
  7702. +
  7703. + switch (frame->type) {
  7704. + case SSH_FRAME_TYPE_ACK:
  7705. + ssh_ptl_acknowledge(ptl, frame->seq);
  7706. + break;
  7707. +
  7708. + case SSH_FRAME_TYPE_NAK:
  7709. + ssh_ptl_resubmit_pending(ptl);
  7710. + break;
  7711. +
  7712. + case SSH_FRAME_TYPE_DATA_SEQ:
  7713. + ssh_ptl_send_ack(ptl, frame->seq);
  7714. + /* fallthrough */
  7715. +
  7716. + case SSH_FRAME_TYPE_DATA_NSQ:
  7717. + ssh_ptl_rx_dataframe(ptl, frame, &payload);
  7718. + break;
  7719. +
  7720. + default:
  7721. + ptl_warn(ptl, "ptl: received frame with unknown type 0x%02x\n",
  7722. + frame->type);
  7723. + break;
  7724. + }
  7725. +
  7726. + return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(frame->len);
  7727. +}
  7728. +
  7729. +static int ssh_ptl_rx_threadfn(void *data)
  7730. +{
  7731. + struct ssh_ptl *ptl = data;
  7732. +
  7733. + while (true) {
  7734. + struct ssam_span span;
  7735. + size_t offs = 0;
  7736. + size_t n;
  7737. +
  7738. + wait_event_interruptible(ptl->rx.wq,
  7739. + !kfifo_is_empty(&ptl->rx.fifo)
  7740. + || kthread_should_stop());
  7741. + if (kthread_should_stop())
  7742. + break;
  7743. +
  7744. + // copy from fifo to evaluation buffer
  7745. + n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);
  7746. +
  7747. + ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
  7748. + print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
  7749. + ptl->rx.buf.ptr + ptl->rx.buf.len - n,
  7750. + n, false);
  7751. +
  7752. + // parse until we need more bytes or buffer is empty
  7753. + while (offs < ptl->rx.buf.len) {
  7754. + sshp_buf_span_from(&ptl->rx.buf, offs, &span);
  7755. + n = ssh_ptl_rx_eval(ptl, &span);
  7756. + if (n == 0)
  7757. + break; // need more bytes
  7758. +
  7759. + offs += n;
  7760. + }
  7761. +
  7762. + // throw away the evaluated parts
  7763. + sshp_buf_drop(&ptl->rx.buf, offs);
  7764. + }
  7765. +
  7766. + return 0;
  7767. +}
  7768. +
  7769. +static inline void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
  7770. +{
  7771. + wake_up(&ptl->rx.wq);
  7772. +}
  7773. +
  7774. +static int ssh_ptl_rx_start(struct ssh_ptl *ptl)
  7775. +{
  7776. + if (ptl->rx.thread)
  7777. + return 0;
  7778. +
  7779. + ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl, "surface-sh-rx");
  7780. + if (IS_ERR(ptl->rx.thread))
  7781. + return PTR_ERR(ptl->rx.thread);
  7782. +
  7783. + return 0;
  7784. +}
  7785. +
  7786. +static int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
  7787. +{
  7788. + int status = 0;
  7789. +
  7790. + if (ptl->rx.thread) {
  7791. + status = kthread_stop(ptl->rx.thread);
  7792. + ptl->rx.thread = NULL;
  7793. + }
  7794. +
  7795. + return status;
  7796. +}
  7797. +
  7798. +static int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
  7799. +{
  7800. + int used;
  7801. +
  7802. + if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
  7803. + return -ESHUTDOWN;
  7804. +
  7805. + used = kfifo_in(&ptl->rx.fifo, buf, n);
  7806. + if (used)
  7807. + ssh_ptl_rx_wakeup(ptl);
  7808. +
  7809. + return used;
  7810. +}
  7811. +
  7812. +
  7813. +struct ssh_flush_packet {
  7814. + struct ssh_packet base;
  7815. + struct completion completion;
  7816. + int status;
  7817. +};
  7818. +
  7819. +static void ssh_ptl_flush_complete(struct ssh_packet *p, int status)
  7820. +{
  7821. + struct ssh_flush_packet *packet;
  7822. +
  7823. + packet = container_of(p, struct ssh_flush_packet, base);
  7824. + packet->status = status;
  7825. +}
  7826. +
  7827. +static void ssh_ptl_flush_release(struct ssh_packet *p)
  7828. +{
  7829. + struct ssh_flush_packet *packet;
  7830. +
  7831. + packet = container_of(p, struct ssh_flush_packet, base);
  7832. + complete_all(&packet->completion);
  7833. +}
  7834. +
  7835. +static const struct ssh_packet_ops ssh_flush_packet_ops = {
  7836. + .complete = ssh_ptl_flush_complete,
  7837. + .release = ssh_ptl_flush_release,
  7838. +};
  7839. +
  7840. +/**
  7841. + * ssh_ptl_shutdown - shut down the packet transmission layer
  7842. + * @ptl: packet transmission layer
  7843. + *
  7844. + * Shuts down the packet transmission layer, removing and canceling all queued
  7845. + * and pending packets. Packets canceled by this operation will be completed
  7846. + * with -ESHUTDOWN as status.
  7847. + *
  7848. + * As a result of this function, the transmission layer will be marked as shut
  7849. + * down. Submission of packets after the transmission layer has been shut down
  7850. + * will fail with -ESHUTDOWN.
  7851. + */
  7852. +static void ssh_ptl_shutdown(struct ssh_ptl *ptl)
  7853. +{
  7854. + LIST_HEAD(complete_q);
  7855. + LIST_HEAD(complete_p);
  7856. + struct ssh_packet *p, *n;
  7857. + int status;
  7858. +
  7859. + // ensure that no new packets (including ACK/NAK) can be submitted
  7860. + set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
  7861. + smp_mb__after_atomic();
  7862. +
  7863. + status = ssh_ptl_rx_stop(ptl);
  7864. + if (status)
  7865. + ptl_err(ptl, "ptl: failed to stop receiver thread\n");
  7866. +
  7867. + status = ssh_ptl_tx_stop(ptl);
  7868. + if (status)
  7869. + ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
  7870. +
  7871. + cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
  7872. +
  7873. + /*
  7874. + * At this point, all threads have been stopped. This means that the
  7875. + * only references to packets from inside the system are in the queue
  7876. + * and pending set.
  7877. + *
  7878. + * Note: We still need locks here because someone could still be
  7879. + * cancelling packets.
  7880. + *
  7881. + * Note 2: We can re-use queue_node (or pending_node) if we mark the
  7882. + * packet as locked and then remove it from the queue (or pending set
  7883. + * respectively). Marking the packet as locked avoids re-queueing
  7884. + * (which should already be prevented by having stopped the threads...)
  7885. + * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
  7886. + * new list via other threads (e.g. cancellation).
  7887. + *
  7888. + * Note 3: There may be overlap between complete_p and complete_q.
  7889. + * This is handled via test_and_set_bit on the "completed" flag
  7890. + * (also handles cancellation).
  7891. + */
  7892. +
  7893. + // mark queued packets as locked and move them to complete_q
  7894. + spin_lock(&ptl->queue.lock);
  7895. + list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
  7896. + set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
  7897. + smp_mb__before_atomic();
  7898. + clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
  7899. +
  7900. + list_del(&p->queue_node);
  7901. + list_add_tail(&p->queue_node, &complete_q);
  7902. + }
  7903. + spin_unlock(&ptl->queue.lock);
  7904. +
  7905. + // mark pending packets as locked and move them to complete_p
  7906. + spin_lock(&ptl->pending.lock);
  7907. + list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
  7908. + set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
  7909. + smp_mb__before_atomic();
  7910. + clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
  7911. +
  7912. + list_del(&p->pending_node);
  7913. + list_add_tail(&p->pending_node, &complete_q);
  7914. + }
  7915. + atomic_set(&ptl->pending.count, 0);
  7916. + spin_unlock(&ptl->pending.lock);
  7917. +
  7918. + // complete and drop packets on complete_q
  7919. + list_for_each_entry(p, &complete_q, queue_node) {
  7920. + if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
  7921. + __ssh_ptl_complete(p, -ESHUTDOWN);
  7922. +
  7923. + ssh_packet_put(p);
  7924. + }
  7925. +
  7926. + // complete and drop packets on complete_p
  7927. + list_for_each_entry(p, &complete_p, pending_node) {
  7928. + if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
  7929. + __ssh_ptl_complete(p, -ESHUTDOWN);
  7930. +
  7931. + ssh_packet_put(p);
  7932. + }
  7933. +
  7934. + /*
  7935. + * At this point we have guaranteed that the system doesn't reference
  7936. + * any packets any more.
  7937. + */
  7938. +}
  7939. +
  7940. +static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl)
  7941. +{
  7942. + return ptl->serdev ? &ptl->serdev->dev : NULL;
  7943. +}
  7944. +
  7945. +static int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
  7946. + struct ssh_ptl_ops *ops)
  7947. +{
  7948. + int i, status;
  7949. +
  7950. + ptl->serdev = serdev;
  7951. + ptl->state = 0;
  7952. +
  7953. + spin_lock_init(&ptl->queue.lock);
  7954. + INIT_LIST_HEAD(&ptl->queue.head);
  7955. +
  7956. + spin_lock_init(&ptl->pending.lock);
  7957. + INIT_LIST_HEAD(&ptl->pending.head);
  7958. + atomic_set_release(&ptl->pending.count, 0);
  7959. +
  7960. + ptl->tx.thread = NULL;
  7961. + ptl->tx.thread_signal = false;
  7962. + ptl->tx.packet = NULL;
  7963. + ptl->tx.offset = 0;
  7964. + init_waitqueue_head(&ptl->tx.thread_wq);
  7965. + init_waitqueue_head(&ptl->tx.packet_wq);
  7966. +
  7967. + ptl->rx.thread = NULL;
  7968. + init_waitqueue_head(&ptl->rx.wq);
  7969. +
  7970. + ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
  7971. + ptl->rtx_timeout.expires = KTIME_MAX;
  7972. + INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
  7973. +
  7974. + ptl->ops = *ops;
  7975. +
  7976. + // initialize list of recent/blocked SEQs with invalid sequence IDs
  7977. + for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
  7978. + ptl->rx.blocked.seqs[i] = 0xFFFF;
  7979. + ptl->rx.blocked.offset = 0;
  7980. +
  7981. + status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
  7982. + if (status)
  7983. + return status;
  7984. +
  7985. + status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
  7986. + if (status)
  7987. + kfifo_free(&ptl->rx.fifo);
  7988. +
  7989. + return status;
  7990. +}
  7991. +
  7992. +static void ssh_ptl_destroy(struct ssh_ptl *ptl)
  7993. +{
  7994. + kfifo_free(&ptl->rx.fifo);
  7995. + sshp_buf_free(&ptl->rx.buf);
  7996. +}
  7997. +
  7998. +
  7999. +/* -- Request transport layer (rtl). ---------------------------------------- */
  8000. +
  8001. +#define SSH_RTL_REQUEST_TIMEOUT ms_to_ktime(3000)
  8002. +#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
  8003. +
  8004. +#define SSH_RTL_MAX_PENDING 3
  8005. +
  8006. +
  8007. +enum ssh_rtl_state_flags {
  8008. + SSH_RTL_SF_SHUTDOWN_BIT,
  8009. +};
  8010. +
  8011. +struct ssh_rtl_ops {
  8012. + void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd,
  8013. + const struct ssam_span *data);
  8014. +};
  8015. +
  8016. +struct ssh_rtl {
  8017. + struct ssh_ptl ptl;
  8018. + unsigned long state;
  8019. +
  8020. + struct {
  8021. + spinlock_t lock;
  8022. + struct list_head head;
  8023. + } queue;
  8024. +
  8025. + struct {
  8026. + spinlock_t lock;
  8027. + struct list_head head;
  8028. + atomic_t count;
  8029. + } pending;
  8030. +
  8031. + struct {
  8032. + struct work_struct work;
  8033. + } tx;
  8034. +
  8035. + struct {
  8036. + ktime_t timeout;
  8037. + ktime_t expires;
  8038. + struct delayed_work reaper;
  8039. + } rtx_timeout;
  8040. +
  8041. + struct ssh_rtl_ops ops;
  8042. +};
  8043. +
  8044. +
  8045. +#define rtl_dbg(r, fmt, ...) ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__)
  8046. +#define rtl_info(p, fmt, ...) ptl_info(&(p)->ptl, fmt, ##__VA_ARGS__)
  8047. +#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__)
  8048. +#define rtl_err(r, fmt, ...) ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__)
  8049. +#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__)
  8050. +
  8051. +#define to_ssh_rtl(ptr, member) \
  8052. + container_of(ptr, struct ssh_rtl, member)
  8053. +
  8054. +#define to_ssh_request(ptr, member) \
  8055. + container_of(ptr, struct ssh_request, member)
  8056. +
  8057. +static inline struct ssh_rtl *ssh_request_rtl(struct ssh_request *rqst)
  8058. +{
  8059. + struct ssh_ptl *ptl = READ_ONCE(rqst->packet.ptl);
  8060. + return likely(ptl) ? to_ssh_rtl(ptl, ptl) : NULL;
  8061. +}
  8062. +
  8063. +
  8064. +/**
  8065. + * ssh_rtl_should_drop_response - error injection hook to drop request responses
  8066. + *
  8067. + * Useful to cause request transmission timeouts in the driver by dropping the
  8068. + * response to a request.
  8069. + */
  8070. +static noinline_if_inject bool ssh_rtl_should_drop_response(void)
  8071. +{
  8072. + return false;
  8073. +}
  8074. +ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);
  8075. +
  8076. +
  8077. +static inline u16 ssh_request_get_rqid(struct ssh_request *rqst)
  8078. +{
  8079. + return get_unaligned_le16(rqst->packet.data.ptr
  8080. + + SSH_MSGOFFSET_COMMAND(rqid));
  8081. +}
  8082. +
  8083. +static inline u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
  8084. +{
  8085. + if (!rqst->packet.data.ptr)
  8086. + return -1;
  8087. +
  8088. + return ssh_request_get_rqid(rqst);
  8089. +}
  8090. +
  8091. +
  8092. +static void ssh_rtl_queue_remove(struct ssh_request *rqst)
  8093. +{
  8094. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8095. + bool remove;
  8096. +
  8097. + spin_lock(&rtl->queue.lock);
  8098. +
  8099. + remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
  8100. + if (remove)
  8101. + list_del(&rqst->node);
  8102. +
  8103. + spin_unlock(&rtl->queue.lock);
  8104. +
  8105. + if (remove)
  8106. + ssh_request_put(rqst);
  8107. +}
  8108. +
  8109. +static void ssh_rtl_pending_remove(struct ssh_request *rqst)
  8110. +{
  8111. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8112. + bool remove;
  8113. +
  8114. + spin_lock(&rtl->pending.lock);
  8115. +
  8116. + remove = test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state);
  8117. + if (remove) {
  8118. + atomic_dec(&rtl->pending.count);
  8119. + list_del(&rqst->node);
  8120. + }
  8121. +
  8122. + spin_unlock(&rtl->pending.lock);
  8123. +
  8124. + if (remove)
  8125. + ssh_request_put(rqst);
  8126. +}
  8127. +
  8128. +
  8129. +static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
  8130. +{
  8131. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8132. +
  8133. + trace_ssam_request_complete(rqst, status);
  8134. +
  8135. + // rtl/ptl may not be set if we're cancelling before submitting
  8136. + rtl_dbg_cond(rtl, "rtl: completing request (rqid: 0x%04x,"
  8137. + " status: %d)\n", ssh_request_get_rqid_safe(rqst), status);
  8138. +
  8139. + if (status && status != -ECANCELED)
  8140. + rtl_dbg_cond(rtl, "rtl: request error: %d\n", status);
  8141. +
  8142. + rqst->ops->complete(rqst, NULL, NULL, status);
  8143. +}
  8144. +
  8145. +static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
  8146. + const struct ssh_command *cmd,
  8147. + const struct ssam_span *data)
  8148. +{
  8149. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8150. +
  8151. + trace_ssam_request_complete(rqst, 0);
  8152. +
  8153. + rtl_dbg(rtl, "rtl: completing request with response"
  8154. + " (rqid: 0x%04x)\n", ssh_request_get_rqid(rqst));
  8155. +
  8156. + rqst->ops->complete(rqst, cmd, data, 0);
  8157. +}
  8158. +
  8159. +
  8160. +static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
  8161. +{
  8162. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8163. +
  8164. + if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
  8165. + return !atomic_read(&rtl->pending.count);
  8166. +
  8167. + return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
  8168. +}
  8169. +
  8170. +static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
  8171. +{
  8172. + struct ssh_request *rqst = ERR_PTR(-ENOENT);
  8173. + struct ssh_request *p, *n;
  8174. +
  8175. + spin_lock(&rtl->queue.lock);
  8176. +
  8177. + // find first non-locked request and remove it
  8178. + list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
  8179. + if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
  8180. + continue;
  8181. +
  8182. + if (!ssh_rtl_tx_can_process(p)) {
  8183. + rqst = ERR_PTR(-EBUSY);
  8184. + break;
  8185. + }
  8186. +
  8187. + /*
  8188. + * Remove from queue and mark as transmitting. Ensure that the
  8189. + * state does not get zero via memory barrier.
  8190. + */
  8191. + set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
  8192. + smp_mb__before_atomic();
  8193. + clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
  8194. +
  8195. + list_del(&p->node);
  8196. +
  8197. + rqst = p;
  8198. + break;
  8199. + }
  8200. +
  8201. + spin_unlock(&rtl->queue.lock);
  8202. + return rqst;
  8203. +}
  8204. +
  8205. +static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
  8206. +{
  8207. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8208. +
  8209. + spin_lock(&rtl->pending.lock);
  8210. +
  8211. + if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
  8212. + spin_unlock(&rtl->pending.lock);
  8213. + return -EINVAL;
  8214. + }
  8215. +
  8216. + if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
  8217. + spin_unlock(&rtl->pending.lock);
  8218. + return -EALREADY;
  8219. + }
  8220. +
  8221. + atomic_inc(&rtl->pending.count);
  8222. + ssh_request_get(rqst);
  8223. + list_add_tail(&rqst->node, &rtl->pending.head);
  8224. +
  8225. + spin_unlock(&rtl->pending.lock);
  8226. + return 0;
  8227. +}
  8228. +
  8229. +static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
  8230. +{
  8231. + struct ssh_request *rqst;
  8232. + int status;
  8233. +
  8234. + // get and prepare next request for transmit
  8235. + rqst = ssh_rtl_tx_next(rtl);
  8236. + if (IS_ERR(rqst))
  8237. + return PTR_ERR(rqst);
  8238. +
  8239. + // add to/mark as pending
  8240. + status = ssh_rtl_tx_pending_push(rqst);
  8241. + if (status) {
  8242. + ssh_request_put(rqst);
  8243. + return -EAGAIN;
  8244. + }
  8245. +
  8246. + // submit packet
  8247. + status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
  8248. + if (status == -ESHUTDOWN) {
  8249. + /*
  8250. + * Packet has been refused due to the packet layer shutting
  8251. + * down. Complete it here.
  8252. + */
  8253. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
  8254. + smp_mb__after_atomic();
  8255. +
  8256. + ssh_rtl_pending_remove(rqst);
  8257. + ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
  8258. +
  8259. + ssh_request_put(rqst);
  8260. + return -ESHUTDOWN;
  8261. +
  8262. + } else if (status) {
  8263. + /*
  8264. + * If submitting the packet failed and the packet layer isn't
  8265. + * shutting down, the packet has either been submitted/queued
  8266. + * before (-EALREADY, which cannot happen as we have guaranteed
  8267. + * that requests cannot be re-submitted), or the packet was
  8268. + * marked as locked (-EINVAL). To mark the packet locked at this
  8269. + * stage, the request, and thus the packets itself, had to have
  8270. + * been canceled. Simply drop the reference. Cancellation itself
  8271. + * will remove it from the set of pending requests.
  8272. + */
  8273. +
  8274. + WARN_ON(status != -EINVAL);
  8275. +
  8276. + ssh_request_put(rqst);
  8277. + return -EAGAIN;
  8278. + }
  8279. +
  8280. + ssh_request_put(rqst);
  8281. + return 0;
  8282. +}
  8283. +
  8284. +static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
  8285. +{
  8286. + bool empty;
  8287. +
  8288. + spin_lock(&rtl->queue.lock);
  8289. + empty = list_empty(&rtl->queue.head);
  8290. + spin_unlock(&rtl->queue.lock);
  8291. +
  8292. + return empty;
  8293. +}
  8294. +
  8295. +static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
  8296. +{
  8297. + if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
  8298. + return false;
  8299. +
  8300. + if (ssh_rtl_queue_empty(rtl))
  8301. + return false;
  8302. +
  8303. + return schedule_work(&rtl->tx.work);
  8304. +}
  8305. +
  8306. +static void ssh_rtl_tx_work_fn(struct work_struct *work)
  8307. +{
  8308. + struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
  8309. + int i, status;
  8310. +
  8311. + /*
  8312. + * Try to be nice and not block the workqueue: Run a maximum of 10
  8313. + * tries, then re-submit if necessary. This should not be necessary,
  8314. + * for normal execution, but guarantee it anyway.
  8315. + */
  8316. + for (i = 0; i < 10; i++) {
  8317. + status = ssh_rtl_tx_try_process_one(rtl);
  8318. + if (status == -ENOENT || status == -EBUSY)
  8319. + return; // no more requests to process
  8320. +
  8321. + if (status == -ESHUTDOWN) {
  8322. + /*
  8323. + * Packet system shutting down. No new packets can be
  8324. + * transmitted. Return silently, the party initiating
  8325. + * the shutdown should handle the rest.
  8326. + */
  8327. + return;
  8328. + }
  8329. +
  8330. + WARN_ON(status != 0 && status != -EAGAIN);
  8331. + }
  8332. +
  8333. + // out of tries, reschedule
  8334. + ssh_rtl_tx_schedule(rtl);
  8335. +}
  8336. +
  8337. +
  8338. +static int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
  8339. +{
  8340. + trace_ssam_request_submit(rqst);
  8341. +
  8342. + /*
  8343. + * Ensure that requests expecting a response are sequenced. If this
  8344. + * invariant ever changes, see the comment in ssh_rtl_complete on what
  8345. + * is required to be changed in the code.
  8346. + */
  8347. + if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
  8348. + if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
  8349. + return -EINVAL;
  8350. +
  8351. + // try to set ptl and check if this request has already been submitted
  8352. + if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl) != NULL)
  8353. + return -EALREADY;
  8354. +
  8355. + spin_lock(&rtl->queue.lock);
  8356. +
  8357. + if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
  8358. + spin_unlock(&rtl->queue.lock);
  8359. + return -ESHUTDOWN;
  8360. + }
  8361. +
  8362. + if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
  8363. + spin_unlock(&rtl->queue.lock);
  8364. + return -EINVAL;
  8365. + }
  8366. +
  8367. + ssh_request_get(rqst);
  8368. + set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
  8369. + list_add_tail(&rqst->node, &rtl->queue.head);
  8370. +
  8371. + spin_unlock(&rtl->queue.lock);
  8372. +
  8373. + ssh_rtl_tx_schedule(rtl);
  8374. + return 0;
  8375. +}
  8376. +
  8377. +
  8378. +static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
  8379. + ktime_t expires)
  8380. +{
  8381. + unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
  8382. + ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);
  8383. + ktime_t old;
  8384. +
  8385. + // re-adjust / schedule reaper if it is above resolution delta
  8386. + old = READ_ONCE(rtl->rtx_timeout.expires);
  8387. + while (ktime_before(aexp, old))
  8388. + old = cmpxchg64(&rtl->rtx_timeout.expires, old, expires);
  8389. +
  8390. + // if we updated the reaper expiration, modify work timeout
  8391. + if (old == expires)
  8392. + mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
  8393. +}
  8394. +
  8395. +static void ssh_rtl_timeout_start(struct ssh_request *rqst)
  8396. +{
  8397. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  8398. + ktime_t timestamp = ktime_get_coarse_boottime();
  8399. + ktime_t timeout = rtl->rtx_timeout.timeout;
  8400. +
  8401. + if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
  8402. + return;
  8403. +
  8404. + WRITE_ONCE(rqst->timestamp, timestamp);
  8405. + smp_mb__after_atomic();
  8406. +
  8407. + ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
  8408. +}
  8409. +
  8410. +
  8411. +static void ssh_rtl_complete(struct ssh_rtl *rtl,
  8412. + const struct ssh_command *command,
  8413. + const struct ssam_span *command_data)
  8414. +{
  8415. + struct ssh_request *r = NULL;
  8416. + struct ssh_request *p, *n;
  8417. + u16 rqid = get_unaligned_le16(&command->rqid);
  8418. +
  8419. + trace_ssam_rx_response_received(command, command_data->len);
  8420. +
  8421. + /*
  8422. + * Get request from pending based on request ID and mark it as response
  8423. + * received and locked.
  8424. + */
  8425. + spin_lock(&rtl->pending.lock);
  8426. + list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
  8427. + // we generally expect requests to be processed in order
  8428. + if (unlikely(ssh_request_get_rqid(p) != rqid))
  8429. + continue;
  8430. +
  8431. + // simulate response timeout
  8432. + if (ssh_rtl_should_drop_response()) {
  8433. + spin_unlock(&rtl->pending.lock);
  8434. +
  8435. + trace_ssam_ei_rx_drop_response(p);
  8436. + rtl_info(rtl, "request error injection: "
  8437. + "dropping response for request %p\n",
  8438. + &p->packet);
  8439. + return;
  8440. + }
  8441. +
  8442. + /*
  8443. + * Mark as "response received" and "locked" as we're going to
  8444. + * complete it. Ensure that the state doesn't get zero by
  8445. + * employing a memory barrier.
  8446. + */
  8447. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
  8448. + set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
  8449. + smp_mb__before_atomic();
  8450. + clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);
  8451. +
  8452. + atomic_dec(&rtl->pending.count);
  8453. + list_del(&p->node);
  8454. +
  8455. + r = p;
  8456. + break;
  8457. + }
  8458. + spin_unlock(&rtl->pending.lock);
  8459. +
  8460. + if (!r) {
  8461. + rtl_warn(rtl, "rtl: dropping unexpected command message"
  8462. + " (rqid = 0x%04x)\n", rqid);
  8463. + return;
  8464. + }
  8465. +
  8466. + // if the request hasn't been completed yet, we will do this now
  8467. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
  8468. + ssh_request_put(r);
  8469. + ssh_rtl_tx_schedule(rtl);
  8470. + return;
  8471. + }
  8472. +
  8473. + /*
  8474. + * Make sure the request has been transmitted. In case of a sequenced
  8475. + * request, we are guaranteed that the completion callback will run on
  8476. + * the receiver thread directly when the ACK for the packet has been
  8477. + * received. Similarly, this function is guaranteed to run on the
  8478. + * receiver thread. Thus we are guaranteed that if the packet has been
  8479. + * successfully transmitted and received an ACK, the transmitted flag
  8480. + * has been set and is visible here.
  8481. + *
  8482. + * We are currently not handling unsequenced packets here, as those
  8483. + * should never expect a response as ensured in ssh_rtl_submit. If this
  8484. + * ever changes, one would have to test for
  8485. + *
  8486. + * (r->state & (transmitting | transmitted))
  8487. + *
  8488. + * on unsequenced packets to determine if they could have been
  8489. + * transmitted. There are no synchronization guarantees as in the
  8490. + * sequenced case, since, in this case, the callback function will not
  8491. + * run on the same thread. Thus an exact determination is impossible.
  8492. + */
  8493. + if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
  8494. + rtl_err(rtl, "rtl: received response before ACK for request"
  8495. + " (rqid = 0x%04x)\n", rqid);
  8496. +
  8497. + /*
  8498. + * NB: Timeout has already been canceled, request already been
  8499. + * removed from pending and marked as locked and completed. As
  8500. + * we receive a "false" response, the packet might still be
  8501. + * queued though.
  8502. + */
  8503. + ssh_rtl_queue_remove(r);
  8504. +
  8505. + ssh_rtl_complete_with_status(r, -EREMOTEIO);
  8506. + ssh_request_put(r);
  8507. +
  8508. + ssh_rtl_tx_schedule(rtl);
  8509. + return;
  8510. + }
  8511. +
  8512. + /*
  8513. + * NB: Timeout has already been canceled, request already been
  8514. + * removed from pending and marked as locked and completed. The request
  8515. + * can also not be queued any more, as it has been marked as
  8516. + * transmitting and later transmitted. Thus no need to remove it from
  8517. + * anywhere.
  8518. + */
  8519. +
  8520. + ssh_rtl_complete_with_rsp(r, command, command_data);
  8521. + ssh_request_put(r);
  8522. +
  8523. + ssh_rtl_tx_schedule(rtl);
  8524. +}
  8525. +
  8526. +
  8527. +static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
  8528. +{
  8529. + struct ssh_rtl *rtl;
  8530. + unsigned long state, fixed;
  8531. + bool remove;
  8532. +
  8533. + /*
  8534. + * Handle unsubmitted request: Try to mark the packet as locked,
  8535. + * expecting the state to be zero (i.e. unsubmitted). Note that, if
  8536. + * setting the state worked, we might still be adding the packet to the
  8537. + * queue in a currently executing submit call. In that case, however,
  8538. + * ptl reference must have been set previously, as locked is checked
  8539. + * after setting ptl. Thus only if we successfully lock this request and
  8540. + * ptl is NULL, we have successfully removed the request.
  8541. + * Otherwise we need to try and grab it from the queue.
  8542. + *
  8543. + * Note that if the CMPXCHG fails, we are guaranteed that ptl has
  8544. + * been set and is non-NULL, as states can only be nonzero after this
  8545. + * has been set. Also note that we need to fetch the static (type) flags
  8546. + * to ensure that they don't cause the cmpxchg to fail.
  8547. + */
  8548. + fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
  8549. + state = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
  8550. + if (!state && !READ_ONCE(r->packet.ptl)) {
  8551. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8552. + return true;
  8553. +
  8554. + ssh_rtl_complete_with_status(r, -ECANCELED);
  8555. + return true;
  8556. + }
  8557. +
  8558. + rtl = ssh_request_rtl(r);
  8559. + spin_lock(&rtl->queue.lock);
  8560. +
  8561. + /*
  8562. + * Note: 1) Requests cannot be re-submitted. 2) If a request is queued,
  8563. + * it cannot be "transmitting"/"pending" yet. Thus, if we successfully
  8564. + * remove the request here, we have removed all its occurrences in
  8565. + * the system.
  8566. + */
  8567. +
  8568. + remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
  8569. + if (!remove) {
  8570. + spin_unlock(&rtl->queue.lock);
  8571. + return false;
  8572. + }
  8573. +
  8574. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
  8575. + list_del(&r->node);
  8576. +
  8577. + spin_unlock(&rtl->queue.lock);
  8578. +
  8579. + ssh_request_put(r); // drop reference obtained from queue
  8580. +
  8581. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8582. + return true;
  8583. +
  8584. + ssh_rtl_complete_with_status(r, -ECANCELED);
  8585. + return true;
  8586. +}
  8587. +
  8588. +static bool ssh_rtl_cancel_pending(struct ssh_request *r)
  8589. +{
  8590. + // if the packet is already locked, it's going to be removed shortly
  8591. + if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
  8592. + return true;
  8593. +
  8594. + /*
  8595. + * Now that we have locked the packet, we have guaranteed that it can't
  8596. + * be added to the system any more. If rtl is zero, the locked
  8597. + * check in ssh_rtl_submit has not been run and any submission,
  8598. + * currently in progress or called later, won't add the packet. Thus we
  8599. + * can directly complete it.
  8600. + */
  8601. + if (!ssh_request_rtl(r)) {
  8602. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8603. + return true;
  8604. +
  8605. + ssh_rtl_complete_with_status(r, -ECANCELED);
  8606. + return true;
  8607. + }
  8608. +
  8609. + /*
  8610. + * Try to cancel the packet. If the packet has not been completed yet,
  8611. + * this will subsequently (and synchronously) call the completion
  8612. + * callback of the packet, which will complete the request.
  8613. + */
  8614. + ssh_ptl_cancel(&r->packet);
  8615. +
  8616. + /*
  8617. + * If the packet has been completed with success, i.e. has not been
  8618. + * canceled by the above call, the request may not have been completed
  8619. + * yet (may be waiting for a response). Check if we need to do this
  8620. + * here.
  8621. + */
  8622. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8623. + return true;
  8624. +
  8625. + ssh_rtl_queue_remove(r);
  8626. + ssh_rtl_pending_remove(r);
  8627. + ssh_rtl_complete_with_status(r, -ECANCELED);
  8628. +
  8629. + return true;
  8630. +}
  8631. +
  8632. +static bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
  8633. +{
  8634. + struct ssh_rtl *rtl;
  8635. + bool canceled;
  8636. +
  8637. + if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
  8638. + return true;
  8639. +
  8640. + trace_ssam_request_cancel(rqst);
  8641. +
  8642. + if (pending)
  8643. + canceled = ssh_rtl_cancel_pending(rqst);
  8644. + else
  8645. + canceled = ssh_rtl_cancel_nonpending(rqst);
  8646. +
  8647. + // note: rtl may be NULL if request has not been submitted yet
  8648. + rtl = ssh_request_rtl(rqst);
  8649. + if (canceled && rtl)
  8650. + ssh_rtl_tx_schedule(rtl);
  8651. +
  8652. + return canceled;
  8653. +}
  8654. +
  8655. +
  8656. +static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
  8657. +{
  8658. + struct ssh_request *r = to_ssh_request(p, packet);
  8659. +
  8660. + if (unlikely(status)) {
  8661. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
  8662. +
  8663. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8664. + return;
  8665. +
  8666. + /*
  8667. + * The packet may get canceled even though it has not been
  8668. + * submitted yet. The request may still be queued. Check the
  8669. + * queue and remove it if necessary. As the timeout would have
  8670. + * been started in this function on success, there's no need to
  8671. + * cancel it here.
  8672. + */
  8673. + ssh_rtl_queue_remove(r);
  8674. + ssh_rtl_pending_remove(r);
  8675. + ssh_rtl_complete_with_status(r, status);
  8676. +
  8677. + ssh_rtl_tx_schedule(ssh_request_rtl(r));
  8678. + return;
  8679. + }
  8680. +
  8681. + /*
  8682. + * Mark as transmitted, ensure that state doesn't get zero by inserting
  8683. + * a memory barrier.
  8684. + */
  8685. + set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
  8686. + smp_mb__before_atomic();
  8687. + clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);
  8688. +
  8689. + // if we expect a response, we just need to start the timeout
  8690. + if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
  8691. + ssh_rtl_timeout_start(r);
  8692. + return;
  8693. + }
  8694. +
  8695. + /*
  8696. + * If we don't expect a response, lock, remove, and complete the
  8697. + * request. Note that, at this point, the request is guaranteed to have
  8698. + * left the queue and no timeout has been started. Thus we only need to
  8699. + * remove it from pending. If the request has already been completed (it
  8700. + * may have been canceled) return.
  8701. + */
  8702. +
  8703. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
  8704. + if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8705. + return;
  8706. +
  8707. + ssh_rtl_pending_remove(r);
  8708. + ssh_rtl_complete_with_status(r, 0);
  8709. +
  8710. + ssh_rtl_tx_schedule(ssh_request_rtl(r));
  8711. +}
  8712. +
  8713. +
  8714. +static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeo)
  8715. +{
  8716. + ktime_t timestamp = READ_ONCE(r->timestamp);
  8717. +
  8718. + if (timestamp != KTIME_MAX)
  8719. + return ktime_add(timestamp, timeo);
  8720. + else
  8721. + return KTIME_MAX;
  8722. +}
  8723. +
  8724. +static void ssh_rtl_timeout_reap(struct work_struct *work)
  8725. +{
  8726. + struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
  8727. + struct ssh_request *r, *n;
  8728. + LIST_HEAD(claimed);
  8729. + ktime_t now = ktime_get_coarse_boottime();
  8730. + ktime_t timeout = rtl->rtx_timeout.timeout;
  8731. + ktime_t next = KTIME_MAX;
  8732. +
  8733. + trace_ssam_rtl_timeout_reap("pending", atomic_read(&rtl->pending.count));
  8734. +
  8735. + /*
  8736. + * Mark reaper as "not pending". This is done before checking any
  8737. + * requests to avoid lost-update type problems.
  8738. + */
  8739. + WRITE_ONCE(rtl->rtx_timeout.expires, KTIME_MAX);
  8740. + smp_mb__after_atomic();
  8741. +
  8742. + spin_lock(&rtl->pending.lock);
  8743. + list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
  8744. + ktime_t expires = ssh_request_get_expiration(r, timeout);
  8745. +
  8746. + /*
  8747. + * Check if the timeout hasn't expired yet. Find out next
  8748. + * expiration date to be handled after this run.
  8749. + */
  8750. + if (ktime_after(expires, now)) {
  8751. + next = ktime_before(expires, next) ? expires : next;
  8752. + continue;
  8753. + }
  8754. +
  8755. + // avoid further transitions if locked
  8756. + if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
  8757. + continue;
  8758. +
  8759. + /*
  8760. + * We have now marked the packet as locked. Thus it cannot be
  8761. + * added to the pending or queued lists again after we've
  8762. + * removed it here. We can therefore re-use the node of this
  8763. + * packet temporarily.
  8764. + */
  8765. +
  8766. + clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
  8767. +
  8768. + atomic_dec(&rtl->pending.count);
  8769. + list_del(&r->node);
  8770. +
  8771. + list_add_tail(&r->node, &claimed);
  8772. + }
  8773. + spin_unlock(&rtl->pending.lock);
  8774. +
  8775. + // cancel and complete the request
  8776. + list_for_each_entry_safe(r, n, &claimed, node) {
  8777. + trace_ssam_request_timeout(r);
  8778. +
  8779. + /*
  8780. + * At this point we've removed the packet from pending. This
  8781. + * means that we've obtained the last (only) reference of the
  8782. + * system to it. Thus we can just complete it.
  8783. + */
  8784. + if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  8785. + ssh_rtl_complete_with_status(r, -ETIMEDOUT);
  8786. +
  8787. + // drop the reference we've obtained by removing it from pending
  8788. + list_del(&r->node);
  8789. + ssh_request_put(r);
  8790. + }
  8791. +
  8792. + // ensure that reaper doesn't run again immediately
  8793. + next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
  8794. + if (next != KTIME_MAX)
  8795. + ssh_rtl_timeout_reaper_mod(rtl, now, next);
  8796. +
  8797. + ssh_rtl_tx_schedule(rtl);
  8798. +}
  8799. +
  8800. +
  8801. +static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
  8802. + const struct ssam_span *data)
  8803. +{
  8804. + trace_ssam_rx_event_received(cmd, data->len);
  8805. +
  8806. + rtl_dbg(rtl, "rtl: handling event (rqid: 0x%04x)\n",
  8807. + get_unaligned_le16(&cmd->rqid));
  8808. +
  8809. + rtl->ops.handle_event(rtl, cmd, data);
  8810. +}
  8811. +
  8812. +static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
  8813. +{
  8814. + struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
  8815. + struct device *dev = &p->serdev->dev;
  8816. + struct ssh_command *command;
  8817. + struct ssam_span command_data;
  8818. +
  8819. + if (sshp_parse_command(dev, data, &command, &command_data))
  8820. + return;
  8821. +
  8822. + if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
  8823. + ssh_rtl_rx_event(rtl, command, &command_data);
  8824. + else
  8825. + ssh_rtl_complete(rtl, command, &command_data);
  8826. +}
  8827. +
  8828. +static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
  8829. +{
  8830. + switch (data->ptr[0]) {
  8831. + case SSH_PLD_TYPE_CMD:
  8832. + ssh_rtl_rx_command(p, data);
  8833. + break;
  8834. +
  8835. + default:
  8836. + ptl_err(p, "rtl: rx: unknown frame payload type"
  8837. + " (type: 0x%02x)\n", data->ptr[0]);
  8838. + break;
  8839. + }
  8840. +}
  8841. +
  8842. +
  8843. +static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl)
  8844. +{
  8845. + return ssh_ptl_get_device(&rtl->ptl);
  8846. +}
  8847. +
  8848. +static inline bool ssh_rtl_tx_flush(struct ssh_rtl *rtl)
  8849. +{
  8850. + return flush_work(&rtl->tx.work);
  8851. +}
  8852. +
  8853. +static inline int ssh_rtl_tx_start(struct ssh_rtl *rtl)
  8854. +{
  8855. + int status;
  8856. + bool sched;
  8857. +
  8858. + status = ssh_ptl_tx_start(&rtl->ptl);
  8859. + if (status)
  8860. + return status;
  8861. +
  8862. + /*
  8863. + * If the packet layer has been shut down and restarted without shutting
  8864. + * down the request layer, there may still be requests queued and not
  8865. + * handled.
  8866. + */
  8867. + spin_lock(&rtl->queue.lock);
  8868. + sched = !list_empty(&rtl->queue.head);
  8869. + spin_unlock(&rtl->queue.lock);
  8870. +
  8871. + if (sched)
  8872. + ssh_rtl_tx_schedule(rtl);
  8873. +
  8874. + return 0;
  8875. +}
  8876. +
  8877. +static inline int ssh_rtl_rx_start(struct ssh_rtl *rtl)
  8878. +{
  8879. + return ssh_ptl_rx_start(&rtl->ptl);
  8880. +}
  8881. +
  8882. +static int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
  8883. + const struct ssh_rtl_ops *ops)
  8884. +{
  8885. + struct ssh_ptl_ops ptl_ops;
  8886. + int status;
  8887. +
  8888. + ptl_ops.data_received = ssh_rtl_rx_data;
  8889. +
  8890. + status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
  8891. + if (status)
  8892. + return status;
  8893. +
  8894. + spin_lock_init(&rtl->queue.lock);
  8895. + INIT_LIST_HEAD(&rtl->queue.head);
  8896. +
  8897. + spin_lock_init(&rtl->pending.lock);
  8898. + INIT_LIST_HEAD(&rtl->pending.head);
  8899. + atomic_set_release(&rtl->pending.count, 0);
  8900. +
  8901. + INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
  8902. +
  8903. + rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
  8904. + rtl->rtx_timeout.expires = KTIME_MAX;
  8905. + INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
  8906. +
  8907. + rtl->ops = *ops;
  8908. +
  8909. + return 0;
  8910. +}
  8911. +
  8912. +static void ssh_rtl_destroy(struct ssh_rtl *rtl)
  8913. +{
  8914. + ssh_ptl_destroy(&rtl->ptl);
  8915. +}
  8916. +
  8917. +
  8918. +static void ssh_rtl_packet_release(struct ssh_packet *p)
  8919. +{
  8920. + struct ssh_request *rqst = to_ssh_request(p, packet);
  8921. + rqst->ops->release(rqst);
  8922. +}
  8923. +
  8924. +static const struct ssh_packet_ops ssh_rtl_packet_ops = {
  8925. + .complete = ssh_rtl_packet_callback,
  8926. + .release = ssh_rtl_packet_release,
  8927. +};
  8928. +
  8929. +static void ssh_request_init(struct ssh_request *rqst,
  8930. + enum ssam_request_flags flags,
  8931. + const struct ssh_request_ops *ops)
  8932. +{
  8933. + struct ssh_packet_args packet_args;
  8934. +
  8935. + packet_args.type = BIT(SSH_PACKET_TY_BLOCKING_BIT);
  8936. + if (!(flags & SSAM_REQUEST_UNSEQUENCED))
  8937. + packet_args.type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);
  8938. +
  8939. + packet_args.priority = SSH_PACKET_PRIORITY(DATA, 0);
  8940. + packet_args.ops = &ssh_rtl_packet_ops;
  8941. +
  8942. + ssh_packet_init(&rqst->packet, &packet_args);
  8943. + INIT_LIST_HEAD(&rqst->node);
  8944. +
  8945. + rqst->state = 0;
  8946. + if (flags & SSAM_REQUEST_HAS_RESPONSE)
  8947. + rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
  8948. +
  8949. + rqst->timestamp = KTIME_MAX;
  8950. + rqst->ops = ops;
  8951. +}
  8952. +
  8953. +
  8954. +struct ssh_flush_request {
  8955. + struct ssh_request base;
  8956. + struct completion completion;
  8957. + int status;
  8958. +};
  8959. +
  8960. +static void ssh_rtl_flush_request_complete(struct ssh_request *r,
  8961. + const struct ssh_command *cmd,
  8962. + const struct ssam_span *data,
  8963. + int status)
  8964. +{
  8965. + struct ssh_flush_request *rqst;
  8966. +
  8967. + rqst = container_of(r, struct ssh_flush_request, base);
  8968. + rqst->status = status;
  8969. +}
  8970. +
  8971. +static void ssh_rtl_flush_request_release(struct ssh_request *r)
  8972. +{
  8973. + struct ssh_flush_request *rqst;
  8974. +
  8975. + rqst = container_of(r, struct ssh_flush_request, base);
  8976. + complete_all(&rqst->completion);
  8977. +}
  8978. +
  8979. +static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
  8980. + .complete = ssh_rtl_flush_request_complete,
  8981. + .release = ssh_rtl_flush_request_release,
  8982. +};
  8983. +
  8984. +/**
  8985. + * ssh_rtl_flush - flush the request transmission layer
  8986. + * @rtl: request transmission layer
  8987. + * @timeout: timeout for the flush operation in jiffies
  8988. + *
  8989. + * Queue a special flush request and wait for its completion. This request
  8990. + * will be completed after all other currently queued and pending requests
  8991. + * have been completed. Instead of a normal data packet, this request submits
  8992. + * a special flush packet, meaning that upon completion, also the underlying
  8993. + * packet transmission layer has been flushed.
  8994. + *
  8995. + * Flushing the request layer guarantees that all previously submitted
  8996. + * requests have been fully completed before this call returns. Additionally,
  8997. + * flushing blocks execution of all later submitted requests until the flush
  8998. + * has been completed.
  8999. + *
  9000. + * If the caller ensures that no new requests are submitted after a call to
  9001. + * this function, the request transmission layer is guaranteed to have no
  9002. + * remaining requests when this call returns. The same guarantee does not hold
  9003. + * for the packet layer, on which control packets may still be queued after
  9004. + * this call. See the documentation of ssh_ptl_flush for more details on
  9005. + * packet layer flushing.
  9006. + *
  9007. + * Return: Zero on success, -ETIMEDOUT if the flush timed out and has been
  9008. + * canceled as a result of the timeout, or -ESHUTDOWN if the packet and/or
  9009. + * request transmission layer has been shut down before this call. May also
  9010. + * return -EINTR if the underlying packet transmission has been interrupted.
  9011. + */
  9012. +static int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
  9013. +{
  9014. + const unsigned init_flags = SSAM_REQUEST_UNSEQUENCED;
  9015. + struct ssh_flush_request rqst;
  9016. + int status;
  9017. +
  9018. + ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
  9019. + rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
  9020. + rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
  9021. + rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);
  9022. +
  9023. + init_completion(&rqst.completion);
  9024. +
  9025. + status = ssh_rtl_submit(rtl, &rqst.base);
  9026. + if (status)
  9027. + return status;
  9028. +
  9029. + ssh_request_put(&rqst.base);
  9030. +
  9031. + if (wait_for_completion_timeout(&rqst.completion, timeout))
  9032. + return 0;
  9033. +
  9034. + ssh_rtl_cancel(&rqst.base, true);
  9035. + wait_for_completion(&rqst.completion);
  9036. +
  9037. + WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED
  9038. + && rqst.status != -ESHUTDOWN && rqst.status != -EINTR);
  9039. +
  9040. + return rqst.status == -ECANCELED ? -ETIMEDOUT : status;
  9041. +}
  9042. +
  9043. +
  9044. +static void ssh_rtl_shutdown(struct ssh_rtl *rtl)
  9045. +{
  9046. + struct ssh_request *r, *n;
  9047. + LIST_HEAD(claimed);
  9048. + int pending;
  9049. +
  9050. + set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
  9051. + smp_mb__after_atomic();
  9052. +
  9053. + // remove requests from queue
  9054. + spin_lock(&rtl->queue.lock);
  9055. + list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
  9056. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
  9057. + smp_mb__before_atomic();
  9058. + clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
  9059. +
  9060. + list_del(&r->node);
  9061. + list_add_tail(&r->node, &claimed);
  9062. + }
  9063. + spin_unlock(&rtl->queue.lock);
  9064. +
  9065. + /*
  9066. + * We have now guaranteed that the queue is empty and no more new
  9067. + * requests can be submitted (i.e. it will stay empty). This means that
  9068. + * calling ssh_rtl_tx_schedule will not schedule tx.work any more. So we
  9069. + * can simply call cancel_work_sync on tx.work here and when that
  9070. + * returns, we've locked it down. This also means that after this call,
  9071. + * we don't submit any more packets to the underlying packet layer, so
  9072. + * we can also shut that down.
  9073. + */
  9074. +
  9075. + cancel_work_sync(&rtl->tx.work);
  9076. + ssh_ptl_shutdown(&rtl->ptl);
  9077. + cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
  9078. +
  9079. + /*
  9080. + * Shutting down the packet layer should also have canceled all requests.
  9081. + * Thus the pending set should be empty. Attempt to handle this
  9082. + * gracefully anyways, even though this should be dead code.
  9083. + */
  9084. +
  9085. + pending = atomic_read(&rtl->pending.count);
  9086. + if (WARN_ON(pending)) {
  9087. + spin_lock(&rtl->pending.lock);
  9088. + list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
  9089. + set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
  9090. + smp_mb__before_atomic();
  9091. + clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
  9092. +
  9093. + list_del(&r->node);
  9094. + list_add_tail(&r->node, &claimed);
  9095. + }
  9096. + spin_unlock(&rtl->pending.lock);
  9097. + }
  9098. +
  9099. + // finally cancel and complete requests
  9100. + list_for_each_entry_safe(r, n, &claimed, node) {
  9101. + // test_and_set because we still might compete with cancellation
  9102. + if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
  9103. + ssh_rtl_complete_with_status(r, -ESHUTDOWN);
  9104. +
  9105. + // drop the reference we've obtained by removing it from list
  9106. + list_del(&r->node);
  9107. + ssh_request_put(r);
  9108. + }
  9109. +}
  9110. +
  9111. +
  9112. +/* -- Event notifier/callbacks. --------------------------------------------- */
  9113. +/*
  9114. + * The notifier system is based on linux/notifier.h, specifically the SRCU
  9115. + * implementation. The difference to that is, that some bits of the notifier
  9116. + * call return value can be tracked across multiple calls. This is done so that
  9117. + * handling of events can be tracked and a warning can be issued in case an
  9118. + * event goes unhandled. The idea of that warning is that it should help discover
  9119. + * and identify new/currently unimplemented features.
  9120. + */
  9121. +
  9122. +struct ssam_nf_head {
  9123. + struct srcu_struct srcu;
  9124. + struct ssam_notifier_block __rcu *head;
  9125. +};
  9126. +
  9127. +
  9128. +int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
  9129. +{
  9130. + struct ssam_notifier_block *nb, *next_nb;
  9131. + int ret = 0, idx;
  9132. +
  9133. + idx = srcu_read_lock(&nh->srcu);
  9134. +
  9135. + nb = rcu_dereference_raw(nh->head);
  9136. + while (nb) {
  9137. + next_nb = rcu_dereference_raw(nb->next);
  9138. +
  9139. + ret = (ret & SSAM_NOTIF_STATE_MASK) | nb->fn(nb, event);
  9140. + if (ret & SSAM_NOTIF_STOP)
  9141. + break;
  9142. +
  9143. + nb = next_nb;
  9144. + }
  9145. +
  9146. + srcu_read_unlock(&nh->srcu, idx);
  9147. + return ret;
  9148. +}
  9149. +
  9150. +/*
  9151. + * Note: This function must be synchronized by the caller with respect to other
  9152. + * insert and/or remove calls.
  9153. + */
  9154. +int __ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
  9155. +{
  9156. + struct ssam_notifier_block **link = &nh->head;
  9157. +
  9158. + while ((*link) != NULL) {
  9159. + if (unlikely((*link) == nb)) {
  9160. + WARN(1, "double register detected");
  9161. + return -EINVAL;
  9162. + }
  9163. +
  9164. + if (nb->priority > (*link)->priority)
  9165. + break;
  9166. +
  9167. + link = &((*link)->next);
  9168. + }
  9169. +
  9170. + nb->next = *link;
  9171. + rcu_assign_pointer(*link, nb);
  9172. +
  9173. + return 0;
  9174. +}
  9175. +
  9176. +/*
  9177. + * Note: This function must be synchronized by the caller with respect to other
  9178. + * insert and/or remove calls. On success, the caller _must_ ensure SRCU
  9179. + * synchronization by calling `synchronize_srcu(&nh->srcu)` after leaving the
  9180. + * critical section, to ensure that the removed notifier block is not in use any
  9181. + * more.
  9182. + */
  9183. +int __ssam_nfblk_remove(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
  9184. +{
  9185. + struct ssam_notifier_block **link = &nh->head;
  9186. +
  9187. + while ((*link) != NULL) {
  9188. + if ((*link) == nb) {
  9189. + rcu_assign_pointer(*link, nb->next);
  9190. + return 0;
  9191. + }
  9192. +
  9193. + link = &((*link)->next);
  9194. + }
  9195. +
  9196. + return -ENOENT;
  9197. +}
  9198. +
  9199. +static int ssam_nf_head_init(struct ssam_nf_head *nh)
  9200. +{
  9201. + int status;
  9202. +
  9203. + status = init_srcu_struct(&nh->srcu);
  9204. + if (status)
  9205. + return status;
  9206. +
  9207. + nh->head = NULL;
  9208. + return 0;
  9209. +}
  9210. +
  9211. +static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
  9212. +{
  9213. + cleanup_srcu_struct(&nh->srcu);
  9214. +}
  9215. +
  9216. +
  9217. +/* -- Event/notification registry. ------------------------------------------ */
  9218. +
  9219. +struct ssam_nf_refcount_key {
  9220. + struct ssam_event_registry reg;
  9221. + struct ssam_event_id id;
  9222. +};
  9223. +
  9224. +struct ssam_nf_refcount_entry {
  9225. + struct rb_node node;
  9226. + struct ssam_nf_refcount_key key;
  9227. + int refcount;
  9228. +};
  9229. +
  9230. +struct ssam_nf {
  9231. + struct mutex lock;
  9232. + struct rb_root refcount;
  9233. + struct ssam_nf_head head[SSH_NUM_EVENTS];
  9234. +};
  9235. +
  9236. +
  9237. +static int ssam_nf_refcount_inc(struct ssam_nf *nf,
  9238. + struct ssam_event_registry reg,
  9239. + struct ssam_event_id id)
  9240. +{
  9241. + struct ssam_nf_refcount_entry *entry;
  9242. + struct ssam_nf_refcount_key key;
  9243. + struct rb_node **link = &nf->refcount.rb_node;
  9244. + struct rb_node *parent = NULL;
  9245. + int cmp;
  9246. +
  9247. + key.reg = reg;
  9248. + key.id = id;
  9249. +
  9250. + while (*link) {
  9251. + entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
  9252. + parent = *link;
  9253. +
  9254. + cmp = memcmp(&key, &entry->key, sizeof(key));
  9255. + if (cmp < 0) {
  9256. + link = &(*link)->rb_left;
  9257. + } else if (cmp > 0) {
  9258. + link = &(*link)->rb_right;
  9259. + } else if (entry->refcount < INT_MAX) {
  9260. + return ++entry->refcount;
  9261. + } else {
  9262. + return -ENOSPC;
  9263. + }
  9264. + }
  9265. +
  9266. + entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  9267. + if (!entry)
  9268. + return -ENOMEM;
  9269. +
  9270. + entry->key = key;
  9271. + entry->refcount = 1;
  9272. +
  9273. + rb_link_node(&entry->node, parent, link);
  9274. + rb_insert_color(&entry->node, &nf->refcount);
  9275. +
  9276. + return entry->refcount;
  9277. +}
  9278. +
  9279. +static int ssam_nf_refcount_dec(struct ssam_nf *nf,
  9280. + struct ssam_event_registry reg,
  9281. + struct ssam_event_id id)
  9282. +{
  9283. + struct ssam_nf_refcount_entry *entry;
  9284. + struct ssam_nf_refcount_key key;
  9285. + struct rb_node *node = nf->refcount.rb_node;
  9286. + int cmp, rc;
  9287. +
  9288. + key.reg = reg;
  9289. + key.id = id;
  9290. +
  9291. + while (node) {
  9292. + entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
  9293. +
  9294. + cmp = memcmp(&key, &entry->key, sizeof(key));
  9295. + if (cmp < 0) {
  9296. + node = node->rb_left;
  9297. + } else if (cmp > 0) {
  9298. + node = node->rb_right;
  9299. + } else {
  9300. + rc = --entry->refcount;
  9301. +
  9302. + if (rc == 0) {
  9303. + rb_erase(&entry->node, &nf->refcount);
  9304. + kfree(entry);
  9305. + }
  9306. +
  9307. + return rc;
  9308. + }
  9309. + }
  9310. +
  9311. + return -ENOENT;
  9312. +}
  9313. +
  9314. +static bool ssam_nf_refcount_empty(struct ssam_nf *nf)
  9315. +{
  9316. + return RB_EMPTY_ROOT(&nf->refcount);
  9317. +}
  9318. +
  9319. +static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid,
  9320. + struct ssam_event *event)
  9321. +{
  9322. + struct ssam_nf_head *nf_head;
  9323. + int status, nf_ret;
  9324. +
  9325. + if (!ssh_rqid_is_event(rqid)) {
  9326. + dev_warn(dev, "event: unsupported rqid: 0x%04x\n", rqid);
  9327. + return;
  9328. + }
  9329. +
  9330. + nf_head = &nf->head[ssh_rqid_to_event(rqid)];
  9331. + nf_ret = ssam_nfblk_call_chain(nf_head, event);
  9332. + status = ssam_notifier_to_errno(nf_ret);
  9333. +
  9334. + if (status < 0) {
  9335. + dev_err(dev, "event: error handling event: %d "
  9336. + "(tc: 0x%02x, cid: 0x%02x, iid: 0x%02x, chn: 0x%02x)\n",
  9337. + status, event->target_category, event->command_id,
  9338. + event->instance_id, event->channel);
  9339. + }
  9340. +
  9341. + if (!(nf_ret & SSAM_NOTIF_HANDLED)) {
  9342. + dev_warn(dev, "event: unhandled event (rqid: 0x%02x, "
  9343. + "tc: 0x%02x, cid: 0x%02x, iid: 0x%02x, chn: 0x%02x)\n",
  9344. + rqid, event->target_category, event->command_id,
  9345. + event->instance_id, event->channel);
  9346. + }
  9347. +}
  9348. +
  9349. +static int ssam_nf_init(struct ssam_nf *nf)
  9350. +{
  9351. + int i, status;
  9352. +
  9353. + for (i = 0; i < SSH_NUM_EVENTS; i++) {
  9354. + status = ssam_nf_head_init(&nf->head[i]);
  9355. + if (status)
  9356. + break;
  9357. + }
  9358. +
  9359. + if (status) {
  9360. + for (i = i - 1; i >= 0; i--)
  9361. + ssam_nf_head_destroy(&nf->head[i]);
  9362. +
  9363. + return status;
  9364. + }
  9365. +
  9366. + mutex_init(&nf->lock);
  9367. + return 0;
  9368. +}
  9369. +
  9370. +static void ssam_nf_destroy(struct ssam_nf *nf)
  9371. +{
  9372. + int i;
  9373. +
  9374. + for (i = 0; i < SSH_NUM_EVENTS; i++)
  9375. + ssam_nf_head_destroy(&nf->head[i]);
  9376. +
  9377. + mutex_destroy(&nf->lock);
  9378. +}
  9379. +
  9380. +
  9381. +/* -- Event/async request completion system. -------------------------------- */
  9382. +
  9383. +#define SSAM_CPLT_WQ_NAME "ssam_cpltq"
  9384. +
  9385. +
  9386. +struct ssam_cplt;
  9387. +struct ssam_event_item;
  9388. +
  9389. +struct ssam_event_item_ops {
  9390. + void (*free)(struct ssam_event_item *);
  9391. +};
  9392. +
  9393. +struct ssam_event_item {
  9394. + struct list_head node;
  9395. + u16 rqid;
  9396. +
  9397. + struct ssam_event_item_ops ops;
  9398. + struct ssam_event event; // must be last
  9399. +};
  9400. +
  9401. +struct ssam_event_queue {
  9402. + struct ssam_cplt *cplt;
  9403. +
  9404. + spinlock_t lock;
  9405. + struct list_head head;
  9406. + struct work_struct work;
  9407. +};
  9408. +
  9409. +struct ssam_event_channel {
  9410. + struct ssam_event_queue queue[SSH_NUM_EVENTS];
  9411. +};
  9412. +
  9413. +struct ssam_cplt {
  9414. + struct device *dev;
  9415. + struct workqueue_struct *wq;
  9416. +
  9417. + struct {
  9418. + struct ssam_event_channel channel[SSH_NUM_CHANNELS];
  9419. + struct ssam_nf notif;
  9420. + } event;
  9421. +};
  9422. +
  9423. +
  9424. +/**
  9425. + * Maximum payload length for cached `ssam_event_item`s.
  9426. + *
  9427. + * This length has been chosen to accommodate standard touchpad and keyboard
  9428. + * input events. Events with larger payloads will be allocated separately.
  9429. + */
  9430. +#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN 32
  9431. +
  9432. +static struct kmem_cache *ssam_event_item_cache;
  9433. +
  9434. +static int ssam_event_item_cache_init(void)
  9435. +{
  9436. + const unsigned int size = sizeof(struct ssam_event_item)
  9437. + + SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
  9438. + const unsigned int align = __alignof__(struct ssam_event_item);
  9439. + struct kmem_cache *cache;
  9440. +
  9441. + cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
  9442. + if (!cache)
  9443. + return -ENOMEM;
  9444. +
  9445. + ssam_event_item_cache = cache;
  9446. + return 0;
  9447. +}
  9448. +
  9449. +static void ssam_event_item_cache_destroy(void)
  9450. +{
  9451. + kmem_cache_destroy(ssam_event_item_cache);
  9452. + ssam_event_item_cache = NULL;
  9453. +}
  9454. +
  9455. +static void __ssam_event_item_free_cached(struct ssam_event_item *item)
  9456. +{
  9457. + kmem_cache_free(ssam_event_item_cache, item);
  9458. +}
  9459. +
  9460. +static void __ssam_event_item_free_generic(struct ssam_event_item *item)
  9461. +{
  9462. + kfree(item);
  9463. +}
  9464. +
  9465. +static inline void ssam_event_item_free(struct ssam_event_item *item)
  9466. +{
  9467. + trace_ssam_event_item_free(item);
  9468. + item->ops.free(item);
  9469. +}
  9470. +
  9471. +static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
  9472. +{
  9473. + struct ssam_event_item *item;
  9474. +
  9475. + if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
  9476. + item = kmem_cache_alloc(ssam_event_item_cache, GFP_KERNEL);
  9477. + if (!item)
  9478. + return NULL;
  9479. +
  9480. + item->ops.free = __ssam_event_item_free_cached;
  9481. + } else {
  9482. + const size_t n = sizeof(struct ssam_event_item) + len;
  9483. + item = kzalloc(n, GFP_KERNEL);
  9484. + if (!item)
  9485. + return NULL;
  9486. +
  9487. + item->ops.free = __ssam_event_item_free_generic;
  9488. + }
  9489. +
  9490. + item->event.length = len;
  9491. +
  9492. + trace_ssam_event_item_alloc(item, len);
  9493. + return item;
  9494. +}
  9495. +
  9496. +
  9497. +static void ssam_event_queue_push(struct ssam_event_queue *q,
  9498. + struct ssam_event_item *item)
  9499. +{
  9500. + spin_lock(&q->lock);
  9501. + list_add_tail(&item->node, &q->head);
  9502. + spin_unlock(&q->lock);
  9503. +}
  9504. +
  9505. +static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
  9506. +{
  9507. + struct ssam_event_item *item;
  9508. +
  9509. + spin_lock(&q->lock);
  9510. + item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
  9511. + if (item)
  9512. + list_del(&item->node);
  9513. + spin_unlock(&q->lock);
  9514. +
  9515. + return item;
  9516. +}
  9517. +
  9518. +static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
  9519. +{
  9520. + bool empty;
  9521. +
  9522. + spin_lock(&q->lock);
  9523. + empty = list_empty(&q->head);
  9524. + spin_unlock(&q->lock);
  9525. +
  9526. + return empty;
  9527. +}
  9528. +
  9529. +static struct ssam_event_queue *ssam_cplt_get_event_queue(
  9530. + struct ssam_cplt *cplt, u8 channel, u16 rqid)
  9531. +{
  9532. + u16 event = ssh_rqid_to_event(rqid);
  9533. + u16 chidx = ssh_channel_to_index(channel);
  9534. +
  9535. + if (!ssh_rqid_is_event(rqid)) {
  9536. + dev_err(cplt->dev, "event: unsupported rqid: 0x%04x\n", rqid);
  9537. + return NULL;
  9538. + }
  9539. +
  9540. + if (!ssh_channel_is_valid(channel)) {
  9541. + dev_warn(cplt->dev, "event: unsupported channel: %u\n",
  9542. + channel);
  9543. + chidx = 0;
  9544. + }
  9545. +
  9546. + return &cplt->event.channel[chidx].queue[event];
  9547. +}
  9548. +
  9549. +static inline bool ssam_cplt_submit(struct ssam_cplt *cplt,
  9550. + struct work_struct *work)
  9551. +{
  9552. + return queue_work(cplt->wq, work);
  9553. +}
  9554. +
  9555. +static int ssam_cplt_submit_event(struct ssam_cplt *cplt,
  9556. + struct ssam_event_item *item)
  9557. +{
  9558. + struct ssam_event_queue *evq;
  9559. +
  9560. + evq = ssam_cplt_get_event_queue(cplt, item->event.channel, item->rqid);
  9561. + if (!evq)
  9562. + return -EINVAL;
  9563. +
  9564. + ssam_event_queue_push(evq, item);
  9565. + ssam_cplt_submit(cplt, &evq->work);
  9566. + return 0;
  9567. +}
  9568. +
  9569. +static void ssam_cplt_flush(struct ssam_cplt *cplt)
  9570. +{
  9571. + flush_workqueue(cplt->wq);
  9572. +}
  9573. +
  9574. +static void ssam_event_queue_work_fn(struct work_struct *work)
  9575. +{
  9576. + struct ssam_event_queue *queue;
  9577. + struct ssam_event_item *item;
  9578. + struct ssam_nf *nf;
  9579. + struct device *dev;
  9580. + int i;
  9581. +
  9582. + queue = container_of(work, struct ssam_event_queue, work);
  9583. + nf = &queue->cplt->event.notif;
  9584. + dev = queue->cplt->dev;
  9585. +
  9586. + for (i = 0; i < 10; i++) {
  9587. + item = ssam_event_queue_pop(queue);
  9588. + if (item == NULL)
  9589. + return;
  9590. +
  9591. + ssam_nf_call(nf, dev, item->rqid, &item->event);
  9592. + ssam_event_item_free(item);
  9593. + }
  9594. +
  9595. + if (!ssam_event_queue_is_empty(queue))
  9596. + ssam_cplt_submit(queue->cplt, &queue->work);
  9597. +}
  9598. +
  9599. +static void ssam_event_queue_init(struct ssam_cplt *cplt,
  9600. + struct ssam_event_queue *evq)
  9601. +{
  9602. + evq->cplt = cplt;
  9603. + spin_lock_init(&evq->lock);
  9604. + INIT_LIST_HEAD(&evq->head);
  9605. + INIT_WORK(&evq->work, ssam_event_queue_work_fn);
  9606. +}
  9607. +
  9608. +static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
  9609. +{
  9610. + struct ssam_event_channel *channel;
  9611. + int status, c, i;
  9612. +
  9613. + cplt->dev = dev;
  9614. +
  9615. + cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
  9616. + if (!cplt->wq)
  9617. + return -ENOMEM;
  9618. +
  9619. + for (c = 0; c < ARRAY_SIZE(cplt->event.channel); c++) {
  9620. + channel = &cplt->event.channel[c];
  9621. +
  9622. + for (i = 0; i < ARRAY_SIZE(channel->queue); i++)
  9623. + ssam_event_queue_init(cplt, &channel->queue[i]);
  9624. + }
  9625. +
  9626. + status = ssam_nf_init(&cplt->event.notif);
  9627. + if (status)
  9628. + destroy_workqueue(cplt->wq);
  9629. +
  9630. + return status;
  9631. +}
  9632. +
  9633. +static void ssam_cplt_destroy(struct ssam_cplt *cplt)
  9634. +{
  9635. + /*
  9636. + * Note: destroy_workqueue ensures that all currently queued work will
  9637. + * be fully completed and the workqueue drained. This means that this
  9638. + * call will inherently also free any queued ssam_event_items, thus we
  9639. + * don't have to take care of that here explicitly.
  9640. + */
  9641. + destroy_workqueue(cplt->wq);
  9642. + ssam_nf_destroy(&cplt->event.notif);
  9643. +}
  9644. +
  9645. +
  9646. +/* -- Main SSAM device structures. ------------------------------------------ */
  9647. +
  9648. +enum ssam_controller_state {
  9649. + SSAM_CONTROLLER_UNINITIALIZED,
  9650. + SSAM_CONTROLLER_INITIALIZED,
  9651. + SSAM_CONTROLLER_STARTED,
  9652. + SSAM_CONTROLLER_STOPPED,
  9653. + SSAM_CONTROLLER_SUSPENDED,
  9654. +};
  9655. +
  9656. +struct ssam_device_caps {
  9657. + u32 notif_display:1;
  9658. + u32 notif_d0exit:1;
  9659. +};
  9660. +
  9661. +struct ssam_controller {
  9662. + enum ssam_controller_state state;
  9663. +
  9664. + struct ssh_rtl rtl;
  9665. + struct ssam_cplt cplt;
  9666. +
  9667. + struct {
  9668. + struct ssh_seq_counter seq;
  9669. + struct ssh_rqid_counter rqid;
  9670. + } counter;
  9671. +
  9672. + struct {
  9673. + int num;
  9674. + bool wakeup_enabled;
  9675. + } irq;
  9676. +
  9677. + struct ssam_device_caps caps;
  9678. +};
  9679. +
  9680. +
  9681. +#define ssam_dbg(ctrl, fmt, ...) rtl_dbg(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
  9682. +#define ssam_info(ctrl, fmt, ...) rtl_info(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
  9683. +#define ssam_warn(ctrl, fmt, ...) rtl_warn(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
  9684. +#define ssam_err(ctrl, fmt, ...) rtl_err(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
  9685. +
  9686. +#define to_ssam_controller(ptr, member) \
  9687. + container_of(ptr, struct ssam_controller, member)
  9688. +
  9689. +struct device *ssam_controller_device(struct ssam_controller *c)
  9690. +{
  9691. + return ssh_rtl_get_device(&c->rtl);
  9692. +}
  9693. +EXPORT_SYMBOL_GPL(ssam_controller_device);
  9694. +
  9695. +
  9696. +static void ssam_handle_event(struct ssh_rtl *rtl,
  9697. + const struct ssh_command *cmd,
  9698. + const struct ssam_span *data)
  9699. +{
  9700. + struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl);
  9701. + struct ssam_event_item *item;
  9702. +
  9703. + item = ssam_event_item_alloc(data->len, GFP_KERNEL);
  9704. + if (!item)
  9705. + return;
  9706. +
  9707. + item->rqid = get_unaligned_le16(&cmd->rqid);
  9708. + item->event.target_category = cmd->tc;
  9709. + item->event.command_id = cmd->cid;
  9710. + item->event.instance_id = cmd->iid;
  9711. + item->event.channel = cmd->chn_in;
  9712. + memcpy(&item->event.data[0], data->ptr, data->len);
  9713. +
  9714. + ssam_cplt_submit_event(&ctrl->cplt, item);
  9715. +}
  9716. +
  9717. +static const struct ssh_rtl_ops ssam_rtl_ops = {
  9718. + .handle_event = ssam_handle_event,
  9719. +};
  9720. +
  9721. +
  9722. +static bool ssam_notifier_empty(struct ssam_controller *ctrl);
  9723. +static void ssam_notifier_unregister_all(struct ssam_controller *ctrl);
  9724. +
  9725. +
  9726. +#define SSAM_SSH_DSM_REVISION 0
  9727. +#define SSAM_SSH_DSM_NOTIF_D0 8
  9728. +static const guid_t SSAM_SSH_DSM_UUID = GUID_INIT(0xd5e383e1, 0xd892, 0x4a76,
  9729. + 0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5);
  9730. +
  9731. +static int ssam_device_caps_load_from_acpi(acpi_handle handle,
  9732. + struct ssam_device_caps *caps)
  9733. +{
  9734. + union acpi_object *obj;
  9735. + u64 funcs = 0;
  9736. + int i;
  9737. +
  9738. + // set defaults
  9739. + caps->notif_display = true;
  9740. + caps->notif_d0exit = false;
  9741. +
  9742. + if (!acpi_has_method(handle, "_DSM"))
  9743. + return 0;
  9744. +
  9745. + // get function availability bitfield
  9746. + obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_UUID, 0, 0, NULL,
  9747. + ACPI_TYPE_BUFFER);
  9748. + if (!obj)
  9749. + return -EFAULT;
  9750. +
  9751. + for (i = 0; i < obj->buffer.length && i < 8; i++)
  9752. + funcs |= (((u64)obj->buffer.pointer[i]) << (i * 8));
  9753. +
  9754. + ACPI_FREE(obj);
  9755. +
  9756. + // D0 exit/entry notification
  9757. + if (funcs & BIT(SSAM_SSH_DSM_NOTIF_D0)) {
  9758. + obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_UUID,
  9759. + SSAM_SSH_DSM_REVISION, SSAM_SSH_DSM_NOTIF_D0,
  9760. + NULL, ACPI_TYPE_INTEGER);
  9761. + if (!obj)
  9762. + return -EFAULT;
  9763. +
  9764. + caps->notif_d0exit = !!obj->integer.value;
  9765. + ACPI_FREE(obj);
  9766. + }
  9767. +
  9768. + return 0;
  9769. +}
  9770. +
  9771. +static int ssam_controller_init(struct ssam_controller *ctrl,
  9772. + struct serdev_device *serdev)
  9773. +{
  9774. + acpi_handle handle = ACPI_HANDLE(&serdev->dev);
  9775. + int status;
  9776. +
  9777. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_UNINITIALIZED) {
  9778. + dev_err(&serdev->dev, "embedded controller already initialized\n");
  9779. + return -EBUSY;
  9780. + }
  9781. +
  9782. + status = ssam_device_caps_load_from_acpi(handle, &ctrl->caps);
  9783. + if (status)
  9784. + return status;
  9785. +
  9786. + dev_dbg(&serdev->dev, "device capabilities:\n");
  9787. + dev_dbg(&serdev->dev, " notif_display: %u\n", ctrl->caps.notif_display);
  9788. + dev_dbg(&serdev->dev, " notif_d0exit: %u\n", ctrl->caps.notif_d0exit);
  9789. +
  9790. + ssh_seq_reset(&ctrl->counter.seq);
  9791. + ssh_rqid_reset(&ctrl->counter.rqid);
  9792. +
  9793. + // initialize event/request completion system
  9794. + status = ssam_cplt_init(&ctrl->cplt, &serdev->dev);
  9795. + if (status)
  9796. + return status;
  9797. +
  9798. + // initialize request and packet transmission layers
  9799. + status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops);
  9800. + if (status) {
  9801. + ssam_cplt_destroy(&ctrl->cplt);
  9802. + return status;
  9803. + }
  9804. +
  9805. + // update state
  9806. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_INITIALIZED);
  9807. + return 0;
  9808. +}
  9809. +
  9810. +static int ssam_controller_start(struct ssam_controller *ctrl)
  9811. +{
  9812. + int status;
  9813. +
  9814. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_INITIALIZED)
  9815. + return -EINVAL;
  9816. +
  9817. + status = ssh_rtl_tx_start(&ctrl->rtl);
  9818. + if (status)
  9819. + return status;
  9820. +
  9821. + status = ssh_rtl_rx_start(&ctrl->rtl);
  9822. + if (status) {
  9823. + ssh_rtl_tx_flush(&ctrl->rtl);
  9824. + return status;
  9825. + }
  9826. +
  9827. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_STARTED);
  9828. + return 0;
  9829. +}
  9830. +
  9831. +static void ssam_controller_shutdown(struct ssam_controller *ctrl)
  9832. +{
  9833. + enum ssam_controller_state s = smp_load_acquire(&ctrl->state);
  9834. + int status;
  9835. +
  9836. + if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED)
  9837. + return;
  9838. +
  9839. + // try to flush pending events and requests while everything still works
  9840. + status = ssh_rtl_flush(&ctrl->rtl, msecs_to_jiffies(5000));
  9841. + if (status) {
  9842. + ssam_err(ctrl, "failed to flush request transmission layer: %d\n",
  9843. + status);
  9844. + }
  9845. +
  9846. + // try to flush out all currently completing requests and events
  9847. + ssam_cplt_flush(&ctrl->cplt);
  9848. +
  9849. + /*
  9850. + * We expect all notifiers to have been removed by the respective client
  9851. + * driver that set them up at this point. If this warning occurs, some
  9852. + * client driver has not done that...
  9853. + */
  9854. + WARN_ON(!ssam_notifier_empty(ctrl));
  9855. +
  9856. + /*
  9857. + * Nevertheless, we should still take care of drivers that don't behave
  9858. + * well. Thus disable all enabled events, unregister all notifiers.
  9859. + */
  9860. + ssam_notifier_unregister_all(ctrl);
  9861. +
  9862. + // cancel rem. requests, ensure no new ones can be queued, stop threads
  9863. + ssh_rtl_tx_flush(&ctrl->rtl);
  9864. + ssh_rtl_shutdown(&ctrl->rtl);
  9865. +
  9866. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_STOPPED);
  9867. +}
  9868. +
  9869. +static void ssam_controller_destroy(struct ssam_controller *ctrl)
  9870. +{
  9871. + if (smp_load_acquire(&ctrl->state) == SSAM_CONTROLLER_UNINITIALIZED)
  9872. + return;
  9873. +
  9874. + /*
  9875. + * Note: New events could still have been received after the previous
  9876. + * flush in ssam_controller_shutdown, before the request transport layer
  9877. + * has been shut down. At this point, after the shutdown, we can be sure
  9878. + * that no new events will be queued. The call to ssam_cplt_destroy will
  9879. + * ensure that those remaining are being completed and freed.
  9880. + */
  9881. +
  9882. + // actually free resources
  9883. + ssam_cplt_destroy(&ctrl->cplt);
  9884. + ssh_rtl_destroy(&ctrl->rtl);
  9885. +
  9886. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_UNINITIALIZED);
  9887. +}
  9888. +
  9889. +static int ssam_controller_suspend(struct ssam_controller *ctrl)
  9890. +{
  9891. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_STARTED)
  9892. + return -EINVAL;
  9893. +
  9894. + ssam_dbg(ctrl, "pm: suspending controller\n");
  9895. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_SUSPENDED);
  9896. + return 0;
  9897. +}
  9898. +
  9899. +static int ssam_controller_resume(struct ssam_controller *ctrl)
  9900. +{
  9901. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_SUSPENDED)
  9902. + return -EINVAL;
  9903. +
  9904. + ssam_dbg(ctrl, "pm: resuming controller\n");
  9905. + smp_store_release(&ctrl->state, SSAM_CONTROLLER_STARTED);
  9906. + return 0;
  9907. +}
  9908. +
  9909. +
  9910. +static inline
  9911. +int ssam_controller_receive_buf(struct ssam_controller *ctrl,
  9912. + const unsigned char *buf, size_t n)
  9913. +{
  9914. + return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n);
  9915. +}
  9916. +
  9917. +static inline void ssam_controller_write_wakeup(struct ssam_controller *ctrl)
  9918. +{
  9919. + ssh_ptl_tx_wakeup(&ctrl->rtl.ptl, true);
  9920. +}
  9921. +
  9922. +
  9923. +/* -- Top-level request interface ------------------------------------------- */
  9924. +
  9925. +ssize_t ssam_request_write_data(struct ssam_span *buf,
  9926. + struct ssam_controller *ctrl,
  9927. + struct ssam_request *spec)
  9928. +{
  9929. + struct msgbuf msgb;
  9930. + u16 rqid;
  9931. + u8 seq;
  9932. +
  9933. + if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE)
  9934. + return -EINVAL;
  9935. +
  9936. + msgb_init(&msgb, buf->ptr, buf->len);
  9937. + seq = ssh_seq_next(&ctrl->counter.seq);
  9938. + rqid = ssh_rqid_next(&ctrl->counter.rqid);
  9939. + msgb_push_cmd(&msgb, seq, rqid, spec);
  9940. +
  9941. + return msgb_bytes_used(&msgb);
  9942. +}
  9943. +EXPORT_SYMBOL_GPL(ssam_request_write_data);
  9944. +
  9945. +
  9946. +static void ssam_request_sync_complete(struct ssh_request *rqst,
  9947. + const struct ssh_command *cmd,
  9948. + const struct ssam_span *data, int status)
  9949. +{
  9950. + struct ssh_rtl *rtl = ssh_request_rtl(rqst);
  9951. + struct ssam_request_sync *r;
  9952. +
  9953. + r = container_of(rqst, struct ssam_request_sync, base);
  9954. + r->status = status;
  9955. +
  9956. + if (r->resp)
  9957. + r->resp->length = 0;
  9958. +
  9959. + if (status) {
  9960. + rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status);
  9961. + return;
  9962. + }
  9963. +
  9964. + if (!data) // handle requests without a response
  9965. + return;
  9966. +
  9967. + if (!r->resp || !r->resp->pointer) {
  9968. + if (data->len) {
  9969. + rtl_warn(rtl, "rsp: no response buffer provided, "
  9970. + "dropping data\n");
  9971. + }
  9972. + return;
  9973. + }
  9974. +
  9975. + if (data->len > r->resp->capacity) {
  9976. + rtl_err(rtl, "rsp: response buffer too small, "
  9977. + "capacity: %zu bytes, got: %zu bytes\n",
  9978. + r->resp->capacity, data->len);
  9979. + r->status = -ENOSPC;
  9980. + return;
  9981. + }
  9982. +
  9983. + r->resp->length = data->len;
  9984. + memcpy(r->resp->pointer, data->ptr, data->len);
  9985. +}
  9986. +
  9987. +static void ssam_request_sync_release(struct ssh_request *rqst)
  9988. +{
  9989. + complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp);
  9990. +}
  9991. +
  9992. +static const struct ssh_request_ops ssam_request_sync_ops = {
  9993. + .release = ssam_request_sync_release,
  9994. + .complete = ssam_request_sync_complete,
  9995. +};
  9996. +
  9997. +
  9998. +int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
  9999. + struct ssam_request_sync **rqst,
  10000. + struct ssam_span *buffer)
  10001. +{
  10002. + size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len);
  10003. +
  10004. + *rqst = kzalloc(sizeof(struct ssam_request_sync) + msglen, flags);
  10005. + if (!*rqst)
  10006. + return -ENOMEM;
  10007. +
  10008. + buffer->ptr = (u8 *)(*rqst + 1);
  10009. + buffer->len = msglen;
  10010. +
  10011. + return 0;
  10012. +}
  10013. +EXPORT_SYMBOL_GPL(ssam_request_sync_alloc);
  10014. +
  10015. +void ssam_request_sync_init(struct ssam_request_sync *rqst,
  10016. + enum ssam_request_flags flags)
  10017. +{
  10018. + ssh_request_init(&rqst->base, flags, &ssam_request_sync_ops);
  10019. + init_completion(&rqst->comp);
  10020. + rqst->resp = NULL;
  10021. + rqst->status = 0;
  10022. +}
  10023. +EXPORT_SYMBOL_GPL(ssam_request_sync_init);
  10024. +
  10025. +int ssam_request_sync_submit(struct ssam_controller *ctrl,
  10026. + struct ssam_request_sync *rqst)
  10027. +{
  10028. + enum ssam_controller_state state = smp_load_acquire(&ctrl->state);
  10029. + int status;
  10030. +
  10031. + if (state == SSAM_CONTROLLER_SUSPENDED) {
  10032. + ssam_warn(ctrl, "rqst: embedded controller is suspended\n");
  10033. + ssh_request_put(&rqst->base);
  10034. + return -EPERM;
  10035. + }
  10036. +
  10037. + if (state != SSAM_CONTROLLER_STARTED) {
  10038. + ssam_warn(ctrl, "rqst: embedded controller is uninitialized\n");
  10039. + ssh_request_put(&rqst->base);
  10040. + return -ENXIO;
  10041. + }
  10042. +
  10043. + status = ssh_rtl_submit(&ctrl->rtl, &rqst->base);
  10044. + ssh_request_put(&rqst->base);
  10045. +
  10046. + return status;
  10047. +}
  10048. +EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
  10049. +
  10050. +int ssam_request_sync(struct ssam_controller *ctrl, struct ssam_request *spec,
  10051. + struct ssam_response *rsp)
  10052. +{
  10053. + struct ssam_request_sync *rqst;
  10054. + struct ssam_span buf;
  10055. + size_t len;
  10056. + int status;
  10057. +
  10058. + // prevent overflow, allows us to skip checks later on
  10059. + if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE) {
  10060. + ssam_err(ctrl, "rqst: request payload too large\n");
  10061. + return -EINVAL;
  10062. + }
  10063. +
  10064. + status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
  10065. + if (status)
  10066. + return status;
  10067. +
  10068. + ssam_request_sync_init(rqst, spec->flags);
  10069. + ssam_request_sync_set_resp(rqst, rsp);
  10070. +
  10071. + len = ssam_request_write_data(&buf, ctrl, spec);
  10072. + ssam_request_sync_set_data(rqst, buf.ptr, len);
  10073. +
  10074. + status = ssam_request_sync_submit(ctrl, rqst);
  10075. + if (!status)
  10076. + status = ssam_request_sync_wait(rqst);
  10077. +
  10078. + kfree(rqst);
  10079. + return status;
  10080. +}
  10081. +EXPORT_SYMBOL_GPL(ssam_request_sync);
  10082. +
  10083. +int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
  10084. + struct ssam_request *spec,
  10085. + struct ssam_response *rsp,
  10086. + struct ssam_span *buf)
  10087. +{
  10088. + struct ssam_request_sync rqst;
  10089. + size_t len;
  10090. + int status;
  10091. +
  10092. + // prevent overflow, allows us to skip checks later on
  10093. + if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE) {
  10094. + ssam_err(ctrl, "rqst: request payload too large\n");
  10095. + return -EINVAL;
  10096. + }
  10097. +
  10098. + ssam_request_sync_init(&rqst, spec->flags);
  10099. + ssam_request_sync_set_resp(&rqst, rsp);
  10100. +
  10101. + len = ssam_request_write_data(buf, ctrl, spec);
  10102. + ssam_request_sync_set_data(&rqst, buf->ptr, len);
  10103. +
  10104. + status = ssam_request_sync_submit(ctrl, &rqst);
  10105. + if (!status)
  10106. + status = ssam_request_sync_wait(&rqst);
  10107. +
  10108. + return status;
  10109. +}
  10110. +EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
  10111. +
  10112. +
  10113. +/* -- Internal SAM requests. ------------------------------------------------ */
  10114. +
  10115. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
  10116. + .target_category = SSAM_SSH_TC_SAM,
  10117. + .command_id = 0x13,
  10118. + .instance_id = 0x00,
  10119. + .channel = 0x01,
  10120. +});
  10121. +
  10122. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
  10123. + .target_category = SSAM_SSH_TC_SAM,
  10124. + .command_id = 0x15,
  10125. + .instance_id = 0x00,
  10126. + .channel = 0x01,
  10127. +});
  10128. +
  10129. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
  10130. + .target_category = SSAM_SSH_TC_SAM,
  10131. + .command_id = 0x16,
  10132. + .instance_id = 0x00,
  10133. + .channel = 0x01,
  10134. +});
  10135. +
  10136. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
  10137. + .target_category = SSAM_SSH_TC_SAM,
  10138. + .command_id = 0x33,
  10139. + .instance_id = 0x00,
  10140. + .channel = 0x01,
  10141. +});
  10142. +
  10143. +static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
  10144. + .target_category = SSAM_SSH_TC_SAM,
  10145. + .command_id = 0x34,
  10146. + .instance_id = 0x00,
  10147. + .channel = 0x01,
  10148. +});
  10149. +
  10150. +static int ssam_ssh_event_enable(struct ssam_controller *ctrl,
  10151. + struct ssam_event_registry reg,
  10152. + struct ssam_event_id id, u8 flags)
  10153. +{
  10154. + struct ssh_notification_params params;
  10155. + struct ssam_request rqst;
  10156. + struct ssam_response result;
  10157. + int status;
  10158. +
  10159. + u16 rqid = ssh_tc_to_rqid(id.target_category);
  10160. + u8 buf[1] = { 0x00 };
  10161. +
  10162. + // only allow RQIDs that lie within event spectrum
  10163. + if (!ssh_rqid_is_event(rqid))
  10164. + return -EINVAL;
  10165. +
  10166. + params.target_category = id.target_category;
  10167. + params.instance_id = id.instance;
  10168. + params.flags = flags;
  10169. + put_unaligned_le16(rqid, &params.request_id);
  10170. +
  10171. + rqst.target_category = reg.target_category;
  10172. + rqst.command_id = reg.cid_enable;
  10173. + rqst.instance_id = 0x00;
  10174. + rqst.channel = reg.channel;
  10175. + rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
  10176. + rqst.length = sizeof(params);
  10177. + rqst.payload = (u8 *)&params;
  10178. +
  10179. + result.capacity = ARRAY_SIZE(buf);
  10180. + result.length = 0;
  10181. + result.pointer = buf;
  10182. +
  10183. + status = ssam_request_sync_onstack(ctrl, &rqst, &result, sizeof(params));
  10184. + if (status) {
  10185. + ssam_err(ctrl, "failed to enable event source "
  10186. + "(tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
  10187. + id.target_category, id.instance, reg.target_category);
  10188. + }
  10189. +
  10190. + if (buf[0] != 0x00) {
  10191. + ssam_warn(ctrl, "unexpected result while enabling event source: "
  10192. + "0x%02x (tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
  10193. + buf[0], id.target_category, id.instance,
  10194. + reg.target_category);
  10195. + }
  10196. +
  10197. + return status;
  10198. +
  10199. +}
  10200. +
  10201. +static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
  10202. + struct ssam_event_registry reg,
  10203. + struct ssam_event_id id, u8 flags)
  10204. +{
  10205. + struct ssh_notification_params params;
  10206. + struct ssam_request rqst;
  10207. + struct ssam_response result;
  10208. + int status;
  10209. +
  10210. + u16 rqid = ssh_tc_to_rqid(id.target_category);
  10211. + u8 buf[1] = { 0x00 };
  10212. +
  10213. + // only allow RQIDs that lie within event spectrum
  10214. + if (!ssh_rqid_is_event(rqid))
  10215. + return -EINVAL;
  10216. +
  10217. + params.target_category = id.target_category;
  10218. + params.instance_id = id.instance;
  10219. + params.flags = flags;
  10220. + put_unaligned_le16(rqid, &params.request_id);
  10221. +
  10222. + rqst.target_category = reg.target_category;
  10223. + rqst.command_id = reg.cid_disable;
  10224. + rqst.instance_id = 0x00;
  10225. + rqst.channel = reg.channel;
  10226. + rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
  10227. + rqst.length = sizeof(params);
  10228. + rqst.payload = (u8 *)&params;
  10229. +
  10230. + result.capacity = ARRAY_SIZE(buf);
  10231. + result.length = 0;
  10232. + result.pointer = buf;
  10233. +
  10234. + status = ssam_request_sync_onstack(ctrl, &rqst, &result, sizeof(params));
  10235. + if (status) {
  10236. + ssam_err(ctrl, "failed to disable event source "
  10237. + "(tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
  10238. + id.target_category, id.instance, reg.target_category);
  10239. + }
  10240. +
  10241. + if (buf[0] != 0x00) {
  10242. + ssam_warn(ctrl, "unexpected result while disabling event source: "
  10243. + "0x%02x (tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
  10244. + buf[0], id.target_category, id.instance,
  10245. + reg.target_category);
  10246. + }
  10247. +
  10248. + return status;
  10249. +}
  10250. +
  10251. +
  10252. +/* -- Wrappers for internal SAM requests. ----------------------------------- */
  10253. +
  10254. +static int ssam_log_firmware_version(struct ssam_controller *ctrl)
  10255. +{
  10256. + __le32 __version;
  10257. + u32 version, a, b, c;
  10258. + int status;
  10259. +
  10260. + status = ssam_ssh_get_firmware_version(ctrl, &__version);
  10261. + if (status)
  10262. + return status;
  10263. +
  10264. + version = le32_to_cpu(__version);
  10265. + a = (version >> 24) & 0xff;
  10266. + b = ((version >> 8) & 0xffff);
  10267. + c = version & 0xff;
  10268. +
  10269. + ssam_info(ctrl, "SAM controller version: %u.%u.%u\n", a, b, c);
  10270. + return 0;
  10271. +}
  10272. +
  10273. +static int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl)
  10274. +{
  10275. + int status;
  10276. + u8 response;
  10277. +
  10278. + if (!ctrl->caps.notif_display)
  10279. + return 0;
  10280. +
  10281. + ssam_dbg(ctrl, "pm: notifying display off\n");
  10282. +
  10283. + status = ssam_ssh_notif_display_off(ctrl, &response);
  10284. + if (status)
  10285. + return status;
  10286. +
  10287. + if (response != 0) {
  10288. + ssam_err(ctrl, "unexpected response from display-off notification: "
  10289. + "0x%02x\n", response);
  10290. + return -EIO;
  10291. + }
  10292. +
  10293. + return 0;
  10294. +}
  10295. +
  10296. +static int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl)
  10297. +{
  10298. + int status;
  10299. + u8 response;
  10300. +
  10301. + if (!ctrl->caps.notif_display)
  10302. + return 0;
  10303. +
  10304. + ssam_dbg(ctrl, "pm: notifying display on\n");
  10305. +
  10306. + status = ssam_ssh_notif_display_on(ctrl, &response);
  10307. + if (status)
  10308. + return status;
  10309. +
  10310. + if (response != 0) {
  10311. + ssam_err(ctrl, "unexpected response from display-on notification: "
  10312. + "0x%02x\n", response);
  10313. + return -EIO;
  10314. + }
  10315. +
  10316. + return 0;
  10317. +}
  10318. +
  10319. +static int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl)
  10320. +{
  10321. + int status;
  10322. + u8 response;
  10323. +
  10324. + if (!ctrl->caps.notif_d0exit)
  10325. + return 0;
  10326. +
  10327. + ssam_dbg(ctrl, "pm: notifying D0 exit\n");
  10328. +
  10329. + status = ssam_ssh_notif_d0_exit(ctrl, &response);
  10330. + if (status)
  10331. + return status;
  10332. +
  10333. + if (response != 0) {
  10334. + ssam_err(ctrl, "unexpected response from D0-exit notification: "
  10335. + "0x%02x\n", response);
  10336. + return -EIO;
  10337. + }
  10338. +
  10339. + return 0;
  10340. +}
  10341. +
  10342. +static int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
  10343. +{
  10344. + int status;
  10345. + u8 response;
  10346. +
  10347. + if (!ctrl->caps.notif_d0exit)
  10348. + return 0;
  10349. +
  10350. + ssam_dbg(ctrl, "pm: notifying D0 entry\n");
  10351. +
  10352. + status = ssam_ssh_notif_d0_entry(ctrl, &response);
  10353. + if (status)
  10354. + return status;
  10355. +
  10356. + if (response != 0) {
  10357. + ssam_err(ctrl, "unexpected response from D0-entry notification: "
  10358. + "0x%02x\n", response);
  10359. + return -EIO;
  10360. + }
  10361. +
  10362. + return 0;
  10363. +}
  10364. +
  10365. +
  10366. +/* -- Top-level event registry interface. ----------------------------------- */
  10367. +
  10368. +int ssam_notifier_register(struct ssam_controller *ctrl,
  10369. + struct ssam_event_notifier *n)
  10370. +{
  10371. + u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
  10372. + struct ssam_nf_head *nf_head;
  10373. + struct ssam_nf *nf;
  10374. + int rc, status;
  10375. +
  10376. + if (!ssh_rqid_is_event(rqid))
  10377. + return -EINVAL;
  10378. +
  10379. + nf = &ctrl->cplt.event.notif;
  10380. + nf_head = &nf->head[ssh_rqid_to_event(rqid)];
  10381. +
  10382. + mutex_lock(&nf->lock);
  10383. +
  10384. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_STARTED) {
  10385. + mutex_unlock(&nf->lock);
  10386. + return -ENXIO;
  10387. + }
  10388. +
  10389. + rc = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
  10390. + if (rc < 0) {
  10391. + mutex_unlock(&nf->lock);
  10392. + return rc;
  10393. + }
  10394. +
  10395. + ssam_dbg(ctrl, "enabling event (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x, "
  10396. + "rc: %d)\n", n->event.reg.target_category,
  10397. + n->event.id.target_category, n->event.id.instance, rc);
  10398. +
  10399. + status = __ssam_nfblk_insert(nf_head, &n->base);
  10400. + if (status) {
  10401. + ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
  10402. + mutex_unlock(&nf->lock);
  10403. + return status;
  10404. + }
  10405. +
  10406. + if (rc == 1) {
  10407. + status = ssam_ssh_event_enable(ctrl, n->event.reg, n->event.id,
  10408. + n->event.flags);
  10409. + if (status) {
  10410. + __ssam_nfblk_remove(nf_head, &n->base);
  10411. + ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
  10412. + mutex_unlock(&nf->lock);
  10413. + synchronize_srcu(&nf_head->srcu);
  10414. + return status;
  10415. + }
  10416. + }
  10417. +
  10418. + mutex_unlock(&nf->lock);
  10419. + return 0;
  10420. +
  10421. +}
  10422. +EXPORT_SYMBOL_GPL(ssam_notifier_register);
  10423. +
  10424. +int ssam_notifier_unregister(struct ssam_controller *ctrl,
  10425. + struct ssam_event_notifier *n)
  10426. +{
  10427. + u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
  10428. + struct ssam_nf_head *nf_head;
  10429. + struct ssam_nf *nf;
  10430. + int rc, status = 0;
  10431. +
  10432. + if (!ssh_rqid_is_event(rqid))
  10433. + return -EINVAL;
  10434. +
  10435. + nf = &ctrl->cplt.event.notif;
  10436. + nf_head = &nf->head[ssh_rqid_to_event(rqid)];
  10437. +
  10438. + mutex_lock(&nf->lock);
  10439. +
  10440. + if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_STARTED) {
  10441. + mutex_unlock(&nf->lock);
  10442. + return -ENXIO;
  10443. + }
  10444. +
  10445. + rc = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
  10446. + if (rc < 0) {
  10447. + mutex_unlock(&nf->lock);
  10448. + return rc;
  10449. + }
  10450. +
  10451. + ssam_dbg(ctrl, "disabling event (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x, "
  10452. + "rc: %d)\n", n->event.reg.target_category,
  10453. + n->event.id.target_category, n->event.id.instance, rc);
  10454. +
  10455. + if (rc == 0) {
  10456. + status = ssam_ssh_event_disable(ctrl, n->event.reg, n->event.id,
  10457. + n->event.flags);
  10458. + }
  10459. +
  10460. + __ssam_nfblk_remove(nf_head, &n->base);
  10461. + mutex_unlock(&nf->lock);
  10462. + synchronize_srcu(&nf_head->srcu);
  10463. +
  10464. + return status;
  10465. +}
  10466. +EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
  10467. +
  10468. +static bool ssam_notifier_empty(struct ssam_controller *ctrl)
  10469. +{
  10470. + struct ssam_nf *nf = &ctrl->cplt.event.notif;
  10471. + bool result;
  10472. +
  10473. + mutex_lock(&nf->lock);
  10474. + result = ssam_nf_refcount_empty(nf);
  10475. + mutex_unlock(&nf->lock);
  10476. +
  10477. + return result;
  10478. +}
  10479. +
/*
 * ssam_notifier_unregister_all - disable all events and drop their refcounts
 * @ctrl: the controller to clean up
 *
 * Walks the event reference-count tree, sends a disable command to the EC
 * for every enabled event and frees the corresponding entry. The post-order
 * traversal allows freeing each node without rebalancing the tree; the root
 * is reset afterwards so the tree is left in a valid empty state.
 */
static void ssam_notifier_unregister_all(struct ssam_controller *ctrl)
{
	struct ssam_nf *nf = &ctrl->cplt.event.notif;
	struct ssam_nf_refcount_entry *pos, *n;

	mutex_lock(&nf->lock);
	rbtree_postorder_for_each_entry_safe(pos, n, &nf->refcount, node) {
		// ignore errors, will get logged in call
		ssam_ssh_event_disable(ctrl, pos->key.reg, pos->key.id, 0);
		kfree(pos);
	}
	nf->refcount = RB_ROOT;
	mutex_unlock(&nf->lock);
}
  10494. +
  10495. +
  10496. +/* -- Wakeup IRQ. ----------------------------------------------------------- */
  10497. +
/* ACPI GPIO indices: _CRS entry 0 is the wakeup interrupt, entry 1 the wakeup line. */
static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false };
static const struct acpi_gpio_params gpio_ssam_wakeup = { 1, 0, false };

/* Mapping of named GPIOs to their ACPI _CRS resources (terminated by empty entry). */
static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
	{ "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 },
	{ "ssam_wakeup-gpio", &gpio_ssam_wakeup, 1 },
	{ },
};
  10506. +
  10507. +static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
  10508. +{
  10509. + struct ssam_controller *ctrl = dev_id;
  10510. +
  10511. + ssam_dbg(ctrl, "pm: wake irq triggered\n");
  10512. +
  10513. + // Note: Proper wakeup detection is currently unimplemented.
  10514. + // When the EC is in display-off or any other non-D0 state, it
  10515. + // does not send events/notifications to the host. Instead it
  10516. + // signals that there are events available via the wakeup IRQ.
  10517. + // This driver is responsible for calling back to the EC to
  10518. + // release these events one-by-one.
  10519. + //
  10520. + // This IRQ should not cause a full system resume by its own.
  10521. + // Instead, events should be handled by their respective subsystem
  10522. + // drivers, which in turn should signal whether a full system
  10523. + // resume should be performed.
  10524. + //
  10525. + // TODO: Send GPIO callback command repeatedly to EC until callback
  10526. + // returns 0x00. Return flag of callback is "has more events".
  10527. + // Each time the command is sent, one event is "released". Once
  10528. + // all events have been released (return = 0x00), the GPIO is
  10529. + // re-armed. Detect wakeup events during this process, go back to
  10530. + // sleep if no wakeup event has been received.
  10531. +
  10532. + return IRQ_HANDLED;
  10533. +}
  10534. +
  10535. +static int ssam_irq_setup(struct ssam_controller *ctrl)
  10536. +{
  10537. + struct device *dev = ssam_controller_device(ctrl);
  10538. + struct gpio_desc *gpiod;
  10539. + int irq;
  10540. + int status;
  10541. +
  10542. + /*
  10543. + * The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
  10544. + * However, the GPIO line only gets reset by sending the GPIO callback
  10545. + * command to SAM (or alternatively the display-on notification). As
  10546. + * proper handling for this interrupt is not implemented yet, leaving
  10547. + * the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
  10548. + * never gets sent and thus the line line never gets reset). To avoid
  10549. + * this, mark the IRQ as TRIGGER_RISING for now, only creating a single
  10550. + * interrupt, and let the SAM resume callback during the controller
  10551. + * resume process clear it.
  10552. + */
  10553. + const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
  10554. +
  10555. + gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
  10556. + if (IS_ERR(gpiod))
  10557. + return PTR_ERR(gpiod);
  10558. +
  10559. + irq = gpiod_to_irq(gpiod);
  10560. + gpiod_put(gpiod);
  10561. +
  10562. + if (irq < 0)
  10563. + return irq;
  10564. +
  10565. + status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
  10566. + "surface_sam_wakeup", ctrl);
  10567. + if (status)
  10568. + return status;
  10569. +
  10570. + ctrl->irq.num = irq;
  10571. + return 0;
  10572. +}
  10573. +
  10574. +static void ssam_irq_free(struct ssam_controller *ctrl)
  10575. +{
  10576. + free_irq(ctrl->irq.num, ctrl);
  10577. + ctrl->irq.num = -1;
  10578. +}
  10579. +
  10580. +
  10581. +/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
  10582. +
  10583. +static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
  10584. + size_t n)
  10585. +{
  10586. + struct ssam_controller *ctrl = serdev_device_get_drvdata(dev);
  10587. + return ssam_controller_receive_buf(ctrl, buf, n);
  10588. +}
  10589. +
  10590. +static void ssam_write_wakeup(struct serdev_device *dev)
  10591. +{
  10592. + struct ssam_controller *ctrl = serdev_device_get_drvdata(dev);
  10593. + ssam_controller_write_wakeup(ctrl);
  10594. +}
  10595. +
/* serdev client callbacks, translating serdev events to controller calls. */
static const struct serdev_device_ops ssam_serdev_ops = {
	.receive_buf = ssam_receive_buf,
	.write_wakeup = ssam_write_wakeup,
};
  10600. +
  10601. +
  10602. +/* -- ACPI based device setup. ---------------------------------------------- */
  10603. +
  10604. +static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
  10605. + void *ctx)
  10606. +{
  10607. + struct serdev_device *serdev = ctx;
  10608. + struct acpi_resource_common_serialbus *serial;
  10609. + struct acpi_resource_uart_serialbus *uart;
  10610. + bool flow_control;
  10611. + int status = 0;
  10612. +
  10613. + if (rsc->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
  10614. + return AE_OK;
  10615. +
  10616. + serial = &rsc->data.common_serial_bus;
  10617. + if (serial->type != ACPI_RESOURCE_SERIAL_TYPE_UART)
  10618. + return AE_OK;
  10619. +
  10620. + uart = &rsc->data.uart_serial_bus;
  10621. +
  10622. + // set up serdev device
  10623. + serdev_device_set_baudrate(serdev, uart->default_baud_rate);
  10624. +
  10625. + // serdev currently only supports RTSCTS flow control
  10626. + if (uart->flow_control & (~((u8) ACPI_UART_FLOW_CONTROL_HW))) {
  10627. + dev_warn(&serdev->dev, "setup: unsupported flow control"
  10628. + " (value: 0x%02x)\n", uart->flow_control);
  10629. + }
  10630. +
  10631. + // set RTSCTS flow control
  10632. + flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW;
  10633. + serdev_device_set_flow_control(serdev, flow_control);
  10634. +
  10635. + // serdev currently only supports EVEN/ODD parity
  10636. + switch (uart->parity) {
  10637. + case ACPI_UART_PARITY_NONE:
  10638. + status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
  10639. + break;
  10640. + case ACPI_UART_PARITY_EVEN:
  10641. + status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
  10642. + break;
  10643. + case ACPI_UART_PARITY_ODD:
  10644. + status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD);
  10645. + break;
  10646. + default:
  10647. + dev_warn(&serdev->dev, "setup: unsupported parity"
  10648. + " (value: 0x%02x)\n", uart->parity);
  10649. + break;
  10650. + }
  10651. +
  10652. + if (status) {
  10653. + dev_err(&serdev->dev, "setup: failed to set parity"
  10654. + " (value: 0x%02x)\n", uart->parity);
  10655. + return status;
  10656. + }
  10657. +
  10658. + return AE_CTRL_TERMINATE; // we've found the resource and are done
  10659. +}
  10660. +
  10661. +static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle,
  10662. + struct serdev_device *serdev)
  10663. +{
  10664. + return acpi_walk_resources(handle, METHOD_NAME__CRS,
  10665. + ssam_serdev_setup_via_acpi_crs, serdev);
  10666. +}
  10667. +
  10668. +
  10669. +/* -- Power management. ----------------------------------------------------- */
  10670. +
  10671. +static void surface_sam_ssh_shutdown(struct device *dev)
  10672. +{
  10673. + struct ssam_controller *c = dev_get_drvdata(dev);
  10674. + int status;
  10675. +
  10676. + /*
  10677. + * Try to signal display-off and D0-exit, ignore any errors.
  10678. + *
  10679. + * Note: It has not been established yet if this is actually
  10680. + * necessary/useful for shutdown.
  10681. + */
  10682. +
  10683. + status = ssam_ctrl_notif_display_off(c);
  10684. + if (status)
  10685. + ssam_err(c, "pm: display-off notification failed: %d\n", status);
  10686. +
  10687. + status = ssam_ctrl_notif_d0_exit(c);
  10688. + if (status)
  10689. + ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
  10690. +}
  10691. +
  10692. +static int surface_sam_ssh_suspend(struct device *dev)
  10693. +{
  10694. + struct ssam_controller *c = dev_get_drvdata(dev);
  10695. + int status;
  10696. +
  10697. + /*
  10698. + * Try to signal display-off and D0-exit, enable IRQ wakeup if
  10699. + * specified. Abort on error.
  10700. + *
  10701. + * Note: Signalling display-off/display-on should normally be done from
  10702. + * some sort of display state notifier. As that is not available, signal
  10703. + * it here.
  10704. + */
  10705. +
  10706. + status = ssam_ctrl_notif_display_off(c);
  10707. + if (status) {
  10708. + ssam_err(c, "pm: display-off notification failed: %d\n", status);
  10709. + return status;
  10710. + }
  10711. +
  10712. + status = ssam_ctrl_notif_d0_exit(c);
  10713. + if (status) {
  10714. + ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
  10715. + goto err_notif;
  10716. + }
  10717. +
  10718. + if (device_may_wakeup(dev)) {
  10719. + status = enable_irq_wake(c->irq.num);
  10720. + if (status) {
  10721. + ssam_err(c, "failed to disable wake IRQ: %d\n", status);
  10722. + goto err_irq;
  10723. + }
  10724. +
  10725. + c->irq.wakeup_enabled = true;
  10726. + } else {
  10727. + c->irq.wakeup_enabled = false;
  10728. + }
  10729. +
  10730. + WARN_ON(ssam_controller_suspend(c));
  10731. + return 0;
  10732. +
  10733. +err_irq:
  10734. + ssam_ctrl_notif_d0_entry(c);
  10735. +err_notif:
  10736. + ssam_ctrl_notif_display_on(c);
  10737. + return status;
  10738. +}
  10739. +
  10740. +static int surface_sam_ssh_resume(struct device *dev)
  10741. +{
  10742. + struct ssam_controller *c = dev_get_drvdata(dev);
  10743. + int status;
  10744. +
  10745. + WARN_ON(ssam_controller_resume(c));
  10746. +
  10747. + /*
  10748. + * Try to disable IRQ wakeup (if specified), signal display-on and
  10749. + * D0-entry. In case of errors, log them and try to restore normal
  10750. + * operation state as far as possible.
  10751. + *
  10752. + * Note: Signalling display-off/display-on should normally be done from
  10753. + * some sort of display state notifier. As that is not available, signal
  10754. + * it here.
  10755. + */
  10756. +
  10757. + if (c->irq.wakeup_enabled) {
  10758. + status = disable_irq_wake(c->irq.num);
  10759. + if (status)
  10760. + ssam_err(c, "failed to disable wake IRQ: %d\n", status);
  10761. +
  10762. + c->irq.wakeup_enabled = false;
  10763. + }
  10764. +
  10765. + status = ssam_ctrl_notif_d0_entry(c);
  10766. + if (status)
  10767. + ssam_err(c, "pm: display-on notification failed: %d\n", status);
  10768. +
  10769. + status = ssam_ctrl_notif_display_on(c);
  10770. + if (status)
  10771. + ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
  10772. +
  10773. + return 0;
  10774. +}
  10775. +
  10776. +static SIMPLE_DEV_PM_OPS(surface_sam_ssh_pm_ops, surface_sam_ssh_suspend,
  10777. + surface_sam_ssh_resume);
  10778. +
  10779. +
  10780. +/* -- Device/driver setup. -------------------------------------------------- */
  10781. +
/* Single global controller instance; this driver supports one SSH device. */
static struct ssam_controller ssam_controller = {
	.state = SSAM_CONTROLLER_UNINITIALIZED,
};
/* Serializes probe/remove and client binding against controller state changes. */
static DEFINE_MUTEX(ssam_controller_lock);
  10786. +
  10787. +static int __ssam_client_link(struct ssam_controller *c, struct device *client)
  10788. +{
  10789. + const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
  10790. + struct device_link *link;
  10791. + struct device *ctrldev;
  10792. +
  10793. + if (smp_load_acquire(&c->state) != SSAM_CONTROLLER_STARTED)
  10794. + return -ENXIO;
  10795. +
  10796. + if ((ctrldev = ssam_controller_device(c)) == NULL)
  10797. + return -ENXIO;
  10798. +
  10799. + if ((link = device_link_add(client, ctrldev, flags)) == NULL)
  10800. + return -ENOMEM;
  10801. +
  10802. + /*
  10803. + * Return -ENXIO if supplier driver is on its way to be removed. In this
  10804. + * case, the controller won't be around for much longer and the device
  10805. + * link is not going to save us any more, as unbinding is already in
  10806. + * progress.
  10807. + */
  10808. + if (link->status == DL_STATE_SUPPLIER_UNBIND)
  10809. + return -ENXIO;
  10810. +
  10811. + return 0;
  10812. +}
  10813. +
  10814. +int ssam_client_bind(struct device *client, struct ssam_controller **ctrl)
  10815. +{
  10816. + struct ssam_controller *c = &ssam_controller;
  10817. + int status;
  10818. +
  10819. + mutex_lock(&ssam_controller_lock);
  10820. + status = __ssam_client_link(c, client);
  10821. + mutex_unlock(&ssam_controller_lock);
  10822. +
  10823. + *ctrl = status == 0 ? c : NULL;
  10824. + return status;
  10825. +}
  10826. +EXPORT_SYMBOL_GPL(ssam_client_bind);
  10827. +
  10828. +
  10829. +static int surface_sam_ssh_probe(struct serdev_device *serdev)
  10830. +{
  10831. + struct ssam_controller *ctrl = &ssam_controller;
  10832. + acpi_handle *ssh = ACPI_HANDLE(&serdev->dev);
  10833. + int status;
  10834. +
  10835. + if (gpiod_count(&serdev->dev, NULL) < 0)
  10836. + return -ENODEV;
  10837. +
  10838. + status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios);
  10839. + if (status)
  10840. + return status;
  10841. +
  10842. + // set up EC
  10843. + mutex_lock(&ssam_controller_lock);
  10844. +
  10845. + // initialize controller
  10846. + status = ssam_controller_init(ctrl, serdev);
  10847. + if (status)
  10848. + goto err_ctrl_init;
  10849. +
  10850. + // set up serdev device
  10851. + serdev_device_set_drvdata(serdev, ctrl);
  10852. + serdev_device_set_client_ops(serdev, &ssam_serdev_ops);
  10853. + status = serdev_device_open(serdev);
  10854. + if (status)
  10855. + goto err_devopen;
  10856. +
  10857. + status = ssam_serdev_setup_via_acpi(ssh, serdev);
  10858. + if (ACPI_FAILURE(status))
  10859. + goto err_devinit;
  10860. +
  10861. + // start controller
  10862. + status = ssam_controller_start(ctrl);
  10863. + if (status)
  10864. + goto err_devinit;
  10865. +
  10866. + // initial SAM requests: log version, notify default/init power states
  10867. + status = ssam_log_firmware_version(ctrl);
  10868. + if (status)
  10869. + goto err_initrq;
  10870. +
  10871. + status = ssam_ctrl_notif_d0_entry(ctrl);
  10872. + if (status)
  10873. + goto err_initrq;
  10874. +
  10875. + status = ssam_ctrl_notif_display_on(ctrl);
  10876. + if (status)
  10877. + goto err_initrq;
  10878. +
  10879. + // setup IRQ
  10880. + status = ssam_irq_setup(ctrl);
  10881. + if (status)
  10882. + goto err_initrq;
  10883. +
  10884. + mutex_unlock(&ssam_controller_lock);
  10885. +
  10886. + /*
  10887. + * TODO: The EC can wake up the system via the associated GPIO interrupt
  10888. + * in multiple situations. One of which is the remaining battery
  10889. + * capacity falling below a certain threshold. Normally, we should
  10890. + * use the device_init_wakeup function, however, the EC also seems
  10891. + * to have other reasons for waking up the system and it seems
  10892. + * that Windows has additional checks whether the system should be
  10893. + * resumed. In short, this causes some spurious unwanted wake-ups.
  10894. + * For now let's thus default power/wakeup to false.
  10895. + */
  10896. + device_set_wakeup_capable(&serdev->dev, true);
  10897. + acpi_walk_dep_device_list(ssh);
  10898. +
  10899. + return 0;
  10900. +
  10901. +err_initrq:
  10902. + ssam_controller_shutdown(ctrl);
  10903. +err_devinit:
  10904. + serdev_device_close(serdev);
  10905. +err_devopen:
  10906. + ssam_controller_destroy(ctrl);
  10907. +err_ctrl_init:
  10908. + serdev_device_set_drvdata(serdev, NULL);
  10909. + mutex_unlock(&ssam_controller_lock);
  10910. + return status;
  10911. +}
  10912. +
/*
 * surface_sam_ssh_remove - remove the SSH serdev device
 * @serdev: the serdev device being removed
 *
 * Tears down probe's setup in reverse order: free the wakeup IRQ, notify
 * the EC (display-off, D0-exit; failures only logged — removal cannot be
 * aborted), shut down and destroy the controller, and finally close the
 * serdev device. The ordering is significant: the IRQ must go first and the
 * transport must stay open until the controller has been shut down.
 */
static void surface_sam_ssh_remove(struct serdev_device *serdev)
{
	struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev);
	int status;

	mutex_lock(&ssam_controller_lock);
	ssam_irq_free(ctrl);

	// suspend EC and disable events
	status = ssam_ctrl_notif_display_off(ctrl);
	if (status) {
		dev_err(&serdev->dev, "display-off notification failed: %d\n",
			status);
	}

	status = ssam_ctrl_notif_d0_exit(ctrl);
	if (status) {
		dev_err(&serdev->dev, "D0-exit notification failed: %d\n",
			status);
	}

	ssam_controller_shutdown(ctrl);

	// shut down actual transport
	serdev_device_wait_until_sent(serdev, 0);
	serdev_device_close(serdev);

	ssam_controller_destroy(ctrl);

	device_set_wakeup_capable(&serdev->dev, false);
	serdev_device_set_drvdata(serdev, NULL);
	mutex_unlock(&ssam_controller_lock);
}
  10946. +
  10947. +
/* ACPI ID of the Surface Serial Hub device (MSHW0084). */
static const struct acpi_device_id surface_sam_ssh_match[] = {
	{ "MSHW0084", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, surface_sam_ssh_match);
  10953. +
/* serdev driver definition; async probe so boot is not blocked on the EC. */
static struct serdev_device_driver surface_sam_ssh = {
	.probe = surface_sam_ssh_probe,
	.remove = surface_sam_ssh_remove,
	.driver = {
		.name = "surface_sam_ssh",
		.acpi_match_table = surface_sam_ssh_match,
		.pm = &surface_sam_ssh_pm_ops,
		.shutdown = surface_sam_ssh_shutdown,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
  10965. +
  10966. +
  10967. +/* -- Module setup. --------------------------------------------------------- */
  10968. +
  10969. +static int __init surface_sam_ssh_init(void)
  10970. +{
  10971. + int status;
  10972. +
  10973. + status = ssh_ctrl_packet_cache_init();
  10974. + if (status)
  10975. + goto err_cpkg;
  10976. +
  10977. + status = ssam_event_item_cache_init();
  10978. + if (status)
  10979. + goto err_evitem;
  10980. +
  10981. + status = serdev_device_driver_register(&surface_sam_ssh);
  10982. + if (status)
  10983. + goto err_register;
  10984. +
  10985. + return 0;
  10986. +
  10987. +err_register:
  10988. + ssam_event_item_cache_destroy();
  10989. +err_evitem:
  10990. + ssh_ctrl_packet_cache_destroy();
  10991. +err_cpkg:
  10992. + return status;
  10993. +}
  10994. +
  10995. +static void __exit surface_sam_ssh_exit(void)
  10996. +{
  10997. + serdev_device_driver_unregister(&surface_sam_ssh);
  10998. + ssam_event_item_cache_destroy();
  10999. + ssh_ctrl_packet_cache_destroy();
  11000. +}
  11001. +
  11002. +/*
  11003. + * Ensure that the driver is loaded late due to some issues with the UART
  11004. + * communication. Specifically, we want to ensure that DMA is ready and being
  11005. + * used. Not using DMA can result in spurious communication failures,
  11006. + * especially during boot, which among other things will result in wrong
  11007. + * battery information (via ACPI _BIX) being displayed. Using a late init_call
  11008. + * instead of the normal module_init gives the DMA subsystem time to
  11009. + * initialize and via that results in a more stable communication, avoiding
  11010. + * such failures.
  11011. + */
  11012. +late_initcall(surface_sam_ssh_init);
  11013. +module_exit(surface_sam_ssh_exit);
  11014. +
  11015. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  11016. +MODULE_DESCRIPTION("Surface Serial Hub Driver for 5th Generation Surface Devices");
  11017. +MODULE_LICENSE("GPL");
  11018. diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh.h b/drivers/platform/x86/surface_sam/surface_sam_ssh.h
  11019. new file mode 100644
  11020. index 0000000000000..ba57adb2a3c9d
  11021. --- /dev/null
  11022. +++ b/drivers/platform/x86/surface_sam/surface_sam_ssh.h
  11023. @@ -0,0 +1,717 @@
  11024. +/* SPDX-License-Identifier: GPL-2.0-or-later */
  11025. +/*
  11026. + * Interface for Surface Serial Hub (SSH).
  11027. + *
  11028. + * The SSH is the main communication hub for communication between host and
  11029. + * the Surface/System Aggregator Module (SAM) on newer Microsoft Surface
  11030. + * devices (Book 2, Pro 5, Laptops, ...). Also referred to as SAM-over-SSH.
  11031. + * Older devices (Book 1, Pro 4) use SAM-over-HID (via I2C).
  11032. + */
  11033. +
  11034. +#ifndef _SURFACE_SAM_SSH_H
  11035. +#define _SURFACE_SAM_SSH_H
  11036. +
  11037. +#include <linux/types.h>
  11038. +#include <linux/device.h>
  11039. +
  11040. +
  11041. +/* -- Data structures for SAM-over-SSH communication. ----------------------- */
  11042. +
  11043. +/**
  11044. + * enum ssh_frame_type - Frame types for SSH frames.
  11045. + * @SSH_FRAME_TYPE_DATA_SEQ: Indicates a data frame, followed by a payload with
  11046. + * the length specified in the ssh_frame.len field. This
  11047. + * frame is sequenced, meaning that an ACK is required.
  11048. + * @SSH_FRAME_TYPE_DATA_NSQ: Same as SSH_FRAME_TYPE_DATA_SEQ, but unsequenced,
  11049. + * meaning that the message does not have to be ACKed.
  11050. + * @SSH_FRAME_TYPE_ACK: Indicates an ACK message.
  11051. + * @SSH_FRAME_TYPE_NAK: Indicates an error response for previously sent
  11052. + * frame. In general, this means that the frame and/or
  11053. + * payload is malformed, e.g. a CRC is wrong. For command-
  11054. + * type payloads, this can also mean that the command is
  11055. + * invalid.
  11056. + */
  11057. +enum ssh_frame_type {
  11058. + SSH_FRAME_TYPE_DATA_SEQ = 0x80,
  11059. + SSH_FRAME_TYPE_DATA_NSQ = 0x00,
  11060. + SSH_FRAME_TYPE_ACK = 0x40,
  11061. + SSH_FRAME_TYPE_NAK = 0x04,
  11062. +};
  11063. +
  11064. +/**
  11065. + * struct ssh_frame - SSH communication frame.
  11066. + * @type: The type of the frame. See &enum ssh_frame_type.
  11067. + * @len: The length of the frame payload directly following the CRC for this
  11068. + * frame. Does not include the final CRC for that payload.
  11069. + * @seq: The sequence number for this message/exchange.
  11070. + */
  11071. +struct ssh_frame {
  11072. + u8 type;
  11073. + __le16 len;
  11074. + u8 seq;
  11075. +} __packed;
  11076. +
  11077. +static_assert(sizeof(struct ssh_frame) == 4);
  11078. +
  11079. +/*
  11080. + * Maximum SSH frame payload length in bytes. This is the physical maximum
  11081. + * length of the protocol. Implementations may set a more constrained limit.
  11082. + */
  11083. +#define SSH_FRAME_MAX_PAYLOAD_SIZE U16_MAX
  11084. +
  11085. +/**
  11086. + * enum ssh_payload_type - Type indicator for the SSH payload.
  11087. + * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command
  11088. + * payload.
  11089. + */
  11090. +enum ssh_payload_type {
  11091. + SSH_PLD_TYPE_CMD = 0x80,
  11092. +};
  11093. +
  11094. +/**
  11095. + * struct ssh_command - Payload of a command-type frame.
  11096. + * @type: The type of the payload. See &enum ssh_payload_type. Should be
  11097. + * SSH_PLD_TYPE_CMD for this struct.
  11098. + * @tc: Command target category.
 * @chn_out: Output channel. Should be zero if this is an incoming (EC to host)
 *           message.
 * @chn_in: Input channel. Should be zero if this is an outgoing (host to EC)
 *          message.
  11103. + * @iid: Instance ID.
  11104. + * @rqid: Request ID. Used to match requests with responses and differentiate
  11105. + * between responses and events.
  11106. + * @cid: Command ID.
  11107. + */
  11108. +struct ssh_command {
  11109. + u8 type;
  11110. + u8 tc;
  11111. + u8 chn_out;
  11112. + u8 chn_in;
  11113. + u8 iid;
  11114. + __le16 rqid;
  11115. + u8 cid;
  11116. +} __packed;
  11117. +
  11118. +static_assert(sizeof(struct ssh_command) == 8);
  11119. +
  11120. +/*
  11121. + * Maximum SSH command payload length in bytes. This is the physical maximum
  11122. + * length of the protocol. Implementations may set a more constrained limit.
  11123. + */
  11124. +#define SSH_COMMAND_MAX_PAYLOAD_SIZE \
  11125. + (SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command))
  11126. +
  11127. +/**
  11128. + * struct ssh_notification_params - Command payload to enable/disable SSH
  11129. + * notifications.
  11130. + * @target_category: The target category for which notifications should be
  11131. + * enabled/disabled.
  11132. + * @flags: Flags determining how notifications are being sent.
  11133. + * @request_id: The request ID that is used to send these notifications.
  11134. + * @instance_id: The specific instance in the given target category for
  11135. + * which notifications should be enabled.
  11136. + */
  11137. +struct ssh_notification_params {
  11138. + u8 target_category;
  11139. + u8 flags;
  11140. + __le16 request_id;
  11141. + u8 instance_id;
  11142. +} __packed;
  11143. +
  11144. +static_assert(sizeof(struct ssh_notification_params) == 5);
  11145. +
  11146. +/**
 * SSH message synchronization (SYN) bytes.
  11148. + */
  11149. +#define SSH_MSG_SYN ((u16)0x55aa)
  11150. +
  11151. +/**
  11152. + * Base-length of a SSH message. This is the minimum number of bytes required
  11153. + * to form a message. The actual message length is SSH_MSG_LEN_BASE plus the
  11154. + * length of the frame payload.
  11155. + */
  11156. +#define SSH_MSG_LEN_BASE (sizeof(struct ssh_frame) + 3ull * sizeof(u16))
  11157. +
  11158. +/**
  11159. + * Length of a SSH control message.
  11160. + */
  11161. +#define SSH_MSG_LEN_CTRL SSH_MSG_LEN_BASE
  11162. +
  11163. +/**
  11164. + * Length of a SSH message with payload of specified size.
  11165. + */
  11166. +#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + payload_size)
  11167. +
  11168. +/**
  11169. + * Length of a SSH command message with command payload of specified size.
  11170. + */
  11171. +#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \
  11172. + SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + payload_size)
  11173. +
  11174. +/**
  11175. + * Offset of the specified struct ssh_frame field in the raw SSH message data.
  11176. + */
  11177. +#define SSH_MSGOFFSET_FRAME(field) \
  11178. + (sizeof(u16) + offsetof(struct ssh_frame, field))
  11179. +
  11180. +/**
  11181. + * Offset of the specified struct ssh_command field in the raw SSH message data.
  11182. + */
  11183. +#define SSH_MSGOFFSET_COMMAND(field) \
  11184. + (2ull * sizeof(u16) + sizeof(struct ssh_frame) \
  11185. + + offsetof(struct ssh_command, field))
  11186. +
  11187. +/**
  11188. + * struct ssam_span - reference to a buffer region
  11189. + * @ptr: pointer to the buffer region
  11190. + * @len: length of the buffer region
  11191. + *
  11192. + * A reference to a (non-owned) buffer segment, consisting of pointer and
  11193. + * length. Use of this struct indicates non-owned data, i.e. data of which the
  11194. + * life-time is managed (i.e. it is allocated/freed) via another pointer.
  11195. + */
  11196. +struct ssam_span {
  11197. + u8 *ptr;
  11198. + size_t len;
  11199. +};
  11200. +
  11201. +
  11202. +/* -- Packet transport layer (ptl). ----------------------------------------- */
  11203. +
  11204. +enum ssh_packet_priority {
  11205. + SSH_PACKET_PRIORITY_FLUSH = 0,
  11206. + SSH_PACKET_PRIORITY_DATA = 0,
  11207. + SSH_PACKET_PRIORITY_NAK = 1 << 4,
  11208. + SSH_PACKET_PRIORITY_ACK = 2 << 4,
  11209. +};
  11210. +
  11211. +#define SSH_PACKET_PRIORITY(base, try) \
  11212. + ((SSH_PACKET_PRIORITY_##base) | ((try) & 0x0f))
  11213. +
  11214. +#define ssh_packet_priority_get_try(p) ((p) & 0x0f)
  11215. +
  11216. +
  11217. +enum ssh_packet_flags {
  11218. + SSH_PACKET_SF_LOCKED_BIT,
  11219. + SSH_PACKET_SF_QUEUED_BIT,
  11220. + SSH_PACKET_SF_PENDING_BIT,
  11221. + SSH_PACKET_SF_TRANSMITTING_BIT,
  11222. + SSH_PACKET_SF_TRANSMITTED_BIT,
  11223. + SSH_PACKET_SF_ACKED_BIT,
  11224. + SSH_PACKET_SF_CANCELED_BIT,
  11225. + SSH_PACKET_SF_COMPLETED_BIT,
  11226. +
  11227. + SSH_PACKET_TY_FLUSH_BIT,
  11228. + SSH_PACKET_TY_SEQUENCED_BIT,
  11229. + SSH_PACKET_TY_BLOCKING_BIT,
  11230. +
  11231. + SSH_PACKET_FLAGS_SF_MASK =
  11232. + BIT(SSH_PACKET_SF_LOCKED_BIT)
  11233. + | BIT(SSH_PACKET_SF_QUEUED_BIT)
  11234. + | BIT(SSH_PACKET_SF_PENDING_BIT)
  11235. + | BIT(SSH_PACKET_SF_TRANSMITTING_BIT)
  11236. + | BIT(SSH_PACKET_SF_TRANSMITTED_BIT)
  11237. + | BIT(SSH_PACKET_SF_ACKED_BIT)
  11238. + | BIT(SSH_PACKET_SF_CANCELED_BIT)
  11239. + | BIT(SSH_PACKET_SF_COMPLETED_BIT),
  11240. +
  11241. + SSH_PACKET_FLAGS_TY_MASK =
  11242. + BIT(SSH_PACKET_TY_FLUSH_BIT)
  11243. + | BIT(SSH_PACKET_TY_SEQUENCED_BIT)
  11244. + | BIT(SSH_PACKET_TY_BLOCKING_BIT),
  11245. +};
  11246. +
  11247. +
  11248. +struct ssh_ptl;
  11249. +struct ssh_packet;
  11250. +
  11251. +struct ssh_packet_ops {
  11252. + void (*release)(struct ssh_packet *p);
  11253. + void (*complete)(struct ssh_packet *p, int status);
  11254. +};
  11255. +
  11256. +struct ssh_packet {
  11257. + struct ssh_ptl *ptl;
  11258. + struct kref refcnt;
  11259. +
  11260. + u8 priority;
  11261. +
  11262. + struct {
  11263. + size_t len;
  11264. + u8 *ptr;
  11265. + } data;
  11266. +
  11267. + unsigned long state;
  11268. + ktime_t timestamp;
  11269. +
  11270. + struct list_head queue_node;
  11271. + struct list_head pending_node;
  11272. +
  11273. + const struct ssh_packet_ops *ops;
  11274. +};
  11275. +
  11276. +
  11277. +void ssh_packet_get(struct ssh_packet *p);
  11278. +void ssh_packet_put(struct ssh_packet *p);
  11279. +
  11280. +static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len)
  11281. +{
  11282. + p->data.ptr = ptr;
  11283. + p->data.len = len;
  11284. +}
  11285. +
  11286. +
  11287. +/* -- Request transport layer (rtl). ---------------------------------------- */
  11288. +
  11289. +enum ssh_request_flags {
  11290. + SSH_REQUEST_SF_LOCKED_BIT,
  11291. + SSH_REQUEST_SF_QUEUED_BIT,
  11292. + SSH_REQUEST_SF_PENDING_BIT,
  11293. + SSH_REQUEST_SF_TRANSMITTING_BIT,
  11294. + SSH_REQUEST_SF_TRANSMITTED_BIT,
  11295. + SSH_REQUEST_SF_RSPRCVD_BIT,
  11296. + SSH_REQUEST_SF_CANCELED_BIT,
  11297. + SSH_REQUEST_SF_COMPLETED_BIT,
  11298. +
  11299. + SSH_REQUEST_TY_FLUSH_BIT,
  11300. + SSH_REQUEST_TY_HAS_RESPONSE_BIT,
  11301. +
  11302. + SSH_REQUEST_FLAGS_SF_MASK =
  11303. + BIT(SSH_REQUEST_SF_LOCKED_BIT)
  11304. + | BIT(SSH_REQUEST_SF_QUEUED_BIT)
  11305. + | BIT(SSH_REQUEST_SF_PENDING_BIT)
  11306. + | BIT(SSH_REQUEST_SF_TRANSMITTING_BIT)
  11307. + | BIT(SSH_REQUEST_SF_TRANSMITTED_BIT)
  11308. + | BIT(SSH_REQUEST_SF_RSPRCVD_BIT)
  11309. + | BIT(SSH_REQUEST_SF_CANCELED_BIT)
  11310. + | BIT(SSH_REQUEST_SF_COMPLETED_BIT),
  11311. +
  11312. + SSH_REQUEST_FLAGS_TY_MASK =
  11313. + BIT(SSH_REQUEST_TY_FLUSH_BIT)
  11314. + | BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),
  11315. +};
  11316. +
  11317. +
  11318. +struct ssh_rtl;
  11319. +struct ssh_request;
  11320. +
  11321. +struct ssh_request_ops {
  11322. + void (*release)(struct ssh_request *rqst);
  11323. + void (*complete)(struct ssh_request *rqst,
  11324. + const struct ssh_command *cmd,
  11325. + const struct ssam_span *data, int status);
  11326. +};
  11327. +
  11328. +struct ssh_request {
  11329. + struct ssh_packet packet;
  11330. + struct list_head node;
  11331. +
  11332. + unsigned long state;
  11333. + ktime_t timestamp;
  11334. +
  11335. + const struct ssh_request_ops *ops;
  11336. +};
  11337. +
  11338. +
/* Increment the reference count of the request's underlying packet. */
static inline void ssh_request_get(struct ssh_request *r)
{
	ssh_packet_get(&r->packet);
}

/* Decrement the reference count of the request's underlying packet. */
static inline void ssh_request_put(struct ssh_request *r)
{
	ssh_packet_put(&r->packet);
}

/* Set the message data of the request's underlying packet (non-owning). */
static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len)
{
	ssh_packet_set_data(&r->packet, ptr, len);
}
  11353. +
  11354. +
  11355. +/* -- Main data types and definitions --------------------------------------- */
  11356. +
  11357. +enum ssam_ssh_tc {
  11358. + SSAM_SSH_TC_SAM = 0x01, // generic system functionality, real-time clock
  11359. + SSAM_SSH_TC_BAT = 0x02, // battery/power subsystem
  11360. + SSAM_SSH_TC_TMP = 0x03, // thermal subsystem
  11361. + SSAM_SSH_TC_PMC = 0x04,
  11362. + SSAM_SSH_TC_FAN = 0x05,
  11363. + SSAM_SSH_TC_PoM = 0x06,
  11364. + SSAM_SSH_TC_DBG = 0x07,
  11365. + SSAM_SSH_TC_KBD = 0x08, // legacy keyboard (Laptop 1/2)
  11366. + SSAM_SSH_TC_FWU = 0x09,
  11367. + SSAM_SSH_TC_UNI = 0x0a,
  11368. + SSAM_SSH_TC_LPC = 0x0b,
  11369. + SSAM_SSH_TC_TCL = 0x0c,
  11370. + SSAM_SSH_TC_SFL = 0x0d,
  11371. + SSAM_SSH_TC_KIP = 0x0e,
  11372. + SSAM_SSH_TC_EXT = 0x0f,
  11373. + SSAM_SSH_TC_BLD = 0x10,
  11374. + SSAM_SSH_TC_BAS = 0x11, // detachment system (Surface Book 2/3)
  11375. + SSAM_SSH_TC_SEN = 0x12,
  11376. + SSAM_SSH_TC_SRQ = 0x13,
  11377. + SSAM_SSH_TC_MCU = 0x14,
  11378. + SSAM_SSH_TC_HID = 0x15, // generic HID input subsystem
  11379. + SSAM_SSH_TC_TCH = 0x16,
  11380. + SSAM_SSH_TC_BKL = 0x17,
  11381. + SSAM_SSH_TC_TAM = 0x18,
  11382. + SSAM_SSH_TC_ACC = 0x19,
  11383. + SSAM_SSH_TC_UFI = 0x1a,
  11384. + SSAM_SSH_TC_USC = 0x1b,
  11385. + SSAM_SSH_TC_PEN = 0x1c,
  11386. + SSAM_SSH_TC_VID = 0x1d,
  11387. + SSAM_SSH_TC_AUD = 0x1e,
  11388. + SSAM_SSH_TC_SMC = 0x1f,
  11389. + SSAM_SSH_TC_KPD = 0x20,
  11390. + SSAM_SSH_TC_REG = 0x21,
  11391. +};
  11392. +
  11393. +struct ssam_controller;
  11394. +
  11395. +/**
  11396. + * struct ssam_event_flags - Flags for enabling/disabling SAM-over-SSH events
  11397. + * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame.
  11398. + */
  11399. +enum ssam_event_flags {
  11400. + SSAM_EVENT_SEQUENCED = BIT(0),
  11401. +};
  11402. +
  11403. +struct ssam_event {
  11404. + u8 target_category;
  11405. + u8 command_id;
  11406. + u8 instance_id;
  11407. + u8 channel;
  11408. + u16 length;
  11409. + u8 data[];
  11410. +};
  11411. +
  11412. +enum ssam_request_flags {
  11413. + SSAM_REQUEST_HAS_RESPONSE = BIT(0),
  11414. + SSAM_REQUEST_UNSEQUENCED = BIT(1),
  11415. +};
  11416. +
  11417. +struct ssam_request {
  11418. + u8 target_category;
  11419. + u8 command_id;
  11420. + u8 instance_id;
  11421. + u8 channel;
  11422. + u16 flags;
  11423. + u16 length;
  11424. + const u8 *payload;
  11425. +};
  11426. +
  11427. +struct ssam_response {
  11428. + size_t capacity;
  11429. + size_t length;
  11430. + u8 *pointer;
  11431. +};
  11432. +
  11433. +
  11434. +int ssam_client_bind(struct device *client, struct ssam_controller **ctrl);
  11435. +
  11436. +struct device *ssam_controller_device(struct ssam_controller *c);
  11437. +
  11438. +ssize_t ssam_request_write_data(struct ssam_span *buf,
  11439. + struct ssam_controller *ctrl,
  11440. + struct ssam_request *spec);
  11441. +
  11442. +
  11443. +/* -- Synchronous request interface. ---------------------------------------- */
  11444. +
  11445. +struct ssam_request_sync {
  11446. + struct ssh_request base;
  11447. + struct completion comp;
  11448. + struct ssam_response *resp;
  11449. + int status;
  11450. +};
  11451. +
  11452. +int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
  11453. + struct ssam_request_sync **rqst,
  11454. + struct ssam_span *buffer);
  11455. +
  11456. +void ssam_request_sync_init(struct ssam_request_sync *rqst,
  11457. + enum ssam_request_flags flags);
  11458. +
  11459. +static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst,
  11460. + u8 *ptr, size_t len)
  11461. +{
  11462. + ssh_request_set_data(&rqst->base, ptr, len);
  11463. +}
  11464. +
  11465. +static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst,
  11466. + struct ssam_response *resp)
  11467. +{
  11468. + rqst->resp = resp;
  11469. +}
  11470. +
  11471. +int ssam_request_sync_submit(struct ssam_controller *ctrl,
  11472. + struct ssam_request_sync *rqst);
  11473. +
  11474. +static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst)
  11475. +{
  11476. + wait_for_completion(&rqst->comp);
  11477. + return rqst->status;
  11478. +}
  11479. +
  11480. +int ssam_request_sync(struct ssam_controller *ctrl, struct ssam_request *spec,
  11481. + struct ssam_response *rsp);
  11482. +
  11483. +int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
  11484. + struct ssam_request *spec,
  11485. + struct ssam_response *rsp,
  11486. + struct ssam_span *buf);
  11487. +
  11488. +
  11489. +#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len) \
  11490. + ({ \
  11491. + u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)]; \
  11492. + struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) }; \
  11493. + int __status; \
  11494. + \
  11495. + /* ensure input does not overflow buffer */ \
  11496. + if ((rqst)->length <= payload_len) { \
  11497. + __status = ssam_request_sync_with_buffer( \
  11498. + ctrl, rqst, rsp, &__buf); \
  11499. + } else { \
  11500. + __status = -EINVAL; \
  11501. + } \
  11502. + \
  11503. + __status; \
  11504. + })
  11505. +
  11506. +
  11507. +struct ssam_request_spec {
  11508. + u8 target_category;
  11509. + u8 command_id;
  11510. + u8 instance_id;
  11511. + u8 channel;
  11512. + u8 flags;
  11513. +};
  11514. +
  11515. +struct ssam_request_spec_md {
  11516. + u8 target_category;
  11517. + u8 command_id;
  11518. + u8 flags;
  11519. +};
  11520. +
  11521. +#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \
  11522. + int name(struct ssam_controller *ctrl) \
  11523. + { \
  11524. + struct ssam_request_spec s = (struct ssam_request_spec)spec; \
  11525. + struct ssam_request rqst; \
  11526. + \
  11527. + rqst.target_category = s.target_category; \
  11528. + rqst.command_id = s.command_id; \
  11529. + rqst.instance_id = s.instance_id; \
  11530. + rqst.channel = s.channel; \
  11531. + rqst.flags = s.flags; \
  11532. + rqst.length = 0; \
  11533. + rqst.payload = NULL; \
  11534. + \
  11535. + return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
  11536. + }
  11537. +
  11538. +#define SSAM_DEFINE_SYNC_REQUEST_W(name, wtype, spec...) \
  11539. + int name(struct ssam_controller *ctrl, const wtype *in) \
  11540. + { \
  11541. + struct ssam_request_spec s = (struct ssam_request_spec)spec; \
  11542. + struct ssam_request rqst; \
  11543. + \
  11544. + rqst.target_category = s.target_category; \
  11545. + rqst.command_id = s.command_id; \
  11546. + rqst.instance_id = s.instance_id; \
  11547. + rqst.channel = s.channel; \
  11548. + rqst.flags = s.flags; \
  11549. + rqst.length = sizeof(wtype); \
  11550. + rqst.payload = (u8 *)in; \
  11551. + \
  11552. + return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
  11553. + sizeof(wtype)); \
  11554. + }
  11555. +
  11556. +#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \
  11557. + int name(struct ssam_controller *ctrl, rtype *out) \
  11558. + { \
  11559. + struct ssam_request_spec s = (struct ssam_request_spec)spec; \
  11560. + struct ssam_request rqst; \
  11561. + struct ssam_response rsp; \
  11562. + int status; \
  11563. + \
  11564. + rqst.target_category = s.target_category; \
  11565. + rqst.command_id = s.command_id; \
  11566. + rqst.instance_id = s.instance_id; \
  11567. + rqst.channel = s.channel; \
  11568. + rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
  11569. + rqst.length = 0; \
  11570. + rqst.payload = NULL; \
  11571. + \
  11572. + rsp.capacity = sizeof(rtype); \
  11573. + rsp.length = 0; \
  11574. + rsp.pointer = (u8 *)out; \
  11575. + \
  11576. + status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
  11577. + if (status) \
  11578. + return status; \
  11579. + \
  11580. + if (rsp.length != sizeof(rtype)) { \
  11581. + struct device *dev = ssam_controller_device(ctrl); \
  11582. + dev_err(dev, "rqst: invalid response length, expected %zu, got %zu" \
  11583. + " (tc: 0x%02x, cid: 0x%02x)", sizeof(rtype), \
  11584. + rsp.length, rqst.target_category, \
  11585. + rqst.command_id); \
  11586. + return -EIO; \
  11587. + } \
  11588. + \
  11589. + return 0; \
  11590. + }
  11591. +
  11592. +#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, wtype, spec...) \
  11593. + int name(struct ssam_controller *ctrl, u8 chn, u8 iid, const wtype *in) \
  11594. + { \
  11595. + struct ssam_request_spec_md s \
  11596. + = (struct ssam_request_spec_md)spec; \
  11597. + struct ssam_request rqst; \
  11598. + \
  11599. + rqst.target_category = s.target_category; \
  11600. + rqst.command_id = s.command_id; \
  11601. + rqst.instance_id = iid; \
  11602. + rqst.channel = chn; \
  11603. + rqst.flags = s.flags; \
  11604. + rqst.length = sizeof(wtype); \
  11605. + rqst.payload = (u8 *)in; \
  11606. + \
  11607. + return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
  11608. + sizeof(wtype)); \
  11609. + }
  11610. +
  11611. +#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) \
  11612. + int name(struct ssam_controller *ctrl, u8 chn, u8 iid, rtype *out) \
  11613. + { \
  11614. + struct ssam_request_spec_md s \
  11615. + = (struct ssam_request_spec_md)spec; \
  11616. + struct ssam_request rqst; \
  11617. + struct ssam_response rsp; \
  11618. + int status; \
  11619. + \
  11620. + rqst.target_category = s.target_category; \
  11621. + rqst.command_id = s.command_id; \
  11622. + rqst.instance_id = iid; \
  11623. + rqst.channel = chn; \
  11624. + rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
  11625. + rqst.length = 0; \
  11626. + rqst.payload = NULL; \
  11627. + \
  11628. + rsp.capacity = sizeof(rtype); \
  11629. + rsp.length = 0; \
  11630. + rsp.pointer = (u8 *)out; \
  11631. + \
  11632. + status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
  11633. + if (status) \
  11634. + return status; \
  11635. + \
  11636. + if (rsp.length != sizeof(rtype)) { \
  11637. + struct device *dev = ssam_controller_device(ctrl); \
  11638. + dev_err(dev, "rqst: invalid response length, expected %zu, got %zu" \
  11639. + " (tc: 0x%02x, cid: 0x%02x)", sizeof(rtype), \
  11640. + rsp.length, rqst.target_category, \
  11641. + rqst.command_id); \
  11642. + return -EIO; \
  11643. + } \
  11644. + \
  11645. + return 0; \
  11646. + }
  11647. +
  11648. +
  11649. +/* -- Event notifier/callbacks. --------------------------------------------- */
  11650. +
  11651. +#define SSAM_NOTIF_STATE_SHIFT 2
  11652. +#define SSAM_NOTIF_STATE_MASK ((1 << SSAM_NOTIF_STATE_SHIFT) - 1)
  11653. +
  11654. +#define SSAM_NOTIF_HANDLED BIT(0)
  11655. +#define SSAM_NOTIF_STOP BIT(1)
  11656. +
  11657. +
  11658. +struct ssam_notifier_block;
  11659. +
  11660. +typedef u32 (*ssam_notifier_fn_t)(struct ssam_notifier_block *nb,
  11661. + const struct ssam_event *event);
  11662. +
  11663. +struct ssam_notifier_block {
  11664. + struct ssam_notifier_block __rcu *next;
  11665. + ssam_notifier_fn_t fn;
  11666. + int priority;
  11667. +};
  11668. +
  11669. +
  11670. +static inline u32 ssam_notifier_from_errno(int err)
  11671. +{
  11672. + if (WARN_ON(err > 0) || err == 0)
  11673. + return 0;
  11674. + else
  11675. + return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP;
  11676. +}
  11677. +
  11678. +static inline int ssam_notifier_to_errno(u32 ret)
  11679. +{
  11680. + return -(ret >> SSAM_NOTIF_STATE_SHIFT);
  11681. +}
  11682. +
  11683. +
  11684. +/* -- Event/notification registry. ------------------------------------------ */
  11685. +
  11686. +struct ssam_event_registry {
  11687. + u8 target_category;
  11688. + u8 channel;
  11689. + u8 cid_enable;
  11690. + u8 cid_disable;
  11691. +};
  11692. +
  11693. +struct ssam_event_id {
  11694. + u8 target_category;
  11695. + u8 instance;
  11696. +};
  11697. +
  11698. +
  11699. +#define SSAM_EVENT_REGISTRY(tc, chn, cid_en, cid_dis) \
  11700. + ((struct ssam_event_registry) { \
  11701. + .target_category = (tc), \
  11702. + .channel = (chn), \
  11703. + .cid_enable = (cid_en), \
  11704. + .cid_disable = (cid_dis), \
  11705. + })
  11706. +
  11707. +#define SSAM_EVENT_ID(tc, iid) \
  11708. + ((struct ssam_event_id) { \
  11709. + .target_category = tc, \
  11710. + .instance = iid, \
  11711. + })
  11712. +
  11713. +
  11714. +#define SSAM_EVENT_REGISTRY_SAM \
  11715. + SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c)
  11716. +
  11717. +#define SSAM_EVENT_REGISTRY_KIP \
  11718. + SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
  11719. +
  11720. +#define SSAM_EVENT_REGISTRY_REG \
  11721. + SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
  11722. +
  11723. +
  11724. +struct ssam_event_notifier {
  11725. + struct ssam_notifier_block base;
  11726. +
  11727. + struct {
  11728. + struct ssam_event_registry reg;
  11729. + struct ssam_event_id id;
  11730. + u8 flags;
  11731. + } event;
  11732. +};
  11733. +
  11734. +int ssam_notifier_register(struct ssam_controller *ctrl,
  11735. + struct ssam_event_notifier *n);
  11736. +
  11737. +int ssam_notifier_unregister(struct ssam_controller *ctrl,
  11738. + struct ssam_event_notifier *n);
  11739. +
  11740. +#endif /* _SURFACE_SAM_SSH_H */
  11741. diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h b/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h
  11742. new file mode 100644
  11743. index 0000000000000..8ea9a2fc99d7e
  11744. --- /dev/null
  11745. +++ b/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h
  11746. @@ -0,0 +1,587 @@
  11747. +#undef TRACE_SYSTEM
  11748. +#define TRACE_SYSTEM surface_sam_ssh
  11749. +
  11750. +#if !defined(_SURFACE_SAM_SSH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
  11751. +#define _SURFACE_SAM_SSH_TRACE_H
  11752. +
  11753. +#include <linux/tracepoint.h>
  11754. +
  11755. +#include "surface_sam_ssh.h"
  11756. +
  11757. +
  11758. +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ);
  11759. +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ);
  11760. +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK);
  11761. +TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK);
  11762. +
  11763. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT);
  11764. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT);
  11765. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT);
  11766. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT);
  11767. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT);
  11768. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT);
  11769. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT);
  11770. +TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT);
  11771. +
  11772. +TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT);
  11773. +TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT);
  11774. +TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT);
  11775. +
  11776. +TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_SF_MASK);
  11777. +TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_TY_MASK);
  11778. +
  11779. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT);
  11780. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT);
  11781. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT);
  11782. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT);
  11783. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT);
  11784. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT);
  11785. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT);
  11786. +TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT);
  11787. +
  11788. +TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT);
  11789. +TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
  11790. +
  11791. +TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK);
  11792. +TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK);
  11793. +
  11794. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM);
  11795. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT);
  11796. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP);
  11797. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC);
  11798. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN);
  11799. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM);
  11800. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG);
  11801. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD);
  11802. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU);
  11803. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI);
  11804. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC);
  11805. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL);
  11806. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL);
  11807. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP);
  11808. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT);
  11809. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD);
  11810. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS);
  11811. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN);
  11812. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ);
  11813. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU);
  11814. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
  11815. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
  11816. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
  11817. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
  11818. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC);
  11819. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
  11820. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
  11821. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
  11822. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID);
  11823. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
  11824. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
  11825. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
  11826. +TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
  11827. +
  11828. +
  11829. +#define SSAM_PTR_UID_LEN 9
  11830. +#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1)
  11831. +#define SSAM_SEQ_NOT_APPLICABLE ((u16)-1)
  11832. +#define SSAM_RQID_NOT_APPLICABLE ((u32)-1)
  11833. +#define SSAM_SSH_TC_NOT_APPLICABLE 0
  11834. +
  11835. +
  11836. +#ifndef _SURFACE_SAM_SSH_TRACE_HELPERS
  11837. +#define _SURFACE_SAM_SSH_TRACE_HELPERS
  11838. +
  11839. +static inline void ssam_trace_ptr_uid(const void *ptr, char* uid_str)
  11840. +{
  11841. + char buf[2 * sizeof(void*) + 1];
  11842. +
  11843. + snprintf(buf, ARRAY_SIZE(buf), "%p", ptr);
  11844. + memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN],
  11845. + SSAM_PTR_UID_LEN);
  11846. +}
  11847. +
  11848. +static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p)
  11849. +{
  11850. + if (!p->data.ptr || p->data.len < SSH_MESSAGE_LENGTH(0))
  11851. + return SSAM_SEQ_NOT_APPLICABLE;
  11852. +
  11853. + return p->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
  11854. +}
  11855. +
  11856. +static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p)
  11857. +{
  11858. + if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
  11859. + return SSAM_RQID_NOT_APPLICABLE;
  11860. +
  11861. + return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(rqid)]);
  11862. +}
  11863. +
  11864. +static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
  11865. +{
  11866. + if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
  11867. + return SSAM_SSH_TC_NOT_APPLICABLE;
  11868. +
  11869. + return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tc)]);
  11870. +}
  11871. +
  11872. +#endif /* _SURFACE_SAM_SSH_TRACE_HELPERS */
  11873. +
  11874. +#define ssam_trace_get_command_field_u8(packet, field) \
  11875. + ((!(packet) || (packet)->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) \
  11876. + ? 0 : (packet)->data.ptr[SSH_MSGOFFSET_COMMAND(field)])
  11877. +
  11878. +#define ssam_show_generic_u8_field(value) \
  11879. + __print_symbolic(value, \
  11880. + { SSAM_U8_FIELD_NOT_APPLICABLE, "N/A" } \
  11881. + )
  11882. +
  11883. +
  11884. +#define ssam_show_frame_type(ty) \
  11885. + __print_symbolic(ty, \
  11886. + { SSH_FRAME_TYPE_DATA_SEQ, "DSEQ" }, \
  11887. + { SSH_FRAME_TYPE_DATA_NSQ, "DNSQ" }, \
  11888. + { SSH_FRAME_TYPE_ACK, "ACK" }, \
  11889. + { SSH_FRAME_TYPE_NAK, "NAK" } \
  11890. + )
  11891. +
  11892. +#define ssam_show_packet_type(type) \
  11893. + __print_flags(type & SSH_PACKET_FLAGS_TY_MASK, "", \
  11894. + { BIT(SSH_PACKET_TY_FLUSH_BIT), "F" }, \
  11895. + { BIT(SSH_PACKET_TY_SEQUENCED_BIT), "S" }, \
  11896. + { BIT(SSH_PACKET_TY_BLOCKING_BIT), "B" } \
  11897. + )
  11898. +
  11899. +#define ssam_show_packet_state(state) \
  11900. + __print_flags(state & SSH_PACKET_FLAGS_SF_MASK, "", \
  11901. + { BIT(SSH_PACKET_SF_LOCKED_BIT), "L" }, \
  11902. + { BIT(SSH_PACKET_SF_QUEUED_BIT), "Q" }, \
  11903. + { BIT(SSH_PACKET_SF_PENDING_BIT), "P" }, \
  11904. + { BIT(SSH_PACKET_SF_TRANSMITTING_BIT), "S" }, \
  11905. + { BIT(SSH_PACKET_SF_TRANSMITTED_BIT), "T" }, \
  11906. + { BIT(SSH_PACKET_SF_ACKED_BIT), "A" }, \
  11907. + { BIT(SSH_PACKET_SF_CANCELED_BIT), "C" }, \
  11908. + { BIT(SSH_PACKET_SF_COMPLETED_BIT), "F" } \
  11909. + )
  11910. +
  11911. +#define ssam_show_packet_seq(seq) \
  11912. + __print_symbolic(seq, \
  11913. + { SSAM_SEQ_NOT_APPLICABLE, "N/A" } \
  11914. + )
  11915. +
  11916. +
  11917. +#define ssam_show_request_type(flags) \
  11918. + __print_flags(flags & SSH_REQUEST_FLAGS_TY_MASK, "", \
  11919. + { BIT(SSH_REQUEST_TY_FLUSH_BIT), "F" }, \
  11920. + { BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT), "R" } \
  11921. + )
  11922. +
  11923. +#define ssam_show_request_state(flags) \
  11924. + __print_flags(flags & SSH_REQUEST_FLAGS_SF_MASK, "", \
  11925. + { BIT(SSH_REQUEST_SF_LOCKED_BIT), "L" }, \
  11926. + { BIT(SSH_REQUEST_SF_QUEUED_BIT), "Q" }, \
  11927. + { BIT(SSH_REQUEST_SF_PENDING_BIT), "P" }, \
  11928. + { BIT(SSH_REQUEST_SF_TRANSMITTING_BIT), "S" }, \
  11929. + { BIT(SSH_REQUEST_SF_TRANSMITTED_BIT), "T" }, \
  11930. + { BIT(SSH_REQUEST_SF_RSPRCVD_BIT), "A" }, \
  11931. + { BIT(SSH_REQUEST_SF_CANCELED_BIT), "C" }, \
  11932. + { BIT(SSH_REQUEST_SF_COMPLETED_BIT), "F" } \
  11933. + )
  11934. +
  11935. +#define ssam_show_request_id(rqid) \
  11936. + __print_symbolic(rqid, \
  11937. + { SSAM_RQID_NOT_APPLICABLE, "N/A" } \
  11938. + )
  11939. +
  11940. +#define ssam_show_ssh_tc(rqid) \
  11941. + __print_symbolic(rqid, \
  11942. + { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
  11943. + { SSAM_SSH_TC_SAM, "SAM" }, \
  11944. + { SSAM_SSH_TC_BAT, "BAT" }, \
  11945. + { SSAM_SSH_TC_TMP, "TMP" }, \
  11946. + { SSAM_SSH_TC_PMC, "PMC" }, \
  11947. + { SSAM_SSH_TC_FAN, "FAN" }, \
  11948. + { SSAM_SSH_TC_PoM, "PoM" }, \
  11949. + { SSAM_SSH_TC_DBG, "DBG" }, \
  11950. + { SSAM_SSH_TC_KBD, "KBD" }, \
  11951. + { SSAM_SSH_TC_FWU, "FWU" }, \
  11952. + { SSAM_SSH_TC_UNI, "UNI" }, \
  11953. + { SSAM_SSH_TC_LPC, "LPC" }, \
  11954. + { SSAM_SSH_TC_TCL, "TCL" }, \
  11955. + { SSAM_SSH_TC_SFL, "SFL" }, \
  11956. + { SSAM_SSH_TC_KIP, "KIP" }, \
  11957. + { SSAM_SSH_TC_EXT, "EXT" }, \
  11958. + { SSAM_SSH_TC_BLD, "BLD" }, \
  11959. + { SSAM_SSH_TC_BAS, "BAS" }, \
  11960. + { SSAM_SSH_TC_SEN, "SEN" }, \
  11961. + { SSAM_SSH_TC_SRQ, "SRQ" }, \
  11962. + { SSAM_SSH_TC_MCU, "MCU" }, \
  11963. + { SSAM_SSH_TC_HID, "HID" }, \
  11964. + { SSAM_SSH_TC_TCH, "TCH" }, \
  11965. + { SSAM_SSH_TC_BKL, "BKL" }, \
  11966. + { SSAM_SSH_TC_TAM, "TAM" }, \
  11967. + { SSAM_SSH_TC_ACC, "ACC" }, \
  11968. + { SSAM_SSH_TC_UFI, "UFI" }, \
  11969. + { SSAM_SSH_TC_USC, "USC" }, \
  11970. + { SSAM_SSH_TC_PEN, "PEN" }, \
  11971. + { SSAM_SSH_TC_VID, "VID" }, \
  11972. + { SSAM_SSH_TC_AUD, "AUD" }, \
  11973. + { SSAM_SSH_TC_SMC, "SMC" }, \
  11974. + { SSAM_SSH_TC_KPD, "KPD" }, \
  11975. + { SSAM_SSH_TC_REG, "REG" } \
  11976. + )
  11977. +
  11978. +
  11979. +DECLARE_EVENT_CLASS(ssam_frame_class,
  11980. + TP_PROTO(const struct ssh_frame *frame),
  11981. +
  11982. + TP_ARGS(frame),
  11983. +
  11984. + TP_STRUCT__entry(
  11985. + __field(u8, type)
  11986. + __field(u8, seq)
  11987. + __field(u16, len)
  11988. + ),
  11989. +
  11990. + TP_fast_assign(
  11991. + __entry->type = frame->type;
  11992. + __entry->seq = frame->seq;
  11993. + __entry->len = get_unaligned_le16(&frame->len);
  11994. + ),
  11995. +
  11996. + TP_printk("ty=%s, seq=0x%02x, len=%u",
  11997. + ssam_show_frame_type(__entry->type),
  11998. + __entry->seq,
  11999. + __entry->len
  12000. + )
  12001. +);
  12002. +
  12003. +#define DEFINE_SSAM_FRAME_EVENT(name) \
  12004. + DEFINE_EVENT(ssam_frame_class, ssam_##name, \
  12005. + TP_PROTO(const struct ssh_frame *frame), \
  12006. + TP_ARGS(frame) \
  12007. + )
  12008. +
  12009. +
  12010. +DECLARE_EVENT_CLASS(ssam_command_class,
  12011. + TP_PROTO(const struct ssh_command *cmd, u16 len),
  12012. +
  12013. + TP_ARGS(cmd, len),
  12014. +
  12015. + TP_STRUCT__entry(
  12016. + __field(u16, rqid)
  12017. + __field(u16, len)
  12018. + __field(u8, tc)
  12019. + __field(u8, cid)
  12020. + __field(u8, iid)
  12021. + ),
  12022. +
  12023. + TP_fast_assign(
  12024. + __entry->rqid = get_unaligned_le16(&cmd->rqid);
  12025. + __entry->tc = cmd->tc;
  12026. + __entry->cid = cmd->cid;
  12027. + __entry->iid = cmd->iid;
  12028. + __entry->len = len;
  12029. + ),
  12030. +
  12031. + TP_printk("rqid=0x%04x, tc=%s, cid=0x%02x, iid=0x%02x, len=%u",
  12032. + __entry->rqid,
  12033. + ssam_show_ssh_tc(__entry->tc),
  12034. + __entry->cid,
  12035. + __entry->iid,
  12036. + __entry->len
  12037. + )
  12038. +);
  12039. +
  12040. +#define DEFINE_SSAM_COMMAND_EVENT(name) \
  12041. + DEFINE_EVENT(ssam_command_class, ssam_##name, \
  12042. + TP_PROTO(const struct ssh_command *cmd, u16 len), \
  12043. + TP_ARGS(cmd, len) \
  12044. + )
  12045. +
  12046. +
  12047. +DECLARE_EVENT_CLASS(ssam_packet_class,
  12048. + TP_PROTO(const struct ssh_packet *packet),
  12049. +
  12050. + TP_ARGS(packet),
  12051. +
  12052. + TP_STRUCT__entry(
  12053. + __array(char, uid, SSAM_PTR_UID_LEN)
  12054. + __field(u8, priority)
  12055. + __field(u16, length)
  12056. + __field(unsigned long, state)
  12057. + __field(u16, seq)
  12058. + ),
  12059. +
  12060. + TP_fast_assign(
  12061. + ssam_trace_ptr_uid(packet, __entry->uid);
  12062. + __entry->priority = READ_ONCE(packet->priority);
  12063. + __entry->length = packet->data.len;
  12064. + __entry->state = READ_ONCE(packet->state);
  12065. + __entry->seq = ssam_trace_get_packet_seq(packet);
  12066. + ),
  12067. +
  12068. + TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s",
  12069. + __entry->uid,
  12070. + ssam_show_packet_seq(__entry->seq),
  12071. + ssam_show_packet_type(__entry->state),
  12072. + __entry->priority,
  12073. + __entry->length,
  12074. + ssam_show_packet_state(__entry->state)
  12075. + )
  12076. +);
  12077. +
  12078. +#define DEFINE_SSAM_PACKET_EVENT(name) \
  12079. + DEFINE_EVENT(ssam_packet_class, ssam_##name, \
  12080. + TP_PROTO(const struct ssh_packet *packet), \
  12081. + TP_ARGS(packet) \
  12082. + )
  12083. +
  12084. +
  12085. +DECLARE_EVENT_CLASS(ssam_packet_status_class,
  12086. + TP_PROTO(const struct ssh_packet *packet, int status),
  12087. +
  12088. + TP_ARGS(packet, status),
  12089. +
  12090. + TP_STRUCT__entry(
  12091. + __array(char, uid, SSAM_PTR_UID_LEN)
  12092. + __field(u8, priority)
  12093. + __field(u16, length)
  12094. + __field(unsigned long, state)
  12095. + __field(u16, seq)
  12096. + __field(int, status)
  12097. + ),
  12098. +
  12099. + TP_fast_assign(
  12100. + ssam_trace_ptr_uid(packet, __entry->uid);
  12101. + __entry->priority = READ_ONCE(packet->priority);
  12102. + __entry->length = packet->data.len;
  12103. + __entry->state = READ_ONCE(packet->state);
  12104. + __entry->seq = ssam_trace_get_packet_seq(packet);
  12105. + __entry->status = status;
  12106. + ),
  12107. +
  12108. + TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s, status=%d",
  12109. + __entry->uid,
  12110. + ssam_show_packet_seq(__entry->seq),
  12111. + ssam_show_packet_type(__entry->state),
  12112. + __entry->priority,
  12113. + __entry->length,
  12114. + ssam_show_packet_state(__entry->state),
  12115. + __entry->status
  12116. + )
  12117. +);
  12118. +
  12119. +#define DEFINE_SSAM_PACKET_STATUS_EVENT(name) \
  12120. + DEFINE_EVENT(ssam_packet_status_class, ssam_##name, \
  12121. + TP_PROTO(const struct ssh_packet *packet, int status), \
  12122. + TP_ARGS(packet, status) \
  12123. + )
  12124. +
  12125. +
  12126. +DECLARE_EVENT_CLASS(ssam_request_class,
  12127. + TP_PROTO(const struct ssh_request *request),
  12128. +
  12129. + TP_ARGS(request),
  12130. +
  12131. + TP_STRUCT__entry(
  12132. + __array(char, uid, SSAM_PTR_UID_LEN)
  12133. + __field(unsigned long, state)
  12134. + __field(u32, rqid)
  12135. + __field(u8, tc)
  12136. + __field(u16, cid)
  12137. + __field(u16, iid)
  12138. + ),
  12139. +
  12140. + TP_fast_assign(
  12141. + const struct ssh_packet *p = &request->packet;
  12142. +
  12143. + // use packet for UID so we can match requests to packets
  12144. + ssam_trace_ptr_uid(p, __entry->uid);
  12145. + __entry->state = READ_ONCE(request->state);
  12146. + __entry->rqid = ssam_trace_get_request_id(p);
  12147. + __entry->tc = ssam_trace_get_request_tc(p);
  12148. + __entry->cid = ssam_trace_get_command_field_u8(p, cid);
  12149. + __entry->iid = ssam_trace_get_command_field_u8(p, iid);
  12150. + ),
  12151. +
  12152. + TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s",
  12153. + __entry->uid,
  12154. + ssam_show_request_id(__entry->rqid),
  12155. + ssam_show_request_type(__entry->state),
  12156. + ssam_show_request_state(__entry->state),
  12157. + ssam_show_ssh_tc(__entry->tc),
  12158. + ssam_show_generic_u8_field(__entry->cid),
  12159. + ssam_show_generic_u8_field(__entry->iid)
  12160. + )
  12161. +);
  12162. +
  12163. +#define DEFINE_SSAM_REQUEST_EVENT(name) \
  12164. + DEFINE_EVENT(ssam_request_class, ssam_##name, \
  12165. + TP_PROTO(const struct ssh_request *request), \
  12166. + TP_ARGS(request) \
  12167. + )
  12168. +
  12169. +
  12170. +DECLARE_EVENT_CLASS(ssam_request_status_class,
  12171. + TP_PROTO(const struct ssh_request *request, int status),
  12172. +
  12173. + TP_ARGS(request, status),
  12174. +
  12175. + TP_STRUCT__entry(
  12176. + __array(char, uid, SSAM_PTR_UID_LEN)
  12177. + __field(unsigned long, state)
  12178. + __field(u32, rqid)
  12179. + __field(u8, tc)
  12180. + __field(u16, cid)
  12181. + __field(u16, iid)
  12182. + __field(int, status)
  12183. + ),
  12184. +
  12185. + TP_fast_assign(
  12186. + const struct ssh_packet *p = &request->packet;
  12187. +
  12188. + // use packet for UID so we can match requests to packets
  12189. + ssam_trace_ptr_uid(p, __entry->uid);
  12190. + __entry->state = READ_ONCE(request->state);
  12191. + __entry->rqid = ssam_trace_get_request_id(p);
  12192. + __entry->tc = ssam_trace_get_request_tc(p);
  12193. + __entry->cid = ssam_trace_get_command_field_u8(p, cid);
  12194. + __entry->iid = ssam_trace_get_command_field_u8(p, iid);
  12195. + __entry->status = status;
  12196. + ),
  12197. +
  12198. + TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d",
  12199. + __entry->uid,
  12200. + ssam_show_request_id(__entry->rqid),
  12201. + ssam_show_request_type(__entry->state),
  12202. + ssam_show_request_state(__entry->state),
  12203. + ssam_show_ssh_tc(__entry->tc),
  12204. + ssam_show_generic_u8_field(__entry->cid),
  12205. + ssam_show_generic_u8_field(__entry->iid),
  12206. + __entry->status
  12207. + )
  12208. +);
  12209. +
  12210. +#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name) \
  12211. + DEFINE_EVENT(ssam_request_status_class, ssam_##name, \
  12212. + TP_PROTO(const struct ssh_request *request, int status),\
  12213. + TP_ARGS(request, status) \
  12214. + )
  12215. +
  12216. +
  12217. +DECLARE_EVENT_CLASS(ssam_alloc_class,
  12218. + TP_PROTO(void *ptr, size_t len),
  12219. +
  12220. + TP_ARGS(ptr, len),
  12221. +
  12222. + TP_STRUCT__entry(
  12223. + __array(char, uid, SSAM_PTR_UID_LEN)
  12224. + __field(size_t, len)
  12225. + ),
  12226. +
  12227. + TP_fast_assign(
  12228. + ssam_trace_ptr_uid(ptr, __entry->uid);
  12229. + __entry->len = len;
  12230. + ),
  12231. +
  12232. + TP_printk("uid=%s, len=%zu", __entry->uid, __entry->len)
  12233. +);
  12234. +
  12235. +#define DEFINE_SSAM_ALLOC_EVENT(name) \
  12236. + DEFINE_EVENT(ssam_alloc_class, ssam_##name, \
  12237. + TP_PROTO(void *ptr, size_t len), \
  12238. + TP_ARGS(ptr, len) \
  12239. + )
  12240. +
  12241. +
  12242. +DECLARE_EVENT_CLASS(ssam_free_class,
  12243. + TP_PROTO(void *ptr),
  12244. +
  12245. + TP_ARGS(ptr),
  12246. +
  12247. + TP_STRUCT__entry(
  12248. + __array(char, uid, SSAM_PTR_UID_LEN)
  12249. + __field(size_t, len)
  12250. + ),
  12251. +
  12252. + TP_fast_assign(
  12253. + ssam_trace_ptr_uid(ptr, __entry->uid);
  12254. + ),
  12255. +
  12256. + TP_printk("uid=%s", __entry->uid)
  12257. +);
  12258. +
  12259. +#define DEFINE_SSAM_FREE_EVENT(name) \
  12260. + DEFINE_EVENT(ssam_free_class, ssam_##name, \
  12261. + TP_PROTO(void *ptr), \
  12262. + TP_ARGS(ptr) \
  12263. + )
  12264. +
  12265. +
  12266. +DECLARE_EVENT_CLASS(ssam_generic_uint_class,
  12267. + TP_PROTO(const char* property, unsigned int value),
  12268. +
  12269. + TP_ARGS(property, value),
  12270. +
  12271. + TP_STRUCT__entry(
  12272. + __string(property, property)
  12273. + __field(unsigned int, value)
  12274. + ),
  12275. +
  12276. + TP_fast_assign(
  12277. + __assign_str(property, property);
  12278. + __entry->value = value;
  12279. + ),
  12280. +
  12281. + TP_printk("%s=%u", __get_str(property), __entry->value)
  12282. +);
  12283. +
  12284. +#define DEFINE_SSAM_GENERIC_UINT_EVENT(name) \
  12285. + DEFINE_EVENT(ssam_generic_uint_class, ssam_##name, \
  12286. + TP_PROTO(const char* property, unsigned int value), \
  12287. + TP_ARGS(property, value) \
  12288. + )
  12289. +
  12290. +
  12291. +DEFINE_SSAM_FRAME_EVENT(rx_frame_received);
  12292. +DEFINE_SSAM_COMMAND_EVENT(rx_response_received);
  12293. +DEFINE_SSAM_COMMAND_EVENT(rx_event_received);
  12294. +
  12295. +DEFINE_SSAM_PACKET_EVENT(packet_release);
  12296. +DEFINE_SSAM_PACKET_EVENT(packet_submit);
  12297. +DEFINE_SSAM_PACKET_EVENT(packet_resubmit);
  12298. +DEFINE_SSAM_PACKET_EVENT(packet_timeout);
  12299. +DEFINE_SSAM_PACKET_EVENT(packet_cancel);
  12300. +DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete);
  12301. +DEFINE_SSAM_GENERIC_UINT_EVENT(ptl_timeout_reap);
  12302. +
  12303. +DEFINE_SSAM_REQUEST_EVENT(request_submit);
  12304. +DEFINE_SSAM_REQUEST_EVENT(request_timeout);
  12305. +DEFINE_SSAM_REQUEST_EVENT(request_cancel);
  12306. +DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete);
  12307. +DEFINE_SSAM_GENERIC_UINT_EVENT(rtl_timeout_reap);
  12308. +
  12309. +DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet);
  12310. +DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet);
  12311. +DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet);
  12312. +DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write);
  12313. +DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data);
  12314. +DEFINE_SSAM_GENERIC_UINT_EVENT(ei_rx_corrupt_syn);
  12315. +DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data);
  12316. +DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response);
  12317. +
  12318. +DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc);
  12319. +DEFINE_SSAM_FREE_EVENT(ctrl_packet_free);
  12320. +
  12321. +DEFINE_SSAM_ALLOC_EVENT(event_item_alloc);
  12322. +DEFINE_SSAM_FREE_EVENT(event_item_free);
  12323. +
  12324. +#endif /* _SURFACE_SAM_SSH_TRACE_H */
  12325. +
  12326. +/* This part must be outside protection */
  12327. +#undef TRACE_INCLUDE_PATH
  12328. +#undef TRACE_INCLUDE_FILE
  12329. +
  12330. +#define TRACE_INCLUDE_PATH .
  12331. +#define TRACE_INCLUDE_FILE surface_sam_ssh_trace
  12332. +
  12333. +#include <trace/define_trace.h>
  12334. diff --git a/drivers/platform/x86/surface_sam/surface_sam_vhf.c b/drivers/platform/x86/surface_sam/surface_sam_vhf.c
  12335. new file mode 100644
  12336. index 0000000000000..8455f952c2724
  12337. --- /dev/null
  12338. +++ b/drivers/platform/x86/surface_sam/surface_sam_vhf.c
  12339. @@ -0,0 +1,266 @@
  12340. +// SPDX-License-Identifier: GPL-2.0-or-later
  12341. +/*
  12342. + * Virtual HID Framework (VHF) driver for input events via SAM.
  12343. + * Used for keyboard input events on the Surface Laptops.
  12344. + */
  12345. +
  12346. +#include <linux/acpi.h>
  12347. +#include <linux/hid.h>
  12348. +#include <linux/input.h>
  12349. +#include <linux/platform_device.h>
  12350. +#include <linux/types.h>
  12351. +
  12352. +#include "surface_sam_ssh.h"
  12353. +
  12354. +
  12355. +#define USB_VENDOR_ID_MICROSOFT 0x045e
  12356. +#define USB_DEVICE_ID_MS_VHF 0xf001
  12357. +
  12358. +#define VHF_INPUT_NAME "Microsoft Virtual HID Framework Device"
  12359. +
  12360. +
  12361. +struct vhf_drvdata {
  12362. + struct platform_device *dev;
  12363. + struct ssam_controller *ctrl;
  12364. +
  12365. + struct ssam_event_notifier notif;
  12366. +
  12367. + struct hid_device *hid;
  12368. +};
  12369. +
  12370. +
  12371. +/*
  12372. + * These report descriptors have been extracted from a Surface Book 2.
  12374. + * They seem to be similar enough to be usable on the Surface Laptop.
  12374. + */
  12375. +static const u8 vhf_hid_desc[] = {
  12376. + // keyboard descriptor (event command ID 0x03)
  12377. + 0x05, 0x01, /* Usage Page (Desktop), */
  12378. + 0x09, 0x06, /* Usage (Keyboard), */
  12379. + 0xA1, 0x01, /* Collection (Application), */
  12380. + 0x85, 0x01, /* Report ID (1), */
  12381. + 0x15, 0x00, /* Logical Minimum (0), */
  12382. + 0x25, 0x01, /* Logical Maximum (1), */
  12383. + 0x75, 0x01, /* Report Size (1), */
  12384. + 0x95, 0x08, /* Report Count (8), */
  12385. + 0x05, 0x07, /* Usage Page (Keyboard), */
  12386. + 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */
  12387. + 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */
  12388. + 0x81, 0x02, /* Input (Variable), */
  12389. + 0x75, 0x08, /* Report Size (8), */
  12390. + 0x95, 0x0A, /* Report Count (10), */
  12391. + 0x19, 0x00, /* Usage Minimum (None), */
  12392. + 0x29, 0x91, /* Usage Maximum (KB LANG2), */
  12393. + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
  12394. + 0x81, 0x00, /* Input, */
  12395. + 0x05, 0x0C, /* Usage Page (Consumer), */
  12396. + 0x0A, 0xC0, 0x02, /* Usage (02C0h), */
  12397. + 0xA1, 0x02, /* Collection (Logical), */
  12398. + 0x1A, 0xC1, 0x02, /* Usage Minimum (02C1h), */
  12399. + 0x2A, 0xC6, 0x02, /* Usage Maximum (02C6h), */
  12400. + 0x95, 0x06, /* Report Count (6), */
  12401. + 0xB1, 0x03, /* Feature (Constant, Variable), */
  12402. + 0xC0, /* End Collection, */
  12403. + 0x05, 0x08, /* Usage Page (LED), */
  12404. + 0x19, 0x01, /* Usage Minimum (01h), */
  12405. + 0x29, 0x03, /* Usage Maximum (03h), */
  12406. + 0x75, 0x01, /* Report Size (1), */
  12407. + 0x95, 0x03, /* Report Count (3), */
  12408. + 0x25, 0x01, /* Logical Maximum (1), */
  12409. + 0x91, 0x02, /* Output (Variable), */
  12410. + 0x95, 0x05, /* Report Count (5), */
  12411. + 0x91, 0x01, /* Output (Constant), */
  12412. + 0xC0, /* End Collection, */
  12413. +
  12414. + // media key descriptor (event command ID 0x04)
  12415. + 0x05, 0x0C, /* Usage Page (Consumer), */
  12416. + 0x09, 0x01, /* Usage (Consumer Control), */
  12417. + 0xA1, 0x01, /* Collection (Application), */
  12418. + 0x85, 0x03, /* Report ID (3), */
  12419. + 0x75, 0x10, /* Report Size (16), */
  12420. + 0x15, 0x00, /* Logical Minimum (0), */
  12421. + 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
  12422. + 0x19, 0x00, /* Usage Minimum (00h), */
  12423. + 0x2A, 0xFF, 0x03, /* Usage Maximum (03FFh), */
  12424. + 0x81, 0x00, /* Input, */
  12425. + 0xC0, /* End Collection, */
  12426. +};
  12427. +
  12428. +
  12429. +static int vhf_hid_start(struct hid_device *hid)
  12430. +{
  12431. + hid_dbg(hid, "%s\n", __func__);
  12432. + return 0;
  12433. +}
  12434. +
  12435. +static void vhf_hid_stop(struct hid_device *hid)
  12436. +{
  12437. + hid_dbg(hid, "%s\n", __func__);
  12438. +}
  12439. +
  12440. +static int vhf_hid_open(struct hid_device *hid)
  12441. +{
  12442. + hid_dbg(hid, "%s\n", __func__);
  12443. + return 0;
  12444. +}
  12445. +
  12446. +static void vhf_hid_close(struct hid_device *hid)
  12447. +{
  12448. + hid_dbg(hid, "%s\n", __func__);
  12449. +}
  12450. +
  12451. +static int vhf_hid_parse(struct hid_device *hid)
  12452. +{
  12453. + return hid_parse_report(hid, (u8 *)vhf_hid_desc, ARRAY_SIZE(vhf_hid_desc));
  12454. +}
  12455. +
  12456. +static int vhf_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
  12457. + u8 *buf, size_t len, unsigned char rtype,
  12458. + int reqtype)
  12459. +{
  12460. + hid_dbg(hid, "%s\n", __func__);
  12461. + return 0;
  12462. +}
  12463. +
  12464. +static int vhf_hid_output_report(struct hid_device *hid, u8 *buf, size_t len)
  12465. +{
  12466. + hid_dbg(hid, "%s\n", __func__);
  12467. + print_hex_dump_debug("report:", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
  12468. +
  12469. + return len;
  12470. +}
  12471. +
  12472. +static struct hid_ll_driver vhf_hid_ll_driver = {
  12473. + .start = vhf_hid_start,
  12474. + .stop = vhf_hid_stop,
  12475. + .open = vhf_hid_open,
  12476. + .close = vhf_hid_close,
  12477. + .parse = vhf_hid_parse,
  12478. + .raw_request = vhf_hid_raw_request,
  12479. + .output_report = vhf_hid_output_report,
  12480. +};
  12481. +
  12482. +
  12483. +static struct hid_device *vhf_create_hid_device(struct platform_device *pdev)
  12484. +{
  12485. + struct hid_device *hid;
  12486. +
  12487. + hid = hid_allocate_device();
  12488. + if (IS_ERR(hid))
  12489. + return hid;
  12490. +
  12491. + hid->dev.parent = &pdev->dev;
  12492. +
  12493. + hid->bus = BUS_VIRTUAL;
  12494. + hid->vendor = USB_VENDOR_ID_MICROSOFT;
  12495. + hid->product = USB_DEVICE_ID_MS_VHF;
  12496. +
  12497. + hid->ll_driver = &vhf_hid_ll_driver;
  12498. +
  12499. + sprintf(hid->name, "%s", VHF_INPUT_NAME);
  12500. +
  12501. + return hid;
  12502. +}
  12503. +
  12504. +static u32 vhf_event_handler(struct ssam_notifier_block *nb, const struct ssam_event *event)
  12505. +{
  12506. + struct vhf_drvdata *drvdata = container_of(nb, struct vhf_drvdata, notif.base);
  12507. + int status;
  12508. +
  12509. + if (event->target_category != 0x08)
  12510. + return 0;
  12511. +
  12512. + if (event->command_id == 0x03 || event->command_id == 0x04) {
  12513. + status = hid_input_report(drvdata->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 1);
  12514. + return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
  12515. + }
  12516. +
  12517. + return 0;
  12518. +}
  12519. +
  12520. +static int surface_sam_vhf_probe(struct platform_device *pdev)
  12521. +{
  12522. + struct ssam_controller *ctrl;
  12523. + struct vhf_drvdata *drvdata;
  12524. + struct hid_device *hid;
  12525. + int status;
  12526. +
  12527. + // add device link to EC
  12528. + status = ssam_client_bind(&pdev->dev, &ctrl);
  12529. + if (status)
  12530. + return status == -ENXIO ? -EPROBE_DEFER : status;
  12531. +
  12532. + drvdata = kzalloc(sizeof(struct vhf_drvdata), GFP_KERNEL);
  12533. + if (!drvdata)
  12534. + return -ENOMEM;
  12535. +
  12536. + hid = vhf_create_hid_device(pdev);
  12537. + if (IS_ERR(hid)) {
  12538. + status = PTR_ERR(hid);
  12539. + goto err_probe_hid;
  12540. + }
  12541. +
  12542. + status = hid_add_device(hid);
  12543. + if (status)
  12544. + goto err_add_hid;
  12545. +
  12546. + drvdata->dev = pdev;
  12547. + drvdata->ctrl = ctrl;
  12548. + drvdata->hid = hid;
  12549. +
  12550. + drvdata->notif.base.priority = 1;
  12551. + drvdata->notif.base.fn = vhf_event_handler;
  12552. + drvdata->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
  12553. + drvdata->notif.event.id.target_category = SSAM_SSH_TC_KBD;
  12554. + drvdata->notif.event.id.instance = 0;
  12555. + drvdata->notif.event.flags = 0;
  12556. +
  12557. + platform_set_drvdata(pdev, drvdata);
  12558. +
  12559. + status = ssam_notifier_register(ctrl, &drvdata->notif);
  12560. + if (status)
  12561. + goto err_add_hid;
  12562. +
  12563. + return 0;
  12564. +
  12565. +err_add_hid:
  12566. + hid_destroy_device(hid);
  12567. + platform_set_drvdata(pdev, NULL);
  12568. +err_probe_hid:
  12569. + kfree(drvdata);
  12570. + return status;
  12571. +}
  12572. +
  12573. +static int surface_sam_vhf_remove(struct platform_device *pdev)
  12574. +{
  12575. + struct vhf_drvdata *drvdata = platform_get_drvdata(pdev);
  12576. +
  12577. + ssam_notifier_unregister(drvdata->ctrl, &drvdata->notif);
  12578. + hid_destroy_device(drvdata->hid);
  12579. + kfree(drvdata);
  12580. +
  12581. + platform_set_drvdata(pdev, NULL);
  12582. + return 0;
  12583. +}
  12584. +
  12585. +
  12586. +static const struct acpi_device_id surface_sam_vhf_match[] = {
  12587. + { "MSHW0096" },
  12588. + { },
  12589. +};
  12590. +MODULE_DEVICE_TABLE(acpi, surface_sam_vhf_match);
  12591. +
  12592. +static struct platform_driver surface_sam_vhf = {
  12593. + .probe = surface_sam_vhf_probe,
  12594. + .remove = surface_sam_vhf_remove,
  12595. + .driver = {
  12596. + .name = "surface_sam_vhf",
  12597. + .acpi_match_table = surface_sam_vhf_match,
  12598. + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  12599. + },
  12600. +};
  12601. +module_platform_driver(surface_sam_vhf);
  12602. +
  12603. +MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
  12604. +MODULE_DESCRIPTION("Virtual HID Framework Driver for 5th Generation Surface Devices");
  12605. +MODULE_LICENSE("GPL");
  12606. --
  12607. 2.28.0