SoftCPU.cpp 119 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531
  1. /*
  2. * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
  3. * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
  4. *
  5. * SPDX-License-Identifier: BSD-2-Clause
  6. */
  7. #include "SoftCPU.h"
  8. #include "Emulator.h"
  9. #include <AK/Assertions.h>
  10. #include <AK/BitCast.h>
  11. #include <AK/Debug.h>
  12. #include <math.h>
  13. #include <stdio.h>
  14. #include <string.h>
  15. #include <unistd.h>
  16. #if defined(__GNUC__) && !defined(__clang__)
  17. # pragma GCC optimize("O3")
  18. #endif
// Bail out of emulation for an unimplemented instruction handler: report the
// handler's name, dump a backtrace of the emulated process, then terminate.
#define TODO_INSN() \
    do { \
        reportln("\n=={}== Unimplemented instruction: {}\n", getpid(), __FUNCTION__); \
        m_emulator.dump_backtrace(); \
        _exit(0); \
    } while (0)
// Stamps out the nine handler variants of a shift/rotate mnemonic
// (8/16/32-bit operand x shift-by-1 / shift-by-CL / shift-by-imm8), each
// forwarding to the matching generic_RM* helper with `op` as the operation.
#define DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(mnemonic, op) \
    void SoftCPU::mnemonic##_RM8_1(const X86::Instruction& insn) { generic_RM8_1(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_RM8_CL(const X86::Instruction& insn) { generic_RM8_CL(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<true, false>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_RM16_1(const X86::Instruction& insn) { generic_RM16_1(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM16_CL(const X86::Instruction& insn) { generic_RM16_CL(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_unsigned_imm8<true>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_1(const X86::Instruction& insn) { generic_RM32_1(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM32_CL(const X86::Instruction& insn) { generic_RM32_CL(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_unsigned_imm8<true>(op<ValueWithShadow<u32>>, insn); }
  35. namespace UserspaceEmulator {
  36. template<typename T>
  37. ALWAYS_INLINE void warn_if_uninitialized(T value_with_shadow, const char* message)
  38. {
  39. if (value_with_shadow.is_uninitialized()) [[unlikely]] {
  40. reportln("\033[31;1mWarning! Use of uninitialized value: {}\033[0m\n", message);
  41. Emulator::the().dump_backtrace();
  42. }
  43. }
  44. ALWAYS_INLINE void SoftCPU::warn_if_flags_tainted(const char* message) const
  45. {
  46. if (m_flags_tainted) [[unlikely]] {
  47. reportln("\n=={}== \033[31;1mConditional depends on uninitialized data\033[0m ({})\n", getpid(), message);
  48. Emulator::the().dump_backtrace();
  49. }
  50. }
  51. template<typename T, typename U>
  52. constexpr T sign_extended_to(U value)
  53. {
  54. if (!(value & X86::TypeTrivia<U>::sign_bit))
  55. return value;
  56. return (X86::TypeTrivia<T>::mask & ~X86::TypeTrivia<U>::mask) | value;
  57. }
SoftCPU::SoftCPU(Emulator& emulator)
    : m_emulator(emulator)
{
    // Zero all general-purpose registers; fill their shadows with a non-zero
    // pattern, which presumably marks them uninitialized -- consistent with
    // warn_if_uninitialized() and shadow_wrap_as_initialized() elsewhere.
    memset(m_gpr, 0, sizeof(m_gpr));
    memset(m_gpr_shadow, 1, sizeof(m_gpr_shadow));
    // Selector values match those accepted by read_memory*/write_memory*:
    // presumably Serenity's userspace code (0x1b), data (0x23) and TLS (0x2b)
    // selectors -- TODO confirm against the kernel GDT layout.
    m_segment[(int)X86::SegmentRegister::CS] = 0x1b;
    m_segment[(int)X86::SegmentRegister::DS] = 0x23;
    m_segment[(int)X86::SegmentRegister::ES] = 0x23;
    m_segment[(int)X86::SegmentRegister::SS] = 0x23;
    // NOTE(review): FS is never assigned here, and m_segment is not memset in
    // this constructor -- verify the member has a zeroing default initializer.
    m_segment[(int)X86::SegmentRegister::GS] = 0x2b;
}
// Prints the full register state: one line of values plus flags, and one
// line of the corresponding shadow (taint) state, then flushes stdout so the
// dump is visible even if the process dies right after.
void SoftCPU::dump() const
{
    outln(" eax={:08x} ebx={:08x} ecx={:08x} edx={:08x} ebp={:08x} esp={:08x} esi={:08x} edi={:08x} o={:d} s={:d} z={:d} a={:d} p={:d} c={:d}",
        eax(), ebx(), ecx(), edx(), ebp(), esp(), esi(), edi(), of(), sf(), zf(), af(), pf(), cf());
    outln("#eax={:08x} #ebx={:08x} #ecx={:08x} #edx={:08x} #ebp={:08x} #esp={:08x} #esi={:08x} #edi={:08x} #f={}",
        eax().shadow(), ebx().shadow(), ecx().shadow(), edx().shadow(), ebp().shadow(), esp().shadow(), esi().shadow(), edi().shadow(), m_flags_tainted);
    fflush(stdout);
}
// Refreshes the cached pointer to the memory region containing CS:EIP, so
// instruction fetch can avoid a region lookup per byte. Aborts emulation if
// EIP points into a non-executable region.
void SoftCPU::update_code_cache()
{
    auto* region = m_emulator.mmu().find_region({ cs(), eip() });
    VERIFY(region);
    if (!region->is_executable()) {
        reportln("SoftCPU::update_code_cache: Non-executable region @ {:p}", eip());
        Emulator::the().dump_backtrace();
        TODO();
    }
    // FIXME: This cache needs to be invalidated if the code region is ever unmapped.
    m_cached_code_region = region;
    m_cached_code_base_ptr = region->data();
}
  90. ValueWithShadow<u8> SoftCPU::read_memory8(X86::LogicalAddress address)
  91. {
  92. VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
  93. auto value = m_emulator.mmu().read8(address);
  94. outln_if(MEMORY_DEBUG, "\033[36;1mread_memory8: @{:04x}:{:08x} -> {:02x} ({:02x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  95. return value;
  96. }
  97. ValueWithShadow<u16> SoftCPU::read_memory16(X86::LogicalAddress address)
  98. {
  99. VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
  100. auto value = m_emulator.mmu().read16(address);
  101. outln_if(MEMORY_DEBUG, "\033[36;1mread_memory16: @{:04x}:{:08x} -> {:04x} ({:04x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  102. return value;
  103. }
  104. ValueWithShadow<u32> SoftCPU::read_memory32(X86::LogicalAddress address)
  105. {
  106. VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
  107. auto value = m_emulator.mmu().read32(address);
  108. outln_if(MEMORY_DEBUG, "\033[36;1mread_memory32: @{:04x}:{:08x} -> {:08x} ({:08x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  109. return value;
  110. }
  111. ValueWithShadow<u64> SoftCPU::read_memory64(X86::LogicalAddress address)
  112. {
  113. VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
  114. auto value = m_emulator.mmu().read64(address);
  115. outln_if(MEMORY_DEBUG, "\033[36;1mread_memory64: @{:04x}:{:08x} -> {:016x} ({:016x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  116. return value;
  117. }
  118. ValueWithShadow<u128> SoftCPU::read_memory128(X86::LogicalAddress address)
  119. {
  120. VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
  121. auto value = m_emulator.mmu().read128(address);
  122. outln_if(MEMORY_DEBUG, "\033[36;1mread_memory128: @{:04x}:{:08x} -> {:032x} ({:032x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  123. return value;
  124. }
  125. ValueWithShadow<u256> SoftCPU::read_memory256(X86::LogicalAddress address)
  126. {
  127. VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
  128. auto value = m_emulator.mmu().read256(address);
  129. outln_if(MEMORY_DEBUG, "\033[36;1mread_memory256: @{:04x}:{:08x} -> {:064x} ({:064x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  130. return value;
  131. }
  132. void SoftCPU::write_memory8(X86::LogicalAddress address, ValueWithShadow<u8> value)
  133. {
  134. VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
  135. outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory8: @{:04x}:{:08x} <- {:02x} ({:02x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  136. m_emulator.mmu().write8(address, value);
  137. }
  138. void SoftCPU::write_memory16(X86::LogicalAddress address, ValueWithShadow<u16> value)
  139. {
  140. VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
  141. outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory16: @{:04x}:{:08x} <- {:04x} ({:04x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  142. m_emulator.mmu().write16(address, value);
  143. }
  144. void SoftCPU::write_memory32(X86::LogicalAddress address, ValueWithShadow<u32> value)
  145. {
  146. VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
  147. outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory32: @{:04x}:{:08x} <- {:08x} ({:08x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  148. m_emulator.mmu().write32(address, value);
  149. }
  150. void SoftCPU::write_memory64(X86::LogicalAddress address, ValueWithShadow<u64> value)
  151. {
  152. VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
  153. outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory64: @{:04x}:{:08x} <- {:016x} ({:016x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  154. m_emulator.mmu().write64(address, value);
  155. }
  156. void SoftCPU::write_memory128(X86::LogicalAddress address, ValueWithShadow<u128> value)
  157. {
  158. VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
  159. outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory128: @{:04x}:{:08x} <- {:032x} ({:032x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  160. m_emulator.mmu().write128(address, value);
  161. }
  162. void SoftCPU::write_memory256(X86::LogicalAddress address, ValueWithShadow<u256> value)
  163. {
  164. VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
  165. outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory256: @{:04x}:{:08x} <- {:064x} ({:064x})\033[0m", address.selector(), address.offset(), value, value.shadow());
  166. m_emulator.mmu().write256(address, value);
  167. }
// Pushes `string` onto the emulated stack with a trailing NUL byte, rounding
// the reserved space up to a multiple of 16 bytes.
void SoftCPU::push_string(const StringView& string)
{
    size_t space_to_allocate = round_up_to_power_of_two(string.length() + 1, 16);
    set_esp({ esp().value() - space_to_allocate, esp().shadow() });
    m_emulator.mmu().copy_to_vm(esp().value(), string.characters_without_null_termination(), string.length());
    // NOTE(review): hardcodes selector 0x23 instead of using ss(), and writes
    // through the MMU directly, bypassing write_memory8()'s selector check and
    // debug logging -- presumably intentional; confirm.
    m_emulator.mmu().write8({ 0x23, esp().value() + string.length() }, shadow_wrap_as_initialized((u8)'\0'));
}
  175. void SoftCPU::push_buffer(const u8* data, size_t size)
  176. {
  177. set_esp({ esp().value() - size, esp().shadow() });
  178. warn_if_uninitialized(esp(), "push_buffer");
  179. m_emulator.mmu().copy_to_vm(esp().value(), data, size);
  180. }
  181. void SoftCPU::push32(ValueWithShadow<u32> value)
  182. {
  183. set_esp({ esp().value() - sizeof(u32), esp().shadow() });
  184. warn_if_uninitialized(esp(), "push32");
  185. write_memory32({ ss(), esp().value() }, value);
  186. }
  187. ValueWithShadow<u32> SoftCPU::pop32()
  188. {
  189. warn_if_uninitialized(esp(), "pop32");
  190. auto value = read_memory32({ ss(), esp().value() });
  191. set_esp({ esp().value() + sizeof(u32), esp().shadow() });
  192. return value;
  193. }
  194. void SoftCPU::push16(ValueWithShadow<u16> value)
  195. {
  196. warn_if_uninitialized(esp(), "push16");
  197. set_esp({ esp().value() - sizeof(u16), esp().shadow() });
  198. write_memory16({ ss(), esp().value() }, value);
  199. }
  200. ValueWithShadow<u16> SoftCPU::pop16()
  201. {
  202. warn_if_uninitialized(esp(), "pop16");
  203. auto value = read_memory16({ ss(), esp().value() });
  204. set_esp({ esp().value() + sizeof(u16), esp().shadow() });
  205. return value;
  206. }
// Runs `callback` once, or repeatedly while the instruction carries a REP
// prefix and the loop counter (via loop_index(insn.a32())) is non-zero.
// When check_zf is set, REPZ stops once ZF clears and REPNZ once ZF sets,
// re-checking after every iteration, as string-compare instructions require.
template<bool check_zf, typename Callback>
void SoftCPU::do_once_or_repeat(const X86::Instruction& insn, Callback callback)
{
    if (!insn.has_rep_prefix())
        return callback();
    while (loop_index(insn.a32()).value()) {
        callback();
        decrement_loop_index(insn.a32());
        if constexpr (check_zf) {
            // ZF may derive from uninitialized data; surface that before using it.
            warn_if_flags_tainted("repz/repnz");
            if (insn.rep_prefix() == X86::Prefix::REPZ && !zf())
                break;
            if (insn.rep_prefix() == X86::Prefix::REPNZ && zf())
                break;
        }
    }
}
// Emulates INC by executing it on the host CPU at the operand's exact width,
// then harvesting the host EFLAGS. INC updates OF/SF/ZF/AF/PF but leaves CF
// untouched, hence set_flags_oszap(). The result inherits `data`'s taint.
template<typename T>
ALWAYS_INLINE static T op_inc(SoftCPU& cpu, T data)
{
    typename T::ValueType result;
    u32 new_flags = 0;
    // Pick the host instruction width matching the emulated operand size.
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("incl %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("incw %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("incb %%al\n"
                     : "=a"(result)
                     : "a"(data.value()));
    }
    // NOTE(review): the flags are read in a *separate* asm statement; this
    // relies on the compiler not emitting flag-clobbering code in between.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszap(new_flags);
    cpu.taint_flags_from(data);
    return shadow_wrap_with_taint_from(result, data);
}
// Emulates DEC on the host CPU at the operand's exact width and harvests the
// host EFLAGS. Like INC, DEC updates OF/SF/ZF/AF/PF but not CF, hence
// set_flags_oszap(). The result inherits `data`'s taint.
template<typename T>
ALWAYS_INLINE static T op_dec(SoftCPU& cpu, T data)
{
    typename T::ValueType result;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("decl %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("decw %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("decb %%al\n"
                     : "=a"(result)
                     : "a"(data.value()));
    }
    // NOTE(review): flags captured in a separate asm statement -- relies on the
    // compiler not clobbering EFLAGS between the two blocks.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszap(new_flags);
    cpu.taint_flags_from(data);
    return shadow_wrap_with_taint_from(result, data);
}
// Emulates XOR on the host CPU and harvests the host EFLAGS afterwards.
// set_flags_oszpc() matches XOR's defined flags (OF/CF cleared, SF/ZF/PF set;
// AF undefined). The result's taint combines both operands' taints.
template<typename T>
ALWAYS_INLINE static T op_xor(SoftCPU& cpu, const T& dest, const T& src)
{
    // Unlike op_or/op_sub, `result` has no = 0 initializer; every reachable
    // if-constexpr branch assigns it, so this is harmless.
    typename T::ValueType result;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("xorl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("xor %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("xorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // NOTE(review): flags captured in a separate asm statement -- relies on the
    // compiler not clobbering EFLAGS between the two blocks.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}
// Emulates OR on the host CPU and harvests the host EFLAGS afterwards.
// set_flags_oszpc() matches OR's defined flags (OF/CF cleared, SF/ZF/PF set;
// AF undefined). The result's taint combines both operands' taints.
template<typename T>
ALWAYS_INLINE static T op_or(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("orl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("or %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("orb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // NOTE(review): flags captured in a separate asm statement -- relies on the
    // compiler not clobbering EFLAGS between the two blocks.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}
// Emulates SUB on the host CPU and harvests the host EFLAGS afterwards.
// SUB defines all six arithmetic flags, hence set_flags_oszapc(). The result's
// taint combines both operands' taints.
template<typename T>
ALWAYS_INLINE static T op_sub(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("subl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("subw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("subb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // NOTE(review): flags captured in a separate asm statement -- relies on the
    // compiler not clobbering EFLAGS between the two blocks.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}
// Emulates SBB with the incoming carry fixed at compile time: STC/CLC seeds
// the host carry flag, then the host SBB runs at the operand's width and the
// resulting EFLAGS are harvested (SBB defines all of OSZAPC).
template<typename T, bool cf>
ALWAYS_INLINE static T op_sbb_impl(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    // NOTE(review): the carry set here must survive until the SBB below --
    // this spans *separate* asm statements and relies on the compiler not
    // emitting flag-clobbering code in between.
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sbbl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sbbw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sbbb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
  392. template<typename T>
  393. ALWAYS_INLINE static T op_sbb(SoftCPU& cpu, T& dest, const T& src)
  394. {
  395. cpu.warn_if_flags_tainted("sbb");
  396. if (cpu.cf())
  397. return op_sbb_impl<T, true>(cpu, dest, src);
  398. return op_sbb_impl<T, false>(cpu, dest, src);
  399. }
// Emulates ADD on the host CPU and harvests the host EFLAGS afterwards.
// ADD defines all six arithmetic flags, hence set_flags_oszapc(). The result's
// taint combines both operands' taints.
template<typename T>
ALWAYS_INLINE static T op_add(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("addl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("addw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("addb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // NOTE(review): flags captured in a separate asm statement -- relies on the
    // compiler not clobbering EFLAGS between the two blocks.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
// Emulates ADC with the incoming carry fixed at compile time: STC/CLC seeds
// the host carry flag, then the host ADC runs at the operand's width and the
// resulting EFLAGS are harvested (ADC defines all of OSZAPC).
template<typename T, bool cf>
ALWAYS_INLINE static T op_adc_impl(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    // NOTE(review): the carry set here must survive until the ADC below --
    // this spans *separate* asm statements and relies on the compiler not
    // emitting flag-clobbering code in between.
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("adcl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("adcw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("adcb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
  460. template<typename T>
  461. ALWAYS_INLINE static T op_adc(SoftCPU& cpu, T& dest, const T& src)
  462. {
  463. cpu.warn_if_flags_tainted("adc");
  464. if (cpu.cf())
  465. return op_adc_impl<T, true>(cpu, dest, src);
  466. return op_adc_impl<T, false>(cpu, dest, src);
  467. }
  468. template<typename T>
  469. ALWAYS_INLINE static T op_and(SoftCPU& cpu, const T& dest, const T& src)
  470. {
  471. typename T::ValueType result = 0;
  472. u32 new_flags = 0;
  473. if constexpr (sizeof(typename T::ValueType) == 4) {
  474. asm volatile("andl %%ecx, %%eax\n"
  475. : "=a"(result)
  476. : "a"(dest.value()), "c"(src.value()));
  477. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  478. asm volatile("andw %%cx, %%ax\n"
  479. : "=a"(result)
  480. : "a"(dest.value()), "c"(src.value()));
  481. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  482. asm volatile("andb %%cl, %%al\n"
  483. : "=a"(result)
  484. : "a"(dest.value()), "c"(src.value()));
  485. } else {
  486. VERIFY_NOT_REACHED();
  487. }
  488. asm volatile(
  489. "pushf\n"
  490. "pop %%ebx"
  491. : "=b"(new_flags));
  492. cpu.set_flags_oszpc(new_flags);
  493. cpu.taint_flags_from(dest, src);
  494. return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
  495. }
  496. template<typename T>
  497. ALWAYS_INLINE static void op_imul(SoftCPU& cpu, const T& dest, const T& src, T& result_high, T& result_low)
  498. {
  499. bool did_overflow = false;
  500. if constexpr (sizeof(T) == 4) {
  501. i64 result = (i64)src * (i64)dest;
  502. result_low = result & 0xffffffff;
  503. result_high = result >> 32;
  504. did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
  505. } else if constexpr (sizeof(T) == 2) {
  506. i32 result = (i32)src * (i32)dest;
  507. result_low = result & 0xffff;
  508. result_high = result >> 16;
  509. did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
  510. } else if constexpr (sizeof(T) == 1) {
  511. i16 result = (i16)src * (i16)dest;
  512. result_low = result & 0xff;
  513. result_high = result >> 8;
  514. did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
  515. }
  516. if (did_overflow) {
  517. cpu.set_cf(true);
  518. cpu.set_of(true);
  519. } else {
  520. cpu.set_cf(false);
  521. cpu.set_of(false);
  522. }
  523. }
  524. template<typename T>
  525. ALWAYS_INLINE static T op_shr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  526. {
  527. if (steps.value() == 0)
  528. return shadow_wrap_with_taint_from(data.value(), data, steps);
  529. u32 result = 0;
  530. u32 new_flags = 0;
  531. if constexpr (sizeof(typename T::ValueType) == 4) {
  532. asm volatile("shrl %%cl, %%eax\n"
  533. : "=a"(result)
  534. : "a"(data.value()), "c"(steps.value()));
  535. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  536. asm volatile("shrw %%cl, %%ax\n"
  537. : "=a"(result)
  538. : "a"(data.value()), "c"(steps.value()));
  539. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  540. asm volatile("shrb %%cl, %%al\n"
  541. : "=a"(result)
  542. : "a"(data.value()), "c"(steps.value()));
  543. }
  544. asm volatile(
  545. "pushf\n"
  546. "pop %%ebx"
  547. : "=b"(new_flags));
  548. cpu.set_flags_oszapc(new_flags);
  549. cpu.taint_flags_from(data, steps);
  550. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  551. }
  552. template<typename T>
  553. ALWAYS_INLINE static T op_shl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  554. {
  555. if (steps.value() == 0)
  556. return shadow_wrap_with_taint_from(data.value(), data, steps);
  557. u32 result = 0;
  558. u32 new_flags = 0;
  559. if constexpr (sizeof(typename T::ValueType) == 4) {
  560. asm volatile("shll %%cl, %%eax\n"
  561. : "=a"(result)
  562. : "a"(data.value()), "c"(steps.value()));
  563. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  564. asm volatile("shlw %%cl, %%ax\n"
  565. : "=a"(result)
  566. : "a"(data.value()), "c"(steps.value()));
  567. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  568. asm volatile("shlb %%cl, %%al\n"
  569. : "=a"(result)
  570. : "a"(data.value()), "c"(steps.value()));
  571. }
  572. asm volatile(
  573. "pushf\n"
  574. "pop %%ebx"
  575. : "=b"(new_flags));
  576. cpu.set_flags_oszapc(new_flags);
  577. cpu.taint_flags_from(data, steps);
  578. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  579. }
  580. template<typename T>
  581. ALWAYS_INLINE static T op_shrd(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
  582. {
  583. if (steps.value() == 0)
  584. return shadow_wrap_with_taint_from(data.value(), data, steps);
  585. u32 result = 0;
  586. u32 new_flags = 0;
  587. if constexpr (sizeof(typename T::ValueType) == 4) {
  588. asm volatile("shrd %%cl, %%edx, %%eax\n"
  589. : "=a"(result)
  590. : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
  591. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  592. asm volatile("shrd %%cl, %%dx, %%ax\n"
  593. : "=a"(result)
  594. : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
  595. }
  596. asm volatile(
  597. "pushf\n"
  598. "pop %%ebx"
  599. : "=b"(new_flags));
  600. cpu.set_flags_oszapc(new_flags);
  601. cpu.taint_flags_from(data, steps);
  602. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  603. }
  604. template<typename T>
  605. ALWAYS_INLINE static T op_shld(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
  606. {
  607. if (steps.value() == 0)
  608. return shadow_wrap_with_taint_from(data.value(), data, steps);
  609. u32 result = 0;
  610. u32 new_flags = 0;
  611. if constexpr (sizeof(typename T::ValueType) == 4) {
  612. asm volatile("shld %%cl, %%edx, %%eax\n"
  613. : "=a"(result)
  614. : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
  615. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  616. asm volatile("shld %%cl, %%dx, %%ax\n"
  617. : "=a"(result)
  618. : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
  619. }
  620. asm volatile(
  621. "pushf\n"
  622. "pop %%ebx"
  623. : "=b"(new_flags));
  624. cpu.set_flags_oszapc(new_flags);
  625. cpu.taint_flags_from(data, steps);
  626. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  627. }
  628. template<bool update_dest, bool is_or, typename Op>
  629. ALWAYS_INLINE void SoftCPU::generic_AL_imm8(Op op, const X86::Instruction& insn)
  630. {
  631. auto dest = al();
  632. auto src = shadow_wrap_as_initialized(insn.imm8());
  633. auto result = op(*this, dest, src);
  634. if (is_or && insn.imm8() == 0xff)
  635. result.set_initialized();
  636. if (update_dest)
  637. set_al(result);
  638. }
  639. template<bool update_dest, bool is_or, typename Op>
  640. ALWAYS_INLINE void SoftCPU::generic_AX_imm16(Op op, const X86::Instruction& insn)
  641. {
  642. auto dest = ax();
  643. auto src = shadow_wrap_as_initialized(insn.imm16());
  644. auto result = op(*this, dest, src);
  645. if (is_or && insn.imm16() == 0xffff)
  646. result.set_initialized();
  647. if (update_dest)
  648. set_ax(result);
  649. }
  650. template<bool update_dest, bool is_or, typename Op>
  651. ALWAYS_INLINE void SoftCPU::generic_EAX_imm32(Op op, const X86::Instruction& insn)
  652. {
  653. auto dest = eax();
  654. auto src = shadow_wrap_as_initialized(insn.imm32());
  655. auto result = op(*this, dest, src);
  656. if (is_or && insn.imm32() == 0xffffffff)
  657. result.set_initialized();
  658. if (update_dest)
  659. set_eax(result);
  660. }
  661. template<bool update_dest, bool is_or, typename Op>
  662. ALWAYS_INLINE void SoftCPU::generic_RM16_imm16(Op op, const X86::Instruction& insn)
  663. {
  664. auto dest = insn.modrm().read16(*this, insn);
  665. auto src = shadow_wrap_as_initialized(insn.imm16());
  666. auto result = op(*this, dest, src);
  667. if (is_or && insn.imm16() == 0xffff)
  668. result.set_initialized();
  669. if (update_dest)
  670. insn.modrm().write16(*this, insn, result);
  671. }
  672. template<bool update_dest, bool is_or, typename Op>
  673. ALWAYS_INLINE void SoftCPU::generic_RM16_imm8(Op op, const X86::Instruction& insn)
  674. {
  675. auto dest = insn.modrm().read16(*this, insn);
  676. auto src = shadow_wrap_as_initialized<u16>(sign_extended_to<u16>(insn.imm8()));
  677. auto result = op(*this, dest, src);
  678. if (is_or && src.value() == 0xffff)
  679. result.set_initialized();
  680. if (update_dest)
  681. insn.modrm().write16(*this, insn, result);
  682. }
  683. template<bool update_dest, typename Op>
  684. ALWAYS_INLINE void SoftCPU::generic_RM16_unsigned_imm8(Op op, const X86::Instruction& insn)
  685. {
  686. auto dest = insn.modrm().read16(*this, insn);
  687. auto src = shadow_wrap_as_initialized(insn.imm8());
  688. auto result = op(*this, dest, src);
  689. if (update_dest)
  690. insn.modrm().write16(*this, insn, result);
  691. }
  692. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  693. ALWAYS_INLINE void SoftCPU::generic_RM16_reg16(Op op, const X86::Instruction& insn)
  694. {
  695. auto dest = insn.modrm().read16(*this, insn);
  696. auto src = const_gpr16(insn.reg16());
  697. auto result = op(*this, dest, src);
  698. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  699. result.set_initialized();
  700. m_flags_tainted = false;
  701. }
  702. if (update_dest)
  703. insn.modrm().write16(*this, insn, result);
  704. }
  705. template<bool update_dest, bool is_or, typename Op>
  706. ALWAYS_INLINE void SoftCPU::generic_RM32_imm32(Op op, const X86::Instruction& insn)
  707. {
  708. auto dest = insn.modrm().read32(*this, insn);
  709. auto src = insn.imm32();
  710. auto result = op(*this, dest, shadow_wrap_as_initialized(src));
  711. if (is_or && src == 0xffffffff)
  712. result.set_initialized();
  713. if (update_dest)
  714. insn.modrm().write32(*this, insn, result);
  715. }
  716. template<bool update_dest, bool is_or, typename Op>
  717. ALWAYS_INLINE void SoftCPU::generic_RM32_imm8(Op op, const X86::Instruction& insn)
  718. {
  719. auto dest = insn.modrm().read32(*this, insn);
  720. auto src = sign_extended_to<u32>(insn.imm8());
  721. auto result = op(*this, dest, shadow_wrap_as_initialized(src));
  722. if (is_or && src == 0xffffffff)
  723. result.set_initialized();
  724. if (update_dest)
  725. insn.modrm().write32(*this, insn, result);
  726. }
  727. template<bool update_dest, typename Op>
  728. ALWAYS_INLINE void SoftCPU::generic_RM32_unsigned_imm8(Op op, const X86::Instruction& insn)
  729. {
  730. auto dest = insn.modrm().read32(*this, insn);
  731. auto src = shadow_wrap_as_initialized(insn.imm8());
  732. auto result = op(*this, dest, src);
  733. if (update_dest)
  734. insn.modrm().write32(*this, insn, result);
  735. }
  736. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  737. ALWAYS_INLINE void SoftCPU::generic_RM32_reg32(Op op, const X86::Instruction& insn)
  738. {
  739. auto dest = insn.modrm().read32(*this, insn);
  740. auto src = const_gpr32(insn.reg32());
  741. auto result = op(*this, dest, src);
  742. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  743. result.set_initialized();
  744. m_flags_tainted = false;
  745. }
  746. if (update_dest)
  747. insn.modrm().write32(*this, insn, result);
  748. }
  749. template<bool update_dest, bool is_or, typename Op>
  750. ALWAYS_INLINE void SoftCPU::generic_RM8_imm8(Op op, const X86::Instruction& insn)
  751. {
  752. auto dest = insn.modrm().read8(*this, insn);
  753. auto src = insn.imm8();
  754. auto result = op(*this, dest, shadow_wrap_as_initialized(src));
  755. if (is_or && src == 0xff)
  756. result.set_initialized();
  757. if (update_dest)
  758. insn.modrm().write8(*this, insn, result);
  759. }
  760. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  761. ALWAYS_INLINE void SoftCPU::generic_RM8_reg8(Op op, const X86::Instruction& insn)
  762. {
  763. auto dest = insn.modrm().read8(*this, insn);
  764. auto src = const_gpr8(insn.reg8());
  765. auto result = op(*this, dest, src);
  766. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  767. result.set_initialized();
  768. m_flags_tainted = false;
  769. }
  770. if (update_dest)
  771. insn.modrm().write8(*this, insn, result);
  772. }
  773. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  774. ALWAYS_INLINE void SoftCPU::generic_reg16_RM16(Op op, const X86::Instruction& insn)
  775. {
  776. auto dest = const_gpr16(insn.reg16());
  777. auto src = insn.modrm().read16(*this, insn);
  778. auto result = op(*this, dest, src);
  779. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  780. result.set_initialized();
  781. m_flags_tainted = false;
  782. }
  783. if (update_dest)
  784. gpr16(insn.reg16()) = result;
  785. }
  786. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  787. ALWAYS_INLINE void SoftCPU::generic_reg32_RM32(Op op, const X86::Instruction& insn)
  788. {
  789. auto dest = const_gpr32(insn.reg32());
  790. auto src = insn.modrm().read32(*this, insn);
  791. auto result = op(*this, dest, src);
  792. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  793. result.set_initialized();
  794. m_flags_tainted = false;
  795. }
  796. if (update_dest)
  797. gpr32(insn.reg32()) = result;
  798. }
  799. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  800. ALWAYS_INLINE void SoftCPU::generic_reg8_RM8(Op op, const X86::Instruction& insn)
  801. {
  802. auto dest = const_gpr8(insn.reg8());
  803. auto src = insn.modrm().read8(*this, insn);
  804. auto result = op(*this, dest, src);
  805. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  806. result.set_initialized();
  807. m_flags_tainted = false;
  808. }
  809. if (update_dest)
  810. gpr8(insn.reg8()) = result;
  811. }
  812. template<typename Op>
  813. ALWAYS_INLINE void SoftCPU::generic_RM8_1(Op op, const X86::Instruction& insn)
  814. {
  815. auto data = insn.modrm().read8(*this, insn);
  816. insn.modrm().write8(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
  817. }
  818. template<typename Op>
  819. ALWAYS_INLINE void SoftCPU::generic_RM8_CL(Op op, const X86::Instruction& insn)
  820. {
  821. auto data = insn.modrm().read8(*this, insn);
  822. insn.modrm().write8(*this, insn, op(*this, data, cl()));
  823. }
  824. template<typename Op>
  825. ALWAYS_INLINE void SoftCPU::generic_RM16_1(Op op, const X86::Instruction& insn)
  826. {
  827. auto data = insn.modrm().read16(*this, insn);
  828. insn.modrm().write16(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
  829. }
  830. template<typename Op>
  831. ALWAYS_INLINE void SoftCPU::generic_RM16_CL(Op op, const X86::Instruction& insn)
  832. {
  833. auto data = insn.modrm().read16(*this, insn);
  834. insn.modrm().write16(*this, insn, op(*this, data, cl()));
  835. }
  836. template<typename Op>
  837. ALWAYS_INLINE void SoftCPU::generic_RM32_1(Op op, const X86::Instruction& insn)
  838. {
  839. auto data = insn.modrm().read32(*this, insn);
  840. insn.modrm().write32(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
  841. }
  842. template<typename Op>
  843. ALWAYS_INLINE void SoftCPU::generic_RM32_CL(Op op, const X86::Instruction& insn)
  844. {
  845. auto data = insn.modrm().read32(*this, insn);
  846. insn.modrm().write32(*this, insn, op(*this, data, cl()));
  847. }
  848. void SoftCPU::AAA(const X86::Instruction&) { TODO_INSN(); }
  849. void SoftCPU::AAD(const X86::Instruction&) { TODO_INSN(); }
  850. void SoftCPU::AAM(const X86::Instruction&) { TODO_INSN(); }
  851. void SoftCPU::AAS(const X86::Instruction&) { TODO_INSN(); }
  852. void SoftCPU::ARPL(const X86::Instruction&) { TODO_INSN(); }
  853. void SoftCPU::BOUND(const X86::Instruction&) { TODO_INSN(); }
  854. template<typename T>
  855. ALWAYS_INLINE static T op_bsf(SoftCPU&, T value)
  856. {
  857. return { (typename T::ValueType)__builtin_ctz(value.value()), value.shadow() };
  858. }
  859. template<typename T>
  860. ALWAYS_INLINE static T op_bsr(SoftCPU&, T value)
  861. {
  862. typename T::ValueType bit_index = 0;
  863. if constexpr (sizeof(typename T::ValueType) == 4) {
  864. asm volatile("bsrl %%eax, %%edx"
  865. : "=d"(bit_index)
  866. : "a"(value.value()));
  867. }
  868. if constexpr (sizeof(typename T::ValueType) == 2) {
  869. asm volatile("bsrw %%ax, %%dx"
  870. : "=d"(bit_index)
  871. : "a"(value.value()));
  872. }
  873. return shadow_wrap_with_taint_from(bit_index, value);
  874. }
  875. void SoftCPU::BSF_reg16_RM16(const X86::Instruction& insn)
  876. {
  877. auto src = insn.modrm().read16(*this, insn);
  878. set_zf(!src.value());
  879. if (src.value())
  880. gpr16(insn.reg16()) = op_bsf(*this, src);
  881. taint_flags_from(src);
  882. }
  883. void SoftCPU::BSF_reg32_RM32(const X86::Instruction& insn)
  884. {
  885. auto src = insn.modrm().read32(*this, insn);
  886. set_zf(!src.value());
  887. if (src.value()) {
  888. gpr32(insn.reg32()) = op_bsf(*this, src);
  889. taint_flags_from(src);
  890. }
  891. }
  892. void SoftCPU::BSR_reg16_RM16(const X86::Instruction& insn)
  893. {
  894. auto src = insn.modrm().read16(*this, insn);
  895. set_zf(!src.value());
  896. if (src.value()) {
  897. gpr16(insn.reg16()) = op_bsr(*this, src);
  898. taint_flags_from(src);
  899. }
  900. }
  901. void SoftCPU::BSR_reg32_RM32(const X86::Instruction& insn)
  902. {
  903. auto src = insn.modrm().read32(*this, insn);
  904. set_zf(!src.value());
  905. if (src.value()) {
  906. gpr32(insn.reg32()) = op_bsr(*this, src);
  907. taint_flags_from(src);
  908. }
  909. }
  910. void SoftCPU::BSWAP_reg32(const X86::Instruction& insn)
  911. {
  912. gpr32(insn.reg32()) = { __builtin_bswap32(gpr32(insn.reg32()).value()), __builtin_bswap32(gpr32(insn.reg32()).shadow()) };
  913. }
  914. template<typename T>
  915. ALWAYS_INLINE static T op_bt(T value, T)
  916. {
  917. return value;
  918. }
  919. template<typename T>
  920. ALWAYS_INLINE static T op_bts(T value, T bit_mask)
  921. {
  922. return value | bit_mask;
  923. }
  924. template<typename T>
  925. ALWAYS_INLINE static T op_btr(T value, T bit_mask)
  926. {
  927. return value & ~bit_mask;
  928. }
  929. template<typename T>
  930. ALWAYS_INLINE static T op_btc(T value, T bit_mask)
  931. {
  932. return value ^ bit_mask;
  933. }
  934. template<bool should_update, typename Op>
  935. ALWAYS_INLINE void BTx_RM16_reg16(SoftCPU& cpu, const X86::Instruction& insn, Op op)
  936. {
  937. if (insn.modrm().is_register()) {
  938. unsigned bit_index = cpu.const_gpr16(insn.reg16()).value() & (X86::TypeTrivia<u16>::bits - 1);
  939. auto original = insn.modrm().read16(cpu, insn);
  940. u16 bit_mask = 1 << bit_index;
  941. u16 result = op(original.value(), bit_mask);
  942. cpu.set_cf((original.value() & bit_mask) != 0);
  943. cpu.taint_flags_from(cpu.gpr16(insn.reg16()), original);
  944. if (should_update)
  945. insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), original));
  946. return;
  947. }
  948. // FIXME: Is this supposed to perform a full 16-bit read/modify/write?
  949. unsigned bit_offset_in_array = cpu.const_gpr16(insn.reg16()).value() / 8;
  950. unsigned bit_offset_in_byte = cpu.const_gpr16(insn.reg16()).value() & 7;
  951. auto address = insn.modrm().resolve(cpu, insn);
  952. address.set_offset(address.offset() + bit_offset_in_array);
  953. auto dest = cpu.read_memory8(address);
  954. u8 bit_mask = 1 << bit_offset_in_byte;
  955. u8 result = op(dest.value(), bit_mask);
  956. cpu.set_cf((dest.value() & bit_mask) != 0);
  957. cpu.taint_flags_from(cpu.gpr16(insn.reg16()), dest);
  958. if (should_update)
  959. cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), dest));
  960. }
  961. template<bool should_update, typename Op>
  962. ALWAYS_INLINE void BTx_RM32_reg32(SoftCPU& cpu, const X86::Instruction& insn, Op op)
  963. {
  964. if (insn.modrm().is_register()) {
  965. unsigned bit_index = cpu.const_gpr32(insn.reg32()).value() & (X86::TypeTrivia<u32>::bits - 1);
  966. auto original = insn.modrm().read32(cpu, insn);
  967. u32 bit_mask = 1 << bit_index;
  968. u32 result = op(original.value(), bit_mask);
  969. cpu.set_cf((original.value() & bit_mask) != 0);
  970. cpu.taint_flags_from(cpu.gpr32(insn.reg32()), original);
  971. if (should_update)
  972. insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), original));
  973. return;
  974. }
  975. // FIXME: Is this supposed to perform a full 32-bit read/modify/write?
  976. unsigned bit_offset_in_array = cpu.const_gpr32(insn.reg32()).value() / 8;
  977. unsigned bit_offset_in_byte = cpu.const_gpr32(insn.reg32()).value() & 7;
  978. auto address = insn.modrm().resolve(cpu, insn);
  979. address.set_offset(address.offset() + bit_offset_in_array);
  980. auto dest = cpu.read_memory8(address);
  981. u8 bit_mask = 1 << bit_offset_in_byte;
  982. u8 result = op(dest.value(), bit_mask);
  983. cpu.set_cf((dest.value() & bit_mask) != 0);
  984. cpu.taint_flags_from(cpu.gpr32(insn.reg32()), dest);
  985. if (should_update)
  986. cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), dest));
  987. }
  988. template<bool should_update, typename Op>
  989. ALWAYS_INLINE void BTx_RM16_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
  990. {
  991. unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u16>::mask);
  992. // FIXME: Support higher bit indices
  993. VERIFY(bit_index < 16);
  994. auto original = insn.modrm().read16(cpu, insn);
  995. u16 bit_mask = 1 << bit_index;
  996. auto result = op(original.value(), bit_mask);
  997. cpu.set_cf((original.value() & bit_mask) != 0);
  998. cpu.taint_flags_from(original);
  999. if (should_update)
  1000. insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, original));
  1001. }
  1002. template<bool should_update, typename Op>
  1003. ALWAYS_INLINE void BTx_RM32_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
  1004. {
  1005. unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u32>::mask);
  1006. // FIXME: Support higher bit indices
  1007. VERIFY(bit_index < 32);
  1008. auto original = insn.modrm().read32(cpu, insn);
  1009. u32 bit_mask = 1 << bit_index;
  1010. auto result = op(original.value(), bit_mask);
  1011. cpu.set_cf((original.value() & bit_mask) != 0);
  1012. cpu.taint_flags_from(original);
  1013. if (should_update)
  1014. insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, original));
  1015. }
  1016. #define DEFINE_GENERIC_BTx_INSN_HANDLERS(mnemonic, op, update_dest) \
  1017. void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { BTx_RM32_reg32<update_dest>(*this, insn, op<u32>); } \
  1018. void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { BTx_RM16_reg16<update_dest>(*this, insn, op<u16>); } \
  1019. void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { BTx_RM32_imm8<update_dest>(*this, insn, op<u32>); } \
  1020. void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { BTx_RM16_imm8<update_dest>(*this, insn, op<u16>); }
  1021. DEFINE_GENERIC_BTx_INSN_HANDLERS(BTS, op_bts, true);
  1022. DEFINE_GENERIC_BTx_INSN_HANDLERS(BTR, op_btr, true);
  1023. DEFINE_GENERIC_BTx_INSN_HANDLERS(BTC, op_btc, true);
  1024. DEFINE_GENERIC_BTx_INSN_HANDLERS(BT, op_bt, false);
  1025. void SoftCPU::CALL_FAR_mem16(const X86::Instruction&)
  1026. {
  1027. TODO();
  1028. }
  1029. void SoftCPU::CALL_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
  1030. void SoftCPU::CALL_RM16(const X86::Instruction&) { TODO_INSN(); }
  1031. void SoftCPU::CALL_RM32(const X86::Instruction& insn)
  1032. {
  1033. push32(shadow_wrap_as_initialized(eip()));
  1034. auto address = insn.modrm().read32(*this, insn);
  1035. warn_if_uninitialized(address, "call rm32");
  1036. set_eip(address.value());
  1037. }
  1038. void SoftCPU::CALL_imm16(const X86::Instruction&) { TODO_INSN(); }
  1039. void SoftCPU::CALL_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
  1040. void SoftCPU::CALL_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }
  1041. void SoftCPU::CALL_imm32(const X86::Instruction& insn)
  1042. {
  1043. push32(shadow_wrap_as_initialized(eip()));
  1044. set_eip(eip() + (i32)insn.imm32());
  1045. }
  1046. void SoftCPU::CBW(const X86::Instruction&)
  1047. {
  1048. set_ah(shadow_wrap_with_taint_from<u8>((al().value() & 0x80) ? 0xff : 0x00, al()));
  1049. }
  1050. void SoftCPU::CDQ(const X86::Instruction&)
  1051. {
  1052. if (eax().value() & 0x80000000)
  1053. set_edx(shadow_wrap_with_taint_from<u32>(0xffffffff, eax()));
  1054. else
  1055. set_edx(shadow_wrap_with_taint_from<u32>(0, eax()));
  1056. }
  1057. void SoftCPU::CLC(const X86::Instruction&)
  1058. {
  1059. set_cf(false);
  1060. }
  1061. void SoftCPU::CLD(const X86::Instruction&)
  1062. {
  1063. set_df(false);
  1064. }
  1065. void SoftCPU::CLI(const X86::Instruction&) { TODO_INSN(); }
  1066. void SoftCPU::CLTS(const X86::Instruction&) { TODO_INSN(); }
  1067. void SoftCPU::CMC(const X86::Instruction&)
  1068. {
  1069. set_cf(!cf());
  1070. }
  1071. void SoftCPU::CMOVcc_reg16_RM16(const X86::Instruction& insn)
  1072. {
  1073. warn_if_flags_tainted("cmovcc reg16, rm16");
  1074. if (evaluate_condition(insn.cc()))
  1075. gpr16(insn.reg16()) = insn.modrm().read16(*this, insn);
  1076. }
  1077. void SoftCPU::CMOVcc_reg32_RM32(const X86::Instruction& insn)
  1078. {
  1079. warn_if_flags_tainted("cmovcc reg32, rm32");
  1080. if (evaluate_condition(insn.cc()))
  1081. gpr32(insn.reg32()) = insn.modrm().read32(*this, insn);
  1082. }
  1083. template<typename T>
  1084. ALWAYS_INLINE static void do_cmps(SoftCPU& cpu, const X86::Instruction& insn)
  1085. {
  1086. auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
  1087. cpu.do_once_or_repeat<true>(insn, [&] {
  1088. auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
  1089. auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
  1090. op_sub(cpu, dest, src);
  1091. cpu.step_source_index(insn.a32(), sizeof(T));
  1092. cpu.step_destination_index(insn.a32(), sizeof(T));
  1093. });
  1094. }
  1095. void SoftCPU::CMPSB(const X86::Instruction& insn)
  1096. {
  1097. do_cmps<u8>(*this, insn);
  1098. }
  1099. void SoftCPU::CMPSD(const X86::Instruction& insn)
  1100. {
  1101. do_cmps<u32>(*this, insn);
  1102. }
  1103. void SoftCPU::CMPSW(const X86::Instruction& insn)
  1104. {
  1105. do_cmps<u16>(*this, insn);
  1106. }
  1107. void SoftCPU::CMPXCHG_RM16_reg16(const X86::Instruction& insn)
  1108. {
  1109. auto current = insn.modrm().read16(*this, insn);
  1110. taint_flags_from(current, ax());
  1111. if (current.value() == ax().value()) {
  1112. set_zf(true);
  1113. insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
  1114. } else {
  1115. set_zf(false);
  1116. set_ax(current);
  1117. }
  1118. }
  1119. void SoftCPU::CMPXCHG_RM32_reg32(const X86::Instruction& insn)
  1120. {
  1121. auto current = insn.modrm().read32(*this, insn);
  1122. taint_flags_from(current, eax());
  1123. if (current.value() == eax().value()) {
  1124. set_zf(true);
  1125. insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
  1126. } else {
  1127. set_zf(false);
  1128. set_eax(current);
  1129. }
  1130. }
  1131. void SoftCPU::CMPXCHG_RM8_reg8(const X86::Instruction& insn)
  1132. {
  1133. auto current = insn.modrm().read8(*this, insn);
  1134. taint_flags_from(current, al());
  1135. if (current.value() == al().value()) {
  1136. set_zf(true);
  1137. insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
  1138. } else {
  1139. set_zf(false);
  1140. set_al(current);
  1141. }
  1142. }
  1143. void SoftCPU::CPUID(const X86::Instruction&)
  1144. {
  1145. if (eax().value() == 0) {
  1146. set_eax(shadow_wrap_as_initialized<u32>(1));
  1147. set_ebx(shadow_wrap_as_initialized<u32>(0x6c6c6548));
  1148. set_edx(shadow_wrap_as_initialized<u32>(0x6972466f));
  1149. set_ecx(shadow_wrap_as_initialized<u32>(0x73646e65));
  1150. return;
  1151. }
  1152. if (eax().value() == 1) {
  1153. u32 stepping = 0;
  1154. u32 model = 1;
  1155. u32 family = 3;
  1156. u32 type = 0;
  1157. set_eax(shadow_wrap_as_initialized<u32>(stepping | (model << 4) | (family << 8) | (type << 12)));
  1158. set_ebx(shadow_wrap_as_initialized<u32>(0));
  1159. set_edx(shadow_wrap_as_initialized<u32>((1 << 15))); // Features (CMOV)
  1160. set_ecx(shadow_wrap_as_initialized<u32>(0));
  1161. return;
  1162. }
  1163. dbgln("Unhandled CPUID with eax={:08x}", eax().value());
  1164. }
  1165. void SoftCPU::CWD(const X86::Instruction&)
  1166. {
  1167. set_dx(shadow_wrap_with_taint_from<u16>((ax().value() & 0x8000) ? 0xffff : 0x0000, ax()));
  1168. }
  1169. void SoftCPU::CWDE(const X86::Instruction&)
  1170. {
  1171. set_eax(shadow_wrap_with_taint_from(sign_extended_to<u32>(ax().value()), ax()));
  1172. }
  1173. void SoftCPU::DAA(const X86::Instruction&) { TODO_INSN(); }
  1174. void SoftCPU::DAS(const X86::Instruction&) { TODO_INSN(); }
  1175. void SoftCPU::DEC_RM16(const X86::Instruction& insn)
  1176. {
  1177. insn.modrm().write16(*this, insn, op_dec(*this, insn.modrm().read16(*this, insn)));
  1178. }
  1179. void SoftCPU::DEC_RM32(const X86::Instruction& insn)
  1180. {
  1181. insn.modrm().write32(*this, insn, op_dec(*this, insn.modrm().read32(*this, insn)));
  1182. }
  1183. void SoftCPU::DEC_RM8(const X86::Instruction& insn)
  1184. {
  1185. insn.modrm().write8(*this, insn, op_dec(*this, insn.modrm().read8(*this, insn)));
  1186. }
  1187. void SoftCPU::DEC_reg16(const X86::Instruction& insn)
  1188. {
  1189. gpr16(insn.reg16()) = op_dec(*this, const_gpr16(insn.reg16()));
  1190. }
  1191. void SoftCPU::DEC_reg32(const X86::Instruction& insn)
  1192. {
  1193. gpr32(insn.reg32()) = op_dec(*this, const_gpr32(insn.reg32()));
  1194. }
  1195. void SoftCPU::DIV_RM16(const X86::Instruction& insn)
  1196. {
  1197. auto divisor = insn.modrm().read16(*this, insn);
  1198. if (divisor.value() == 0) {
  1199. reportln("Divide by zero");
  1200. TODO();
  1201. }
  1202. u32 dividend = ((u32)dx().value() << 16) | ax().value();
  1203. auto quotient = dividend / divisor.value();
  1204. if (quotient > NumericLimits<u16>::max()) {
  1205. reportln("Divide overflow");
  1206. TODO();
  1207. }
  1208. auto remainder = dividend % divisor.value();
  1209. auto original_ax = ax();
  1210. set_ax(shadow_wrap_with_taint_from<u16>(quotient, original_ax, dx()));
  1211. set_dx(shadow_wrap_with_taint_from<u16>(remainder, original_ax, dx()));
  1212. }
// Unsigned divide of EDX:EAX by a 32-bit r/m operand; quotient -> EAX, remainder -> EDX.
void SoftCPU::DIV_RM32(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read32(*this, insn);
    if (divisor.value() == 0) {
        reportln("Divide by zero");
        TODO();
    }
    u64 dividend = ((u64)edx().value() << 32) | eax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u32>::max()) {
        reportln("Divide overflow");
        TODO();
    }
    auto remainder = dividend % divisor.value();
    // Snapshot EAX before overwriting it so the remainder's taint uses the original value.
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(quotient, original_eax, edx(), divisor));
    set_edx(shadow_wrap_with_taint_from<u32>(remainder, original_eax, edx(), divisor));
}
// Unsigned divide of AX by an 8-bit r/m operand; quotient -> AL, remainder -> AH.
void SoftCPU::DIV_RM8(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read8(*this, insn);
    if (divisor.value() == 0) {
        reportln("Divide by zero");
        TODO();
    }
    u16 dividend = ax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u8>::max()) {
        reportln("Divide overflow");
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(quotient, original_ax, divisor));
    set_ah(shadow_wrap_with_taint_from<u8>(remainder, original_ax, divisor));
}
// Stack-frame setup instructions; not implemented yet.
void SoftCPU::ENTER16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ENTER32(const X86::Instruction&) { TODO_INSN(); }
// Reached for x87 escape opcodes that have no dedicated handler.
void SoftCPU::ESCAPE(const X86::Instruction&)
{
    reportln("FIXME: x87 floating-point support");
    m_emulator.dump_backtrace();
    TODO();
}
void SoftCPU::FADD_RM32(const X86::Instruction& insn)
{
    // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem32 ops
    if (insn.modrm().is_register()) {
        // Register form: ST(0) += ST(i).
        fpu_set(0, fpu_get(insn.modrm().register_index()) + fpu_get(0));
    } else {
        // Memory form: ST(0) += 32-bit float read from the effective address.
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, fpu_get(0) + f32);
    }
}
void SoftCPU::FMUL_RM32(const X86::Instruction& insn)
{
    // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem32 ops
    if (insn.modrm().is_register()) {
        // Register form: ST(0) *= ST(i).
        fpu_set(0, fpu_get(0) * fpu_get(insn.modrm().register_index()));
    } else {
        // Memory form: ST(0) *= 32-bit float read from the effective address.
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, fpu_get(0) * f32);
    }
}
// x87 compare instructions; not implemented yet.
void SoftCPU::FCOM_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM32(const X86::Instruction&) { TODO_INSN(); }
// ST(0) -= operand (register ST(i) or 32-bit float in memory).
void SoftCPU::FSUB_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(0) - fpu_get(insn.modrm().register_index()));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, fpu_get(0) - f32);
    }
}
// Reverse subtract: ST(0) = operand - ST(0).
void SoftCPU::FSUBR_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, f32 - fpu_get(0));
    }
}
// ST(0) /= operand (register ST(i) or 32-bit float in memory).
void SoftCPU::FDIV_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(0) / fpu_get(insn.modrm().register_index()));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, fpu_get(0) / f32);
    }
}
// Reverse divide: ST(0) = operand / ST(0).
void SoftCPU::FDIVR_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, f32 / fpu_get(0));
    }
}
  1329. void SoftCPU::FLD_RM32(const X86::Instruction& insn)
  1330. {
  1331. if (insn.modrm().is_register()) {
  1332. fpu_push(fpu_get(insn.modrm().register_index()));
  1333. } else {
  1334. auto new_f32 = insn.modrm().read32(*this, insn);
  1335. // FIXME: Respect shadow values
  1336. fpu_push(bit_cast<float>(new_f32.value()));
  1337. }
  1338. }
  1339. void SoftCPU::FXCH(const X86::Instruction& insn)
  1340. {
  1341. VERIFY(insn.modrm().is_register());
  1342. auto tmp = fpu_get(0);
  1343. fpu_set(0, fpu_get(insn.modrm().register_index()));
  1344. fpu_set(insn.modrm().register_index(), tmp);
  1345. }
// Store ST(0) to memory as a 32-bit float (rounds from long double to float).
void SoftCPU::FST_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    float f32 = (float)fpu_get(0);
    // FIXME: Respect shadow values
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(f32)));
}
// FNOP: no operation.
void SoftCPU::FNOP(const X86::Instruction&)
{
}
// Store ST(0) as 32-bit float, then pop the FPU stack.
void SoftCPU::FSTP_RM32(const X86::Instruction& insn)
{
    FST_RM32(insn);
    fpu_pop();
}
void SoftCPU::FLDENV(const X86::Instruction&) { TODO_INSN(); }
// Negate ST(0).
void SoftCPU::FCHS(const X86::Instruction&)
{
    fpu_set(0, -fpu_get(0));
}
  1366. void SoftCPU::FABS(const X86::Instruction&)
  1367. {
  1368. fpu_set(0, __builtin_fabs(fpu_get(0)));
  1369. }
void SoftCPU::FTST(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXAM(const X86::Instruction&) { TODO_INSN(); }
// Load the FPU control word from a 16-bit memory operand.
void SoftCPU::FLDCW(const X86::Instruction& insn)
{
    m_fpu_cw = insn.modrm().read16(*this, insn);
}
  1376. void SoftCPU::FLD1(const X86::Instruction&)
  1377. {
  1378. fpu_push(1.0);
  1379. }
  1380. void SoftCPU::FLDL2T(const X86::Instruction&)
  1381. {
  1382. fpu_push(log2f(10.0f));
  1383. }
  1384. void SoftCPU::FLDL2E(const X86::Instruction&)
  1385. {
  1386. fpu_push(log2f(M_E));
  1387. }
  1388. void SoftCPU::FLDPI(const X86::Instruction&)
  1389. {
  1390. fpu_push(M_PI);
  1391. }
  1392. void SoftCPU::FLDLG2(const X86::Instruction&)
  1393. {
  1394. fpu_push(log10f(2.0f));
  1395. }
  1396. void SoftCPU::FLDLN2(const X86::Instruction&)
  1397. {
  1398. fpu_push(M_LN2);
  1399. }
  1400. void SoftCPU::FLDZ(const X86::Instruction&)
  1401. {
  1402. fpu_push(0.0);
  1403. }
  1404. void SoftCPU::FNSTENV(const X86::Instruction&) { TODO_INSN(); }
  1405. void SoftCPU::F2XM1(const X86::Instruction&)
  1406. {
  1407. // FIXME: validate ST(0) is in range –1.0 to +1.0
  1408. auto f32 = fpu_get(0);
  1409. // FIXME: Set C0, C2, C3 in FPU status word.
  1410. fpu_set(0, powf(2, f32) - 1.0f);
  1411. }
  1412. void SoftCPU::FYL2X(const X86::Instruction&)
  1413. {
  1414. // FIXME: Raise IA on +-infinity, +-0, raise Z on +-0
  1415. auto f32 = fpu_get(0);
  1416. // FIXME: Set C0, C2, C3 in FPU status word.
  1417. fpu_set(1, fpu_get(1) * log2f(f32));
  1418. fpu_pop();
  1419. }
  1420. void SoftCPU::FYL2XP1(const X86::Instruction&)
  1421. {
  1422. // FIXME: validate ST(0) range
  1423. auto f32 = fpu_get(0);
  1424. // FIXME: Set C0, C2, C3 in FPU status word.
  1425. fpu_set(1, (fpu_get(1) * log2f(f32 + 1.0f)));
  1426. fpu_pop();
  1427. }
  1428. void SoftCPU::FPTAN(const X86::Instruction&)
  1429. {
  1430. // FIXME: set C1 upon stack overflow or if result was rounded
  1431. // FIXME: Set C2 to 1 if ST(0) is outside range of -2^63 to +2^63; else set to 0
  1432. fpu_set(0, tanf(fpu_get(0)));
  1433. fpu_push(1.0f);
  1434. }
void SoftCPU::FPATAN(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXTRACT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FPREM1(const X86::Instruction&) { TODO_INSN(); }
// Rotate the FPU register-stack top pointer down by one, with wrap-around.
void SoftCPU::FDECSTP(const X86::Instruction&)
{
    m_fpu_top = (m_fpu_top == 0) ? 7 : m_fpu_top - 1;
    // NOTE(review): hardware FDECSTP clears C1 in the FPU status word, not the CPU
    // carry flag -- confirm clearing CF here is intentional.
    set_cf(0);
}
// Rotate the FPU register-stack top pointer up by one, with wrap-around.
void SoftCPU::FINCSTP(const X86::Instruction&)
{
    m_fpu_top = (m_fpu_top == 7) ? 0 : m_fpu_top + 1;
    // NOTE(review): see FDECSTP -- hardware clears C1, not CF.
    set_cf(0);
}
// Store the FPU control word to a 16-bit memory operand.
void SoftCPU::FNSTCW(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, m_fpu_cw);
}
  1452. void SoftCPU::FPREM(const X86::Instruction&)
  1453. {
  1454. fpu_set(0,
  1455. fmodl(fpu_get(0), fpu_get(1)));
  1456. }
  1457. void SoftCPU::FSQRT(const X86::Instruction&)
  1458. {
  1459. fpu_set(0, sqrt(fpu_get(0)));
  1460. }
  1461. void SoftCPU::FSINCOS(const X86::Instruction&)
  1462. {
  1463. long double sin = sinl(fpu_get(0));
  1464. long double cos = cosl(fpu_get(0));
  1465. fpu_set(0, sin);
  1466. fpu_push(cos);
  1467. }
  1468. void SoftCPU::FRNDINT(const X86::Instruction&)
  1469. {
  1470. // FIXME: support rounding mode
  1471. fpu_set(0, round(fpu_get(0)));
  1472. }
  1473. void SoftCPU::FSCALE(const X86::Instruction&)
  1474. {
  1475. // FIXME: set C1 upon stack overflow or if result was rounded
  1476. fpu_set(0, fpu_get(0) * powf(2, floorf(fpu_get(1))));
  1477. }
  1478. void SoftCPU::FSIN(const X86::Instruction&)
  1479. {
  1480. fpu_set(0, sin(fpu_get(0)));
  1481. }
  1482. void SoftCPU::FCOS(const X86::Instruction&)
  1483. {
  1484. fpu_set(0, cos(fpu_get(0)));
  1485. }
// ST(0) += signed 32-bit integer from memory.
void SoftCPU::FIADD_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) + (long double)m32int);
}
// Conditional move to ST(0) from ST(i) when CF is set.
void SoftCPU::FCMOVB(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (cf())
        fpu_set(0, fpu_get(insn.rm() & 7));
}
// ST(0) *= signed 32-bit integer from memory.
void SoftCPU::FIMUL_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) * (long double)m32int);
}
// Conditional move when ZF is set.
void SoftCPU::FCMOVE(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (zf())
        fpu_set(0, fpu_get(insn.rm() & 7));
}
void SoftCPU::FICOM_RM32(const X86::Instruction&) { TODO_INSN(); }
// Conditional move on condition 6 (below-or-equal).
// NOTE(review): unlike the other FCMOVs, this does not VERIFY a register operand -- confirm.
void SoftCPU::FCMOVBE(const X86::Instruction& insn)
{
    if (evaluate_condition(6))
        fpu_set(0, fpu_get(insn.rm() & 7));
}
void SoftCPU::FICOMP_RM32(const X86::Instruction&) { TODO_INSN(); }
// Conditional move when PF is set (unordered).
// NOTE(review): this uses modrm().reg_fpu() while FCMOVB/FCMOVE use insn.rm() & 7;
// presumably they address the same register -- confirm.
void SoftCPU::FCMOVU(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (pf())
        fpu_set(0, fpu_get((insn.modrm().reg_fpu())));
}
// ST(0) -= signed 32-bit integer from memory.
void SoftCPU::FISUB_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) - (long double)m32int);
}
// Reverse subtract: ST(0) = m32int - ST(0).
void SoftCPU::FISUBR_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, (long double)m32int - fpu_get(0));
}
// ST(0) /= signed 32-bit integer from memory.
void SoftCPU::FIDIV_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
    fpu_set(0, fpu_get(0) / (long double)m32int);
}
// Reverse divide: ST(0) = m32int / ST(0).
void SoftCPU::FIDIVR_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
    fpu_set(0, (long double)m32int / fpu_get(0));
}
// Push a signed 32-bit integer from memory onto the FPU stack.
void SoftCPU::FILD_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m32int);
}
// Conditional move to ST(0) from ST(i) when CF is clear.
void SoftCPU::FCMOVNB(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (!cf())
        fpu_set(0, fpu_get((insn.modrm().reg_fpu())));
}
// Store ST(0) to memory as a truncated 32-bit integer, then pop (SSE3 FISTTP).
void SoftCPU::FISTTP_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    i32 value = static_cast<i32>(fpu_pop());
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(value)));
}
// Conditional move when ZF is clear.
void SoftCPU::FCMOVNE(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (!zf())
        fpu_set(0, fpu_get((insn.modrm().reg_fpu())));
}
// Store ST(0) to memory as a 32-bit integer (stack not popped).
void SoftCPU::FIST_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_get(0);
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto value = static_cast<i32>(f);
    // FIXME: Respect shadow values
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(value)));
}
// Conditional move on condition 7 (not below-or-equal).
void SoftCPU::FCMOVNBE(const X86::Instruction& insn)
{
    if (evaluate_condition(7))
        fpu_set(0, fpu_get(insn.rm() & 7));
}
// Store ST(0) as 32-bit integer, then pop.
void SoftCPU::FISTP_RM32(const X86::Instruction& insn)
{
    FIST_RM32(insn);
    fpu_pop();
}
// Conditional move when PF is clear (ordered).
void SoftCPU::FCMOVNU(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (!pf())
        fpu_set(0, fpu_get((insn.modrm().reg_fpu())));
}
// Legacy / control x87 instructions; not implemented yet.
void SoftCPU::FNENI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNDISI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNCLEX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNINIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSETPM(const X86::Instruction&) { TODO_INSN(); }
// Push an 80-bit extended-precision value from memory onto the FPU stack.
void SoftCPU::FLD_RM80(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    // long doubles can be up to 128 bits wide in memory for reasons (alignment) and only uses 80 bits of precision
    // gcc uses 12 byte in 32 bit and 16 byte in 64 bit mode
    // so in the 32 bit case we read a bit to much, but that shouldnt be that bad
    auto new_f80 = insn.modrm().read128(*this, insn);
    // FIXME: Respect shadow values
    fpu_push(*(long double*)new_f80.value().bytes());
}
  1620. void SoftCPU::FUCOMI(const X86::Instruction& insn)
  1621. {
  1622. auto i = insn.rm() & 7;
  1623. // FIXME: Unordered comparison checks.
  1624. // FIXME: QNaN / exception handling.
  1625. // FIXME: Set C0, C2, C3 in FPU status word.
  1626. if (__builtin_isnan(fpu_get(0)) || __builtin_isnan(fpu_get(i))) {
  1627. set_zf(true);
  1628. set_pf(true);
  1629. set_cf(true);
  1630. } else {
  1631. set_zf(fpu_get(0) == fpu_get(i));
  1632. set_pf(false);
  1633. set_cf(fpu_get(0) < fpu_get(i));
  1634. set_of(false);
  1635. }
  1636. // FIXME: Taint should be based on ST(0) and ST(i)
  1637. m_flags_tainted = false;
  1638. }
// Compare ST(0) with ST(i) and set EFLAGS: ZF on equal, CF on below, PF cleared.
void SoftCPU::FCOMI(const X86::Instruction& insn)
{
    auto i = insn.rm() & 7;
    // FIXME: QNaN / exception handling.
    // FIXME: Set C0, C2, C3 in FPU status word.
    set_zf(fpu_get(0) == fpu_get(i));
    set_pf(false);
    set_cf(fpu_get(0) < fpu_get(i));
    set_of(false);
    // FIXME: Taint should be based on ST(0) and ST(i)
    m_flags_tainted = false;
}
// Store ST(0) to an 80-bit destination (register or memory), then pop.
void SoftCPU::FSTP_RM80(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_pop());
    } else {
        // FIXME: Respect shadow values
        // long doubles can be up to 128 bits wide in memory for reasons (alignment) and only uses 80 bits of precision
        // gcc uses 12 byte in 32 bit and 16 byte in 64 bit mode
        // so in the 32 bit case we have to read first, to not override data on the overly big write
        u128 f80 {};
        if constexpr (sizeof(long double) == 12)
            f80 = insn.modrm().read128(*this, insn).value();
        *(long double*)f80.bytes() = fpu_pop();
        insn.modrm().write128(*this, insn, shadow_wrap_as_initialized(f80));
    }
}
void SoftCPU::FADD_RM64(const X86::Instruction& insn)
{
    // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem64 ops
    if (insn.modrm().is_register()) {
        // Register form: ST(i) += ST(0).
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) + fpu_get(0));
    } else {
        // Memory form: ST(0) += 64-bit double at the effective address.
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) + f64);
    }
}
void SoftCPU::FMUL_RM64(const X86::Instruction& insn)
{
    // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem64 ops
    if (insn.modrm().is_register()) {
        // Register form: ST(i) *= ST(0).
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) * fpu_get(0));
    } else {
        // Memory form: ST(0) *= 64-bit double at the effective address.
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) * f64);
    }
}
void SoftCPU::FCOM_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM64(const X86::Instruction&) { TODO_INSN(); }
// Register form: ST(i) -= ST(0). Memory form: ST(0) -= 64-bit double.
void SoftCPU::FSUB_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) - f64);
    }
}
// Reverse subtract. NOTE(review): the register form is identical to FSUB's;
// like FDIVR below this presumably relies on the decoder's DC-opcode operand
// quirk -- confirm against the instruction decoder.
void SoftCPU::FSUBR_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, f64 - fpu_get(0));
    }
}
// Register form: ST(i) /= ST(0). Memory form: ST(0) /= 64-bit double.
void SoftCPU::FDIV_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, fpu_get(0) / f64);
    }
}
void SoftCPU::FDIVR_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        // XXX this is FDIVR, Instruction decodes this weirdly
        //fpu_set(insn.modrm().register_index(), fpu_get(0) / fpu_get(insn.modrm().register_index()));
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, f64 / fpu_get(0));
    }
}
// Push a 64-bit double from memory onto the FPU stack.
void SoftCPU::FLD_RM64(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto new_f64 = insn.modrm().read64(*this, insn);
    // FIXME: Respect shadow values
    fpu_push(bit_cast<double>(new_f64.value()));
}
void SoftCPU::FFREE(const X86::Instruction&) { TODO_INSN(); }
// Store ST(0) as a truncated 64-bit integer, then pop (SSE3 FISTTP).
void SoftCPU::FISTTP_RM64(const X86::Instruction& insn)
{
    // is this allowed to be a register?
    VERIFY(!insn.modrm().is_register());
    i64 value = static_cast<i64>(fpu_pop());
    insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(value)));
}
// Store ST(0) to ST(i), or to memory as a 64-bit double.
void SoftCPU::FST_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(0));
    } else {
        // FIXME: Respect shadow values
        double f64 = (double)fpu_get(0);
        insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(f64)));
    }
}
// Store ST(0) as above, then pop.
void SoftCPU::FSTP_RM64(const X86::Instruction& insn)
{
    FST_RM64(insn);
    fpu_pop();
}
// State save/restore and compare instructions; not implemented yet.
void SoftCPU::FRSTOR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMPP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSAVE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTSW(const X86::Instruction&) { TODO_INSN(); }
// ST(0) += signed 16-bit integer from memory.
void SoftCPU::FIADD_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) + (long double)m16int);
}
// ST(i) += ST(0), then pop.
void SoftCPU::FADDP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) + fpu_get(0));
    fpu_pop();
}
// ST(0) *= signed 16-bit integer from memory.
void SoftCPU::FIMUL_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) * (long double)m16int);
}
// ST(i) *= ST(0), then pop.
void SoftCPU::FMULP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) * fpu_get(0));
    fpu_pop();
}
void SoftCPU::FICOM_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FICOMP_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMPP(const X86::Instruction&) { TODO_INSN(); }
// ST(0) -= signed 16-bit integer from memory.
void SoftCPU::FISUB_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) - (long double)m16int);
}
// ST(i) = ST(0) - ST(i), then pop.
void SoftCPU::FSUBRP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(0) - fpu_get(insn.modrm().register_index()));
    fpu_pop();
}
// Reverse subtract: ST(0) = m16int - ST(0).
void SoftCPU::FISUBR_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, (long double)m16int - fpu_get(0));
}
// ST(i) -= ST(0), then pop.
void SoftCPU::FSUBP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    fpu_pop();
}
// ST(0) /= signed 16-bit integer from memory.
void SoftCPU::FIDIV_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
    fpu_set(0, fpu_get(0) / (long double)m16int);
}
// ST(i) = ST(0) / ST(i), then pop.
void SoftCPU::FDIVRP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
    fpu_set(insn.modrm().register_index(), fpu_get(0) / fpu_get(insn.modrm().register_index()));
    fpu_pop();
}
// Reverse divide: ST(0) = m16int / ST(0).
void SoftCPU::FIDIVR_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
    fpu_set(0, (long double)m16int / fpu_get(0));
}
// ST(i) /= ST(0), then pop.
void SoftCPU::FDIVP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    fpu_pop();
}
// Push a signed 16-bit integer from memory onto the FPU stack.
void SoftCPU::FILD_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m16int);
}
void SoftCPU::FFREEP(const X86::Instruction&) { TODO_INSN(); }
// Store ST(0) as a truncated 16-bit integer, then pop (SSE3 FISTTP).
void SoftCPU::FISTTP_RM16(const X86::Instruction& insn)
{
    // is this allowed to be a register?
    VERIFY(!insn.modrm().is_register());
    i16 value = static_cast<i16>(fpu_pop());
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(bit_cast<u16>(value)));
}
// Store ST(0) to memory as a 16-bit integer (stack not popped).
void SoftCPU::FIST_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_get(0);
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto value = static_cast<i16>(f);
    // FIXME: Respect shadow values
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(bit_cast<u16>(value)));
}
// Store ST(0) as 16-bit integer, then pop.
void SoftCPU::FISTP_RM16(const X86::Instruction& insn)
{
    FIST_RM16(insn);
    fpu_pop();
}
// Packed BCD load/store; not implemented yet.
void SoftCPU::FBLD_M80(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTSW_AX(const X86::Instruction&) { TODO_INSN(); }
// Push a signed 64-bit integer from memory onto the FPU stack.
void SoftCPU::FILD_RM64(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m64int = (i64)insn.modrm().read64(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m64int);
}
// FUCOMI followed by a pop.
void SoftCPU::FUCOMIP(const X86::Instruction& insn)
{
    FUCOMI(insn);
    fpu_pop();
}
void SoftCPU::FBSTP_M80(const X86::Instruction&) { TODO_INSN(); }
// FCOMI followed by a pop.
void SoftCPU::FCOMIP(const X86::Instruction& insn)
{
    FCOMI(insn);
    fpu_pop();
}
// Store ST(0) to memory as a 64-bit integer, then pop.
void SoftCPU::FISTP_RM64(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_pop();
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto value = static_cast<i64>(f);
    // FIXME: Respect shadow values
    insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(value)));
}
void SoftCPU::HLT(const X86::Instruction&) { TODO_INSN(); }
// Signed divide of DX:AX by a 16-bit r/m operand; quotient -> AX, remainder -> DX.
void SoftCPU::IDIV_RM16(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read16(*this, insn);
    auto divisor = (i16)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i32 dividend = (i32)(((u32)dx().value() << 16) | (u32)ax().value());
    i32 result = dividend / divisor;
    if (result > NumericLimits<i16>::max() || result < NumericLimits<i16>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    // Snapshot AX before overwriting it so the remainder's taint uses the original value.
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result, original_ax, dx(), divisor_with_shadow));
    set_dx(shadow_wrap_with_taint_from<u16>(dividend % divisor, original_ax, dx(), divisor_with_shadow));
}
// Signed divide of EDX:EAX by a 32-bit r/m operand; quotient -> EAX, remainder -> EDX.
void SoftCPU::IDIV_RM32(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read32(*this, insn);
    auto divisor = (i32)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i64 dividend = (i64)(((u64)edx().value() << 32) | (u64)eax().value());
    i64 result = dividend / divisor;
    if (result > NumericLimits<i32>::max() || result < NumericLimits<i32>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, original_eax, edx(), divisor_with_shadow));
    set_edx(shadow_wrap_with_taint_from<u32>(dividend % divisor, original_eax, edx(), divisor_with_shadow));
}
// Signed divide of AX by an 8-bit r/m operand; quotient -> AL, remainder -> AH.
void SoftCPU::IDIV_RM8(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read8(*this, insn);
    auto divisor = (i8)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i16 dividend = ax().value();
    i16 result = dividend / divisor;
    if (result > NumericLimits<i8>::max() || result < NumericLimits<i8>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    auto original_ax = ax();
    // NOTE(review): taint arguments are (divisor, ax) here but (ax, ..., divisor)
    // in the 16/32-bit variants; presumably order does not matter -- confirm.
    set_al(shadow_wrap_with_taint_from<u8>(result, divisor_with_shadow, original_ax));
    set_ah(shadow_wrap_with_taint_from<u8>(dividend % divisor, divisor_with_shadow, original_ax));
}
// One-operand signed multiply: DX:AX = AX * r/m16.
void SoftCPU::IMUL_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), ax().value(), result_high, result_low);
    gpr16(X86::RegisterDX) = shadow_wrap_with_taint_from<u16>(result_high, src, ax());
    gpr16(X86::RegisterAX) = shadow_wrap_with_taint_from<u16>(result_low, src, ax());
}
// One-operand signed multiply: EDX:EAX = EAX * r/m32.
void SoftCPU::IMUL_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), eax().value(), result_high, result_low);
    gpr32(X86::RegisterEDX) = shadow_wrap_with_taint_from<u32>(result_high, src, eax());
    gpr32(X86::RegisterEAX) = shadow_wrap_with_taint_from<u32>(result_low, src, eax());
}
// One-operand signed multiply: AH:AL = AL * r/m8.
void SoftCPU::IMUL_RM8(const X86::Instruction& insn)
{
    i8 result_high;
    i8 result_low;
    auto src = insn.modrm().read8(*this, insn);
    op_imul<i8>(*this, src.value(), al().value(), result_high, result_low);
    gpr8(X86::RegisterAH) = shadow_wrap_with_taint_from<u8>(result_high, src, al());
    gpr8(X86::RegisterAL) = shadow_wrap_with_taint_from<u8>(result_low, src, al());
}
  2002. void SoftCPU::IMUL_reg16_RM16(const X86::Instruction& insn)
  2003. {
  2004. i16 result_high;
  2005. i16 result_low;
  2006. auto src = insn.modrm().read16(*this, insn);
  2007. op_imul<i16>(*this, gpr16(insn.reg16()).value(), src.value(), result_high, result_low);
  2008. gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src, gpr16(insn.reg16()));
  2009. }
// IMUL r16, r/m16, imm16: reg := r/m16 * imm16 (signed, low word only).
// The unused high half is still computed by op_imul so flags get set.
void SoftCPU::IMUL_reg16_RM16_imm16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), insn.imm16(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}
// IMUL r16, r/m16, imm8: same as above with a sign-extended 8-bit immediate.
void SoftCPU::IMUL_reg16_RM16_imm8(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), sign_extended_to<i16>(insn.imm8()), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}
// IMUL r32, r/m32 (two-operand form): reg := low dword of reg * r/m32 (signed).
void SoftCPU::IMUL_reg32_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, gpr32(insn.reg32()).value(), src.value(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src, gpr32(insn.reg32()));
}
// IMUL r32, r/m32, imm32: reg := r/m32 * imm32 (signed, low dword only).
void SoftCPU::IMUL_reg32_RM32_imm32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), insn.imm32(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}
// IMUL r32, r/m32, imm8: same as above with a sign-extended 8-bit immediate.
void SoftCPU::IMUL_reg32_RM32_imm8(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), sign_extended_to<i32>(insn.imm8()), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}
// INC handlers: read the operand, bump it through op_inc (which updates
// flags except CF), and write it back to the same r/m or register.
void SoftCPU::INC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_inc(*this, insn.modrm().read16(*this, insn)));
}
void SoftCPU::INC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_inc(*this, insn.modrm().read32(*this, insn)));
}
void SoftCPU::INC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_inc(*this, insn.modrm().read8(*this, insn)));
}
void SoftCPU::INC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_inc(*this, const_gpr16(insn.reg16()));
}
void SoftCPU::INC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_inc(*this, const_gpr32(insn.reg32()));
}
// String port-input and breakpoint/overflow-interrupt stubs: unimplemented.
void SoftCPU::INSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INT3(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INTO(const X86::Instruction&) { TODO_INSN(); }
// INT imm8: only vector 0x82 is supported — presumably the emulator's
// syscall gate, routed to the host-side virt_syscall with EAX as the
// syscall number and EDX/ECX/EBX as arguments; result returned in EAX.
void SoftCPU::INT_imm8(const X86::Instruction& insn)
{
    VERIFY(insn.imm8() == 0x82);
    // FIXME: virt_syscall should take ValueWithShadow and whine about uninitialized arguments
    set_eax(shadow_wrap_as_initialized(m_emulator.virt_syscall(eax().value(), edx().value(), ecx().value(), ebx().value())));
}
// TLB-invalidate, port-input and interrupt-return stubs: unimplemented.
void SoftCPU::INVLPG(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IRET(const X86::Instruction&) { TODO_INSN(); }
  2089. void SoftCPU::JCXZ_imm8(const X86::Instruction& insn)
  2090. {
  2091. if (insn.a32()) {
  2092. warn_if_uninitialized(ecx(), "jecxz imm8");
  2093. if (ecx().value() == 0)
  2094. set_eip(eip() + (i8)insn.imm8());
  2095. } else {
  2096. warn_if_uninitialized(cx(), "jcxz imm8");
  2097. if (cx().value() == 0)
  2098. set_eip(eip() + (i8)insn.imm8());
  2099. }
  2100. }
// Far and 16-bit indirect jumps are unimplemented.
void SoftCPU::JMP_FAR_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_RM16(const X86::Instruction&) { TODO_INSN(); }
// JMP r/m32: indirect near jump to the 32-bit target read from the operand.
void SoftCPU::JMP_RM32(const X86::Instruction& insn)
{
    set_eip(insn.modrm().read32(*this, insn).value());
}
// JMP rel16: near jump, sign-extended 16-bit displacement relative to next EIP.
void SoftCPU::JMP_imm16(const X86::Instruction& insn)
{
    set_eip(eip() + (i16)insn.imm16());
}
void SoftCPU::JMP_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }
// JMP rel32: near jump with a 32-bit displacement.
void SoftCPU::JMP_imm32(const X86::Instruction& insn)
{
    set_eip(eip() + (i32)insn.imm32());
}
// JMP rel8: short jump with a sign-extended 8-bit displacement.
void SoftCPU::JMP_short_imm8(const X86::Instruction& insn)
{
    set_eip(eip() + (i8)insn.imm8());
}
  2122. void SoftCPU::Jcc_NEAR_imm(const X86::Instruction& insn)
  2123. {
  2124. warn_if_flags_tainted("jcc near imm32");
  2125. if (evaluate_condition(insn.cc()))
  2126. set_eip(eip() + (i32)insn.imm32());
  2127. }
  2128. void SoftCPU::Jcc_imm8(const X86::Instruction& insn)
  2129. {
  2130. warn_if_flags_tainted("jcc imm8");
  2131. if (evaluate_condition(insn.cc()))
  2132. set_eip(eip() + (i8)insn.imm8());
  2133. }
// Flag-load, access-rights and far-pointer-load stubs: unimplemented.
void SoftCPU::LAHF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LEAVE16(const X86::Instruction&) { TODO_INSN(); }
// LEAVE (32-bit): tear down the stack frame — equivalent to
// ESP := EBP; EBP := pop32(), folded into one step here.
// The saved EBP must be read from [SS:EBP] *before* ESP/EBP are rewritten.
void SoftCPU::LEAVE32(const X86::Instruction&)
{
    auto new_ebp = read_memory32({ ss(), ebp().value() });
    set_esp({ ebp().value() + 4, ebp().shadow() }); // +4 accounts for the pop of saved EBP.
    set_ebp(new_ebp);
}
  2146. void SoftCPU::LEA_reg16_mem16(const X86::Instruction& insn)
  2147. {
  2148. // FIXME: Respect shadow values
  2149. gpr16(insn.reg16()) = shadow_wrap_as_initialized<u16>(insn.modrm().resolve(*this, insn).offset());
  2150. }
  2151. void SoftCPU::LEA_reg32_mem32(const X86::Instruction& insn)
  2152. {
  2153. // FIXME: Respect shadow values
  2154. gpr32(insn.reg32()) = shadow_wrap_as_initialized<u32>(insn.modrm().resolve(*this, insn).offset());
  2155. }
// Far-pointer loads and system-table/register loads: unimplemented.
void SoftCPU::LES_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LES_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LMSW_RM16(const X86::Instruction&) { TODO_INSN(); }
// Shared LODS{B,W,D} implementation: load from [seg:(E)SI] into the
// accumulator and advance the source index by sizeof(T), honoring REP
// prefixes via do_once_or_repeat. The segment defaults to DS but respects
// a segment-override prefix. gpr<T>(RegisterAL) selects AL/AX/EAX
// depending on T — presumably the register index aliases across widths.
template<typename T>
ALWAYS_INLINE static void do_lods(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.gpr<T>(X86::RegisterAL) = src;
        cpu.step_source_index(insn.a32(), sizeof(T));
    });
}
// Width-specific LODS entry points dispatch to the shared template above.
void SoftCPU::LODSB(const X86::Instruction& insn)
{
    do_lods<u8>(*this, insn);
}
void SoftCPU::LODSD(const X86::Instruction& insn)
{
    do_lods<u32>(*this, insn);
}
void SoftCPU::LODSW(const X86::Instruction& insn)
{
    do_lods<u16>(*this, insn);
}
  2188. void SoftCPU::LOOPNZ_imm8(const X86::Instruction& insn)
  2189. {
  2190. warn_if_flags_tainted("loopnz");
  2191. if (insn.a32()) {
  2192. set_ecx({ ecx().value() - 1, ecx().shadow() });
  2193. if (ecx().value() != 0 && !zf())
  2194. set_eip(eip() + (i8)insn.imm8());
  2195. } else {
  2196. set_cx({ (u16)(cx().value() - 1), cx().shadow() });
  2197. if (cx().value() != 0 && !zf())
  2198. set_eip(eip() + (i8)insn.imm8());
  2199. }
  2200. }
  2201. void SoftCPU::LOOPZ_imm8(const X86::Instruction& insn)
  2202. {
  2203. warn_if_flags_tainted("loopz");
  2204. if (insn.a32()) {
  2205. set_ecx({ ecx().value() - 1, ecx().shadow() });
  2206. if (ecx().value() != 0 && zf())
  2207. set_eip(eip() + (i8)insn.imm8());
  2208. } else {
  2209. set_cx({ (u16)(cx().value() - 1), cx().shadow() });
  2210. if (cx().value() != 0 && zf())
  2211. set_eip(eip() + (i8)insn.imm8());
  2212. }
  2213. }
  2214. void SoftCPU::LOOP_imm8(const X86::Instruction& insn)
  2215. {
  2216. if (insn.a32()) {
  2217. set_ecx({ ecx().value() - 1, ecx().shadow() });
  2218. if (ecx().value() != 0)
  2219. set_eip(eip() + (i8)insn.imm8());
  2220. } else {
  2221. set_cx({ (u16)(cx().value() - 1), cx().shadow() });
  2222. if (cx().value() != 0)
  2223. set_eip(eip() + (i8)insn.imm8());
  2224. }
  2225. }
// Segment-limit, far-SS-pointer and task-register loads: unimplemented.
void SoftCPU::LSL_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSL_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LTR_RM16(const X86::Instruction&) { TODO_INSN(); }
// Shared MOVS{B,W,D} implementation: copy one T from [seg:(E)SI] to
// [ES:(E)DI], stepping both indices by sizeof(T), with REP handling via
// do_once_or_repeat. Source segment defaults to DS but honors an override;
// the destination is always ES, per the x86 string-op convention.
template<typename T>
ALWAYS_INLINE static void do_movs(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<false>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.write_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() }, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}
// Width-specific MOVS entry points dispatch to the shared template above.
void SoftCPU::MOVSB(const X86::Instruction& insn)
{
    do_movs<u8>(*this, insn);
}
void SoftCPU::MOVSD(const X86::Instruction& insn)
{
    do_movs<u32>(*this, insn);
}
void SoftCPU::MOVSW(const X86::Instruction& insn)
{
    do_movs<u16>(*this, insn);
}
// MOVSX: sign-extend the source into a wider register; the destination's
// taint derives entirely from the source operand.
void SoftCPU::MOVSX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(sign_extended_to<u16>(src.value()), src);
}
void SoftCPU::MOVSX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(sign_extended_to<u32>(src.value()), src);
}
void SoftCPU::MOVSX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(sign_extended_to<u32>(src.value()), src);
}
// MOVZX: zero-extend into a wider register. The shadow is built by hand:
// the low bytes keep the source's per-byte shadow, while the zero-filled
// upper bytes get 0x01 each — presumably the "initialized" marker byte
// (cf. shadow_wrap_as_initialized); confirm against ValueWithShadow.
void SoftCPU::MOVZX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr16(insn.reg16()) = ValueWithShadow<u16>(src.value(), 0x0100 | (src.shadow() & 0xff));
}
void SoftCPU::MOVZX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010000 | (src.shadow() & 0xffff));
}
void SoftCPU::MOVZX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010100 | (src.shadow() & 0xff));
}
// MOV accumulator <- moffs: load from an absolute displacement in the
// (possibly overridden, default DS) segment.
void SoftCPU::MOV_AL_moff8(const X86::Instruction& insn)
{
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
void SoftCPU::MOV_AX_moff16(const X86::Instruction& insn)
{
    set_ax(read_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
// Control/debug register moves are unimplemented.
void SoftCPU::MOV_CR_reg32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_DR_reg32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_EAX_moff32(const X86::Instruction& insn)
{
    set_eax(read_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
// MOV stores: immediates are wrapped as fully initialized; register sources
// carry their existing shadow through unchanged.
void SoftCPU::MOV_RM16_imm16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(insn.imm16()));
}
void SoftCPU::MOV_RM16_reg16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
}
void SoftCPU::MOV_RM16_seg(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_RM32_imm32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(insn.imm32()));
}
void SoftCPU::MOV_RM32_reg32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
}
void SoftCPU::MOV_RM8_imm8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized(insn.imm8()));
}
void SoftCPU::MOV_RM8_reg8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
}
// MOV moffs <- accumulator: store at an absolute displacement in the
// (possibly overridden, default DS) segment.
void SoftCPU::MOV_moff16_AX(const X86::Instruction& insn)
{
    write_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, ax());
}
void SoftCPU::MOV_moff32_EAX(const X86::Instruction& insn)
{
    write_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, eax());
}
void SoftCPU::MOV_moff8_AL(const X86::Instruction& insn)
{
    write_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, al());
}
// MOV register loads: r/m sources keep their shadow; immediates are wrapped
// as fully initialized. Segment-register moves are unimplemented.
void SoftCPU::MOV_reg16_RM16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = insn.modrm().read16(*this, insn);
}
void SoftCPU::MOV_reg16_imm16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = shadow_wrap_as_initialized(insn.imm16());
}
void SoftCPU::MOV_reg32_CR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_reg32_DR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_reg32_RM32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = insn.modrm().read32(*this, insn);
}
void SoftCPU::MOV_reg32_imm32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = shadow_wrap_as_initialized(insn.imm32());
}
void SoftCPU::MOV_reg8_RM8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = insn.modrm().read8(*this, insn);
}
void SoftCPU::MOV_reg8_imm8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = shadow_wrap_as_initialized(insn.imm8());
}
void SoftCPU::MOV_seg_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_seg_RM32(const X86::Instruction&) { TODO_INSN(); }
// MUL r/m16: unsigned DX:AX := AX * r/m16. CF/OF are set when the high
// half (DX) is nonzero. Note set_cf/set_of read the freshly written DX,
// so they must come after the register writes; taint_flags_from must use
// the pre-write AX captured in original_ax.
void SoftCPU::MUL_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    u32 result = (u32)ax().value() * (u32)src.value();
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result & 0xffff, src, original_ax));
    set_dx(shadow_wrap_with_taint_from<u16>(result >> 16, src, original_ax));
    taint_flags_from(src, original_ax);
    set_cf(dx().value() != 0);
    set_of(dx().value() != 0);
}
// MUL r/m32: unsigned EDX:EAX := EAX * r/m32; CF/OF set when EDX != 0.
void SoftCPU::MUL_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32(*this, insn);
    u64 result = (u64)eax().value() * (u64)src.value();
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, src, original_eax));
    set_edx(shadow_wrap_with_taint_from<u32>(result >> 32, src, original_eax));
    taint_flags_from(src, original_eax);
    set_cf(edx().value() != 0);
    set_of(edx().value() != 0);
}
// MUL r/m8: unsigned AX := AL * r/m8; CF/OF set when AH (high byte) != 0.
void SoftCPU::MUL_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    u16 result = (u16)al().value() * src.value();
    auto original_al = al();
    set_ax(shadow_wrap_with_taint_from(result, src, original_al));
    taint_flags_from(src, original_al);
    set_cf((result & 0xff00) != 0);
    set_of((result & 0xff00) != 0);
}
// NEG: implemented as (0 - operand) through op_sub so the arithmetic flags
// come out with SUB semantics, matching the hardware definition of NEG.
void SoftCPU::NEG_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_sub<ValueWithShadow<u16>>(*this, shadow_wrap_as_initialized<u16>(0), insn.modrm().read16(*this, insn)));
}
void SoftCPU::NEG_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_sub<ValueWithShadow<u32>>(*this, shadow_wrap_as_initialized<u32>(0), insn.modrm().read32(*this, insn)));
}
void SoftCPU::NEG_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_sub<ValueWithShadow<u8>>(*this, shadow_wrap_as_initialized<u8>(0), insn.modrm().read8(*this, insn)));
}
// NOP: intentionally empty.
void SoftCPU::NOP(const X86::Instruction&)
{
}
// NOT: bitwise complement in place. NOT affects no flags, and the operand's
// shadow is carried through unchanged (complementing doesn't initialize bits).
void SoftCPU::NOT_RM16(const X86::Instruction& insn)
{
    auto data = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, ValueWithShadow<u16>(~data.value(), data.shadow()));
}
void SoftCPU::NOT_RM32(const X86::Instruction& insn)
{
    auto data = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, ValueWithShadow<u32>(~data.value(), data.shadow()));
}
void SoftCPU::NOT_RM8(const X86::Instruction& insn)
{
    auto data = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, ValueWithShadow<u8>(~data.value(), data.shadow()));
}
// Port-output and MMX pack/arithmetic/compare stubs: unimplemented.
void SoftCPU::OUTSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PACKSSDW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PACKSSWB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PACKUSWB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDSB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDSW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDUSB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDUSW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PAND_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PANDN_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPEQB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPEQW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPEQD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPGTB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPGTW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPGTD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PMADDWD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PMULHW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PMULLW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
// POPA: restore DI,SI,BP,BX,DX,CX,AX in reverse push order; the slot that
// held SP is popped and discarded, per the 80186 definition.
void SoftCPU::POPA(const X86::Instruction&)
{
    set_di(pop16());
    set_si(pop16());
    set_bp(pop16());
    pop16(); // Skip the saved SP slot.
    set_bx(pop16());
    set_dx(pop16());
    set_cx(pop16());
    set_ax(pop16());
}
// POPAD: 32-bit counterpart of POPA; the saved-ESP slot is likewise discarded.
void SoftCPU::POPAD(const X86::Instruction&)
{
    set_edi(pop32());
    set_esi(pop32());
    set_ebp(pop32());
    pop32(); // Skip the saved ESP slot.
    set_ebx(pop32());
    set_edx(pop32());
    set_ecx(pop32());
    set_eax(pop32());
}
// POPF: replace the low 16 bits of EFLAGS with the popped word and taint
// the flags from the popped value's shadow. (No reserved-bit masking is
// applied here, unlike POPFD below.)
void SoftCPU::POPF(const X86::Instruction&)
{
    auto popped_value = pop16();
    m_eflags &= ~0xffff;
    m_eflags |= popped_value.value();
    taint_flags_from(popped_value);
}
// POPFD: like POPF but 32-bit; only bits covered by 0x00fcffff are replaced,
// keeping the remaining (reserved/privileged) EFLAGS bits untouched.
void SoftCPU::POPFD(const X86::Instruction&)
{
    auto popped_value = pop32();
    m_eflags &= ~0x00fcffff;
    m_eflags |= popped_value.value() & 0x00fcffff;
    taint_flags_from(popped_value);
}
// Segment-register pops are unimplemented; r/m and register pops forward
// the popped value (including its shadow) directly.
void SoftCPU::POP_DS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_ES(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_FS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_GS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, pop16());
}
void SoftCPU::POP_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, pop32());
}
void SoftCPU::POP_SS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = pop16();
}
void SoftCPU::POP_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = pop32();
}
  2512. void SoftCPU::POR_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2513. void SoftCPU::PSLLW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2514. void SoftCPU::PSLLW_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2515. void SoftCPU::PSLLD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2516. void SoftCPU::PSLLD_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2517. void SoftCPU::PSLLQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2518. void SoftCPU::PSLLQ_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2519. void SoftCPU::PSRAW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2520. void SoftCPU::PSRAW_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2521. void SoftCPU::PSRAD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2522. void SoftCPU::PSRAD_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2523. void SoftCPU::PSRLW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2524. void SoftCPU::PSRLW_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2525. void SoftCPU::PSRLD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2526. void SoftCPU::PSRLD_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2527. void SoftCPU::PSRLQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2528. void SoftCPU::PSRLQ_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2529. void SoftCPU::PSUBB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2530. void SoftCPU::PSUBW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2531. void SoftCPU::PSUBD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2532. void SoftCPU::PSUBSB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2533. void SoftCPU::PSUBSW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2534. void SoftCPU::PSUBUSB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2535. void SoftCPU::PSUBUSW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2536. void SoftCPU::PUNPCKHBW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2537. void SoftCPU::PUNPCKHWD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2538. void SoftCPU::PUNPCKHDQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2539. void SoftCPU::PUNPCKLBW_mm1_mm2m32(const X86::Instruction&) { TODO_INSN(); };
  2540. void SoftCPU::PUNPCKLWD_mm1_mm2m32(const X86::Instruction&) { TODO_INSN(); };
  2541. void SoftCPU::PUNPCKLDQ_mm1_mm2m32(const X86::Instruction&) { TODO_INSN(); };
// PUSHA: push AX,CX,DX,BX,SP,BP,SI,DI. The pushed SP is the value captured
// *before* any push (80186 semantics), hence the temp.
void SoftCPU::PUSHA(const X86::Instruction&)
{
    auto temp = sp();
    push16(ax());
    push16(cx());
    push16(dx());
    push16(bx());
    push16(temp); // Original SP, from before the first push.
    push16(bp());
    push16(si());
    push16(di());
}
// PUSHAD: 32-bit counterpart of PUSHA; pushes the pre-instruction ESP.
void SoftCPU::PUSHAD(const X86::Instruction&)
{
    auto temp = esp();
    push32(eax());
    push32(ecx());
    push32(edx());
    push32(ebx());
    push32(temp); // Original ESP, from before the first push.
    push32(ebp());
    push32(esi());
    push32(edi());
}
// PUSHF: push the low 16 bits of EFLAGS.
void SoftCPU::PUSHF(const X86::Instruction&)
{
    // FIXME: Respect shadow flags when they exist!
    push16(shadow_wrap_as_initialized<u16>(m_eflags & 0xffff));
}
// PUSHFD: push EFLAGS masked with 0x00fcffff (same mask POPFD accepts).
void SoftCPU::PUSHFD(const X86::Instruction&)
{
    // FIXME: Respect shadow flags when they exist!
    push32(shadow_wrap_as_initialized(m_eflags & 0x00fcffff));
}
// Segment-register pushes are unimplemented; the others push the operand
// directly (immediates wrapped as initialized, registers with their shadow).
void SoftCPU::PUSH_CS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_DS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_ES(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_FS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_GS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_RM16(const X86::Instruction& insn)
{
    push16(insn.modrm().read16(*this, insn));
}
void SoftCPU::PUSH_RM32(const X86::Instruction& insn)
{
    push32(insn.modrm().read32(*this, insn));
}
void SoftCPU::PUSH_SP_8086_80186(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_SS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_imm16(const X86::Instruction& insn)
{
    push16(shadow_wrap_as_initialized(insn.imm16()));
}
void SoftCPU::PUSH_imm32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(insn.imm32()));
}
// PUSH imm8: only the 32-bit operand size is handled; the byte is
// sign-extended to 32 bits before pushing.
void SoftCPU::PUSH_imm8(const X86::Instruction& insn)
{
    VERIFY(!insn.has_operand_size_override_prefix());
    push32(shadow_wrap_as_initialized<u32>(sign_extended_to<i32>(insn.imm8())));
}
void SoftCPU::PUSH_reg16(const X86::Instruction& insn)
{
    push16(gpr16(insn.reg16()));
}
void SoftCPU::PUSH_reg32(const X86::Instruction& insn)
{
    push32(gpr32(insn.reg32()));
}
  2612. void SoftCPU::PXOR_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
// RCL core: performs the rotate-through-carry on the *host* CPU via inline
// asm, with the incoming CF baked in as a template parameter (stc/clc).
// Host flags are then captured with pushf and copied into the emulated
// OF/CF via set_flags_oc. Flag and result taint derive from both the data
// and the step count. A zero step count leaves value and flags untouched.
template<typename T, bool cf>
ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    // Seed the host carry flag with the emulated CF.
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    // Pick the host rotate matching the operand width.
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rclw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rclb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    // Capture the host EFLAGS produced by the rotate.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
// RCL dispatcher: the current CF participates in the rotate, so warn if the
// flags are tainted, then select the compile-time CF variant of the core.
template<typename T>
ALWAYS_INLINE static T op_rcl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcl");
    if (cpu.cf())
        return op_rcl_impl<T, true>(cpu, data, steps);
    return op_rcl_impl<T, false>(cpu, data, steps);
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCL, op_rcl)
  2654. template<typename T, bool cf>
  2655. ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2656. {
  2657. if (steps.value() == 0)
  2658. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2659. u32 result = 0;
  2660. u32 new_flags = 0;
  2661. if constexpr (cf)
  2662. asm volatile("stc");
  2663. else
  2664. asm volatile("clc");
  2665. if constexpr (sizeof(typename T::ValueType) == 4) {
  2666. asm volatile("rcrl %%cl, %%eax\n"
  2667. : "=a"(result)
  2668. : "a"(data.value()), "c"(steps.value()));
  2669. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2670. asm volatile("rcrw %%cl, %%ax\n"
  2671. : "=a"(result)
  2672. : "a"(data.value()), "c"(steps.value()));
  2673. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2674. asm volatile("rcrb %%cl, %%al\n"
  2675. : "=a"(result)
  2676. : "a"(data.value()), "c"(steps.value()));
  2677. }
  2678. asm volatile(
  2679. "pushf\n"
  2680. "pop %%ebx"
  2681. : "=b"(new_flags));
  2682. cpu.set_flags_oc(new_flags);
  2683. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2684. }
  2685. template<typename T>
  2686. ALWAYS_INLINE static T op_rcr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2687. {
  2688. cpu.warn_if_flags_tainted("rcr");
  2689. if (cpu.cf())
  2690. return op_rcr_impl<T, true>(cpu, data, steps);
  2691. return op_rcr_impl<T, false>(cpu, data, steps);
  2692. }
  2693. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCR, op_rcr)
  2694. void SoftCPU::RDTSC(const X86::Instruction&) { TODO_INSN(); }
  2695. void SoftCPU::RET(const X86::Instruction& insn)
  2696. {
  2697. VERIFY(!insn.has_operand_size_override_prefix());
  2698. auto ret_address = pop32();
  2699. warn_if_uninitialized(ret_address, "ret");
  2700. set_eip(ret_address.value());
  2701. }
// Far returns are not implemented yet.
void SoftCPU::RETF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::RETF_imm16(const X86::Instruction&) { TODO_INSN(); }
  2704. void SoftCPU::RET_imm16(const X86::Instruction& insn)
  2705. {
  2706. VERIFY(!insn.has_operand_size_override_prefix());
  2707. auto ret_address = pop32();
  2708. warn_if_uninitialized(ret_address, "ret imm16");
  2709. set_eip(ret_address.value());
  2710. set_esp({ esp().value() + insn.imm16(), esp().shadow() });
  2711. }
  2712. template<typename T>
  2713. ALWAYS_INLINE static T op_rol(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2714. {
  2715. if (steps.value() == 0)
  2716. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2717. u32 result = 0;
  2718. u32 new_flags = 0;
  2719. if constexpr (sizeof(typename T::ValueType) == 4) {
  2720. asm volatile("roll %%cl, %%eax\n"
  2721. : "=a"(result)
  2722. : "a"(data.value()), "c"(steps.value()));
  2723. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2724. asm volatile("rolw %%cl, %%ax\n"
  2725. : "=a"(result)
  2726. : "a"(data.value()), "c"(steps.value()));
  2727. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2728. asm volatile("rolb %%cl, %%al\n"
  2729. : "=a"(result)
  2730. : "a"(data.value()), "c"(steps.value()));
  2731. }
  2732. asm volatile(
  2733. "pushf\n"
  2734. "pop %%ebx"
  2735. : "=b"(new_flags));
  2736. cpu.set_flags_oc(new_flags);
  2737. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2738. }
  2739. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROL, op_rol)
  2740. template<typename T>
  2741. ALWAYS_INLINE static T op_ror(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2742. {
  2743. if (steps.value() == 0)
  2744. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2745. u32 result = 0;
  2746. u32 new_flags = 0;
  2747. if constexpr (sizeof(typename T::ValueType) == 4) {
  2748. asm volatile("rorl %%cl, %%eax\n"
  2749. : "=a"(result)
  2750. : "a"(data.value()), "c"(steps.value()));
  2751. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2752. asm volatile("rorw %%cl, %%ax\n"
  2753. : "=a"(result)
  2754. : "a"(data.value()), "c"(steps.value()));
  2755. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2756. asm volatile("rorb %%cl, %%al\n"
  2757. : "=a"(result)
  2758. : "a"(data.value()), "c"(steps.value()));
  2759. }
  2760. asm volatile(
  2761. "pushf\n"
  2762. "pop %%ebx"
  2763. : "=b"(new_flags));
  2764. cpu.set_flags_oc(new_flags);
  2765. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2766. }
  2767. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROR, op_ror)
  2768. void SoftCPU::SAHF(const X86::Instruction&)
  2769. {
  2770. // FIXME: Respect shadow flags once they exists!
  2771. set_al(shadow_wrap_as_initialized<u8>(eflags() & 0xff));
  2772. }
  2773. void SoftCPU::SALC(const X86::Instruction&)
  2774. {
  2775. // FIXME: Respect shadow flags once they exists!
  2776. set_al(shadow_wrap_as_initialized<u8>(cf() ? 0xff : 0x00));
  2777. }
  2778. template<typename T>
  2779. static T op_sar(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2780. {
  2781. if (steps.value() == 0)
  2782. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2783. u32 result = 0;
  2784. u32 new_flags = 0;
  2785. if constexpr (sizeof(typename T::ValueType) == 4) {
  2786. asm volatile("sarl %%cl, %%eax\n"
  2787. : "=a"(result)
  2788. : "a"(data.value()), "c"(steps.value()));
  2789. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2790. asm volatile("sarw %%cl, %%ax\n"
  2791. : "=a"(result)
  2792. : "a"(data.value()), "c"(steps.value()));
  2793. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2794. asm volatile("sarb %%cl, %%al\n"
  2795. : "=a"(result)
  2796. : "a"(data.value()), "c"(steps.value()));
  2797. }
  2798. asm volatile(
  2799. "pushf\n"
  2800. "pop %%ebx"
  2801. : "=b"(new_flags));
  2802. cpu.set_flags_oszapc(new_flags);
  2803. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2804. }
  2805. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SAR, op_sar)
// Shared implementation for SCASB/SCASW/SCASD: compare the accumulator with
// the element at ES:[E]DI (flags only, no writes), then advance [E]DI.
// Supports REP/REPZ/REPNZ via do_once_or_repeat<true>.
template<typename T>
ALWAYS_INLINE static void do_scas(SoftCPU& cpu, const X86::Instruction& insn)
{
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.const_gpr<T>(X86::RegisterAL);
        auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
        // NOTE(review): hardware SCAS sets flags from accumulator-minus-memory;
        // this passes (memory, accumulator) to op_sub — confirm op_sub's operand
        // order gives the intended SF/CF/OF (ZF is the same either way).
        op_sub(cpu, dest, src);
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}
// Byte/dword/word SCAS variants, all delegating to do_scas<T>.
void SoftCPU::SCASB(const X86::Instruction& insn)
{
    do_scas<u8>(*this, insn);
}
void SoftCPU::SCASD(const X86::Instruction& insn)
{
    do_scas<u32>(*this, insn);
}
void SoftCPU::SCASW(const X86::Instruction& insn)
{
    do_scas<u16>(*this, insn);
}
  2828. void SoftCPU::SETcc_RM8(const X86::Instruction& insn)
  2829. {
  2830. warn_if_flags_tainted("setcc");
  2831. insn.modrm().write8(*this, insn, shadow_wrap_as_initialized<u8>(evaluate_condition(insn.cc())));
  2832. }
// SGDT (store GDT register) is not implemented yet.
void SoftCPU::SGDT(const X86::Instruction&) { TODO_INSN(); }
// SHLD: double-precision shift left. The shift count comes from CL or imm8;
// all variants delegate to op_shld.
void SoftCPU::SHLD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), cl()));
}
void SoftCPU::SHLD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}
void SoftCPU::SHLD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), cl()));
}
void SoftCPU::SHLD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}
// Expands to the per-width/count-source SHL handler functions (macro defined earlier in this file).
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHL, op_shl)
// SHRD: double-precision shift right. The shift count comes from CL or imm8;
// all variants delegate to op_shrd.
void SoftCPU::SHRD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), cl()));
}
void SoftCPU::SHRD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}
void SoftCPU::SHRD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), cl()));
}
void SoftCPU::SHRD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}
// Expands to the per-width/count-source SHR handler functions (macro defined earlier in this file).
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHR, op_shr)
// System-table / machine-status instructions, not implemented yet.
void SoftCPU::SIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SMSW_RM16(const X86::Instruction&) { TODO_INSN(); }
// STC: set the carry flag.
void SoftCPU::STC(const X86::Instruction&)
{
    set_cf(true);
}
// STD: set the direction flag (string instructions then run backwards).
void SoftCPU::STD(const X86::Instruction&)
{
    set_df(true);
}
// STI (enable interrupts) is not implemented yet.
void SoftCPU::STI(const X86::Instruction&) { TODO_INSN(); }
// STOSB: store AL at ES:[E]DI and advance [E]DI; honors REP.
void SoftCPU::STOSB(const X86::Instruction& insn)
{
    if (insn.has_rep_prefix() && !df()) {
        // Fast path for 8-bit forward memory fill.
        if (m_emulator.mmu().fast_fill_memory8({ es(), destination_index(insn.a32()).value() }, ecx().value(), al())) {
            // The fill consumed the whole count: advance the index register by
            // ECX/CX bytes and zero the counter, preserving the index's shadow.
            if (insn.a32()) {
                // FIXME: Should an uninitialized ECX taint EDI here?
                set_edi({ (u32)(edi().value() + ecx().value()), edi().shadow() });
                set_ecx(shadow_wrap_as_initialized<u32>(0));
            } else {
                // FIXME: Should an uninitialized CX taint DI here?
                set_di({ (u16)(di().value() + cx().value()), di().shadow() });
                set_cx(shadow_wrap_as_initialized<u16>(0));
            }
            return;
        }
        // Fast fill declined (presumably the MMU couldn't service the range);
        // fall through to the per-byte loop below.
    }
    do_once_or_repeat<false>(insn, [&] {
        write_memory8({ es(), destination_index(insn.a32()).value() }, al());
        step_destination_index(insn.a32(), 1);
    });
}
// STOSD: store EAX at ES:[E]DI and advance [E]DI; honors REP.
void SoftCPU::STOSD(const X86::Instruction& insn)
{
    if (insn.has_rep_prefix() && !df()) {
        // Fast path for 32-bit forward memory fill.
        if (m_emulator.mmu().fast_fill_memory32({ es(), destination_index(insn.a32()).value() }, ecx().value(), eax())) {
            // The fill consumed the whole count: advance the index register by
            // count * 4 bytes and zero the counter, preserving the index's shadow.
            if (insn.a32()) {
                // FIXME: Should an uninitialized ECX taint EDI here?
                set_edi({ (u32)(edi().value() + (ecx().value() * sizeof(u32))), edi().shadow() });
                set_ecx(shadow_wrap_as_initialized<u32>(0));
            } else {
                // FIXME: Should an uninitialized CX taint DI here?
                set_di({ (u16)(di().value() + (cx().value() * sizeof(u32))), di().shadow() });
                set_cx(shadow_wrap_as_initialized<u16>(0));
            }
            return;
        }
        // Fast fill declined; fall through to the per-dword loop below.
    }
    do_once_or_repeat<false>(insn, [&] {
        write_memory32({ es(), destination_index(insn.a32()).value() }, eax());
        step_destination_index(insn.a32(), 4);
    });
}
// STOSW: store AX at ES:[E]DI and advance [E]DI.
// NOTE(review): unlike STOSB/STOSD there is no REP fast path here — presumably
// because the MMU offers no fast_fill_memory16; confirm before adding one.
void SoftCPU::STOSW(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory16({ es(), destination_index(insn.a32()).value() }, ax());
        step_destination_index(insn.a32(), 2);
    });
}
// Not-yet-implemented instructions; each reports via TODO_INSN().
void SoftCPU::STR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD1(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERW_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WAIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WBINVD(const X86::Instruction&) { TODO_INSN(); }
  2939. void SoftCPU::XADD_RM16_reg16(const X86::Instruction& insn)
  2940. {
  2941. auto dest = insn.modrm().read16(*this, insn);
  2942. auto src = const_gpr16(insn.reg16());
  2943. auto result = op_add(*this, dest, src);
  2944. gpr16(insn.reg16()) = dest;
  2945. insn.modrm().write16(*this, insn, result);
  2946. }
  2947. void SoftCPU::XADD_RM32_reg32(const X86::Instruction& insn)
  2948. {
  2949. auto dest = insn.modrm().read32(*this, insn);
  2950. auto src = const_gpr32(insn.reg32());
  2951. auto result = op_add(*this, dest, src);
  2952. gpr32(insn.reg32()) = dest;
  2953. insn.modrm().write32(*this, insn, result);
  2954. }
  2955. void SoftCPU::XADD_RM8_reg8(const X86::Instruction& insn)
  2956. {
  2957. auto dest = insn.modrm().read8(*this, insn);
  2958. auto src = const_gpr8(insn.reg8());
  2959. auto result = op_add(*this, dest, src);
  2960. gpr8(insn.reg8()) = dest;
  2961. insn.modrm().write8(*this, insn, result);
  2962. }
  2963. void SoftCPU::XCHG_AX_reg16(const X86::Instruction& insn)
  2964. {
  2965. auto temp = gpr16(insn.reg16());
  2966. gpr16(insn.reg16()) = ax();
  2967. set_ax(temp);
  2968. }
  2969. void SoftCPU::XCHG_EAX_reg32(const X86::Instruction& insn)
  2970. {
  2971. auto temp = gpr32(insn.reg32());
  2972. gpr32(insn.reg32()) = eax();
  2973. set_eax(temp);
  2974. }
  2975. void SoftCPU::XCHG_reg16_RM16(const X86::Instruction& insn)
  2976. {
  2977. auto temp = insn.modrm().read16(*this, insn);
  2978. insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
  2979. gpr16(insn.reg16()) = temp;
  2980. }
  2981. void SoftCPU::XCHG_reg32_RM32(const X86::Instruction& insn)
  2982. {
  2983. auto temp = insn.modrm().read32(*this, insn);
  2984. insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
  2985. gpr32(insn.reg32()) = temp;
  2986. }
  2987. void SoftCPU::XCHG_reg8_RM8(const X86::Instruction& insn)
  2988. {
  2989. auto temp = insn.modrm().read8(*this, insn);
  2990. insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
  2991. gpr8(insn.reg8()) = temp;
  2992. }
  2993. void SoftCPU::XLAT(const X86::Instruction& insn)
  2994. {
  2995. if (insn.a32())
  2996. warn_if_uninitialized(ebx(), "xlat ebx");
  2997. else
  2998. warn_if_uninitialized(bx(), "xlat bx");
  2999. warn_if_uninitialized(al(), "xlat al");
  3000. u32 offset = (insn.a32() ? ebx().value() : bx().value()) + al().value();
  3001. set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), offset }));
  3002. }
// Stamps out the ALU-style handler set shared by every two-operand arithmetic/
// logic mnemonic (accumulator-with-immediate plus r/m-with-immediate/register
// forms). `update_dest` is false for compare/test-style ops that only set
// flags. (Comments cannot go inside the macro because of the line continuations.)
#define DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    void SoftCPU::mnemonic##_AL_imm8(const X86::Instruction& insn) { generic_AL_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); }                                       \
    void SoftCPU::mnemonic##_AX_imm16(const X86::Instruction& insn) { generic_AX_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); }                                    \
    void SoftCPU::mnemonic##_EAX_imm32(const X86::Instruction& insn) { generic_EAX_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); }                                  \
    void SoftCPU::mnemonic##_RM16_imm16(const X86::Instruction& insn) { generic_RM16_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); }                                \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { generic_RM16_reg16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); }  \
    void SoftCPU::mnemonic##_RM32_imm32(const X86::Instruction& insn) { generic_RM32_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); }                                \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { generic_RM32_reg32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); }  \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); }                                     \
    void SoftCPU::mnemonic##_RM8_reg8(const X86::Instruction& insn) { generic_RM8_reg8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }
// Full handler set: the partial set above plus sign-extended-imm8 forms and
// the register-destination (reg, r/m) forms.
#define DEFINE_GENERIC_INSN_HANDLERS(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or)                                                                      \
    DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or)                                                                  \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_imm8<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); }                                   \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_imm8<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); }                                   \
    void SoftCPU::mnemonic##_reg16_RM16(const X86::Instruction& insn) { generic_reg16_RM16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); }   \
    void SoftCPU::mnemonic##_reg32_RM32(const X86::Instruction& insn) { generic_reg32_RM32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); }   \
    void SoftCPU::mnemonic##_reg8_RM8(const X86::Instruction& insn) { generic_reg8_RM8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }
// Instantiate the generic two-operand handlers. Arguments are
// (mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or):
// CMP/TEST pass update_dest=false (flags only); XOR/SUB pass the zero-idiom
// flag for the self-operand case (presumably so e.g. `xor eax, eax` yields a
// known zero regardless of input taint — confirm in the generic_* helpers).
DEFINE_GENERIC_INSN_HANDLERS(XOR, op_xor, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(OR, op_or, true, false, true)
DEFINE_GENERIC_INSN_HANDLERS(ADD, op_add, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(ADC, op_adc, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(SUB, op_sub, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(SBB, op_sbb, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(AND, op_and, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(CMP, op_sub, false, false, false)
DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(TEST, op_and, false, false, false)
// MMX instructions and the 0xC0/0xC1/0xD0-0xD3 opcode-group wrappers are not
// implemented yet.
void SoftCPU::MOVQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::EMMS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOVQ_mm1_m64_mm2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_32(const X86::Instruction&) { TODO_INSN(); }
  3041. }