SoftCPU.cpp

/*
 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "SoftCPU.h"
#include "Emulator.h"
#include <AK/Assertions.h>
#include <AK/Debug.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#if defined(__GNUC__) && !defined(__clang__)
#    pragma GCC optimize("O3")
#endif

#define TODO_INSN()                                                                  \
    do {                                                                             \
        reportln("\n=={}== Unimplemented instruction: {}\n", getpid(), __FUNCTION__); \
        m_emulator.dump_backtrace();                                                 \
        _exit(0);                                                                    \
    } while (0)

#define DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(mnemonic, op)                                                                           \
    void SoftCPU::mnemonic##_RM8_1(const X86::Instruction& insn) { generic_RM8_1(op<ValueWithShadow<u8>>, insn); }                         \
    void SoftCPU::mnemonic##_RM8_CL(const X86::Instruction& insn) { generic_RM8_CL(op<ValueWithShadow<u8>>, insn); }                       \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<true, false>(op<ValueWithShadow<u8>>, insn); }      \
    void SoftCPU::mnemonic##_RM16_1(const X86::Instruction& insn) { generic_RM16_1(op<ValueWithShadow<u16>>, insn); }                      \
    void SoftCPU::mnemonic##_RM16_CL(const X86::Instruction& insn) { generic_RM16_CL(op<ValueWithShadow<u16>>, insn); }                    \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_unsigned_imm8<true>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_1(const X86::Instruction& insn) { generic_RM32_1(op<ValueWithShadow<u32>>, insn); }                      \
    void SoftCPU::mnemonic##_RM32_CL(const X86::Instruction& insn) { generic_RM32_CL(op<ValueWithShadow<u32>>, insn); }                    \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_unsigned_imm8<true>(op<ValueWithShadow<u32>>, insn); }
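
// Each DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(mnemonic, op) use stamps out the nine
// shift/rotate entry points for one mnemonic. A rough sketch of one hypothetical
// expansion (assuming an invocation like DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHL, op_shl)):
//
//     void SoftCPU::SHL_RM8_1(const X86::Instruction& insn) { generic_RM8_1(op_shl<ValueWithShadow<u8>>, insn); }
//     void SoftCPU::SHL_RM8_CL(const X86::Instruction& insn) { generic_RM8_CL(op_shl<ValueWithShadow<u8>>, insn); }
//     ... and so on for the 16-bit and 32-bit forms.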

namespace UserspaceEmulator {

template<class Dest, class Source>
static inline Dest bit_cast(Source source)
{
    static_assert(sizeof(Dest) == sizeof(Source));
    Dest dest;
    memcpy(&dest, &source, sizeof(dest));
    return dest;
}
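
// This is the classic memcpy-based stand-in for C++20's std::bit_cast: reinterpret the
// bytes of one type as another without invoking undefined behavior. A minimal usage
// sketch (hypothetical values, not from this file):
//
//     u32 bits = 0x3f800000;
//     float f = bit_cast<float>(bits); // f == 1.0f on IEEE-754 hosts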

template<typename T>
ALWAYS_INLINE void warn_if_uninitialized(T value_with_shadow, const char* message)
{
    if (value_with_shadow.is_uninitialized()) [[unlikely]] {
        reportln("\033[31;1mWarning! Use of uninitialized value: {}\033[0m\n", message);
        Emulator::the().dump_backtrace();
    }
}

ALWAYS_INLINE void SoftCPU::warn_if_flags_tainted(const char* message) const
{
    if (m_flags_tainted) [[unlikely]] {
        reportln("\n=={}== \033[31;1mConditional depends on uninitialized data\033[0m ({})\n", getpid(), message);
        Emulator::the().dump_backtrace();
    }
}

template<typename T, typename U>
constexpr T sign_extended_to(U value)
{
    if (!(value & X86::TypeTrivia<U>::sign_bit))
        return value;
    return (X86::TypeTrivia<T>::mask & ~X86::TypeTrivia<U>::mask) | value;
}
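
// Worked example, derived from the masks in X86::TypeTrivia: sign_extended_to<u16>(u8(0x80))
// sees the source sign bit set, so the result is (0xffff & ~0x00ff) | 0x80 == 0xff80.
// A non-negative input such as 0x7f is returned unchanged.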

SoftCPU::SoftCPU(Emulator& emulator)
    : m_emulator(emulator)
{
    memset(m_gpr, 0, sizeof(m_gpr));
    memset(m_gpr_shadow, 1, sizeof(m_gpr_shadow));

    m_segment[(int)X86::SegmentRegister::CS] = 0x1b;
    m_segment[(int)X86::SegmentRegister::DS] = 0x23;
    m_segment[(int)X86::SegmentRegister::ES] = 0x23;
    m_segment[(int)X86::SegmentRegister::SS] = 0x23;
    m_segment[(int)X86::SegmentRegister::GS] = 0x2b;
}
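
// The shadow bytes track definedness per data byte; going by the memset(..., 1, ...)
// above and shadow_wrap_as_initialized() used throughout, a shadow byte of 0x01 appears
// to mean "initialized", so all GPRs start out as defined zeroes. A hedged sketch of
// the idea, assuming that convention:
//
//     ValueWithShadow<u32> v = eax(); // value plus per-byte shadow
//     if (v.is_uninitialized())       // some shadow byte != 0x01
//         warn_if_uninitialized(v, "example");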

void SoftCPU::dump() const
{
    outln(" eax={:08x} ebx={:08x} ecx={:08x} edx={:08x} ebp={:08x} esp={:08x} esi={:08x} edi={:08x} o={:d} s={:d} z={:d} a={:d} p={:d} c={:d}",
        eax(), ebx(), ecx(), edx(), ebp(), esp(), esi(), edi(), of(), sf(), zf(), af(), pf(), cf());
    outln("#eax={:08x} #ebx={:08x} #ecx={:08x} #edx={:08x} #ebp={:08x} #esp={:08x} #esi={:08x} #edi={:08x} #f={}",
        eax().shadow(), ebx().shadow(), ecx().shadow(), edx().shadow(), ebp().shadow(), esp().shadow(), esi().shadow(), edi().shadow(), m_flags_tainted);
    fflush(stdout);
}

void SoftCPU::update_code_cache()
{
    auto* region = m_emulator.mmu().find_region({ cs(), eip() });
    VERIFY(region);

    if (!region->is_executable()) {
        reportln("SoftCPU::update_code_cache: Non-executable region @ {:p}", eip());
        Emulator::the().dump_backtrace();
        TODO();
    }

    // FIXME: This cache needs to be invalidated if the code region is ever unmapped.
    m_cached_code_region = region;
    m_cached_code_base_ptr = region->data();
}

ValueWithShadow<u8> SoftCPU::read_memory8(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read8(address);
#if MEMORY_DEBUG
    outln("\033[36;1mread_memory8: @{:04x}:{:08x} -> {:02x} ({:02x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    return value;
}

ValueWithShadow<u16> SoftCPU::read_memory16(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read16(address);
#if MEMORY_DEBUG
    outln("\033[36;1mread_memory16: @{:04x}:{:08x} -> {:04x} ({:04x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    return value;
}

ValueWithShadow<u32> SoftCPU::read_memory32(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read32(address);
#if MEMORY_DEBUG
    outln("\033[36;1mread_memory32: @{:04x}:{:08x} -> {:08x} ({:08x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    return value;
}

ValueWithShadow<u64> SoftCPU::read_memory64(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read64(address);
#if MEMORY_DEBUG
    outln("\033[36;1mread_memory64: @{:04x}:{:08x} -> {:016x} ({:016x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    return value;
}

void SoftCPU::write_memory8(X86::LogicalAddress address, ValueWithShadow<u8> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
#if MEMORY_DEBUG
    outln("\033[36;1mwrite_memory8: @{:04x}:{:08x} <- {:02x} ({:02x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    m_emulator.mmu().write8(address, value);
}

void SoftCPU::write_memory16(X86::LogicalAddress address, ValueWithShadow<u16> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
#if MEMORY_DEBUG
    outln("\033[36;1mwrite_memory16: @{:04x}:{:08x} <- {:04x} ({:04x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    m_emulator.mmu().write16(address, value);
}

void SoftCPU::write_memory32(X86::LogicalAddress address, ValueWithShadow<u32> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
#if MEMORY_DEBUG
    outln("\033[36;1mwrite_memory32: @{:04x}:{:08x} <- {:08x} ({:08x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    m_emulator.mmu().write32(address, value);
}

void SoftCPU::write_memory64(X86::LogicalAddress address, ValueWithShadow<u64> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
#if MEMORY_DEBUG
    outln("\033[36;1mwrite_memory64: @{:04x}:{:08x} <- {:016x} ({:016x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    m_emulator.mmu().write64(address, value);
}

void SoftCPU::push_string(const StringView& string)
{
    size_t space_to_allocate = round_up_to_power_of_two(string.length() + 1, 16);
    set_esp({ esp().value() - space_to_allocate, esp().shadow() });
    m_emulator.mmu().copy_to_vm(esp().value(), string.characters_without_null_termination(), string.length());
    m_emulator.mmu().write8({ 0x23, esp().value() + string.length() }, shadow_wrap_as_initialized((u8)'\0'));
}

void SoftCPU::push_buffer(const u8* data, size_t size)
{
    set_esp({ esp().value() - size, esp().shadow() });
    warn_if_uninitialized(esp(), "push_buffer");
    m_emulator.mmu().copy_to_vm(esp().value(), data, size);
}

void SoftCPU::push32(ValueWithShadow<u32> value)
{
    set_esp({ esp().value() - sizeof(u32), esp().shadow() });
    warn_if_uninitialized(esp(), "push32");
    write_memory32({ ss(), esp().value() }, value);
}

ValueWithShadow<u32> SoftCPU::pop32()
{
    warn_if_uninitialized(esp(), "pop32");
    auto value = read_memory32({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u32), esp().shadow() });
    return value;
}

void SoftCPU::push16(ValueWithShadow<u16> value)
{
    warn_if_uninitialized(esp(), "push16");
    set_esp({ esp().value() - sizeof(u16), esp().shadow() });
    write_memory16({ ss(), esp().value() }, value);
}

ValueWithShadow<u16> SoftCPU::pop16()
{
    warn_if_uninitialized(esp(), "pop16");
    auto value = read_memory16({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u16), esp().shadow() });
    return value;
}
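
// The stack helpers carry ESP's shadow through the pointer arithmetic, so pushing or
// popping through an uninitialized stack pointer both warns and stays tainted. A hedged
// usage sketch (hypothetical values):
//
//     push32(shadow_wrap_as_initialized<u32>(0xdeadbeef));
//     auto v = pop32(); // v.value() == 0xdeadbeef, marked initialized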

template<bool check_zf, typename Callback>
void SoftCPU::do_once_or_repeat(const X86::Instruction& insn, Callback callback)
{
    if (!insn.has_rep_prefix())
        return callback();

    while (loop_index(insn.a32()).value()) {
        callback();
        decrement_loop_index(insn.a32());
        if constexpr (check_zf) {
            warn_if_flags_tainted("repz/repnz");
            if (insn.rep_prefix() == X86::Prefix::REPZ && !zf())
                break;
            if (insn.rep_prefix() == X86::Prefix::REPNZ && zf())
                break;
        }
    }
}
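
// This mirrors the hardware REP machinery: ECX (or CX, depending on the address-size
// prefix) is the loop counter, and for REPZ/REPNZ string compares the loop additionally
// exits on ZF. For example, "repz cmpsb" keeps iterating only while the compared bytes
// are equal and the count is nonzero.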

template<typename T>
ALWAYS_INLINE static T op_inc(SoftCPU& cpu, T data)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("incl %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("incw %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("incb %%al\n"
                     : "=a"(result)
                     : "a"(data.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszap(new_flags);
    cpu.taint_flags_from(data);
    return shadow_wrap_with_taint_from(result, data);
}
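
// Pattern used by most ALU helpers in this file: execute the real instruction on the
// host CPU via inline assembly, then harvest the host EFLAGS with pushf/pop and copy
// the relevant bits into the emulated flags. This relies on the emulator itself running
// on 32-bit x86; shadow/taint propagation happens separately, since the host knows
// nothing about it. Note that INC updates OF/SF/ZF/AF/PF but leaves CF untouched, which
// is why set_flags_oszap() is used here rather than the oszapc variant.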

template<typename T>
ALWAYS_INLINE static T op_dec(SoftCPU& cpu, T data)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("decl %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("decw %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("decb %%al\n"
                     : "=a"(result)
                     : "a"(data.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszap(new_flags);
    cpu.taint_flags_from(data);
    return shadow_wrap_with_taint_from(result, data);
}

template<typename T>
ALWAYS_INLINE static T op_xor(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("xorl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("xor %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("xorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_or(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("orl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("or %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("orb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_sub(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("subl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("subw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("subb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T, bool cf>
ALWAYS_INLINE static T op_sbb_impl(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sbbl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sbbw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sbbb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_sbb(SoftCPU& cpu, T& dest, const T& src)
{
    cpu.warn_if_flags_tainted("sbb");
    if (cpu.cf())
        return op_sbb_impl<T, true>(cpu, dest, src);
    return op_sbb_impl<T, false>(cpu, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_add(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("addl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("addw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("addb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T, bool cf>
ALWAYS_INLINE static T op_adc_impl(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("adcl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("adcw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("adcb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_adc(SoftCPU& cpu, T& dest, const T& src)
{
    cpu.warn_if_flags_tainted("adc");
    if (cpu.cf())
        return op_adc_impl<T, true>(cpu, dest, src);
    return op_adc_impl<T, false>(cpu, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_and(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("andl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("andw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("andb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static void op_imul(SoftCPU& cpu, const T& dest, const T& src, T& result_high, T& result_low)
{
    bool did_overflow = false;
    if constexpr (sizeof(T) == 4) {
        i64 result = (i64)src * (i64)dest;
        result_low = result & 0xffffffff;
        result_high = result >> 32;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    } else if constexpr (sizeof(T) == 2) {
        i32 result = (i32)src * (i32)dest;
        result_low = result & 0xffff;
        result_high = result >> 16;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    } else if constexpr (sizeof(T) == 1) {
        i16 result = (i16)src * (i16)dest;
        result_low = result & 0xff;
        result_high = result >> 8;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    }

    if (did_overflow) {
        cpu.set_cf(true);
        cpu.set_of(true);
    } else {
        cpu.set_cf(false);
        cpu.set_of(false);
    }
}
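
// Worked overflow example for the 8-bit case: 100 * 2 == 200 does not fit in an i8
// (max 127), so did_overflow is true and CF/OF are set, while the full product is still
// available to a widening consumer as result_high:result_low (0x00:0xc8 here).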

template<typename T>
ALWAYS_INLINE static T op_shr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shrl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shrw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("shrb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shlw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("shlb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shrd(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shrd %%cl, %%edx, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shrd %%cl, %%dx, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shld(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shld %%cl, %%edx, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shld %%cl, %%dx, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
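
// All four shift helpers return early when the count is zero because a zero-count shift
// leaves EFLAGS unchanged on x86; falling through to the pushf/pop sequence would
// clobber the emulated flags with stale host state. E.g. "shl eax, 0" must preserve
// CF, OF and ZF exactly as they were.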

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_AL_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = al();
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm8() == 0xff)
        result.set_initialized();
    if (update_dest)
        set_al(result);
}
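
// The is_or special case models taint precisely: OR-ing with an all-ones immediate
// yields 1 in every bit no matter what the (possibly uninitialized) destination held,
// so the result is provably defined and its shadow can be cleared. The wider variants
// below apply the same reasoning with 0xffff and 0xffffffff.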

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_AX_imm16(Op op, const X86::Instruction& insn)
{
    auto dest = ax();
    auto src = shadow_wrap_as_initialized(insn.imm16());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm16() == 0xffff)
        result.set_initialized();
    if (update_dest)
        set_ax(result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_EAX_imm32(Op op, const X86::Instruction& insn)
{
    auto dest = eax();
    auto src = shadow_wrap_as_initialized(insn.imm32());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm32() == 0xffffffff)
        result.set_initialized();
    if (update_dest)
        set_eax(result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_imm16(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm16());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm16() == 0xffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16(*this, insn);
    auto src = shadow_wrap_as_initialized<u16>(sign_extended_to<u16>(insn.imm8()));
    auto result = op(*this, dest, src);
    if (is_or && src.value() == 0xffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_unsigned_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_reg16(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16(*this, insn);
    auto src = const_gpr16(insn.reg16());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}
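
// dont_taint_for_same_operand recognizes idioms like "xor eax, eax" and "sub eax, eax":
// when both operands are the same register, the result is a constant (zero) regardless
// of the register's prior definedness, so neither the result nor the flags should be
// reported as tainted.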

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_imm32(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32(*this, insn);
    auto src = insn.imm32();
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (is_or && src == 0xffffffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32(*this, insn);
    auto src = sign_extended_to<u32>(insn.imm8());
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (is_or && src == 0xffffffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_unsigned_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_reg32(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32(*this, insn);
    auto src = const_gpr32(insn.reg32());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8(*this, insn);
    auto src = insn.imm8();
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (is_or && src == 0xff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write8(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_reg8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8(*this, insn);
    auto src = const_gpr8(insn.reg8());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write8(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg16_RM16(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr16(insn.reg16());
    auto src = insn.modrm().read16(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr16(insn.reg16()) = result;
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg32_RM32(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr32(insn.reg32());
    auto src = insn.modrm().read32(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr32(insn.reg32()) = result;
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg8_RM8(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr8(insn.reg8());
    auto src = insn.modrm().read8(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr8(insn.reg8()) = result;
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, op(*this, data, cl()));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, op(*this, data, cl()));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, op(*this, data, cl()));
}

void SoftCPU::AAA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ARPL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::BOUND(const X86::Instruction&) { TODO_INSN(); }

template<typename T>
ALWAYS_INLINE static T op_bsf(SoftCPU&, T value)
{
    return { (typename T::ValueType)__builtin_ctz(value.value()), value.shadow() };
}

template<typename T>
ALWAYS_INLINE static T op_bsr(SoftCPU&, T value)
{
    typename T::ValueType bit_index = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("bsrl %%eax, %%edx"
                     : "=d"(bit_index)
                     : "a"(value.value()));
    }
    if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("bsrw %%ax, %%dx"
                     : "=d"(bit_index)
                     : "a"(value.value()));
    }
    return shadow_wrap_with_taint_from(bit_index, value);
}
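
// On real x86 the destination of BSF/BSR is undefined when the source is zero, which is
// why the handlers below set ZF and skip the register write in that case. Worked
// example: op_bsf on 0x00000008 yields bit index 3 (__builtin_ctz counts trailing
// zeros), and op_bsr on the same value also yields 3, since only bit 3 is set.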

void SoftCPU::BSF_reg16_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    set_zf(!src.value());
    if (src.value())
        gpr16(insn.reg16()) = op_bsf(*this, src);
    taint_flags_from(src);
}

void SoftCPU::BSF_reg32_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr32(insn.reg32()) = op_bsf(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSR_reg16_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr16(insn.reg16()) = op_bsr(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSR_reg32_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr32(insn.reg32()) = op_bsr(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSWAP_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = { __builtin_bswap32(gpr32(insn.reg32()).value()), __builtin_bswap32(gpr32(insn.reg32()).shadow()) };
}

template<typename T>
ALWAYS_INLINE static T op_bt(T value, T)
{
    return value;
}

template<typename T>
ALWAYS_INLINE static T op_bts(T value, T bit_mask)
{
    return value | bit_mask;
}

template<typename T>
ALWAYS_INLINE static T op_btr(T value, T bit_mask)
{
    return value & ~bit_mask;
}

template<typename T>
ALWAYS_INLINE static T op_btc(T value, T bit_mask)
{
    return value ^ bit_mask;
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_reg16(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    if (insn.modrm().is_register()) {
        unsigned bit_index = cpu.const_gpr16(insn.reg16()).value() & (X86::TypeTrivia<u16>::bits - 1);
        auto original = insn.modrm().read16(cpu, insn);
        u16 bit_mask = 1 << bit_index;
        u16 result = op(original.value(), bit_mask);
        cpu.set_cf((original.value() & bit_mask) != 0);
        cpu.taint_flags_from(cpu.gpr16(insn.reg16()), original);
        if (should_update)
            insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), original));
        return;
    }

    // FIXME: Is this supposed to perform a full 16-bit read/modify/write?
    unsigned bit_offset_in_array = cpu.const_gpr16(insn.reg16()).value() / 8;
    unsigned bit_offset_in_byte = cpu.const_gpr16(insn.reg16()).value() & 7;
    auto address = insn.modrm().resolve(cpu, insn);
    address.set_offset(address.offset() + bit_offset_in_array);
    auto dest = cpu.read_memory8(address);
    u8 bit_mask = 1 << bit_offset_in_byte;
    u8 result = op(dest.value(), bit_mask);
    cpu.set_cf((dest.value() & bit_mask) != 0);
    cpu.taint_flags_from(cpu.gpr16(insn.reg16()), dest);
    if (should_update)
        cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), dest));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM32_reg32(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    if (insn.modrm().is_register()) {
        unsigned bit_index = cpu.const_gpr32(insn.reg32()).value() & (X86::TypeTrivia<u32>::bits - 1);
        auto original = insn.modrm().read32(cpu, insn);
        u32 bit_mask = 1 << bit_index;
        u32 result = op(original.value(), bit_mask);
        cpu.set_cf((original.value() & bit_mask) != 0);
        cpu.taint_flags_from(cpu.gpr32(insn.reg32()), original);
        if (should_update)
            insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), original));
        return;
    }

    // FIXME: Is this supposed to perform a full 32-bit read/modify/write?
    unsigned bit_offset_in_array = cpu.const_gpr32(insn.reg32()).value() / 8;
    unsigned bit_offset_in_byte = cpu.const_gpr32(insn.reg32()).value() & 7;
    auto address = insn.modrm().resolve(cpu, insn);
    address.set_offset(address.offset() + bit_offset_in_array);
    auto dest = cpu.read_memory8(address);
    u8 bit_mask = 1 << bit_offset_in_byte;
    u8 result = op(dest.value(), bit_mask);
    cpu.set_cf((dest.value() & bit_mask) != 0);
    cpu.taint_flags_from(cpu.gpr32(insn.reg32()), dest);
    if (should_update)
        cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), dest));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u16>::mask);

    // FIXME: Support higher bit indices
    VERIFY(bit_index < 16);

    auto original = insn.modrm().read16(cpu, insn);
    u16 bit_mask = 1 << bit_index;
    auto result = op(original.value(), bit_mask);
    cpu.set_cf((original.value() & bit_mask) != 0);
    cpu.taint_flags_from(original);
    if (should_update)
        insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, original));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM32_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u32>::mask);

    // FIXME: Support higher bit indices
    VERIFY(bit_index < 32);

    auto original = insn.modrm().read32(cpu, insn);
    u32 bit_mask = 1 << bit_index;
    auto result = op(original.value(), bit_mask);
    cpu.set_cf((original.value() & bit_mask) != 0);
    cpu.taint_flags_from(original);
    if (should_update)
        insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, original));
}

#define DEFINE_GENERIC_BTx_INSN_HANDLERS(mnemonic, op, update_dest)                                                          \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { BTx_RM32_reg32<update_dest>(*this, insn, op<u32>); } \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { BTx_RM16_reg16<update_dest>(*this, insn, op<u16>); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { BTx_RM32_imm8<update_dest>(*this, insn, op<u32>); }   \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { BTx_RM16_imm8<update_dest>(*this, insn, op<u16>); }

DEFINE_GENERIC_BTx_INSN_HANDLERS(BTS, op_bts, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTR, op_btr, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTC, op_btc, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BT, op_bt, false);
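
// Each DEFINE_GENERIC_BTx_INSN_HANDLERS use above expands to four handlers; the BTS
// line, for instance, produces BTS_RM32_reg32, BTS_RM16_reg16, BTS_RM32_imm8 and
// BTS_RM16_imm8, all forwarding to the shared BTx_* templates with op_bts and
// update_dest == true. BT passes false, so it only samples CF and never writes back.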
void SoftCPU::CALL_FAR_mem16(const X86::Instruction&)
{
    TODO();
}

void SoftCPU::CALL_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_RM16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::CALL_RM32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(eip()));
    auto address = insn.modrm().read32(*this, insn);
    warn_if_uninitialized(address, "call rm32");
    set_eip(address.value());
}

void SoftCPU::CALL_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::CALL_imm32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(eip()));
    set_eip(eip() + (i32)insn.imm32());
}

void SoftCPU::CBW(const X86::Instruction&)
{
    set_ah(shadow_wrap_with_taint_from<u8>((al().value() & 0x80) ? 0xff : 0x00, al()));
}

void SoftCPU::CDQ(const X86::Instruction&)
{
    if (eax().value() & 0x80000000)
        set_edx(shadow_wrap_with_taint_from<u32>(0xffffffff, eax()));
    else
        set_edx(shadow_wrap_with_taint_from<u32>(0, eax()));
}
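
// CBW fills AH with copies of AL's sign bit, and CDQ does the same for EDX from EAX, so
// the accumulator pair ends up holding the sign-extended value. For example, with
// eax == 0xfffffff6 (-10), CDQ produces edx == 0xffffffff; with eax == 10 it produces
// edx == 0. The result inherits its taint from the accumulator it was derived from.
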
void SoftCPU::CLC(const X86::Instruction&)
{
    set_cf(false);
}

void SoftCPU::CLD(const X86::Instruction&)
{
    set_df(false);
}

void SoftCPU::CLI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CLTS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CMC(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::CMOVcc_reg16_RM16(const X86::Instruction& insn)
{
    warn_if_flags_tainted("cmovcc reg16, rm16");
    if (evaluate_condition(insn.cc()))
        gpr16(insn.reg16()) = insn.modrm().read16(*this, insn);
}

void SoftCPU::CMOVcc_reg32_RM32(const X86::Instruction& insn)
{
    warn_if_flags_tainted("cmovcc reg32, rm32");
    if (evaluate_condition(insn.cc()))
        gpr32(insn.reg32()) = insn.modrm().read32(*this, insn);
}

template<typename T>
ALWAYS_INLINE static void do_cmps(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
        op_sub(cpu, dest, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}
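
// do_cmps compares [seg:(E)SI] against [ES:(E)DI] by invoking op_sub purely for its flag
// effects (the difference itself is discarded), then steps both index registers by the
// element size; the step helpers presumably honor DF for direction. The <true> template
// argument to do_once_or_repeat appears to select the REPZ/REPNZ-aware repeat loop, so a
// prefixed CMPSB keeps scanning while its termination condition holds.
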
void SoftCPU::CMPSB(const X86::Instruction& insn)
{
    do_cmps<u8>(*this, insn);
}

void SoftCPU::CMPSD(const X86::Instruction& insn)
{
    do_cmps<u32>(*this, insn);
}

void SoftCPU::CMPSW(const X86::Instruction& insn)
{
    do_cmps<u16>(*this, insn);
}

void SoftCPU::CMPXCHG_RM16_reg16(const X86::Instruction& insn)
{
    auto current = insn.modrm().read16(*this, insn);
    taint_flags_from(current, ax());
    if (current.value() == ax().value()) {
        set_zf(true);
        insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
    } else {
        set_zf(false);
        set_ax(current);
    }
}

void SoftCPU::CMPXCHG_RM32_reg32(const X86::Instruction& insn)
{
    auto current = insn.modrm().read32(*this, insn);
    taint_flags_from(current, eax());
    if (current.value() == eax().value()) {
        set_zf(true);
        insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
    } else {
        set_zf(false);
        set_eax(current);
    }
}

void SoftCPU::CMPXCHG_RM8_reg8(const X86::Instruction& insn)
{
    auto current = insn.modrm().read8(*this, insn);
    taint_flags_from(current, al());
    if (current.value() == al().value()) {
        set_zf(true);
        insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
    } else {
        set_zf(false);
        set_al(current);
    }
}
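
// CMPXCHG is the compare-and-swap primitive: if the accumulator equals the r/m operand,
// ZF is set and the source register is stored to r/m; otherwise ZF is cleared and the
// accumulator is loaded from r/m. For example, with eax == 5 and [mem] == 5,
// `cmpxchg [mem], ecx` sets ZF and stores ecx; with [mem] == 7 it clears ZF and leaves
// eax == 7, ready for a retry loop.
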
void SoftCPU::CPUID(const X86::Instruction&)
{
    if (eax().value() == 0) {
        set_eax(shadow_wrap_as_initialized<u32>(1));
        set_ebx(shadow_wrap_as_initialized<u32>(0x6c6c6548));
        set_edx(shadow_wrap_as_initialized<u32>(0x6972466f));
        set_ecx(shadow_wrap_as_initialized<u32>(0x73646e65));
        return;
    }

    if (eax().value() == 1) {
        u32 stepping = 0;
        u32 model = 1;
        u32 family = 3;
        u32 type = 0;
        set_eax(shadow_wrap_as_initialized<u32>(stepping | (model << 4) | (family << 8) | (type << 12)));
        set_ebx(shadow_wrap_as_initialized<u32>(0));
        set_edx(shadow_wrap_as_initialized<u32>((1 << 15))); // Features (CMOV)
        set_ecx(shadow_wrap_as_initialized<u32>(0));
        return;
    }

    dbgln("Unhandled CPUID with eax={:08x}", eax().value());
}
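
// Leaf 0 reports 1 as the highest supported leaf and returns a vendor identification
// string in the usual EBX, EDX, ECX byte order; the constants above decode to
// "HelloFriends" (0x6c6c6548 is "Hell" in little-endian ASCII, and so on). Leaf 1
// advertises only the CMOV feature bit (EDX bit 15), matching the CMOVcc handlers
// implemented above.
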
void SoftCPU::CWD(const X86::Instruction&)
{
    set_dx(shadow_wrap_with_taint_from<u16>((ax().value() & 0x8000) ? 0xffff : 0x0000, ax()));
}

void SoftCPU::CWDE(const X86::Instruction&)
{
    set_eax(shadow_wrap_with_taint_from(sign_extended_to<u32>(ax().value()), ax()));
}

void SoftCPU::DAA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::DAS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::DEC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_dec(*this, insn.modrm().read16(*this, insn)));
}

void SoftCPU::DEC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_dec(*this, insn.modrm().read32(*this, insn)));
}

void SoftCPU::DEC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_dec(*this, insn.modrm().read8(*this, insn)));
}

void SoftCPU::DEC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_dec(*this, const_gpr16(insn.reg16()));
}

void SoftCPU::DEC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_dec(*this, const_gpr32(insn.reg32()));
}

void SoftCPU::DIV_RM16(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read16(*this, insn);
    if (divisor.value() == 0) {
        reportln("Divide by zero");
        TODO();
    }
    u32 dividend = ((u32)dx().value() << 16) | ax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u16>::max()) {
        reportln("Divide overflow");
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(quotient, original_ax, dx()));
    set_dx(shadow_wrap_with_taint_from<u16>(remainder, original_ax, dx()));
}

void SoftCPU::DIV_RM32(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read32(*this, insn);
    if (divisor.value() == 0) {
        reportln("Divide by zero");
        TODO();
    }
    u64 dividend = ((u64)edx().value() << 32) | eax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u32>::max()) {
        reportln("Divide overflow");
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(quotient, original_eax, edx(), divisor));
    set_edx(shadow_wrap_with_taint_from<u32>(remainder, original_eax, edx(), divisor));
}

void SoftCPU::DIV_RM8(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read8(*this, insn);
    if (divisor.value() == 0) {
        reportln("Divide by zero");
        TODO();
    }
    u16 dividend = ax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u8>::max()) {
        reportln("Divide overflow");
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(quotient, original_ax, divisor));
    set_ah(shadow_wrap_with_taint_from<u8>(remainder, original_ax, divisor));
}
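
// The unsigned DIV family divides a double-width dividend by the r/m operand: AX by r/m8
// (quotient to AL, remainder to AH), DX:AX by r/m16 (to AX and DX), and EDX:EAX by r/m32
// (to EAX and EDX). For example, with dx == 0x0001 and ax == 0x0000 the 16-bit dividend
// is 65536, so dividing by 2 leaves ax == 0x8000 and dx == 0. Hardware raises #DE on
// divide by zero or quotient overflow; this emulator currently just reports and bails
// via TODO().
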
void SoftCPU::ENTER16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ENTER32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::ESCAPE(const X86::Instruction&)
{
    reportln("FIXME: x87 floating-point support");
    m_emulator.dump_backtrace();
    TODO();
}

void SoftCPU::FADD_RM32(const X86::Instruction& insn)
{
    // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem32 ops
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(insn.modrm().register_index()) + fpu_get(0));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, fpu_get(0) + f32);
    }
}

void SoftCPU::FMUL_RM32(const X86::Instruction& insn)
{
    // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem32 ops
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(0) * fpu_get(insn.modrm().register_index()));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, fpu_get(0) * f32);
    }
}

void SoftCPU::FCOM_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FSUB_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(0) - fpu_get(insn.modrm().register_index()));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, fpu_get(0) - f32);
    }
}

void SoftCPU::FSUBR_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, f32 - fpu_get(0));
    }
}

void SoftCPU::FDIV_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(0) / fpu_get(insn.modrm().register_index()));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, fpu_get(0) / f32);
    }
}

void SoftCPU::FDIVR_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, f32 / fpu_get(0));
    }
}

void SoftCPU::FLD_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_push(fpu_get(insn.modrm().register_index()));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        fpu_push(bit_cast<float>(new_f32.value()));
    }
}

void SoftCPU::FXCH(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    auto tmp = fpu_get(0);
    fpu_set(0, fpu_get(insn.modrm().register_index()));
    fpu_set(insn.modrm().register_index(), tmp);
}

void SoftCPU::FST_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    float f32 = (float)fpu_get(0);
    // FIXME: Respect shadow values
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(f32)));
}

void SoftCPU::FNOP(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FSTP_RM32(const X86::Instruction& insn)
{
    FST_RM32(insn);
    fpu_pop();
}

void SoftCPU::FLDENV(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FCHS(const X86::Instruction&)
{
    fpu_set(0, -fpu_get(0));
}

void SoftCPU::FABS(const X86::Instruction&)
{
    fpu_set(0, __builtin_fabs(fpu_get(0)));
}

void SoftCPU::FTST(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXAM(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FLDCW(const X86::Instruction& insn)
{
    m_fpu_cw = insn.modrm().read16(*this, insn);
}

void SoftCPU::FLD1(const X86::Instruction&)
{
    fpu_push(1.0);
}

void SoftCPU::FLDL2T(const X86::Instruction&)
{
    fpu_push(log2f(10.0f));
}

void SoftCPU::FLDL2E(const X86::Instruction&)
{
    fpu_push(log2f(M_E));
}

void SoftCPU::FLDPI(const X86::Instruction&)
{
    fpu_push(M_PI);
}

void SoftCPU::FLDLG2(const X86::Instruction&)
{
    fpu_push(log10f(2.0f));
}

void SoftCPU::FLDLN2(const X86::Instruction&)
{
    fpu_push(M_LN2);
}

void SoftCPU::FLDZ(const X86::Instruction&)
{
    fpu_push(0.0);
}
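
// The FLD constant family pushes well-known values onto the x87 stack: FLD1 pushes 1.0,
// FLDL2T log2(10) ~= 3.3219, FLDL2E log2(e) ~= 1.4427, FLDPI pi, FLDLG2 log10(2) ~= 0.3010,
// FLDLN2 ln(2) ~= 0.6931, and FLDZ +0.0. Computing them via log2f/log10f only yields
// single precision, well short of the 80-bit extended values real hardware loads.
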
void SoftCPU::FNSTENV(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::F2XM1(const X86::Instruction&)
{
    // FIXME: validate ST(0) is in range -1.0 to +1.0
    auto f32 = fpu_get(0);
    // FIXME: Set C0, C2, C3 in FPU status word.
    fpu_set(0, powf(2, f32) - 1.0f);
}

void SoftCPU::FYL2X(const X86::Instruction&)
{
    // FIXME: Raise IA on +-infinity, +-0, raise Z on +-0
    auto f32 = fpu_get(0);
    // FIXME: Set C0, C2, C3 in FPU status word.
    fpu_set(1, fpu_get(1) * log2f(f32));
    fpu_pop();
}

void SoftCPU::FYL2XP1(const X86::Instruction&)
{
    // FIXME: validate ST(0) range
    auto f32 = fpu_get(0);
    // FIXME: Set C0, C2, C3 in FPU status word.
    fpu_set(1, (fpu_get(1) * log2f(f32 + 1.0f)));
    fpu_pop();
}

void SoftCPU::FPTAN(const X86::Instruction&)
{
    // FIXME: set C1 upon stack overflow or if result was rounded
    // FIXME: Set C2 to 1 if ST(0) is outside range of -2^63 to +2^63; else set to 0
    fpu_set(0, tanf(fpu_get(0)));
    fpu_push(1.0f);
}
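
// FPTAN implements the historical "partial tangent" interface: it replaces ST(0) with
// tan(ST(0)) and then pushes 1.0, so callers can form the tangent as ST(1)/ST(0) or feed
// the pair straight into FPATAN. The trailing fpu_push(1.0f) above is that second result.
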
void SoftCPU::FPATAN(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXTRACT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FPREM1(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FDECSTP(const X86::Instruction&)
{
    m_fpu_top = (m_fpu_top == 0) ? 7 : m_fpu_top - 1;
    set_cf(0);
}

void SoftCPU::FINCSTP(const X86::Instruction&)
{
    m_fpu_top = (m_fpu_top == 7) ? 0 : m_fpu_top + 1;
    set_cf(0);
}

void SoftCPU::FNSTCW(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, m_fpu_cw);
}

void SoftCPU::FPREM(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FSQRT(const X86::Instruction&)
{
    fpu_set(0, sqrt(fpu_get(0)));
}

void SoftCPU::FSINCOS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FRNDINT(const X86::Instruction&)
{
    // FIXME: support rounding mode
    fpu_set(0, round(fpu_get(0)));
}

void SoftCPU::FSCALE(const X86::Instruction&)
{
    // FIXME: set C1 upon stack overflow or if result was rounded
    fpu_set(0, fpu_get(0) * powf(2, floorf(fpu_get(1))));
}

void SoftCPU::FSIN(const X86::Instruction&)
{
    fpu_set(0, sin(fpu_get(0)));
}

void SoftCPU::FCOS(const X86::Instruction&)
{
    fpu_set(0, cos(fpu_get(0)));
}

void SoftCPU::FIADD_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) + (long double)m32int);
}

void SoftCPU::FCMOVB(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FIMUL_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) * (long double)m32int);
}

void SoftCPU::FCMOVE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FICOM_RM32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FCMOVBE(const X86::Instruction& insn)
{
    if (evaluate_condition(6))
        fpu_set(0, fpu_get(insn.rm() & 7));
}

void SoftCPU::FICOMP_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVU(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FISUB_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) - (long double)m32int);
}

void SoftCPU::FISUBR_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, (long double)m32int - fpu_get(0));
}

void SoftCPU::FIDIV_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / +-0, raise Z on finite / +-0
    fpu_set(0, fpu_get(0) / (long double)m32int);
}

void SoftCPU::FIDIVR_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / +-0, raise Z on finite / +-0
    fpu_set(0, (long double)m32int / fpu_get(0));
}

void SoftCPU::FILD_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m32int);
}

void SoftCPU::FCMOVNB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTTP_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVNE(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FIST_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_get(0);
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto i32 = static_cast<int32_t>(f);
    // FIXME: Respect shadow values
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(i32)));
}

void SoftCPU::FCMOVNBE(const X86::Instruction& insn)
{
    if (evaluate_condition(7))
        fpu_set(0, fpu_get(insn.rm() & 7));
}

void SoftCPU::FISTP_RM32(const X86::Instruction& insn)
{
    FIST_RM32(insn);
    fpu_pop();
}

void SoftCPU::FCMOVNU(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNENI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNDISI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNCLEX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNINIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSETPM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLD_RM80(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FUCOMI(const X86::Instruction& insn)
{
    auto i = insn.rm() & 7;
    // FIXME: Unordered comparison checks.
    // FIXME: QNaN / exception handling.
    // FIXME: Set C0, C2, C3 in FPU status word.
    if (__builtin_isnan(fpu_get(0)) || __builtin_isnan(fpu_get(i))) {
        set_zf(true);
        set_pf(true);
        set_cf(true);
    } else {
        set_zf(fpu_get(0) == fpu_get(i));
        set_pf(false);
        set_cf(fpu_get(0) < fpu_get(i));
        set_of(false);
    }
    // FIXME: Taint should be based on ST(0) and ST(i)
    m_flags_tainted = false;
}

void SoftCPU::FCOMI(const X86::Instruction& insn)
{
    auto i = insn.rm() & 7;
    // FIXME: QNaN / exception handling.
    // FIXME: Set C0, C2, C3 in FPU status word.
    set_zf(fpu_get(0) == fpu_get(i));
    set_pf(false);
    set_cf(fpu_get(0) < fpu_get(i));
    set_of(false);
    // FIXME: Taint should be based on ST(0) and ST(i)
    m_flags_tainted = false;
}
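
// FUCOMI/FCOMI report the comparison through the CPU flags instead of the FPU status word:
// ZF/PF/CF are 0/0/0 for ST(0) > ST(i), CF is set for ST(0) < ST(i), ZF for equality, and
// all three for an unordered result (a NaN operand). On hardware the two differ only in
// that FCOMI also faults on quiet NaNs while FUCOMI tolerates them; as the FIXMEs note,
// exception behavior is not modeled here yet.
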
void SoftCPU::FSTP_RM80(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FADD_RM64(const X86::Instruction& insn)
{
    // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem64 ops
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) + fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) + f64);
    }
}

void SoftCPU::FMUL_RM64(const X86::Instruction& insn)
{
    // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem64 ops
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) * fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) * f64);
    }
}

void SoftCPU::FCOM_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM64(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FSUB_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) - f64);
    }
}

void SoftCPU::FSUBR_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, f64 - fpu_get(0));
    }
}

void SoftCPU::FDIV_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, fpu_get(0) / f64);
    }
}

void SoftCPU::FDIVR_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        // XXX this is FDIVR, Instruction decodes this weirdly
        //fpu_set(insn.modrm().register_index(), fpu_get(0) / fpu_get(insn.modrm().register_index()));
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, f64 / fpu_get(0));
    }
}

void SoftCPU::FLD_RM64(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto new_f64 = insn.modrm().read64(*this, insn);
    // FIXME: Respect shadow values
    fpu_push(bit_cast<double>(new_f64.value()));
}

void SoftCPU::FFREE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTTP_RM64(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FST_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(0));
    } else {
        // FIXME: Respect shadow values
        double f64 = (double)fpu_get(0);
        insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(f64)));
    }
}

void SoftCPU::FSTP_RM64(const X86::Instruction& insn)
{
    FST_RM64(insn);
    fpu_pop();
}
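
// Unlike the RM32 handlers further up, the register forms of these RM64 handlers use ST(i)
// as the destination, mirroring the DC-opcode encodings (FADD ST(i), ST(0) and friends).
// The commented-out line in FDIVR_RM64 and its XXX note record that the decoder names the
// FDIV/FDIVR register forms the opposite way around.
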
void SoftCPU::FRSTOR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMPP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSAVE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTSW(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FIADD_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) + (long double)m16int);
}

void SoftCPU::FADDP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) + fpu_get(0));
    fpu_pop();
}

void SoftCPU::FIMUL_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) * (long double)m16int);
}

void SoftCPU::FMULP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) * fpu_get(0));
    fpu_pop();
}

void SoftCPU::FICOM_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FICOMP_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMPP(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FISUB_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) - (long double)m16int);
}

void SoftCPU::FSUBRP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(0) - fpu_get(insn.modrm().register_index()));
    fpu_pop();
}

void SoftCPU::FISUBR_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, (long double)m16int - fpu_get(0));
}

void SoftCPU::FSUBP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    fpu_pop();
}

void SoftCPU::FIDIV_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / +-0, raise Z on finite / +-0
    fpu_set(0, fpu_get(0) / (long double)m16int);
}

void SoftCPU::FDIVRP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
    fpu_set(insn.modrm().register_index(), fpu_get(0) / fpu_get(insn.modrm().register_index()));
    fpu_pop();
}

void SoftCPU::FIDIVR_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / +-0, raise Z on finite / +-0
    fpu_set(0, (long double)m16int / fpu_get(0));
}

void SoftCPU::FDIVP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    fpu_pop();
}

void SoftCPU::FILD_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m16int);
}

void SoftCPU::FFREEP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTTP_RM16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FIST_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_get(0);
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto i16 = static_cast<int16_t>(f);
    // FIXME: Respect shadow values
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(bit_cast<u16>(i16)));
}

void SoftCPU::FISTP_RM16(const X86::Instruction& insn)
{
    FIST_RM16(insn);
    fpu_pop();
}
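
// The FIST/FISTP handlers convert with static_cast, which truncates toward zero, whereas
// the x87 default rounding mode is round-to-nearest-even: storing 1.7 should yield 2 on
// hardware but yields 1 here. That discrepancy is what the "respect rounding mode in
// m_fpu_cw" FIXMEs above refer to.
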
void SoftCPU::FBLD_M80(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTSW_AX(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FILD_RM64(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m64int = (i64)insn.modrm().read64(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m64int);
}

void SoftCPU::FUCOMIP(const X86::Instruction& insn)
{
    FUCOMI(insn);
    fpu_pop();
}

void SoftCPU::FBSTP_M80(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FCOMIP(const X86::Instruction& insn)
{
    FCOMI(insn);
    fpu_pop();
}

void SoftCPU::FISTP_RM64(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_pop();
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto i64 = static_cast<int64_t>(f);
    // FIXME: Respect shadow values
    insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(i64)));
}

void SoftCPU::HLT(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::IDIV_RM16(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read16(*this, insn);
    auto divisor = (i16)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i32 dividend = (i32)(((u32)dx().value() << 16) | (u32)ax().value());
    i32 result = dividend / divisor;
    if (result > NumericLimits<i16>::max() || result < NumericLimits<i16>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result, original_ax, dx(), divisor_with_shadow));
    set_dx(shadow_wrap_with_taint_from<u16>(dividend % divisor, original_ax, dx(), divisor_with_shadow));
}

void SoftCPU::IDIV_RM32(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read32(*this, insn);
    auto divisor = (i32)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i64 dividend = (i64)(((u64)edx().value() << 32) | (u64)eax().value());
    i64 result = dividend / divisor;
    if (result > NumericLimits<i32>::max() || result < NumericLimits<i32>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, original_eax, edx(), divisor_with_shadow));
    set_edx(shadow_wrap_with_taint_from<u32>(dividend % divisor, original_eax, edx(), divisor_with_shadow));
}

void SoftCPU::IDIV_RM8(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read8(*this, insn);
    auto divisor = (i8)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i16 dividend = ax().value();
    i16 result = dividend / divisor;
    if (result > NumericLimits<i8>::max() || result < NumericLimits<i8>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(result, divisor_with_shadow, original_ax));
    set_ah(shadow_wrap_with_taint_from<u8>(dividend % divisor, divisor_with_shadow, original_ax));
}
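
// IDIV is the signed counterpart of DIV with the same dividend layout; the quotient must
// fit the signed destination, so e.g. a DX:AX dividend of 100000 divided by 2 yields
// 50000, which exceeds i16's 32767 and would raise #DE on hardware ("Divide overflow"
// here). The quotient truncates toward zero and the remainder takes the dividend's sign.
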
void SoftCPU::IMUL_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), ax().value(), result_high, result_low);
    gpr16(X86::RegisterDX) = shadow_wrap_with_taint_from<u16>(result_high, src, ax());
    gpr16(X86::RegisterAX) = shadow_wrap_with_taint_from<u16>(result_low, src, ax());
}

void SoftCPU::IMUL_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), eax().value(), result_high, result_low);
    gpr32(X86::RegisterEDX) = shadow_wrap_with_taint_from<u32>(result_high, src, eax());
    gpr32(X86::RegisterEAX) = shadow_wrap_with_taint_from<u32>(result_low, src, eax());
}

void SoftCPU::IMUL_RM8(const X86::Instruction& insn)
{
    i8 result_high;
    i8 result_low;
    auto src = insn.modrm().read8(*this, insn);
    op_imul<i8>(*this, src.value(), al().value(), result_high, result_low);
    gpr8(X86::RegisterAH) = shadow_wrap_with_taint_from<u8>(result_high, src, al());
    gpr8(X86::RegisterAL) = shadow_wrap_with_taint_from<u8>(result_low, src, al());
}

void SoftCPU::IMUL_reg16_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, gpr16(insn.reg16()).value(), src.value(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src, gpr16(insn.reg16()));
}

void SoftCPU::IMUL_reg16_RM16_imm16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), insn.imm16(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}

void SoftCPU::IMUL_reg16_RM16_imm8(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), sign_extended_to<i16>(insn.imm8()), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}

void SoftCPU::IMUL_reg32_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, gpr32(insn.reg32()).value(), src.value(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src, gpr32(insn.reg32()));
}

void SoftCPU::IMUL_reg32_RM32_imm32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), insn.imm32(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}

void SoftCPU::IMUL_reg32_RM32_imm8(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), sign_extended_to<i32>(insn.imm8()), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}
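
// The one-operand IMUL forms above keep the full double-width product (AH:AL, DX:AX or
// EDX:EAX), while the two- and three-operand forms retain only the low half. result_high
// is still computed for them, since op_imul presumably derives CF/OF from whether the
// high half is just the sign extension of the low half.
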
void SoftCPU::INC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_inc(*this, insn.modrm().read16(*this, insn)));
}

void SoftCPU::INC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_inc(*this, insn.modrm().read32(*this, insn)));
}

void SoftCPU::INC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_inc(*this, insn.modrm().read8(*this, insn)));
}

void SoftCPU::INC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_inc(*this, const_gpr16(insn.reg16()));
}

void SoftCPU::INC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_inc(*this, const_gpr32(insn.reg32()));
}

void SoftCPU::INSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INT3(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INTO(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::INT_imm8(const X86::Instruction& insn)
{
    VERIFY(insn.imm8() == 0x82);
    // FIXME: virt_syscall should take ValueWithShadow and whine about uninitialized arguments
    set_eax(shadow_wrap_as_initialized(m_emulator.virt_syscall(eax().value(), edx().value(), ecx().value(), ebx().value())));
}

void SoftCPU::INVLPG(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IRET(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::JCXZ_imm8(const X86::Instruction& insn)
{
    if (insn.a32()) {
        warn_if_uninitialized(ecx(), "jecxz imm8");
        if (ecx().value() == 0)
            set_eip(eip() + (i8)insn.imm8());
    } else {
        warn_if_uninitialized(cx(), "jcxz imm8");
        if (cx().value() == 0)
            set_eip(eip() + (i8)insn.imm8());
    }
}
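
// JCXZ/JECXZ is the one conditional branch that tests a register rather than EFLAGS,
// which is why this handler warns about an uninitialized (E)CX instead of tainted flags;
// insn.a32() selects which width of the count register gates the jump.
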
void SoftCPU::JMP_FAR_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_RM16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::JMP_RM32(const X86::Instruction& insn)
{
    set_eip(insn.modrm().read32(*this, insn).value());
}

void SoftCPU::JMP_imm16(const X86::Instruction& insn)
{
    set_eip(eip() + (i16)insn.imm16());
}

void SoftCPU::JMP_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::JMP_imm32(const X86::Instruction& insn)
{
    set_eip(eip() + (i32)insn.imm32());
}

void SoftCPU::JMP_short_imm8(const X86::Instruction& insn)
{
    set_eip(eip() + (i8)insn.imm8());
}

void SoftCPU::Jcc_NEAR_imm(const X86::Instruction& insn)
{
    warn_if_flags_tainted("jcc near imm32");
    if (evaluate_condition(insn.cc()))
        set_eip(eip() + (i32)insn.imm32());
}

void SoftCPU::Jcc_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("jcc imm8");
    if (evaluate_condition(insn.cc()))
        set_eip(eip() + (i8)insn.imm8());
}

void SoftCPU::LAHF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LEAVE16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::LEAVE32(const X86::Instruction&)
{
    auto new_ebp = read_memory32({ ss(), ebp().value() });
    set_esp({ ebp().value() + 4, ebp().shadow() });
    set_ebp(new_ebp);
}

void SoftCPU::LEA_reg16_mem16(const X86::Instruction& insn)
{
    // FIXME: Respect shadow values
    gpr16(insn.reg16()) = shadow_wrap_as_initialized<u16>(insn.modrm().resolve(*this, insn).offset());
}

void SoftCPU::LEA_reg32_mem32(const X86::Instruction& insn)
{
    // FIXME: Respect shadow values
    gpr32(insn.reg32()) = shadow_wrap_as_initialized<u32>(insn.modrm().resolve(*this, insn).offset());
}

void SoftCPU::LES_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LES_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LMSW_RM16(const X86::Instruction&) { TODO_INSN(); }

template<typename T>
ALWAYS_INLINE static void do_lods(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.gpr<T>(X86::RegisterAL) = src;
        cpu.step_source_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::LODSB(const X86::Instruction& insn)
{
    do_lods<u8>(*this, insn);
}

void SoftCPU::LODSD(const X86::Instruction& insn)
{
    do_lods<u32>(*this, insn);
}

void SoftCPU::LODSW(const X86::Instruction& insn)
{
    do_lods<u16>(*this, insn);
}

void SoftCPU::LOOPNZ_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("loopnz");
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0 && !zf())
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0 && !zf())
            set_eip(eip() + (i8)insn.imm8());
    }
}

void SoftCPU::LOOPZ_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("loopz");
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0 && zf())
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0 && zf())
            set_eip(eip() + (i8)insn.imm8());
    }
}

void SoftCPU::LOOP_imm8(const X86::Instruction& insn)
{
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0)
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0)
            set_eip(eip() + (i8)insn.imm8());
    }
}
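
// LOOP decrements (E)CX and branches while it is nonzero; LOOPZ additionally requires ZF
// set and LOOPNZ requires ZF clear, hence only those two warn about tainted flags. On
// real hardware the decrement leaves EFLAGS untouched, which is why these handlers update
// the count register directly rather than going through op_dec.
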
void SoftCPU::LSL_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSL_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LTR_RM16(const X86::Instruction&) { TODO_INSN(); }

template<typename T>
ALWAYS_INLINE static void do_movs(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<false>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.write_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() }, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::MOVSB(const X86::Instruction& insn)
{
    do_movs<u8>(*this, insn);
}

void SoftCPU::MOVSD(const X86::Instruction& insn)
{
    do_movs<u32>(*this, insn);
}

void SoftCPU::MOVSW(const X86::Instruction& insn)
{
    do_movs<u16>(*this, insn);
}

void SoftCPU::MOVSX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(sign_extended_to<u16>(src.value()), src);
}

void SoftCPU::MOVSX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(sign_extended_to<u32>(src.value()), src);
}

void SoftCPU::MOVSX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(sign_extended_to<u32>(src.value()), src);
}

void SoftCPU::MOVZX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr16(insn.reg16()) = ValueWithShadow<u16>(src.value(), 0x0100 | (src.shadow() & 0xff));
}

void SoftCPU::MOVZX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010000 | (src.shadow() & 0xffff));
}

void SoftCPU::MOVZX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010100 | (src.shadow() & 0xff));
}
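
// The MOVZX handlers assemble the result shadow by hand: each shadow byte appears to use
// 0x01 for "initialized", so the zero-extended upper bytes are stamped 0x01 while the low
// bytes keep the source's shadow. E.g. movzx of an uninitialized byte into a reg32 yields
// shadow 0x010101XX: the constant zero bytes are trusted, the low byte stays tainted.
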
void SoftCPU::MOV_AL_moff8(const X86::Instruction& insn)
{
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}

void SoftCPU::MOV_AX_moff16(const X86::Instruction& insn)
{
    set_ax(read_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}

void SoftCPU::MOV_CR_reg32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_DR_reg32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::MOV_EAX_moff32(const X86::Instruction& insn)
{
    set_eax(read_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}

void SoftCPU::MOV_RM16_imm16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(insn.imm16()));
}

void SoftCPU::MOV_RM16_reg16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
}

void SoftCPU::MOV_RM16_seg(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::MOV_RM32_imm32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(insn.imm32()));
}

void SoftCPU::MOV_RM32_reg32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
}

void SoftCPU::MOV_RM8_imm8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized(insn.imm8()));
}

void SoftCPU::MOV_RM8_reg8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
}

void SoftCPU::MOV_moff16_AX(const X86::Instruction& insn)
{
    write_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, ax());
}

void SoftCPU::MOV_moff32_EAX(const X86::Instruction& insn)
{
    write_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, eax());
}

void SoftCPU::MOV_moff8_AL(const X86::Instruction& insn)
{
    write_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, al());
}

void SoftCPU::MOV_reg16_RM16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = insn.modrm().read16(*this, insn);
}

void SoftCPU::MOV_reg16_imm16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = shadow_wrap_as_initialized(insn.imm16());
}

void SoftCPU::MOV_reg32_CR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_reg32_DR(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::MOV_reg32_RM32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = insn.modrm().read32(*this, insn);
}

void SoftCPU::MOV_reg32_imm32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = shadow_wrap_as_initialized(insn.imm32());
}

void SoftCPU::MOV_reg8_RM8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = insn.modrm().read8(*this, insn);
}

void SoftCPU::MOV_reg8_imm8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = shadow_wrap_as_initialized(insn.imm8());
}

void SoftCPU::MOV_seg_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_seg_RM32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::MUL_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    u32 result = (u32)ax().value() * (u32)src.value();
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result & 0xffff, src, original_ax));
    set_dx(shadow_wrap_with_taint_from<u16>(result >> 16, src, original_ax));
    taint_flags_from(src, original_ax);
    set_cf(dx().value() != 0);
    set_of(dx().value() != 0);
}

void SoftCPU::MUL_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32(*this, insn);
    u64 result = (u64)eax().value() * (u64)src.value();
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, src, original_eax));
    set_edx(shadow_wrap_with_taint_from<u32>(result >> 32, src, original_eax));
    taint_flags_from(src, original_eax);
    set_cf(edx().value() != 0);
    set_of(edx().value() != 0);
}

void SoftCPU::MUL_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    u16 result = (u16)al().value() * src.value();
    auto original_al = al();
    set_ax(shadow_wrap_with_taint_from(result, src, original_al));
    taint_flags_from(src, original_al);
    set_cf((result & 0xff00) != 0);
    set_of((result & 0xff00) != 0);
}
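
// For unsigned MUL the full product always lands in the accumulator pair, and CF/OF are
// both set exactly when the upper half is nonzero, i.e. when the result no longer fits
// the source operand width. For example, al == 200 multiplied by 2 gives ax == 400 with
// CF = OF = 1.
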
void SoftCPU::NEG_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_sub<ValueWithShadow<u16>>(*this, shadow_wrap_as_initialized<u16>(0), insn.modrm().read16(*this, insn)));
}

void SoftCPU::NEG_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_sub<ValueWithShadow<u32>>(*this, shadow_wrap_as_initialized<u32>(0), insn.modrm().read32(*this, insn)));
}

void SoftCPU::NEG_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_sub<ValueWithShadow<u8>>(*this, shadow_wrap_as_initialized<u8>(0), insn.modrm().read8(*this, insn)));
}

void SoftCPU::NOP(const X86::Instruction&)
{
}

void SoftCPU::NOT_RM16(const X86::Instruction& insn)
{
    auto data = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, ValueWithShadow<u16>(~data.value(), data.shadow()));
}

void SoftCPU::NOT_RM32(const X86::Instruction& insn)
{
    auto data = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, ValueWithShadow<u32>(~data.value(), data.shadow()));
}

void SoftCPU::NOT_RM8(const X86::Instruction& insn)
{
    auto data = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, ValueWithShadow<u8>(~data.value(), data.shadow()));
}

void SoftCPU::OUTSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POPA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POPAD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POPF(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::POPFD(const X86::Instruction&)
{
    auto popped_value = pop32();
    m_eflags &= ~0x00fcffff;
    m_eflags |= popped_value.value() & 0x00fcffff;
    taint_flags_from(popped_value);
}
  2377. void SoftCPU::POP_DS(const X86::Instruction&) { TODO_INSN(); }
  2378. void SoftCPU::POP_ES(const X86::Instruction&) { TODO_INSN(); }
  2379. void SoftCPU::POP_FS(const X86::Instruction&) { TODO_INSN(); }
  2380. void SoftCPU::POP_GS(const X86::Instruction&) { TODO_INSN(); }
  2381. void SoftCPU::POP_RM16(const X86::Instruction& insn)
  2382. {
  2383. insn.modrm().write16(*this, insn, pop16());
  2384. }
  2385. void SoftCPU::POP_RM32(const X86::Instruction& insn)
  2386. {
  2387. insn.modrm().write32(*this, insn, pop32());
  2388. }
  2389. void SoftCPU::POP_SS(const X86::Instruction&) { TODO_INSN(); }
  2390. void SoftCPU::POP_reg16(const X86::Instruction& insn)
  2391. {
  2392. gpr16(insn.reg16()) = pop16();
  2393. }
  2394. void SoftCPU::POP_reg32(const X86::Instruction& insn)
  2395. {
  2396. gpr32(insn.reg32()) = pop32();
  2397. }
  2398. void SoftCPU::PUSHA(const X86::Instruction&) { TODO_INSN(); }
  2399. void SoftCPU::PUSHAD(const X86::Instruction&) { TODO_INSN(); }
  2400. void SoftCPU::PUSHF(const X86::Instruction&) { TODO_INSN(); }
  2401. void SoftCPU::PUSHFD(const X86::Instruction&)
  2402. {
  2403. // FIXME: Respect shadow flags when they exist!
  2404. push32(shadow_wrap_as_initialized(m_eflags & 0x00fcffff));
  2405. }
  2406. void SoftCPU::PUSH_CS(const X86::Instruction&) { TODO_INSN(); }
  2407. void SoftCPU::PUSH_DS(const X86::Instruction&) { TODO_INSN(); }
  2408. void SoftCPU::PUSH_ES(const X86::Instruction&) { TODO_INSN(); }
  2409. void SoftCPU::PUSH_FS(const X86::Instruction&) { TODO_INSN(); }
  2410. void SoftCPU::PUSH_GS(const X86::Instruction&) { TODO_INSN(); }
  2411. void SoftCPU::PUSH_RM16(const X86::Instruction&) { TODO_INSN(); }
  2412. void SoftCPU::PUSH_RM32(const X86::Instruction& insn)
  2413. {
  2414. push32(insn.modrm().read32(*this, insn));
  2415. }
  2416. void SoftCPU::PUSH_SP_8086_80186(const X86::Instruction&) { TODO_INSN(); }
  2417. void SoftCPU::PUSH_SS(const X86::Instruction&) { TODO_INSN(); }
  2418. void SoftCPU::PUSH_imm16(const X86::Instruction& insn)
  2419. {
  2420. push16(shadow_wrap_as_initialized(insn.imm16()));
  2421. }
  2422. void SoftCPU::PUSH_imm32(const X86::Instruction& insn)
  2423. {
  2424. push32(shadow_wrap_as_initialized(insn.imm32()));
  2425. }
  2426. void SoftCPU::PUSH_imm8(const X86::Instruction& insn)
  2427. {
  2428. VERIFY(!insn.has_operand_size_override_prefix());
  2429. push32(shadow_wrap_as_initialized<u32>(sign_extended_to<i32>(insn.imm8())));
  2430. }
  2431. void SoftCPU::PUSH_reg16(const X86::Instruction& insn)
  2432. {
  2433. push16(gpr16(insn.reg16()));
  2434. }
  2435. void SoftCPU::PUSH_reg32(const X86::Instruction& insn)
  2436. {
  2437. push32(gpr32(insn.reg32()));
  2438. }
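
// The rotate-through-carry helpers lean on the host CPU: the incoming carry
// is staged with stc/clc, the real rcl/rcr runs natively at the operand
// width, and the resulting flags are harvested with pushf/pop. The carry
// state is a template parameter so each instantiation contains a single
// unconditional stc or clc instead of a branch around the asm.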
template<typename T, bool cf>
ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rclw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rclb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_rcl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcl");
    if (cpu.cf())
        return op_rcl_impl<T, true>(cpu, data, steps);
    return op_rcl_impl<T, false>(cpu, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCL, op_rcl)

template<typename T, bool cf>
ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcrl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rcrw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rcrb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oc(new_flags);
    // Keep flag taint consistent with op_rcl_impl: the new flags derive from
    // both the data and the step count.
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_rcr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcr");
    if (cpu.cf())
        return op_rcr_impl<T, true>(cpu, data, steps);
    return op_rcr_impl<T, false>(cpu, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCR, op_rcr)

void SoftCPU::RDTSC(const X86::Instruction&) { TODO_INSN(); }
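
// Near returns pop EIP and warn if the return address is uninitialized;
// jumping through tainted memory is the classic symptom of a stack smash or
// a missing initializer. RET imm16 additionally releases imm16 bytes of
// callee-cleaned arguments after the pop.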
void SoftCPU::RET(const X86::Instruction& insn)
{
    VERIFY(!insn.has_operand_size_override_prefix());
    auto ret_address = pop32();
    warn_if_uninitialized(ret_address, "ret");
    set_eip(ret_address.value());
}

void SoftCPU::RETF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::RETF_imm16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::RET_imm16(const X86::Instruction& insn)
{
    VERIFY(!insn.has_operand_size_override_prefix());
    auto ret_address = pop32();
    warn_if_uninitialized(ret_address, "ret imm16");
    set_eip(ret_address.value());
    set_esp({ esp().value() + insn.imm16(), esp().shadow() });
}

template<typename T>
ALWAYS_INLINE static T op_rol(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("roll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rolw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rolb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oc(new_flags);
    // Keep flag taint consistent with op_rcl_impl above.
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROL, op_rol)

template<typename T>
ALWAYS_INLINE static T op_ror(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rorl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rorw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oc(new_flags);
    // Keep flag taint consistent with op_rcl_impl above.
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROR, op_ror)

void SoftCPU::SAHF(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::SALC(const X86::Instruction&)
{
    // FIXME: Respect shadow flags once they exist!
    set_al(shadow_wrap_as_initialized<u8>(cf() ? 0xff : 0x00));
}
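
// SAR is handled like the rotates above, but with set_flags_oszapc() since
// an arithmetic shift updates the sign, zero and parity flags as well as
// carry and overflow.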
template<typename T>
ALWAYS_INLINE static T op_sar(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sarl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sarw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sarb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    // Keep flag taint consistent with the other shift/rotate helpers.
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SAR, op_sar)
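
// SCAS compares the accumulator against ES:[(E)DI] purely for flags: op_sub
// performs the comparison, its result is discarded, and the destination
// index steps by the operand size. do_once_or_repeat<true> lets the same
// body serve the REPZ/REPNZ forms.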
template<typename T>
ALWAYS_INLINE static void do_scas(SoftCPU& cpu, const X86::Instruction& insn)
{
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.const_gpr<T>(X86::RegisterAL);
        auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
        op_sub(cpu, dest, src);
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::SCASB(const X86::Instruction& insn)
{
    do_scas<u8>(*this, insn);
}

void SoftCPU::SCASD(const X86::Instruction& insn)
{
    do_scas<u32>(*this, insn);
}

void SoftCPU::SCASW(const X86::Instruction& insn)
{
    do_scas<u16>(*this, insn);
}

void SoftCPU::SETcc_RM8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("setcc");
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized<u8>(evaluate_condition(insn.cc())));
}

void SoftCPU::SGDT(const X86::Instruction&) { TODO_INSN(); }
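
// Double-precision shifts: SHLD shifts the destination left, filling the
// vacated low bits from the top of the second operand, while SHRD shifts
// right and fills from the bottom. The count comes from CL or an imm8; the
// immediate forms wrap it as an initialized shadow value.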
void SoftCPU::SHLD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), cl()));
}

void SoftCPU::SHLD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}

void SoftCPU::SHLD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), cl()));
}

void SoftCPU::SHLD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHL, op_shl)

void SoftCPU::SHRD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), cl()));
}

void SoftCPU::SHRD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}

void SoftCPU::SHRD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), cl()));
}

void SoftCPU::SHRD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHR, op_shr)

void SoftCPU::SIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SMSW_RM16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::STC(const X86::Instruction&)
{
    set_cf(true);
}

void SoftCPU::STD(const X86::Instruction&)
{
    set_df(true);
}

void SoftCPU::STI(const X86::Instruction&) { TODO_INSN(); }
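
// STOSB/STOSD special-case the common "rep stos with DF clear" memset
// pattern: the MMU is asked to fill the whole range in one shot, and on
// success (E)DI and (E)CX are set to their architectural end values. If the
// bulk fill is refused (presumably when the range isn't plain writable
// memory), execution falls back to the generic per-element loop below.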
void SoftCPU::STOSB(const X86::Instruction& insn)
{
    if (insn.has_rep_prefix() && !df()) {
        // Fast path for 8-bit forward memory fill.
        if (m_emulator.mmu().fast_fill_memory8({ es(), destination_index(insn.a32()).value() }, ecx().value(), al())) {
            if (insn.a32()) {
                // FIXME: Should an uninitialized ECX taint EDI here?
                set_edi({ (u32)(edi().value() + ecx().value()), edi().shadow() });
                set_ecx(shadow_wrap_as_initialized<u32>(0));
            } else {
                // FIXME: Should an uninitialized CX taint DI here?
                set_di({ (u16)(di().value() + cx().value()), di().shadow() });
                set_cx(shadow_wrap_as_initialized<u16>(0));
            }
            return;
        }
    }

    do_once_or_repeat<false>(insn, [&] {
        write_memory8({ es(), destination_index(insn.a32()).value() }, al());
        step_destination_index(insn.a32(), 1);
    });
}

void SoftCPU::STOSD(const X86::Instruction& insn)
{
    if (insn.has_rep_prefix() && !df()) {
        // Fast path for 32-bit forward memory fill.
        if (m_emulator.mmu().fast_fill_memory32({ es(), destination_index(insn.a32()).value() }, ecx().value(), eax())) {
            if (insn.a32()) {
                // FIXME: Should an uninitialized ECX taint EDI here?
                set_edi({ (u32)(edi().value() + (ecx().value() * sizeof(u32))), edi().shadow() });
                set_ecx(shadow_wrap_as_initialized<u32>(0));
            } else {
                // FIXME: Should an uninitialized CX taint DI here?
                set_di({ (u16)(di().value() + (cx().value() * sizeof(u32))), di().shadow() });
                set_cx(shadow_wrap_as_initialized<u16>(0));
            }
            return;
        }
    }

    do_once_or_repeat<false>(insn, [&] {
        write_memory32({ es(), destination_index(insn.a32()).value() }, eax());
        step_destination_index(insn.a32(), 4);
    });
}

void SoftCPU::STOSW(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory16({ es(), destination_index(insn.a32()).value() }, ax());
        step_destination_index(insn.a32(), 2);
    });
}

void SoftCPU::STR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD1(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERW_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WAIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WBINVD(const X86::Instruction&) { TODO_INSN(); }
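
// XADD exchanges and adds in one step: the register operand receives the
// old destination value and the destination receives the sum. The register
// is written before the ModR/M destination so the sum wins when both
// operands name the same register, matching hardware behavior.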
void SoftCPU::XADD_RM16_reg16(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16(*this, insn);
    auto src = const_gpr16(insn.reg16());
    auto result = op_add(*this, dest, src);
    gpr16(insn.reg16()) = dest;
    insn.modrm().write16(*this, insn, result);
}

void SoftCPU::XADD_RM32_reg32(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32(*this, insn);
    auto src = const_gpr32(insn.reg32());
    auto result = op_add(*this, dest, src);
    gpr32(insn.reg32()) = dest;
    insn.modrm().write32(*this, insn, result);
}

void SoftCPU::XADD_RM8_reg8(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8(*this, insn);
    auto src = const_gpr8(insn.reg8());
    auto result = op_add(*this, dest, src);
    gpr8(insn.reg8()) = dest;
    insn.modrm().write8(*this, insn, result);
}

void SoftCPU::XCHG_AX_reg16(const X86::Instruction& insn)
{
    auto temp = gpr16(insn.reg16());
    gpr16(insn.reg16()) = ax();
    set_ax(temp);
}

void SoftCPU::XCHG_EAX_reg32(const X86::Instruction& insn)
{
    auto temp = gpr32(insn.reg32());
    gpr32(insn.reg32()) = eax();
    set_eax(temp);
}

void SoftCPU::XCHG_reg16_RM16(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
    gpr16(insn.reg16()) = temp;
}

void SoftCPU::XCHG_reg32_RM32(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
    gpr32(insn.reg32()) = temp;
}

void SoftCPU::XCHG_reg8_RM8(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
    gpr8(insn.reg8()) = temp;
}
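
// XLAT is a table lookup: AL = [(E)BX + AL] in the (possibly overridden)
// data segment. Both inputs feed directly into an address computation,
// hence the uninitialized-value warnings before the dereference.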
void SoftCPU::XLAT(const X86::Instruction& insn)
{
    if (insn.a32())
        warn_if_uninitialized(ebx(), "xlat ebx");
    else
        warn_if_uninitialized(bx(), "xlat bx");
    warn_if_uninitialized(al(), "xlat al");

    u32 offset = (insn.a32() ? ebx().value() : bx().value()) + al().value();
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), offset }));
}
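
// The macros below stamp out every operand-size and ModR/M combination of a
// two-operand ALU instruction from a single op_* template. update_dest is
// false for CMP and TEST, which only set flags;
// is_zero_idiom_if_both_operands_same marks ops like XOR and SUB whose
// reg,reg form always produces zero, so the result can be treated as
// initialized even if the inputs are tainted; is_or presumably exists so
// that OR against an all-ones value can likewise be treated as a known
// result.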
#define DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    void SoftCPU::mnemonic##_AL_imm8(const X86::Instruction& insn) { generic_AL_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_AX_imm16(const X86::Instruction& insn) { generic_AX_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_EAX_imm32(const X86::Instruction& insn) { generic_EAX_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM16_imm16(const X86::Instruction& insn) { generic_RM16_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { generic_RM16_reg16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm32(const X86::Instruction& insn) { generic_RM32_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { generic_RM32_reg32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_RM8_reg8(const X86::Instruction& insn) { generic_RM8_reg8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }

#define DEFINE_GENERIC_INSN_HANDLERS(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_imm8<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_imm8<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_reg16_RM16(const X86::Instruction& insn) { generic_reg16_RM16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_reg32_RM32(const X86::Instruction& insn) { generic_reg32_RM32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_reg8_RM8(const X86::Instruction& insn) { generic_reg8_RM8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }

DEFINE_GENERIC_INSN_HANDLERS(XOR, op_xor, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(OR, op_or, true, false, true)
DEFINE_GENERIC_INSN_HANDLERS(ADD, op_add, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(ADC, op_adc, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(SUB, op_sub, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(SBB, op_sbb, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(AND, op_and, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(CMP, op_sub, false, false, false)
DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(TEST, op_and, false, false, false)

void SoftCPU::MOVQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::EMMS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOVQ_mm1_m64_mm2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_32(const X86::Instruction&) { TODO_INSN(); }
}