/*
 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "SoftCPU.h"
#include "Emulator.h"
#include <AK/Assertions.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h> // for getpid(), used in warn_if_flags_tainted()
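
// Build this hot file at -O3 even in unoptimized builds. Clang is excluded
// because it does not support GCC's optimize pragma.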
#if defined(__GNUC__) && !defined(__clang__)
#    pragma GCC optimize("O3")
#endif

//#define MEMORY_DEBUG
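
// Stamps out the nine handlers each shift/rotate mnemonic needs: RM8/RM16/RM32,
// each with a shift count of 1, CL, or an unsigned imm8. For example,
// DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHL, op_shl) would define
// SoftCPU::SHL_RM8_1(), SoftCPU::SHL_RM8_CL(), SoftCPU::SHL_RM8_imm8(), and so
// on, each forwarding to the matching generic_* helper with the op instantiated
// at the right operand width.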
#define DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(mnemonic, op)                                                                            \
    void SoftCPU::mnemonic##_RM8_1(const X86::Instruction& insn) { generic_RM8_1(op<ValueWithShadow<u8>>, insn); }                         \
    void SoftCPU::mnemonic##_RM8_CL(const X86::Instruction& insn) { generic_RM8_CL(op<ValueWithShadow<u8>>, insn); }                       \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<true>(op<ValueWithShadow<u8>>, insn); }             \
    void SoftCPU::mnemonic##_RM16_1(const X86::Instruction& insn) { generic_RM16_1(op<ValueWithShadow<u16>>, insn); }                      \
    void SoftCPU::mnemonic##_RM16_CL(const X86::Instruction& insn) { generic_RM16_CL(op<ValueWithShadow<u16>>, insn); }                    \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_unsigned_imm8<true>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_1(const X86::Instruction& insn) { generic_RM32_1(op<ValueWithShadow<u32>>, insn); }                      \
    void SoftCPU::mnemonic##_RM32_CL(const X86::Instruction& insn) { generic_RM32_CL(op<ValueWithShadow<u32>>, insn); }                    \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_unsigned_imm8<true>(op<ValueWithShadow<u32>>, insn); }

namespace UserspaceEmulator {
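
// Every value the emulated CPU computes carries a shadow alongside it
// (ValueWithShadow) that records whether its bits originate from initialized
// data, in the style of Valgrind's memcheck. The two helpers below emit a
// diagnostic plus a backtrace when uninitialized data (or flags derived from
// it) actually influences execution.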
template<typename T>
void warn_if_uninitialized(T value_with_shadow, const char* message)
{
    if (value_with_shadow.is_uninitialized()) {
        dbgprintf("\033[31;1mWarning! Use of uninitialized value: %s\033[0m\n", message);
        Emulator::the().dump_backtrace();
    }
}

void SoftCPU::warn_if_flags_tainted(const char* message) const
{
    if (m_flags_tainted) {
        dbgprintf("\n");
        dbgprintf("==%d== \033[31;1mConditional depends on uninitialized data\033[0m (%s)\n", getpid(), message);
        Emulator::the().dump_backtrace();
    }
}

template<typename T, typename U>
inline constexpr T sign_extended_to(U value)
{
    if (!(value & X86::TypeTrivia<U>::sign_bit))
        return value;
    return (X86::TypeTrivia<T>::mask & ~X86::TypeTrivia<U>::mask) | value;
}
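
// Note that the shadow bytes are memset to 1, which this codebase appears to
// use as the "initialized" marker, so all GPRs start out well-defined. The
// selectors installed here (0x18 for code, 0x20/0x28 for data) are the same
// ones the read_memory*/write_memory* assertions below accept.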
SoftCPU::SoftCPU(Emulator& emulator)
    : m_emulator(emulator)
{
    memset(m_gpr, 0, sizeof(m_gpr));
    memset(m_gpr_shadow, 1, sizeof(m_gpr_shadow));

    m_segment[(int)X86::SegmentRegister::CS] = 0x18;
    m_segment[(int)X86::SegmentRegister::DS] = 0x20;
    m_segment[(int)X86::SegmentRegister::ES] = 0x20;
    m_segment[(int)X86::SegmentRegister::SS] = 0x20;
    m_segment[(int)X86::SegmentRegister::GS] = 0x28;
}

void SoftCPU::dump() const
{
    printf("eax=%08x ebx=%08x ecx=%08x edx=%08x ", eax().value(), ebx().value(), ecx().value(), edx().value());
    printf("ebp=%08x esp=%08x esi=%08x edi=%08x ", ebp().value(), esp().value(), esi().value(), edi().value());
    printf("o=%u s=%u z=%u a=%u p=%u c=%u\n", of(), sf(), zf(), af(), pf(), cf());
    printf("#ax=%08x #bx=%08x #cx=%08x #dx=%08x ", eax().shadow(), ebx().shadow(), ecx().shadow(), edx().shadow());
    printf("#bp=%08x #sp=%08x #si=%08x #di=%08x ", ebp().shadow(), esp().shadow(), esi().shadow(), edi().shadow());
    printf("#f=%u\n", m_flags_tainted);
    fflush(stdout);
}
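
// The emulated program reports heap activity to the emulator over a "secret
// data" side channel: a leading 1 describes a malloc and a 2 a free, with the
// remaining words apparently carrying the address/size arguments that get
// forwarded to the malloc tracer.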
void SoftCPU::did_receive_secret_data()
{
    if (m_secret_data[0] == 1) {
        if (auto* tracer = m_emulator.malloc_tracer())
            tracer->target_did_malloc({}, m_secret_data[2], m_secret_data[1]);
    } else if (m_secret_data[0] == 2) {
        if (auto* tracer = m_emulator.malloc_tracer())
            tracer->target_did_free({}, m_secret_data[1]);
    } else {
        ASSERT_NOT_REACHED();
    }
}
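
// Instruction fetch avoids a full MMU lookup per byte: whenever EIP moves to a
// new region, this refreshes a raw pointer to the current position inside the
// region's backing memory, along with a pointer to the region's end.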
void SoftCPU::update_code_cache()
{
    auto* region = m_emulator.mmu().find_region({ cs(), eip() });
    ASSERT(region);

    m_cached_code_ptr = region->cacheable_ptr(eip() - region->base());
    m_cached_code_end = region->cacheable_ptr(region->size());
}

ValueWithShadow<u8> SoftCPU::read_memory8(X86::LogicalAddress address)
{
    ASSERT(address.selector() == 0x18 || address.selector() == 0x20 || address.selector() == 0x28);
    auto value = m_emulator.mmu().read8(address);
#ifdef MEMORY_DEBUG
    printf("\033[36;1mread_memory8: @%04x:%08x -> %02x (%02x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    return value;
}

ValueWithShadow<u16> SoftCPU::read_memory16(X86::LogicalAddress address)
{
    ASSERT(address.selector() == 0x18 || address.selector() == 0x20 || address.selector() == 0x28);
    auto value = m_emulator.mmu().read16(address);
#ifdef MEMORY_DEBUG
    printf("\033[36;1mread_memory16: @%04x:%08x -> %04x (%04x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    return value;
}

ValueWithShadow<u32> SoftCPU::read_memory32(X86::LogicalAddress address)
{
    ASSERT(address.selector() == 0x18 || address.selector() == 0x20 || address.selector() == 0x28);
    auto value = m_emulator.mmu().read32(address);
#ifdef MEMORY_DEBUG
    printf("\033[36;1mread_memory32: @%04x:%08x -> %08x (%08x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    return value;
}

void SoftCPU::write_memory8(X86::LogicalAddress address, ValueWithShadow<u8> value)
{
    ASSERT(address.selector() == 0x20 || address.selector() == 0x28);
#ifdef MEMORY_DEBUG
    printf("\033[35;1mwrite_memory8: @%04x:%08x <- %02x (%02x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    m_emulator.mmu().write8(address, value);
}

void SoftCPU::write_memory16(X86::LogicalAddress address, ValueWithShadow<u16> value)
{
    ASSERT(address.selector() == 0x20 || address.selector() == 0x28);
#ifdef MEMORY_DEBUG
    printf("\033[35;1mwrite_memory16: @%04x:%08x <- %04x (%04x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    m_emulator.mmu().write16(address, value);
}

void SoftCPU::write_memory32(X86::LogicalAddress address, ValueWithShadow<u32> value)
{
    ASSERT(address.selector() == 0x20 || address.selector() == 0x28);
#ifdef MEMORY_DEBUG
    printf("\033[35;1mwrite_memory32: @%04x:%08x <- %08x (%08x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    m_emulator.mmu().write32(address, value);
}
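
// Pushes a string plus its NUL terminator onto the emulated stack, rounding
// the allocation up to a multiple of 16 to keep the stack 16-byte aligned.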
void SoftCPU::push_string(const StringView& string)
{
    size_t space_to_allocate = round_up_to_power_of_two(string.length() + 1, 16);
    set_esp({ esp().value() - space_to_allocate, esp().shadow() });
    m_emulator.mmu().copy_to_vm(esp().value(), string.characters_without_null_termination(), string.length());
    m_emulator.mmu().write8({ 0x20, esp().value() + string.length() }, shadow_wrap_as_initialized((u8)'\0'));
}

void SoftCPU::push32(ValueWithShadow<u32> value)
{
    set_esp({ esp().value() - sizeof(u32), esp().shadow() });
    warn_if_uninitialized(esp(), "push32");
    write_memory32({ ss(), esp().value() }, value);
}

ValueWithShadow<u32> SoftCPU::pop32()
{
    warn_if_uninitialized(esp(), "pop32");
    auto value = read_memory32({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u32), esp().shadow() });
    return value;
}

void SoftCPU::push16(ValueWithShadow<u16> value)
{
    warn_if_uninitialized(esp(), "push16");
    set_esp({ esp().value() - sizeof(u16), esp().shadow() });
    write_memory16({ ss(), esp().value() }, value);
}

ValueWithShadow<u16> SoftCPU::pop16()
{
    warn_if_uninitialized(esp(), "pop16");
    auto value = read_memory16({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u16), esp().shadow() });
    return value;
}
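
// Runs the callback once when the instruction has no REP prefix; otherwise
// repeats it while the loop counter (CX or ECX, depending on a32) is non-zero.
// For the REPZ/REPNZ string instructions (check_zf), ZF is consulted after
// each iteration to decide whether to stop early.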
template<bool check_zf, typename Callback>
void SoftCPU::do_once_or_repeat(const X86::Instruction& insn, Callback callback)
{
    if (!insn.has_rep_prefix())
        return callback();

    while (loop_index(insn.a32()).value()) {
        callback();
        decrement_loop_index(insn.a32());
        if constexpr (check_zf) {
            warn_if_flags_tainted("repz/repnz");
            if (insn.rep_prefix() == X86::Prefix::REPZ && !zf())
                break;
            if (insn.rep_prefix() == X86::Prefix::REPNZ && zf())
                break;
        }
    }
}
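
// The op_* helpers below all share one technique: perform the operation
// natively with inline x86 assembly, then harvest the host EFLAGS with a
// pushf/pop sequence and copy the relevant subset (OSZAP for inc/dec, OSZAPC
// for the arithmetic ops, OSZPC for the logic ops) into the emulated flags.
// For the carry-dependent ops (ADC/SBB), the emulated CF is first seeded into
// the host flags via stc/clc. Results are shadow-wrapped with taint from the
// inputs so uninitialized data keeps propagating. Naturally, this approach
// requires the host to be x86 as well.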
template<typename T>
ALWAYS_INLINE static T op_inc(SoftCPU& cpu, T data)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("incl %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("incw %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("incb %%al\n"
                     : "=a"(result)
                     : "a"(data.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszap(new_flags);
    cpu.taint_flags_from(data);
    return shadow_wrap_with_taint_from(result, data);
}

template<typename T>
ALWAYS_INLINE static T op_dec(SoftCPU& cpu, T data)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("decl %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("decw %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("decb %%al\n"
                     : "=a"(result)
                     : "a"(data.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszap(new_flags);
    cpu.taint_flags_from(data);
    return shadow_wrap_with_taint_from(result, data);
}

template<typename T>
ALWAYS_INLINE static T op_xor(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("xorl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("xor %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("xorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_or(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("orl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("or %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("orb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_sub(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("subl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("subw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("subb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T, bool cf>
ALWAYS_INLINE static T op_sbb_impl(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sbbl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sbbw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sbbb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_sbb(SoftCPU& cpu, T& dest, const T& src)
{
    cpu.warn_if_flags_tainted("sbb");
    if (cpu.cf())
        return op_sbb_impl<T, true>(cpu, dest, src);
    return op_sbb_impl<T, false>(cpu, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_add(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("addl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("addw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("addb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T, bool cf>
ALWAYS_INLINE static T op_adc_impl(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("adcl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("adcw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("adcb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_adc(SoftCPU& cpu, T& dest, const T& src)
{
    cpu.warn_if_flags_tainted("adc");
    if (cpu.cf())
        return op_adc_impl<T, true>(cpu, dest, src);
    return op_adc_impl<T, false>(cpu, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_and(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("andl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("andw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("andb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
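
// Signed multiply via a widening host multiply: compute the product at twice
// the operand width, split it into high and low halves, and set CF/OF when the
// result no longer fits in the signed destination width, which is the
// condition under which IMUL reports overflow.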
template<typename T>
ALWAYS_INLINE static void op_imul(SoftCPU& cpu, const T& dest, const T& src, T& result_high, T& result_low)
{
    bool did_overflow = false;
    if constexpr (sizeof(T) == 4) {
        i64 result = (i64)src * (i64)dest;
        result_low = result & 0xffffffff;
        result_high = result >> 32;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    } else if constexpr (sizeof(T) == 2) {
        i32 result = (i32)src * (i32)dest;
        result_low = result & 0xffff;
        result_high = result >> 16;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    } else if constexpr (sizeof(T) == 1) {
        i16 result = (i16)src * (i16)dest;
        result_low = result & 0xff;
        result_high = result >> 8;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    }

    if (did_overflow) {
        cpu.set_cf(true);
        cpu.set_of(true);
    } else {
        cpu.set_cf(false);
        cpu.set_of(false);
    }
}

template<typename T>
ALWAYS_INLINE static T op_shr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shrl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shrw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("shrb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shlw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("shlb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shrd(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shrd %%cl, %%edx, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shrd %%cl, %%dx, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shld(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shld %%cl, %%edx, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shld %%cl, %%dx, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
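
// The generic_* helpers below factor out the read-operands / apply-op /
// write-back shape shared by the two-operand ALU instructions. The update_dest
// template parameter lets one helper serve both the updating forms (ADD, OR,
// ...) and flags-only forms such as CMP, which evaluate the op but skip the
// write-back.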
template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_AL_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = al();
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (update_dest)
        set_al(result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_AX_imm16(Op op, const X86::Instruction& insn)
{
    auto dest = ax();
    auto src = shadow_wrap_as_initialized(insn.imm16());
    auto result = op(*this, dest, src);
    if (update_dest)
        set_ax(result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_EAX_imm32(Op op, const X86::Instruction& insn)
{
    auto dest = eax();
    auto src = shadow_wrap_as_initialized(insn.imm32());
    auto result = op(*this, dest, src);
    if (update_dest)
        set_eax(result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_imm16(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm16());
    auto result = op(*this, dest, src);
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto src = shadow_wrap_as_initialized<u16>(sign_extended_to<u16>(insn.imm8()));
    auto result = op(*this, dest, src);
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_unsigned_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}
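
// When both operands are the same register, idioms like "xor eax, eax" and
// "sub eax, eax" produce a well-defined result even if the register held
// uninitialized data, so the dont_taint_for_same_operand variants force the
// result (and the flags) to be treated as initialized in that case.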
template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_reg16(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto src = const_gpr16(insn.reg16());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_imm32(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto src = insn.imm32();
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto src = sign_extended_to<u32>(insn.imm8());
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_unsigned_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_reg32(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto src = const_gpr32(insn.reg32());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    auto src = insn.imm8();
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (update_dest)
        insn.modrm().write8(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_reg8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    auto src = const_gpr8(insn.reg8());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write8(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg16_RM16(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr16(insn.reg16());
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr16(insn.reg16()) = result;
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg32_RM32(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr32(insn.reg32());
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr32(insn.reg32()) = result;
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg8_RM8(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr8(insn.reg8());
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr8(insn.reg8()) = result;
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    insn.modrm().write8(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    insn.modrm().write8(*this, insn, op(*this, data, cl()));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    insn.modrm().write16(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    insn.modrm().write16(*this, insn, op(*this, data, cl()));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    insn.modrm().write32(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    insn.modrm().write32(*this, insn, op(*this, data, cl()));
}

void SoftCPU::AAA(const X86::Instruction&) { TODO(); }
void SoftCPU::AAD(const X86::Instruction&) { TODO(); }
void SoftCPU::AAM(const X86::Instruction&) { TODO(); }
void SoftCPU::AAS(const X86::Instruction&) { TODO(); }
void SoftCPU::ARPL(const X86::Instruction&) { TODO(); }
void SoftCPU::BOUND(const X86::Instruction&) { TODO(); }
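
// Bit-scan helpers: the forward scan uses the compiler's count-trailing-zeros
// builtin, the reverse scan the native BSR instruction. BSF/BSR leave the
// destination undefined when the source is zero, so the handlers below only
// write the destination register for a non-zero source, setting ZF either way.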
template<typename T>
ALWAYS_INLINE static T op_bsf(SoftCPU&, T value)
{
    return { (typename T::ValueType)__builtin_ctz(value.value()), value.shadow() };
}

template<typename T>
ALWAYS_INLINE static T op_bsr(SoftCPU&, T value)
{
    typename T::ValueType bit_index = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("bsrl %%eax, %%edx"
                     : "=d"(bit_index)
                     : "a"(value.value()));
    }
    if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("bsrw %%ax, %%dx"
                     : "=d"(bit_index)
                     : "a"(value.value()));
    }
    return shadow_wrap_with_taint_from(bit_index, value);
}

void SoftCPU::BSF_reg16_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    set_zf(!src.value());
    if (src.value())
        gpr16(insn.reg16()) = op_bsf(*this, src);
    taint_flags_from(src);
}

void SoftCPU::BSF_reg32_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr32(insn.reg32()) = op_bsf(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSR_reg16_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr16(insn.reg16()) = op_bsr(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSR_reg32_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr32(insn.reg32()) = op_bsr(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSWAP_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = { __builtin_bswap32(gpr32(insn.reg32()).value()), __builtin_bswap32(gpr32(insn.reg32()).shadow()) };
}

template<typename T>
ALWAYS_INLINE static T op_bt(T value, T)
{
    return value;
}

template<typename T>
ALWAYS_INLINE static T op_bts(T value, T bit_mask)
{
    return value | bit_mask;
}

template<typename T>
ALWAYS_INLINE static T op_btr(T value, T bit_mask)
{
    return value & ~bit_mask;
}

template<typename T>
ALWAYS_INLINE static T op_btc(T value, T bit_mask)
{
    return value ^ bit_mask;
}
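
// Shared implementation of the BT/BTS/BTR/BTC family. For a register operand
// the bit index wraps at the operand width. For a memory operand the bit index
// may reach beyond the addressed word (bit_offset / 8 selects the byte), and
// these helpers do a single-byte read-modify-write of just that byte; see the
// FIXMEs below about whether hardware performs a wider access.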
template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_reg16(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    if (insn.modrm().is_register()) {
        unsigned bit_index = cpu.const_gpr16(insn.reg16()).value() & (X86::TypeTrivia<u16>::bits - 1);
        auto original = insn.modrm().read16<ValueWithShadow<u16>>(cpu, insn);
        u16 bit_mask = 1 << bit_index;
        u16 result = op(original.value(), bit_mask);
        cpu.set_cf((original.value() & bit_mask) != 0);
        cpu.taint_flags_from(cpu.gpr16(insn.reg16()), original);
        if (should_update)
            insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), original));
        return;
    }
    // FIXME: Is this supposed to perform a full 16-bit read/modify/write?
    unsigned bit_offset_in_array = cpu.const_gpr16(insn.reg16()).value() / 8;
    unsigned bit_offset_in_byte = cpu.const_gpr16(insn.reg16()).value() & 7;
    auto address = insn.modrm().resolve(cpu, insn);
    address.set_offset(address.offset() + bit_offset_in_array);
    auto dest = cpu.read_memory8(address);
    u8 bit_mask = 1 << bit_offset_in_byte;
    u8 result = op(dest.value(), bit_mask);
    cpu.set_cf((dest.value() & bit_mask) != 0);
    cpu.taint_flags_from(cpu.gpr16(insn.reg16()), dest);
    if (should_update)
        cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), dest));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM32_reg32(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    if (insn.modrm().is_register()) {
        unsigned bit_index = cpu.const_gpr32(insn.reg32()).value() & (X86::TypeTrivia<u32>::bits - 1);
        auto original = insn.modrm().read32<ValueWithShadow<u32>>(cpu, insn);
        u32 bit_mask = 1 << bit_index;
        u32 result = op(original.value(), bit_mask);
        cpu.set_cf((original.value() & bit_mask) != 0);
        cpu.taint_flags_from(cpu.gpr32(insn.reg32()), original);
        if (should_update)
            insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), original));
        return;
    }
    // FIXME: Is this supposed to perform a full 32-bit read/modify/write?
    unsigned bit_offset_in_array = cpu.const_gpr32(insn.reg32()).value() / 8;
    unsigned bit_offset_in_byte = cpu.const_gpr32(insn.reg32()).value() & 7;
    auto address = insn.modrm().resolve(cpu, insn);
    address.set_offset(address.offset() + bit_offset_in_array);
    auto dest = cpu.read_memory8(address);
    u8 bit_mask = 1 << bit_offset_in_byte;
    u8 result = op(dest.value(), bit_mask);
    cpu.set_cf((dest.value() & bit_mask) != 0);
    cpu.taint_flags_from(cpu.gpr32(insn.reg32()), dest);
    if (should_update)
        cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), dest));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u16>::mask);

    // FIXME: Support higher bit indices
    ASSERT(bit_index < 16);

    auto original = insn.modrm().read16<ValueWithShadow<u16>>(cpu, insn);
    u16 bit_mask = 1 << bit_index;
    auto result = op(original.value(), bit_mask);
    cpu.set_cf((original.value() & bit_mask) != 0);
    cpu.taint_flags_from(original);
    if (should_update)
        insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, original));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM32_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u32>::mask);

    // FIXME: Support higher bit indices
    ASSERT(bit_index < 32);

    auto original = insn.modrm().read32<ValueWithShadow<u32>>(cpu, insn);
    u32 bit_mask = 1 << bit_index;
    auto result = op(original.value(), bit_mask);
    cpu.set_cf((original.value() & bit_mask) != 0);
    cpu.taint_flags_from(original);
    if (should_update)
        insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, original));
}

#define DEFINE_GENERIC_BTx_INSN_HANDLERS(mnemonic, op, update_dest)                                                          \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { BTx_RM32_reg32<update_dest>(*this, insn, op<u32>); } \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { BTx_RM16_reg16<update_dest>(*this, insn, op<u16>); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { BTx_RM32_imm8<update_dest>(*this, insn, op<u32>); }   \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { BTx_RM16_imm8<update_dest>(*this, insn, op<u16>); }

DEFINE_GENERIC_BTx_INSN_HANDLERS(BTS, op_bts, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTR, op_btr, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTC, op_btc, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BT, op_bt, false);

void SoftCPU::CALL_FAR_mem16(const X86::Instruction&)
{
    TODO();
}

void SoftCPU::CALL_FAR_mem32(const X86::Instruction&) { TODO(); }
void SoftCPU::CALL_RM16(const X86::Instruction&) { TODO(); }

void SoftCPU::CALL_RM32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(eip()));
    auto address = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    warn_if_uninitialized(address, "call rm32");
    set_eip(address.value());
}

void SoftCPU::CALL_imm16(const X86::Instruction&) { TODO(); }
void SoftCPU::CALL_imm16_imm16(const X86::Instruction&) { TODO(); }
void SoftCPU::CALL_imm16_imm32(const X86::Instruction&) { TODO(); }

void SoftCPU::CALL_imm32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(eip()));
    set_eip(eip() + (i32)insn.imm32());
}

void SoftCPU::CBW(const X86::Instruction&)
{
    set_ah(shadow_wrap_with_taint_from<u8>((al().value() & 0x80) ? 0xff : 0x00, al()));
}

void SoftCPU::CDQ(const X86::Instruction&)
{
    if (eax().value() & 0x80000000)
        set_edx(shadow_wrap_with_taint_from<u32>(0xffffffff, eax()));
    else
        set_edx(shadow_wrap_with_taint_from<u32>(0, eax()));
}

void SoftCPU::CLC(const X86::Instruction&)
{
    set_cf(false);
}

void SoftCPU::CLD(const X86::Instruction&)
{
    set_df(false);
}

void SoftCPU::CLI(const X86::Instruction&) { TODO(); }
void SoftCPU::CLTS(const X86::Instruction&) { TODO(); }
void SoftCPU::CMC(const X86::Instruction&) { TODO(); }

void SoftCPU::CMOVcc_reg16_RM16(const X86::Instruction& insn)
{
    warn_if_flags_tainted("cmovcc reg16, rm16");
    if (evaluate_condition(insn.cc()))
        gpr16(insn.reg16()) = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
}

void SoftCPU::CMOVcc_reg32_RM32(const X86::Instruction& insn)
{
    warn_if_flags_tainted("cmovcc reg32, rm32");
    if (evaluate_condition(insn.cc()))
        gpr32(insn.reg32()) = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
}
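
// CMPS compares the string at DS:ESI (or the segment-override prefix) with the
// string at ES:EDI, using op_sub purely for its flag effects, then steps both
// indices by the element size (the step_* helpers presumably honoring DF for
// direction).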
  1050. template<typename T>
  1051. ALWAYS_INLINE static void do_cmps(SoftCPU& cpu, const X86::Instruction& insn)
  1052. {
  1053. auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
  1054. cpu.do_once_or_repeat<true>(insn, [&] {
  1055. auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
  1056. auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
  1057. op_sub(cpu, dest, src);
  1058. cpu.step_source_index(insn.a32(), sizeof(T));
  1059. cpu.step_destination_index(insn.a32(), sizeof(T));
  1060. });
  1061. }
  1062. void SoftCPU::CMPSB(const X86::Instruction& insn)
  1063. {
  1064. do_cmps<u8>(*this, insn);
  1065. }
  1066. void SoftCPU::CMPSD(const X86::Instruction& insn)
  1067. {
  1068. do_cmps<u32>(*this, insn);
  1069. }
  1070. void SoftCPU::CMPSW(const X86::Instruction& insn)
  1071. {
  1072. do_cmps<u16>(*this, insn);
  1073. }
  1074. void SoftCPU::CMPXCHG_RM16_reg16(const X86::Instruction& insn)
  1075. {
  1076. auto current = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
  1077. taint_flags_from(current, ax());
  1078. if (current.value() == ax().value()) {
  1079. set_zf(true);
  1080. insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
  1081. } else {
  1082. set_zf(false);
  1083. set_ax(current);
  1084. }
  1085. }
  1086. void SoftCPU::CMPXCHG_RM32_reg32(const X86::Instruction& insn)
  1087. {
  1088. auto current = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
  1089. taint_flags_from(current, eax());
  1090. if (current.value() == eax().value()) {
  1091. set_zf(true);
  1092. insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
  1093. } else {
  1094. set_zf(false);
  1095. set_eax(current);
  1096. }
  1097. }
  1098. void SoftCPU::CMPXCHG_RM8_reg8(const X86::Instruction& insn)
  1099. {
  1100. auto current = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
  1101. taint_flags_from(current, al());
  1102. if (current.value() == al().value()) {
  1103. set_zf(true);
  1104. insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
  1105. } else {
  1106. set_zf(false);
  1107. set_al(current);
  1108. }
  1109. }
  1110. void SoftCPU::CPUID(const X86::Instruction&) { TODO(); }
void SoftCPU::CWD(const X86::Instruction&)
{
    set_dx(shadow_wrap_with_taint_from<u16>((ax().value() & 0x8000) ? 0xffff : 0x0000, ax()));
}
void SoftCPU::CWDE(const X86::Instruction&)
{
    set_eax(shadow_wrap_with_taint_from(sign_extended_to<u32>(ax().value()), ax()));
}
void SoftCPU::DAA(const X86::Instruction&) { TODO(); }
void SoftCPU::DAS(const X86::Instruction&) { TODO(); }
void SoftCPU::DEC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_dec(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn)));
}
void SoftCPU::DEC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_dec(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn)));
}
void SoftCPU::DEC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_dec(*this, insn.modrm().read8<ValueWithShadow<u8>>(*this, insn)));
}
void SoftCPU::DEC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_dec(*this, const_gpr16(insn.reg16()));
}
void SoftCPU::DEC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_dec(*this, const_gpr32(insn.reg32()));
}
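// Unsigned DIV: divide the widened accumulator (DX:AX or EDX:EAX, or AX for
// the 8-bit form) by r/m; the quotient goes to the low half and the remainder
// to the high half. Division by zero and quotient overflow would raise #DE on
// real hardware; both paths are still TODO() here.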
void SoftCPU::DIV_RM16(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    if (divisor.value() == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    u32 dividend = ((u32)dx().value() << 16) | ax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u16>::max()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(quotient, original_ax, dx(), divisor));
    set_dx(shadow_wrap_with_taint_from<u16>(remainder, original_ax, dx(), divisor));
}
void SoftCPU::DIV_RM32(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    if (divisor.value() == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    u64 dividend = ((u64)edx().value() << 32) | eax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u32>::max()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(quotient, original_eax, edx(), divisor));
    set_edx(shadow_wrap_with_taint_from<u32>(remainder, original_eax, edx(), divisor));
}
void SoftCPU::DIV_RM8(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    if (divisor.value() == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    u16 dividend = ax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u8>::max()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(quotient, original_ax, divisor));
    set_ah(shadow_wrap_with_taint_from<u8>(remainder, original_ax, divisor));
}
void SoftCPU::ENTER16(const X86::Instruction&) { TODO(); }
void SoftCPU::ENTER32(const X86::Instruction&) { TODO(); }
void SoftCPU::ESCAPE(const X86::Instruction&)
{
    dbg() << "FIXME: x87 floating-point support";
    m_emulator.dump_backtrace();
    TODO();
}
void SoftCPU::FADD_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FMUL_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FCOM_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FCOMP_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FSUB_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FSUBR_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FDIV_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FDIVR_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FLD_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FXCH(const X86::Instruction&) { TODO(); }
void SoftCPU::FST_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FNOP(const X86::Instruction&) { TODO(); }
void SoftCPU::FSTP_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::FLDENV(const X86::Instruction&) { TODO(); }
void SoftCPU::FCHS(const X86::Instruction&) { TODO(); }
void SoftCPU::FABS(const X86::Instruction&) { TODO(); }
void SoftCPU::FTST(const X86::Instruction&) { TODO(); }
void SoftCPU::FXAM(const X86::Instruction&) { TODO(); }
void SoftCPU::FLDCW(const X86::Instruction&) { TODO(); }
void SoftCPU::FLD1(const X86::Instruction&) { TODO(); }
void SoftCPU::FLDL2T(const X86::Instruction&) { TODO(); }
void SoftCPU::FLDL2E(const X86::Instruction&) { TODO(); }
void SoftCPU::FLDPI(const X86::Instruction&) { TODO(); }
void SoftCPU::FLDLG2(const X86::Instruction&) { TODO(); }
void SoftCPU::FLDLN2(const X86::Instruction&) { TODO(); }
void SoftCPU::FLDZ(const X86::Instruction&) { TODO(); }
void SoftCPU::FNSTENV(const X86::Instruction&) { TODO(); }
void SoftCPU::F2XM1(const X86::Instruction&) { TODO(); }
void SoftCPU::FYL2X(const X86::Instruction&) { TODO(); }
void SoftCPU::FPTAN(const X86::Instruction&) { TODO(); }
void SoftCPU::FPATAN(const X86::Instruction&) { TODO(); }
void SoftCPU::FXTRACT(const X86::Instruction&) { TODO(); }
void SoftCPU::FPREM1(const X86::Instruction&) { TODO(); }
void SoftCPU::FDECSTP(const X86::Instruction&) { TODO(); }
void SoftCPU::FINCSTP(const X86::Instruction&) { TODO(); }
void SoftCPU::FNSTCW(const X86::Instruction&) { TODO(); }
void SoftCPU::FPREM(const X86::Instruction&) { TODO(); }
void SoftCPU::FYL2XP1(const X86::Instruction&) { TODO(); }
void SoftCPU::FSQRT(const X86::Instruction&) { TODO(); }
void SoftCPU::FSINCOS(const X86::Instruction&) { TODO(); }
void SoftCPU::FRNDINT(const X86::Instruction&) { TODO(); }
void SoftCPU::FSCALE(const X86::Instruction&) { TODO(); }
void SoftCPU::FSIN(const X86::Instruction&) { TODO(); }
void SoftCPU::FCOS(const X86::Instruction&) { TODO(); }
void SoftCPU::FADD_RM64(const X86::Instruction&) { TODO(); }
void SoftCPU::FMUL_RM64(const X86::Instruction&) { TODO(); }
void SoftCPU::FCOM_RM64(const X86::Instruction&) { TODO(); }
void SoftCPU::FCOMP_RM64(const X86::Instruction&) { TODO(); }
void SoftCPU::FSUB_RM64(const X86::Instruction&) { TODO(); }
void SoftCPU::FSUBR_RM64(const X86::Instruction&) { TODO(); }
void SoftCPU::FDIV_RM64(const X86::Instruction&) { TODO(); }
void SoftCPU::FDIVR_RM64(const X86::Instruction&) { TODO(); }
void SoftCPU::HLT(const X86::Instruction&) { TODO(); }
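// Signed IDIV: same register layout as DIV, but dividend, divisor, quotient
// and remainder are interpreted as signed values, so overflow is checked
// against the signed limits of the destination width.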
void SoftCPU::IDIV_RM16(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto divisor = (i16)divisor_with_shadow.value();
    if (divisor == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    i32 dividend = (i32)(((u32)dx().value() << 16) | (u32)ax().value());
    i32 result = dividend / divisor;
    if (result > NumericLimits<i16>::max() || result < NumericLimits<i16>::min()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result, original_ax, dx(), divisor_with_shadow));
    set_dx(shadow_wrap_with_taint_from<u16>(dividend % divisor, original_ax, dx(), divisor_with_shadow));
}
void SoftCPU::IDIV_RM32(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto divisor = (i32)divisor_with_shadow.value();
    if (divisor == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    i64 dividend = (i64)(((u64)edx().value() << 32) | (u64)eax().value());
    i64 result = dividend / divisor;
    if (result > NumericLimits<i32>::max() || result < NumericLimits<i32>::min()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, original_eax, edx(), divisor_with_shadow));
    set_edx(shadow_wrap_with_taint_from<u32>(dividend % divisor, original_eax, edx(), divisor_with_shadow));
}
void SoftCPU::IDIV_RM8(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    auto divisor = (i8)divisor_with_shadow.value();
    if (divisor == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    i16 dividend = ax().value();
    i16 result = dividend / divisor;
    if (result > NumericLimits<i8>::max() || result < NumericLimits<i8>::min()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(result, divisor_with_shadow, original_ax));
    set_ah(shadow_wrap_with_taint_from<u8>(dividend % divisor, divisor_with_shadow, original_ax));
}
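// One-operand IMUL: signed widening multiply of the accumulator by r/m, with
// op_imul producing the high and low halves separately (DX:AX, EDX:EAX, or
// AH:AL). The two- and three-operand forms below keep only the low half.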
void SoftCPU::IMUL_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    op_imul<i16>(*this, src.value(), ax().value(), result_high, result_low);
    gpr16(X86::RegisterDX) = shadow_wrap_with_taint_from<u16>(result_high, src, ax());
    gpr16(X86::RegisterAX) = shadow_wrap_with_taint_from<u16>(result_low, src, ax());
}
void SoftCPU::IMUL_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    op_imul<i32>(*this, src.value(), eax().value(), result_high, result_low);
    gpr32(X86::RegisterEDX) = shadow_wrap_with_taint_from<u32>(result_high, src, eax());
    gpr32(X86::RegisterEAX) = shadow_wrap_with_taint_from<u32>(result_low, src, eax());
}
void SoftCPU::IMUL_RM8(const X86::Instruction& insn)
{
    i8 result_high;
    i8 result_low;
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    op_imul<i8>(*this, src.value(), al().value(), result_high, result_low);
    gpr8(X86::RegisterAH) = shadow_wrap_with_taint_from<u8>(result_high, src, al());
    gpr8(X86::RegisterAL) = shadow_wrap_with_taint_from<u8>(result_low, src, al());
}
void SoftCPU::IMUL_reg16_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    op_imul<i16>(*this, gpr16(insn.reg16()).value(), src.value(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src, gpr16(insn.reg16()));
}
void SoftCPU::IMUL_reg16_RM16_imm16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    op_imul<i16>(*this, src.value(), insn.imm16(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}
void SoftCPU::IMUL_reg16_RM16_imm8(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    op_imul<i16>(*this, src.value(), sign_extended_to<i16>(insn.imm8()), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}
void SoftCPU::IMUL_reg32_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    op_imul<i32>(*this, gpr32(insn.reg32()).value(), src.value(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src, gpr32(insn.reg32()));
}
void SoftCPU::IMUL_reg32_RM32_imm32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    op_imul<i32>(*this, src.value(), insn.imm32(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}
void SoftCPU::IMUL_reg32_RM32_imm8(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    op_imul<i32>(*this, src.value(), sign_extended_to<i32>(insn.imm8()), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}
void SoftCPU::INC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_inc(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn)));
}
void SoftCPU::INC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_inc(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn)));
}
void SoftCPU::INC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_inc(*this, insn.modrm().read8<ValueWithShadow<u8>>(*this, insn)));
}
void SoftCPU::INC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_inc(*this, const_gpr16(insn.reg16()));
}
void SoftCPU::INC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_inc(*this, const_gpr32(insn.reg32()));
}
void SoftCPU::INSB(const X86::Instruction&) { TODO(); }
void SoftCPU::INSD(const X86::Instruction&) { TODO(); }
void SoftCPU::INSW(const X86::Instruction&) { TODO(); }
void SoftCPU::INT3(const X86::Instruction&) { TODO(); }
void SoftCPU::INTO(const X86::Instruction&) { TODO(); }
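// INT imm8: only vector 0x82 (the SerenityOS syscall gate) is supported.
// The syscall number travels in EAX with arguments in EDX/ECX/EBX, and the
// result is written back to EAX.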
void SoftCPU::INT_imm8(const X86::Instruction& insn)
{
    ASSERT(insn.imm8() == 0x82);
    // FIXME: virt_syscall should take ValueWithShadow and whine about uninitialized arguments
    set_eax(shadow_wrap_as_initialized(m_emulator.virt_syscall(eax().value(), edx().value(), ecx().value(), ebx().value())));
}
void SoftCPU::INVLPG(const X86::Instruction&) { TODO(); }
void SoftCPU::IN_AL_DX(const X86::Instruction&) { TODO(); }
void SoftCPU::IN_AL_imm8(const X86::Instruction&) { TODO(); }
void SoftCPU::IN_AX_DX(const X86::Instruction&) { TODO(); }
void SoftCPU::IN_AX_imm8(const X86::Instruction&) { TODO(); }
void SoftCPU::IN_EAX_DX(const X86::Instruction&) { TODO(); }
void SoftCPU::IN_EAX_imm8(const X86::Instruction&) { TODO(); }
void SoftCPU::IRET(const X86::Instruction&) { TODO(); }
void SoftCPU::JCXZ_imm8(const X86::Instruction& insn)
{
    if (insn.a32()) {
        warn_if_uninitialized(ecx(), "jecxz imm8");
        if (ecx().value() == 0)
            set_eip(eip() + (i8)insn.imm8());
    } else {
        warn_if_uninitialized(cx(), "jcxz imm8");
        if (cx().value() == 0)
            set_eip(eip() + (i8)insn.imm8());
    }
}
void SoftCPU::JMP_FAR_mem16(const X86::Instruction&) { TODO(); }
void SoftCPU::JMP_FAR_mem32(const X86::Instruction&) { TODO(); }
void SoftCPU::JMP_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::JMP_RM32(const X86::Instruction& insn)
{
    set_eip(insn.modrm().read32<ValueWithShadow<u32>>(*this, insn).value());
}
void SoftCPU::JMP_imm16(const X86::Instruction& insn)
{
    set_eip(eip() + (i16)insn.imm16());
}
void SoftCPU::JMP_imm16_imm16(const X86::Instruction&) { TODO(); }
void SoftCPU::JMP_imm16_imm32(const X86::Instruction&) { TODO(); }
void SoftCPU::JMP_imm32(const X86::Instruction& insn)
{
    set_eip(eip() + (i32)insn.imm32());
}
void SoftCPU::JMP_short_imm8(const X86::Instruction& insn)
{
    set_eip(eip() + (i8)insn.imm8());
}
void SoftCPU::Jcc_NEAR_imm(const X86::Instruction& insn)
{
    warn_if_flags_tainted("jcc near imm32");
    if (evaluate_condition(insn.cc()))
        set_eip(eip() + (i32)insn.imm32());
}
void SoftCPU::Jcc_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("jcc imm8");
    if (evaluate_condition(insn.cc()))
        set_eip(eip() + (i8)insn.imm8());
}
void SoftCPU::LAHF(const X86::Instruction&) { TODO(); }
void SoftCPU::LAR_reg16_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::LAR_reg32_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::LDS_reg16_mem16(const X86::Instruction&) { TODO(); }
void SoftCPU::LDS_reg32_mem32(const X86::Instruction&) { TODO(); }
void SoftCPU::LEAVE16(const X86::Instruction&) { TODO(); }
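// LEAVE is "mov esp, ebp; pop ebp", folded here into reloading EBP from
// [SS:EBP] and pointing ESP just past the saved frame pointer, with EBP's
// shadow carried over to ESP.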
void SoftCPU::LEAVE32(const X86::Instruction&)
{
    auto new_ebp = read_memory32({ ss(), ebp().value() });
    set_esp({ ebp().value() + 4, ebp().shadow() });
    set_ebp(new_ebp);
}
void SoftCPU::LEA_reg16_mem16(const X86::Instruction& insn)
{
    // FIXME: Respect shadow values
    gpr16(insn.reg16()) = shadow_wrap_as_initialized<u16>(insn.modrm().resolve(*this, insn).offset());
}
void SoftCPU::LEA_reg32_mem32(const X86::Instruction& insn)
{
    // FIXME: Respect shadow values
    gpr32(insn.reg32()) = shadow_wrap_as_initialized<u32>(insn.modrm().resolve(*this, insn).offset());
}
void SoftCPU::LES_reg16_mem16(const X86::Instruction&) { TODO(); }
void SoftCPU::LES_reg32_mem32(const X86::Instruction&) { TODO(); }
void SoftCPU::LFS_reg16_mem16(const X86::Instruction&) { TODO(); }
void SoftCPU::LFS_reg32_mem32(const X86::Instruction&) { TODO(); }
void SoftCPU::LGDT(const X86::Instruction&) { TODO(); }
void SoftCPU::LGS_reg16_mem16(const X86::Instruction&) { TODO(); }
void SoftCPU::LGS_reg32_mem32(const X86::Instruction&) { TODO(); }
void SoftCPU::LIDT(const X86::Instruction&) { TODO(); }
void SoftCPU::LLDT_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::LMSW_RM16(const X86::Instruction&) { TODO(); }
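// LODS: load [DS:ESI/SI] (segment-overridable) into the accumulator and step
// the source index; only the plain REP prefix is meaningful for LODS.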
template<typename T>
ALWAYS_INLINE static void do_lods(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<false>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.gpr<T>(X86::RegisterAL) = src;
        cpu.step_source_index(insn.a32(), sizeof(T));
    });
}
void SoftCPU::LODSB(const X86::Instruction& insn)
{
    do_lods<u8>(*this, insn);
}
void SoftCPU::LODSD(const X86::Instruction& insn)
{
    do_lods<u32>(*this, insn);
}
void SoftCPU::LODSW(const X86::Instruction& insn)
{
    do_lods<u16>(*this, insn);
}
void SoftCPU::LOOPNZ_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("loopnz");
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0 && !zf())
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0 && !zf())
            set_eip(eip() + (i8)insn.imm8());
    }
}
void SoftCPU::LOOPZ_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("loopz");
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0 && zf())
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0 && zf())
            set_eip(eip() + (i8)insn.imm8());
    }
}
void SoftCPU::LOOP_imm8(const X86::Instruction& insn)
{
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0)
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0)
            set_eip(eip() + (i8)insn.imm8());
    }
}
void SoftCPU::LSL_reg16_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::LSL_reg32_RM32(const X86::Instruction&) { TODO(); }
void SoftCPU::LSS_reg16_mem16(const X86::Instruction&) { TODO(); }
void SoftCPU::LSS_reg32_mem32(const X86::Instruction&) { TODO(); }
void SoftCPU::LTR_RM16(const X86::Instruction&) { TODO(); }
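// MOVS: copy [DS:ESI/SI] (segment-overridable) to [ES:EDI/DI] and step both
// indices; the shadow bits travel with the data, so copied memory keeps its
// taint.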
template<typename T>
ALWAYS_INLINE static void do_movs(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<false>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.write_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() }, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}
void SoftCPU::MOVSB(const X86::Instruction& insn)
{
    do_movs<u8>(*this, insn);
}
void SoftCPU::MOVSD(const X86::Instruction& insn)
{
    do_movs<u32>(*this, insn);
}
void SoftCPU::MOVSW(const X86::Instruction& insn)
{
    do_movs<u16>(*this, insn);
}
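// MOVSX/MOVZX: the hand-built shadow constants appear to mark the newly
// produced upper bytes as initialized (0x01 per shadow byte) while keeping
// the source's shadow for the low byte(s), so taint in the source value
// survives the widening.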
void SoftCPU::MOVSX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    gpr16(insn.reg16()) = ValueWithShadow<u16>(sign_extended_to<u16>(src.value()), 0x0100 | (src.shadow()));
}
void SoftCPU::MOVSX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(sign_extended_to<u32>(src.value()), 0x01010000 | (src.shadow()));
}
void SoftCPU::MOVSX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(sign_extended_to<u32>(src.value()), 0x01010100 | (src.shadow()));
}
void SoftCPU::MOVZX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    gpr16(insn.reg16()) = ValueWithShadow<u16>(src.value(), 0x0100 | (src.shadow() & 0xff));
}
void SoftCPU::MOVZX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010000 | (src.shadow() & 0xffff));
}
void SoftCPU::MOVZX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010100 | (src.shadow() & 0xff));
}
void SoftCPU::MOV_AL_moff8(const X86::Instruction& insn)
{
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
void SoftCPU::MOV_AX_moff16(const X86::Instruction& insn)
{
    set_ax(read_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
void SoftCPU::MOV_CR_reg32(const X86::Instruction&) { TODO(); }
void SoftCPU::MOV_DR_reg32(const X86::Instruction&) { TODO(); }
void SoftCPU::MOV_EAX_moff32(const X86::Instruction& insn)
{
    set_eax(read_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
void SoftCPU::MOV_RM16_imm16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(insn.imm16()));
}
void SoftCPU::MOV_RM16_reg16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
}
void SoftCPU::MOV_RM16_seg(const X86::Instruction&) { TODO(); }
void SoftCPU::MOV_RM32_imm32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(insn.imm32()));
}
void SoftCPU::MOV_RM32_reg32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
}
void SoftCPU::MOV_RM8_imm8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized(insn.imm8()));
}
void SoftCPU::MOV_RM8_reg8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
}
void SoftCPU::MOV_moff16_AX(const X86::Instruction& insn)
{
    write_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, ax());
}
void SoftCPU::MOV_moff32_EAX(const X86::Instruction& insn)
{
    write_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, eax());
}
void SoftCPU::MOV_moff8_AL(const X86::Instruction& insn)
{
    write_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, al());
}
void SoftCPU::MOV_reg16_RM16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
}
void SoftCPU::MOV_reg16_imm16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = shadow_wrap_as_initialized(insn.imm16());
}
void SoftCPU::MOV_reg32_CR(const X86::Instruction&) { TODO(); }
void SoftCPU::MOV_reg32_DR(const X86::Instruction&) { TODO(); }
void SoftCPU::MOV_reg32_RM32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
}
void SoftCPU::MOV_reg32_imm32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = shadow_wrap_as_initialized(insn.imm32());
}
void SoftCPU::MOV_reg8_RM8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
}
void SoftCPU::MOV_reg8_imm8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = shadow_wrap_as_initialized(insn.imm8());
}
void SoftCPU::MOV_seg_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::MOV_seg_RM32(const X86::Instruction&) { TODO(); }
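// Unsigned MUL: widening multiply into DX:AX / EDX:EAX (or AX for the 8-bit
// form). CF and OF are set exactly when the high half is non-zero, i.e. when
// the product no longer fits in the low half.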
void SoftCPU::MUL_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    u32 result = (u32)ax().value() * (u32)src.value();
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result & 0xffff, src, original_ax));
    set_dx(shadow_wrap_with_taint_from<u16>(result >> 16, src, original_ax));
    taint_flags_from(src, original_ax);
    set_cf(dx().value() != 0);
    set_of(dx().value() != 0);
}
void SoftCPU::MUL_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    u64 result = (u64)eax().value() * (u64)src.value();
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, src, original_eax));
    set_edx(shadow_wrap_with_taint_from<u32>(result >> 32, src, original_eax));
    taint_flags_from(src, original_eax);
    set_cf(edx().value() != 0);
    set_of(edx().value() != 0);
}
void SoftCPU::MUL_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    u16 result = (u16)al().value() * src.value();
    auto original_al = al();
    set_ax(shadow_wrap_with_taint_from(result, src, original_al));
    taint_flags_from(src, original_al);
    set_cf((result & 0xff00) != 0);
    set_of((result & 0xff00) != 0);
}
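// NEG is implemented as 0 - r/m through op_sub so that all arithmetic flags
// and shadow propagation come out the same as for a real subtraction.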
void SoftCPU::NEG_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_sub<ValueWithShadow<u16>>(*this, shadow_wrap_as_initialized<u16>(0), insn.modrm().read16<ValueWithShadow<u16>>(*this, insn)));
}
void SoftCPU::NEG_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_sub<ValueWithShadow<u32>>(*this, shadow_wrap_as_initialized<u32>(0), insn.modrm().read32<ValueWithShadow<u32>>(*this, insn)));
}
void SoftCPU::NEG_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_sub<ValueWithShadow<u8>>(*this, shadow_wrap_as_initialized<u8>(0), insn.modrm().read8<ValueWithShadow<u8>>(*this, insn)));
}
void SoftCPU::NOP(const X86::Instruction&)
{
}
void SoftCPU::NOT_RM16(const X86::Instruction& insn)
{
    auto data = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    insn.modrm().write16(*this, insn, ValueWithShadow<u16>(~data.value(), data.shadow()));
}
void SoftCPU::NOT_RM32(const X86::Instruction& insn)
{
    auto data = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    insn.modrm().write32(*this, insn, ValueWithShadow<u32>(~data.value(), data.shadow()));
}
void SoftCPU::NOT_RM8(const X86::Instruction& insn)
{
    auto data = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    insn.modrm().write8(*this, insn, ValueWithShadow<u8>(~data.value(), data.shadow()));
}
void SoftCPU::OUTSB(const X86::Instruction&) { TODO(); }
void SoftCPU::OUTSD(const X86::Instruction&) { TODO(); }
void SoftCPU::OUTSW(const X86::Instruction&) { TODO(); }
void SoftCPU::OUT_DX_AL(const X86::Instruction&) { TODO(); }
void SoftCPU::OUT_DX_AX(const X86::Instruction&) { TODO(); }
void SoftCPU::OUT_DX_EAX(const X86::Instruction&) { TODO(); }
void SoftCPU::OUT_imm8_AL(const X86::Instruction&) { TODO(); }
void SoftCPU::OUT_imm8_AX(const X86::Instruction&) { TODO(); }
void SoftCPU::OUT_imm8_EAX(const X86::Instruction&) { TODO(); }
void SoftCPU::PADDB_mm1_mm2m64(const X86::Instruction&) { TODO(); }
void SoftCPU::PADDW_mm1_mm2m64(const X86::Instruction&) { TODO(); }
void SoftCPU::PADDD_mm1_mm2m64(const X86::Instruction&) { TODO(); }
void SoftCPU::POPA(const X86::Instruction&) { TODO(); }
void SoftCPU::POPAD(const X86::Instruction&) { TODO(); }
void SoftCPU::POPF(const X86::Instruction&) { TODO(); }
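// POPFD: only bits covered by the 0x00fcffff mask are replaced, which leaves
// the reserved upper bits and the RF/VM system bits (16 and 17) untouched.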
void SoftCPU::POPFD(const X86::Instruction&)
{
    auto popped_value = pop32();
    m_eflags &= ~0x00fcffff;
    m_eflags |= popped_value.value() & 0x00fcffff;
    taint_flags_from(popped_value);
}
void SoftCPU::POP_DS(const X86::Instruction&) { TODO(); }
void SoftCPU::POP_ES(const X86::Instruction&) { TODO(); }
void SoftCPU::POP_FS(const X86::Instruction&) { TODO(); }
void SoftCPU::POP_GS(const X86::Instruction&) { TODO(); }
void SoftCPU::POP_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, pop16());
}
void SoftCPU::POP_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, pop32());
}
void SoftCPU::POP_SS(const X86::Instruction&) { TODO(); }
void SoftCPU::POP_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = pop16();
}
void SoftCPU::POP_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = pop32();
}
void SoftCPU::PUSHA(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSHAD(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSHF(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSHFD(const X86::Instruction&)
{
    // FIXME: Respect shadow flags when they exist!
    push32(shadow_wrap_as_initialized(m_eflags & 0x00fcffff));
}
void SoftCPU::PUSH_CS(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSH_DS(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSH_ES(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSH_FS(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSH_GS(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSH_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSH_RM32(const X86::Instruction& insn)
{
    push32(insn.modrm().read32<ValueWithShadow<u32>>(*this, insn));
}
void SoftCPU::PUSH_SP_8086_80186(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSH_SS(const X86::Instruction&) { TODO(); }
void SoftCPU::PUSH_imm16(const X86::Instruction& insn)
{
    push16(shadow_wrap_as_initialized(insn.imm16()));
}
void SoftCPU::PUSH_imm32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(insn.imm32()));
}
void SoftCPU::PUSH_imm8(const X86::Instruction& insn)
{
    ASSERT(!insn.has_operand_size_override_prefix());
    push32(shadow_wrap_as_initialized<u32>(sign_extended_to<i32>(insn.imm8())));
}
void SoftCPU::PUSH_reg16(const X86::Instruction& insn)
{
    push16(gpr16(insn.reg16()));
}
void SoftCPU::PUSH_reg32(const X86::Instruction& insn)
{
    push32(gpr32(insn.reg32()));
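    // Secret-handshake continuation: once two SALC instructions have advanced
    // m_secret_handshake_state to 2 (see SALC below), the next three pushed
    // registers are captured as the payload for did_receive_secret_data().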
    if (m_secret_handshake_state == 2) {
        m_secret_data[0] = gpr32(insn.reg32()).value();
        ++m_secret_handshake_state;
    } else if (m_secret_handshake_state == 3) {
        m_secret_data[1] = gpr32(insn.reg32()).value();
        ++m_secret_handshake_state;
    } else if (m_secret_handshake_state == 4) {
        m_secret_data[2] = gpr32(insn.reg32()).value();
        m_secret_handshake_state = 0;
        did_receive_secret_data();
    }
}
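// The shift/rotate helpers below lean on the host CPU: CF is preset with
// stc/clc where the operation consumes it, the matching native instruction is
// executed on the host, and the resulting flags are harvested via pushf.
// This naturally assumes an x86 host.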
template<typename T, bool cf>
ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rclw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rclb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
template<typename T>
ALWAYS_INLINE static T op_rcl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcl");
    if (cpu.cf())
        return op_rcl_impl<T, true>(cpu, data, steps);
    return op_rcl_impl<T, false>(cpu, data, steps);
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCL, op_rcl)
template<typename T, bool cf>
ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcrl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rcrw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rcrb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
template<typename T>
ALWAYS_INLINE static T op_rcr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcr");
    if (cpu.cf())
        return op_rcr_impl<T, true>(cpu, data, steps);
    return op_rcr_impl<T, false>(cpu, data, steps);
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCR, op_rcr)
void SoftCPU::RDTSC(const X86::Instruction&) { TODO(); }
void SoftCPU::RET(const X86::Instruction& insn)
{
    ASSERT(!insn.has_operand_size_override_prefix());
    auto ret_address = pop32();
    warn_if_uninitialized(ret_address, "ret");
    set_eip(ret_address.value());
}
void SoftCPU::RETF(const X86::Instruction&) { TODO(); }
void SoftCPU::RETF_imm16(const X86::Instruction&) { TODO(); }
void SoftCPU::RET_imm16(const X86::Instruction& insn)
{
    ASSERT(!insn.has_operand_size_override_prefix());
    auto ret_address = pop32();
    warn_if_uninitialized(ret_address, "ret imm16");
    set_eip(ret_address.value());
    set_esp({ esp().value() + insn.imm16(), esp().shadow() });
}
template<typename T>
ALWAYS_INLINE static T op_rol(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("roll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rolw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rolb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROL, op_rol)
template<typename T>
ALWAYS_INLINE static T op_ror(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rorl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rorw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROR, op_ror)
void SoftCPU::SAHF(const X86::Instruction&) { TODO(); }
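// SALC (undocumented opcode 0xD6): set AL to 0xff if CF is set, else 0x00.
// Here it also drives the secret handshake: two consecutive SALCs arm the
// state machine that PUSH reg32 then feeds (see PUSH_reg32 above).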
void SoftCPU::SALC(const X86::Instruction&)
{
    // FIXME: Respect shadow flags once they exist!
    set_al(shadow_wrap_as_initialized<u8>(cf() ? 0xff : 0x00));
    if (m_secret_handshake_state < 2)
        ++m_secret_handshake_state;
    else
        m_secret_handshake_state = 0;
}
template<typename T>
ALWAYS_INLINE static T op_sar(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sarl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sarw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sarb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SAR, op_sar)
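// SCAS: compare the accumulator against [ES:EDI/DI] via op_sub and step the
// destination index. As with CMPS, the <true> argument presumably lets
// REPZ/REPNZ terminate the scan on ZF.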
template<typename T>
ALWAYS_INLINE static void do_scas(SoftCPU& cpu, const X86::Instruction& insn)
{
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.const_gpr<T>(X86::RegisterAL);
        auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
        op_sub(cpu, dest, src);
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}
void SoftCPU::SCASB(const X86::Instruction& insn)
{
    do_scas<u8>(*this, insn);
}
void SoftCPU::SCASD(const X86::Instruction& insn)
{
    do_scas<u32>(*this, insn);
}
void SoftCPU::SCASW(const X86::Instruction& insn)
{
    do_scas<u16>(*this, insn);
}
void SoftCPU::SETcc_RM8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("setcc");
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized<u8>(evaluate_condition(insn.cc())));
}
void SoftCPU::SGDT(const X86::Instruction&) { TODO(); }
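// SHLD/SHRD: double-precision shifts that shift bits from the source register
// into r/m; the count comes from CL or an immediate byte.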
void SoftCPU::SHLD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn), const_gpr16(insn.reg16()), cl()));
}
void SoftCPU::SHLD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}
void SoftCPU::SHLD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn), const_gpr32(insn.reg32()), cl()));
}
void SoftCPU::SHLD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHL, op_shl)
void SoftCPU::SHRD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn), const_gpr16(insn.reg16()), cl()));
}
void SoftCPU::SHRD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}
void SoftCPU::SHRD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn), const_gpr32(insn.reg32()), cl()));
}
void SoftCPU::SHRD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHR, op_shr)
void SoftCPU::SIDT(const X86::Instruction&) { TODO(); }
void SoftCPU::SLDT_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::SMSW_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::STC(const X86::Instruction&)
{
    set_cf(true);
}
void SoftCPU::STD(const X86::Instruction&)
{
    set_df(true);
}
void SoftCPU::STI(const X86::Instruction&) { TODO(); }
void SoftCPU::STOSB(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory8({ es(), destination_index(insn.a32()).value() }, al());
        step_destination_index(insn.a32(), 1);
    });
}
void SoftCPU::STOSD(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory32({ es(), destination_index(insn.a32()).value() }, eax());
        step_destination_index(insn.a32(), 4);
    });
}
void SoftCPU::STOSW(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory16({ es(), destination_index(insn.a32()).value() }, ax());
        step_destination_index(insn.a32(), 2);
    });
}
void SoftCPU::STR_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::UD0(const X86::Instruction&) { TODO(); }
void SoftCPU::UD1(const X86::Instruction&) { TODO(); }
void SoftCPU::UD2(const X86::Instruction&) { TODO(); }
void SoftCPU::VERR_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::VERW_RM16(const X86::Instruction&) { TODO(); }
void SoftCPU::WAIT(const X86::Instruction&) { TODO(); }
void SoftCPU::WBINVD(const X86::Instruction&) { TODO(); }
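// XADD: exchange-and-add; the register receives the original r/m value while
// r/m receives the sum, with op_add supplying flags and taint.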
void SoftCPU::XADD_RM16_reg16(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto src = const_gpr16(insn.reg16());
    auto result = op_add(*this, dest, src);
    gpr16(insn.reg16()) = dest;
    insn.modrm().write16(*this, insn, result);
}
void SoftCPU::XADD_RM32_reg32(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto src = const_gpr32(insn.reg32());
    auto result = op_add(*this, dest, src);
    gpr32(insn.reg32()) = dest;
    insn.modrm().write32(*this, insn, result);
}
void SoftCPU::XADD_RM8_reg8(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    auto src = const_gpr8(insn.reg8());
    auto result = op_add(*this, dest, src);
    gpr8(insn.reg8()) = dest;
    insn.modrm().write8(*this, insn, result);
}
void SoftCPU::XCHG_AX_reg16(const X86::Instruction& insn)
{
    auto temp = gpr16(insn.reg16());
    gpr16(insn.reg16()) = ax();
    set_ax(temp);
}
void SoftCPU::XCHG_EAX_reg32(const X86::Instruction& insn)
{
    auto temp = gpr32(insn.reg32());
    gpr32(insn.reg32()) = eax();
    set_eax(temp);
}
void SoftCPU::XCHG_reg16_RM16(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
    gpr16(insn.reg16()) = temp;
}
void SoftCPU::XCHG_reg32_RM32(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
    gpr32(insn.reg32()) = temp;
}
void SoftCPU::XCHG_reg8_RM8(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
    gpr8(insn.reg8()) = temp;
}
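// XLAT: AL = [DS:(E)BX + AL] (segment-overridable). Both the table base and
// the index are checked for uninitialized use before the lookup.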
void SoftCPU::XLAT(const X86::Instruction& insn)
{
    if (insn.a32())
        warn_if_uninitialized(ebx(), "xlat ebx");
    else
        warn_if_uninitialized(bx(), "xlat bx");
    warn_if_uninitialized(al(), "xlat al");
    u32 offset = (insn.a32() ? ebx().value() : bx().value()) + al().value();
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), offset }));
}
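// The DEFINE_GENERIC_INSN_HANDLERS* macros stamp out one handler per operand
// form of the classic ALU instructions via the generic_* helpers. update_dest
// is false for CMP/TEST (flags only, result discarded), and
// is_zero_idiom_if_both_operands_same recognizes patterns like xor eax, eax
// and sub eax, eax, presumably so the known-zero result can be treated as
// initialized even when the register held uninitialized data.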
#define DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same) \
    void SoftCPU::mnemonic##_AL_imm8(const X86::Instruction& insn) { generic_AL_imm8<update_dest>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_AX_imm16(const X86::Instruction& insn) { generic_AX_imm16<update_dest>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_EAX_imm32(const X86::Instruction& insn) { generic_EAX_imm32<update_dest>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM16_imm16(const X86::Instruction& insn) { generic_RM16_imm16<update_dest>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { generic_RM16_reg16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm32(const X86::Instruction& insn) { generic_RM32_imm32<update_dest>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { generic_RM32_reg32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<update_dest>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_RM8_reg8(const X86::Instruction& insn) { generic_RM8_reg8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }
#define DEFINE_GENERIC_INSN_HANDLERS(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same) \
    DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same) \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_imm8<update_dest>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_imm8<update_dest>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_reg16_RM16(const X86::Instruction& insn) { generic_reg16_RM16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_reg32_RM32(const X86::Instruction& insn) { generic_reg32_RM32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_reg8_RM8(const X86::Instruction& insn) { generic_reg8_RM8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }
DEFINE_GENERIC_INSN_HANDLERS(XOR, op_xor, true, true)
DEFINE_GENERIC_INSN_HANDLERS(OR, op_or, true, false)
DEFINE_GENERIC_INSN_HANDLERS(ADD, op_add, true, false)
DEFINE_GENERIC_INSN_HANDLERS(ADC, op_adc, true, false)
DEFINE_GENERIC_INSN_HANDLERS(SUB, op_sub, true, true)
DEFINE_GENERIC_INSN_HANDLERS(SBB, op_sbb, true, false)
DEFINE_GENERIC_INSN_HANDLERS(AND, op_and, true, false)
DEFINE_GENERIC_INSN_HANDLERS(CMP, op_sub, false, false)
DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(TEST, op_and, false, false)
void SoftCPU::MOVQ_mm1_mm2m64(const X86::Instruction&) { TODO(); }
void SoftCPU::EMMS(const X86::Instruction&) { TODO(); }
void SoftCPU::MOVQ_mm1_m64_mm2(const X86::Instruction&) { TODO(); }
void SoftCPU::wrap_0xC0(const X86::Instruction&) { TODO(); }
void SoftCPU::wrap_0xC1_16(const X86::Instruction&) { TODO(); }
void SoftCPU::wrap_0xC1_32(const X86::Instruction&) { TODO(); }
void SoftCPU::wrap_0xD0(const X86::Instruction&) { TODO(); }
void SoftCPU::wrap_0xD1_16(const X86::Instruction&) { TODO(); }
void SoftCPU::wrap_0xD1_32(const X86::Instruction&) { TODO(); }
void SoftCPU::wrap_0xD2(const X86::Instruction&) { TODO(); }
void SoftCPU::wrap_0xD3_16(const X86::Instruction&) { TODO(); }
void SoftCPU::wrap_0xD3_32(const X86::Instruction&) { TODO(); }
}