/*
 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "SoftCPU.h"
#include "Emulator.h"
#include <AK/Assertions.h>
#include <stdio.h>
#include <string.h>

#if defined(__GNUC__) && !defined(__clang__)
#    pragma GCC optimize("O3")
#endif

#define TODO_INSN()                                                                 \
    do {                                                                            \
        report("\n==%d== Unimplemented instruction: %s\n", getpid(), __FUNCTION__); \
        m_emulator.dump_backtrace();                                                \
        _exit(0);                                                                   \
    } while (0)

//#define MEMORY_DEBUG
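
// Each shift/rotate mnemonic gets nine handlers (r/m8, r/m16 and r/m32, each
// shifted by 1, by CL, or by an imm8); this macro stamps them all out against
// a single generic op<> template.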
#define DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(mnemonic, op)                                                                            \
    void SoftCPU::mnemonic##_RM8_1(const X86::Instruction& insn) { generic_RM8_1(op<ValueWithShadow<u8>>, insn); }                         \
    void SoftCPU::mnemonic##_RM8_CL(const X86::Instruction& insn) { generic_RM8_CL(op<ValueWithShadow<u8>>, insn); }                       \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<true, false>(op<ValueWithShadow<u8>>, insn); }      \
    void SoftCPU::mnemonic##_RM16_1(const X86::Instruction& insn) { generic_RM16_1(op<ValueWithShadow<u16>>, insn); }                      \
    void SoftCPU::mnemonic##_RM16_CL(const X86::Instruction& insn) { generic_RM16_CL(op<ValueWithShadow<u16>>, insn); }                    \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_unsigned_imm8<true>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_1(const X86::Instruction& insn) { generic_RM32_1(op<ValueWithShadow<u32>>, insn); }                      \
    void SoftCPU::mnemonic##_RM32_CL(const X86::Instruction& insn) { generic_RM32_CL(op<ValueWithShadow<u32>>, insn); }                    \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_unsigned_imm8<true>(op<ValueWithShadow<u32>>, insn); }

namespace UserspaceEmulator {
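
// Shadow-state diagnostics: complain loudly (with a backtrace) whenever an
// uninitialized value, or a flags state computed from one, influences
// something observable.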
template<typename T>
void warn_if_uninitialized(T value_with_shadow, const char* message)
{
    if (value_with_shadow.is_uninitialized()) {
        dbgprintf("\033[31;1mWarning! Use of uninitialized value: %s\033[0m\n", message);
        Emulator::the().dump_backtrace();
    }
}

void SoftCPU::warn_if_flags_tainted(const char* message) const
{
    if (m_flags_tainted) {
        report("\n");
        report("==%d== \033[31;1mConditional depends on uninitialized data\033[0m (%s)\n", getpid(), message);
        Emulator::the().dump_backtrace();
    }
}
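
// Sign-extend a narrow value to a wider type: if the sign bit of U is set,
// fill the bits present in T but not in U with ones, e.g.
// sign_extended_to<u16>(u8(0x80)) == 0xff80.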
template<typename T, typename U>
inline constexpr T sign_extended_to(U value)
{
    if (!(value & X86::TypeTrivia<U>::sign_bit))
        return value;
    return (X86::TypeTrivia<T>::mask & ~X86::TypeTrivia<U>::mask) | value;
}
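
// Fresh CPU state: GPRs start zeroed, with every shadow byte set to 0x01
// (the "initialized" marker in this shadow scheme). The selectors presumably
// correspond to the fixed GDT entries the emulator sets up: 0x18 (code),
// 0x20 (data), 0x28 (GS).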
SoftCPU::SoftCPU(Emulator& emulator)
    : m_emulator(emulator)
{
    memset(m_gpr, 0, sizeof(m_gpr));
    memset(m_gpr_shadow, 1, sizeof(m_gpr_shadow));

    m_segment[(int)X86::SegmentRegister::CS] = 0x18;
    m_segment[(int)X86::SegmentRegister::DS] = 0x20;
    m_segment[(int)X86::SegmentRegister::ES] = 0x20;
    m_segment[(int)X86::SegmentRegister::SS] = 0x20;
    m_segment[(int)X86::SegmentRegister::GS] = 0x28;
}

void SoftCPU::dump() const
{
    printf("eax=%08x ebx=%08x ecx=%08x edx=%08x ", eax().value(), ebx().value(), ecx().value(), edx().value());
    printf("ebp=%08x esp=%08x esi=%08x edi=%08x ", ebp().value(), esp().value(), esi().value(), edi().value());
    printf("o=%u s=%u z=%u a=%u p=%u c=%u\n", of(), sf(), zf(), af(), pf(), cf());
    printf("#ax=%08x #bx=%08x #cx=%08x #dx=%08x ", eax().shadow(), ebx().shadow(), ecx().shadow(), edx().shadow());
    printf("#bp=%08x #sp=%08x #si=%08x #di=%08x ", ebp().shadow(), esp().shadow(), esi().shadow(), edi().shadow());
    printf("#f=%u\n", m_flags_tainted);
    fflush(stdout);
}
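
// The emulated program reports heap events to the emulator through a
// "secret data" side channel; word 0 selects the event (1 == malloc,
// 2 == free) and the remaining words carry the arguments.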
void SoftCPU::did_receive_secret_data()
{
    if (m_secret_data[0] == 1) {
        if (auto* tracer = m_emulator.malloc_tracer())
            tracer->target_did_malloc({}, m_secret_data[2], m_secret_data[1]);
    } else if (m_secret_data[0] == 2) {
        if (auto* tracer = m_emulator.malloc_tracer())
            tracer->target_did_free({}, m_secret_data[1]);
    } else {
        ASSERT_NOT_REACHED();
    }
}

void SoftCPU::update_code_cache()
{
    auto* region = m_emulator.mmu().find_region({ cs(), eip() });
    ASSERT(region);
    m_cached_code_ptr = region->cacheable_ptr(eip() - region->base());
    m_cached_code_end = region->cacheable_ptr(region->size());
}
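
// Memory accessors: every access goes through the emulator's MMU and carries
// shadow bits alongside the value. Reads are allowed through selectors 0x18,
// 0x20 and 0x28; writes only through 0x20 and 0x28.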
ValueWithShadow<u8> SoftCPU::read_memory8(X86::LogicalAddress address)
{
    ASSERT(address.selector() == 0x18 || address.selector() == 0x20 || address.selector() == 0x28);
    auto value = m_emulator.mmu().read8(address);
#ifdef MEMORY_DEBUG
    printf("\033[36;1mread_memory8: @%04x:%08x -> %02x (%02x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    return value;
}

ValueWithShadow<u16> SoftCPU::read_memory16(X86::LogicalAddress address)
{
    ASSERT(address.selector() == 0x18 || address.selector() == 0x20 || address.selector() == 0x28);
    auto value = m_emulator.mmu().read16(address);
#ifdef MEMORY_DEBUG
    printf("\033[36;1mread_memory16: @%04x:%08x -> %04x (%04x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    return value;
}

ValueWithShadow<u32> SoftCPU::read_memory32(X86::LogicalAddress address)
{
    ASSERT(address.selector() == 0x18 || address.selector() == 0x20 || address.selector() == 0x28);
    auto value = m_emulator.mmu().read32(address);
#ifdef MEMORY_DEBUG
    printf("\033[36;1mread_memory32: @%04x:%08x -> %08x (%08x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    return value;
}

ValueWithShadow<u64> SoftCPU::read_memory64(X86::LogicalAddress address)
{
    ASSERT(address.selector() == 0x18 || address.selector() == 0x20 || address.selector() == 0x28);
    auto value = m_emulator.mmu().read64(address);
#ifdef MEMORY_DEBUG
    printf("\033[36;1mread_memory64: @%04x:%08x -> %016llx (%016llx)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    return value;
}

void SoftCPU::write_memory8(X86::LogicalAddress address, ValueWithShadow<u8> value)
{
    ASSERT(address.selector() == 0x20 || address.selector() == 0x28);
#ifdef MEMORY_DEBUG
    printf("\033[35;1mwrite_memory8: @%04x:%08x <- %02x (%02x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    m_emulator.mmu().write8(address, value);
}

void SoftCPU::write_memory16(X86::LogicalAddress address, ValueWithShadow<u16> value)
{
    ASSERT(address.selector() == 0x20 || address.selector() == 0x28);
#ifdef MEMORY_DEBUG
    printf("\033[35;1mwrite_memory16: @%04x:%08x <- %04x (%04x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    m_emulator.mmu().write16(address, value);
}

void SoftCPU::write_memory32(X86::LogicalAddress address, ValueWithShadow<u32> value)
{
    ASSERT(address.selector() == 0x20 || address.selector() == 0x28);
#ifdef MEMORY_DEBUG
    printf("\033[35;1mwrite_memory32: @%04x:%08x <- %08x (%08x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    m_emulator.mmu().write32(address, value);
}

void SoftCPU::write_memory64(X86::LogicalAddress address, ValueWithShadow<u64> value)
{
    ASSERT(address.selector() == 0x20 || address.selector() == 0x28);
#ifdef MEMORY_DEBUG
    printf("\033[35;1mwrite_memory64: @%04x:%08x <- %016llx (%016llx)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow());
#endif
    m_emulator.mmu().write64(address, value);
}
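
// Stack helpers: push_string() carves out a 16-byte-aligned chunk below ESP
// and copies the string plus its NUL terminator into it; push/pop move ESP
// while preserving its shadow, warning if ESP itself is uninitialized.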
void SoftCPU::push_string(const StringView& string)
{
    size_t space_to_allocate = round_up_to_power_of_two(string.length() + 1, 16);
    set_esp({ esp().value() - space_to_allocate, esp().shadow() });
    m_emulator.mmu().copy_to_vm(esp().value(), string.characters_without_null_termination(), string.length());
    m_emulator.mmu().write8({ 0x20, esp().value() + string.length() }, shadow_wrap_as_initialized((u8)'\0'));
}

void SoftCPU::push32(ValueWithShadow<u32> value)
{
    set_esp({ esp().value() - sizeof(u32), esp().shadow() });
    warn_if_uninitialized(esp(), "push32");
    write_memory32({ ss(), esp().value() }, value);
}

ValueWithShadow<u32> SoftCPU::pop32()
{
    warn_if_uninitialized(esp(), "pop32");
    auto value = read_memory32({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u32), esp().shadow() });
    return value;
}

void SoftCPU::push16(ValueWithShadow<u16> value)
{
    warn_if_uninitialized(esp(), "push16");
    set_esp({ esp().value() - sizeof(u16), esp().shadow() });
    write_memory16({ ss(), esp().value() }, value);
}

ValueWithShadow<u16> SoftCPU::pop16()
{
    warn_if_uninitialized(esp(), "pop16");
    auto value = read_memory16({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u16), esp().shadow() });
    return value;
}
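
// Run an instruction body once, or repeatedly under a REP prefix: the loop
// index (ECX, or CX with a 16-bit address size) counts iterations, and the
// check_zf variants additionally honor the REPZ/REPNZ exit condition.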
template<bool check_zf, typename Callback>
void SoftCPU::do_once_or_repeat(const X86::Instruction& insn, Callback callback)
{
    if (!insn.has_rep_prefix())
        return callback();

    while (loop_index(insn.a32()).value()) {
        callback();
        decrement_loop_index(insn.a32());
        if constexpr (check_zf) {
            warn_if_flags_tainted("repz/repnz");
            if (insn.rep_prefix() == X86::Prefix::REPZ && !zf())
                break;
            if (insn.rep_prefix() == X86::Prefix::REPNZ && zf())
                break;
        }
    }
}
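
// The op_*() arithmetic helpers below all follow the same pattern: execute
// the equivalent native x86 instruction on the host, immediately capture the
// host EFLAGS with pushf/pop, copy the relevant bits into the emulated flags,
// and propagate shadow taint from the operands into the result and flags.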
template<typename T>
ALWAYS_INLINE static T op_inc(SoftCPU& cpu, T data)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("incl %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("incw %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("incb %%al\n"
                     : "=a"(result)
                     : "a"(data.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszap(new_flags);
    cpu.taint_flags_from(data);
    return shadow_wrap_with_taint_from(result, data);
}

template<typename T>
ALWAYS_INLINE static T op_dec(SoftCPU& cpu, T data)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("decl %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("decw %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("decb %%al\n"
                     : "=a"(result)
                     : "a"(data.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszap(new_flags);
    cpu.taint_flags_from(data);
    return shadow_wrap_with_taint_from(result, data);
}

template<typename T>
ALWAYS_INLINE static T op_xor(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("xorl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("xor %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("xorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_or(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("orl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("or %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("orb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_sub(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("subl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("subw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("subb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}
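
// ADC and SBB consume the incoming carry flag. It is passed in as a template
// parameter and materialized with stc/clc right before the instruction runs;
// op_adc()/op_sbb() pick the matching instantiation from the emulated CF.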
template<typename T, bool cf>
ALWAYS_INLINE static T op_sbb_impl(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sbbl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sbbw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sbbb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_sbb(SoftCPU& cpu, T& dest, const T& src)
{
    cpu.warn_if_flags_tainted("sbb");
    if (cpu.cf())
        return op_sbb_impl<T, true>(cpu, dest, src);
    return op_sbb_impl<T, false>(cpu, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_add(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("addl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("addw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("addb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T, bool cf>
ALWAYS_INLINE static T op_adc_impl(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("adcl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("adcw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("adcb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_adc(SoftCPU& cpu, T& dest, const T& src)
{
    cpu.warn_if_flags_tainted("adc");
    if (cpu.cf())
        return op_adc_impl<T, true>(cpu, dest, src);
    return op_adc_impl<T, false>(cpu, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_and(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("andl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("andw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("andb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        ASSERT_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
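
// Signed widening multiply: compute the product in a type twice as wide,
// split it into high/low halves, and set CF/OF when the result no longer
// fits in the destination width.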
template<typename T>
ALWAYS_INLINE static void op_imul(SoftCPU& cpu, const T& dest, const T& src, T& result_high, T& result_low)
{
    bool did_overflow = false;
    if constexpr (sizeof(T) == 4) {
        i64 result = (i64)src * (i64)dest;
        result_low = result & 0xffffffff;
        result_high = result >> 32;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    } else if constexpr (sizeof(T) == 2) {
        i32 result = (i32)src * (i32)dest;
        result_low = result & 0xffff;
        result_high = result >> 16;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    } else if constexpr (sizeof(T) == 1) {
        i16 result = (i16)src * (i16)dest;
        result_low = result & 0xff;
        result_high = result >> 8;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    }

    if (did_overflow) {
        cpu.set_cf(true);
        cpu.set_of(true);
    } else {
        cpu.set_cf(false);
        cpu.set_of(false);
    }
}
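
// Shift helpers: a shift count of zero must leave the flags untouched, so it
// is special-cased before the native instruction runs.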
template<typename T>
ALWAYS_INLINE static T op_shr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shrl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shrw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("shrb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shlw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("shlb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
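
// Double-precision shifts (SHRD/SHLD): extra_bits supplies the bits shifted
// in from the second operand. Only 16- and 32-bit forms exist on x86.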
template<typename T>
ALWAYS_INLINE static T op_shrd(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shrd %%cl, %%edx, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shrd %%cl, %%dx, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shld(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shld %%cl, %%edx, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shld %%cl, %%dx, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
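
// Generic instruction plumbing: each generic_*() wrapper fetches the operands
// for one addressing form, runs an op_*() helper, and writes the result back
// when update_dest is set. Two special cases keep false positives down:
// is_or recognizes "or x, -1", whose result is all-ones regardless of x and
// therefore fully initialized, and dont_taint_for_same_operand recognizes
// idioms like "xor eax, eax", which produce a defined value from any input.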
template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_AL_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = al();
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm8() == 0xff)
        result.set_initialized();
    if (update_dest)
        set_al(result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_AX_imm16(Op op, const X86::Instruction& insn)
{
    auto dest = ax();
    auto src = shadow_wrap_as_initialized(insn.imm16());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm16() == 0xffff)
        result.set_initialized();
    if (update_dest)
        set_ax(result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_EAX_imm32(Op op, const X86::Instruction& insn)
{
    auto dest = eax();
    auto src = shadow_wrap_as_initialized(insn.imm32());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm32() == 0xffffffff)
        result.set_initialized();
    if (update_dest)
        set_eax(result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_imm16(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm16());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm16() == 0xffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto src = shadow_wrap_as_initialized<u16>(sign_extended_to<u16>(insn.imm8()));
    auto result = op(*this, dest, src);
    if (is_or && src.value() == 0xffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_unsigned_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_reg16(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto src = const_gpr16(insn.reg16());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_imm32(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto src = insn.imm32();
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (is_or && src == 0xffffffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto src = sign_extended_to<u32>(insn.imm8());
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (is_or && src == 0xffffffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_unsigned_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_reg32(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto src = const_gpr32(insn.reg32());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    auto src = insn.imm8();
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (is_or && src == 0xff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write8(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_reg8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    auto src = const_gpr8(insn.reg8());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write8(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg16_RM16(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr16(insn.reg16());
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr16(insn.reg16()) = result;
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg32_RM32(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr32(insn.reg32());
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr32(insn.reg32()) = result;
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg8_RM8(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr8(insn.reg8());
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr8(insn.reg8()) = result;
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    insn.modrm().write8(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    insn.modrm().write8(*this, insn, op(*this, data, cl()));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    insn.modrm().write16(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    insn.modrm().write16(*this, insn, op(*this, data, cl()));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    insn.modrm().write32(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    insn.modrm().write32(*this, insn, op(*this, data, cl()));
}

void SoftCPU::AAA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ARPL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::BOUND(const X86::Instruction&) { TODO_INSN(); }
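
// Bit scans: op_bsf maps to the count-trailing-zeros builtin; op_bsr executes
// the native BSR instruction. Both are undefined for a zero input, so the
// handlers below check the source first and leave the destination alone when
// it is zero, setting only ZF.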
template<typename T>
ALWAYS_INLINE static T op_bsf(SoftCPU&, T value)
{
    return { (typename T::ValueType)__builtin_ctz(value.value()), value.shadow() };
}

template<typename T>
ALWAYS_INLINE static T op_bsr(SoftCPU&, T value)
{
    typename T::ValueType bit_index = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("bsrl %%eax, %%edx"
                     : "=d"(bit_index)
                     : "a"(value.value()));
    }
    if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("bsrw %%ax, %%dx"
                     : "=d"(bit_index)
                     : "a"(value.value()));
    }
    return shadow_wrap_with_taint_from(bit_index, value);
}

void SoftCPU::BSF_reg16_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    set_zf(!src.value());
    if (src.value())
        gpr16(insn.reg16()) = op_bsf(*this, src);
    taint_flags_from(src);
}

void SoftCPU::BSF_reg32_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr32(insn.reg32()) = op_bsf(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSR_reg16_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr16(insn.reg16()) = op_bsr(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSR_reg32_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr32(insn.reg32()) = op_bsr(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSWAP_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = { __builtin_bswap32(gpr32(insn.reg32()).value()), __builtin_bswap32(gpr32(insn.reg32()).shadow()) };
}
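
// Bit test/modify primitives for BT/BTS/BTR/BTC: given a value and a one-bit
// mask, return the (possibly) updated value. The callers derive CF from the
// original bit before applying the update.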
template<typename T>
ALWAYS_INLINE static T op_bt(T value, T)
{
    return value;
}

template<typename T>
ALWAYS_INLINE static T op_bts(T value, T bit_mask)
{
    return value | bit_mask;
}

template<typename T>
ALWAYS_INLINE static T op_btr(T value, T bit_mask)
{
    return value & ~bit_mask;
}

template<typename T>
ALWAYS_INLINE static T op_btc(T value, T bit_mask)
{
    return value ^ bit_mask;
}
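
// With a register operand the bit index wraps modulo the operand width, but
// with a memory operand the register-supplied index may address any byte
// relative to the operand, so the effective byte is computed and a
// single-byte read/modify/write is performed there.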
template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_reg16(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    if (insn.modrm().is_register()) {
        unsigned bit_index = cpu.const_gpr16(insn.reg16()).value() & (X86::TypeTrivia<u16>::bits - 1);
        auto original = insn.modrm().read16<ValueWithShadow<u16>>(cpu, insn);
        u16 bit_mask = 1 << bit_index;
        u16 result = op(original.value(), bit_mask);
        cpu.set_cf((original.value() & bit_mask) != 0);
        cpu.taint_flags_from(cpu.gpr16(insn.reg16()), original);
        if (should_update)
            insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), original));
        return;
    }

    // FIXME: Is this supposed to perform a full 16-bit read/modify/write?
    unsigned bit_offset_in_array = cpu.const_gpr16(insn.reg16()).value() / 8;
    unsigned bit_offset_in_byte = cpu.const_gpr16(insn.reg16()).value() & 7;
    auto address = insn.modrm().resolve(cpu, insn);
    address.set_offset(address.offset() + bit_offset_in_array);
    auto dest = cpu.read_memory8(address);
    u8 bit_mask = 1 << bit_offset_in_byte;
    u8 result = op(dest.value(), bit_mask);
    cpu.set_cf((dest.value() & bit_mask) != 0);
    cpu.taint_flags_from(cpu.gpr16(insn.reg16()), dest);
    if (should_update)
        cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), dest));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM32_reg32(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    if (insn.modrm().is_register()) {
        unsigned bit_index = cpu.const_gpr32(insn.reg32()).value() & (X86::TypeTrivia<u32>::bits - 1);
        auto original = insn.modrm().read32<ValueWithShadow<u32>>(cpu, insn);
        u32 bit_mask = 1 << bit_index;
        u32 result = op(original.value(), bit_mask);
        cpu.set_cf((original.value() & bit_mask) != 0);
        cpu.taint_flags_from(cpu.gpr32(insn.reg32()), original);
        if (should_update)
            insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), original));
        return;
    }

    // FIXME: Is this supposed to perform a full 32-bit read/modify/write?
    unsigned bit_offset_in_array = cpu.const_gpr32(insn.reg32()).value() / 8;
    unsigned bit_offset_in_byte = cpu.const_gpr32(insn.reg32()).value() & 7;
    auto address = insn.modrm().resolve(cpu, insn);
    address.set_offset(address.offset() + bit_offset_in_array);
    auto dest = cpu.read_memory8(address);
    u8 bit_mask = 1 << bit_offset_in_byte;
    u8 result = op(dest.value(), bit_mask);
    cpu.set_cf((dest.value() & bit_mask) != 0);
    cpu.taint_flags_from(cpu.gpr32(insn.reg32()), dest);
    if (should_update)
        cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), dest));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u16>::mask);

    // FIXME: Support higher bit indices
    ASSERT(bit_index < 16);

    auto original = insn.modrm().read16<ValueWithShadow<u16>>(cpu, insn);
    u16 bit_mask = 1 << bit_index;
    auto result = op(original.value(), bit_mask);
    cpu.set_cf((original.value() & bit_mask) != 0);
    cpu.taint_flags_from(original);
    if (should_update)
        insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, original));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM32_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u32>::mask);

    // FIXME: Support higher bit indices
    ASSERT(bit_index < 32);

    auto original = insn.modrm().read32<ValueWithShadow<u32>>(cpu, insn);
    u32 bit_mask = 1 << bit_index;
    auto result = op(original.value(), bit_mask);
    cpu.set_cf((original.value() & bit_mask) != 0);
    cpu.taint_flags_from(original);
    if (should_update)
        insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, original));
}

#define DEFINE_GENERIC_BTx_INSN_HANDLERS(mnemonic, op, update_dest)                                                          \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { BTx_RM32_reg32<update_dest>(*this, insn, op<u32>); } \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { BTx_RM16_reg16<update_dest>(*this, insn, op<u16>); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { BTx_RM32_imm8<update_dest>(*this, insn, op<u32>); }   \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { BTx_RM16_imm8<update_dest>(*this, insn, op<u16>); }

DEFINE_GENERIC_BTx_INSN_HANDLERS(BTS, op_bts, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTR, op_btr, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTC, op_btc, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BT, op_bt, false);

void SoftCPU::CALL_FAR_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_RM16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::CALL_RM32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(eip()));
    auto address = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    warn_if_uninitialized(address, "call rm32");
    set_eip(address.value());
}

void SoftCPU::CALL_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::CALL_imm32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(eip()));
    set_eip(eip() + (i32)insn.imm32());
}
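
// Sign-extension family: CBW fills AH from AL's sign bit and CDQ fills EDX
// from EAX's; CWD and CWDE further down do the corresponding 16- and
// 32-bit widenings.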
void SoftCPU::CBW(const X86::Instruction&)
{
    set_ah(shadow_wrap_with_taint_from<u8>((al().value() & 0x80) ? 0xff : 0x00, al()));
}

void SoftCPU::CDQ(const X86::Instruction&)
{
    if (eax().value() & 0x80000000)
        set_edx(shadow_wrap_with_taint_from<u32>(0xffffffff, eax()));
    else
        set_edx(shadow_wrap_with_taint_from<u32>(0, eax()));
}

void SoftCPU::CLC(const X86::Instruction&)
{
    set_cf(false);
}

void SoftCPU::CLD(const X86::Instruction&)
{
    set_df(false);
}

void SoftCPU::CLI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CLTS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CMC(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::CMOVcc_reg16_RM16(const X86::Instruction& insn)
{
    warn_if_flags_tainted("cmovcc reg16, rm16");
    if (evaluate_condition(insn.cc()))
        gpr16(insn.reg16()) = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
}

void SoftCPU::CMOVcc_reg32_RM32(const X86::Instruction& insn)
{
    warn_if_flags_tainted("cmovcc reg32, rm32");
    if (evaluate_condition(insn.cc()))
        gpr32(insn.reg32()) = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
}
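
// CMPS: compare string elements at <segment>:ESI and ES:EDI, setting flags
// via op_sub and stepping both indices; do_once_or_repeat<true> also honors
// the REPZ/REPNZ exit condition.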
template<typename T>
ALWAYS_INLINE static void do_cmps(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
        op_sub(cpu, dest, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::CMPSB(const X86::Instruction& insn)
{
    do_cmps<u8>(*this, insn);
}

void SoftCPU::CMPSD(const X86::Instruction& insn)
{
    do_cmps<u32>(*this, insn);
}

void SoftCPU::CMPSW(const X86::Instruction& insn)
{
    do_cmps<u16>(*this, insn);
}
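
// CMPXCHG: compare the accumulator with the r/m operand; on a match, store the
// source register into r/m and set ZF, otherwise load r/m into the accumulator.
// (Real hardware also updates the other arithmetic flags from the comparison;
// only ZF is modeled here.)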
void SoftCPU::CMPXCHG_RM16_reg16(const X86::Instruction& insn)
{
    auto current = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    taint_flags_from(current, ax());
    if (current.value() == ax().value()) {
        set_zf(true);
        insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
    } else {
        set_zf(false);
        set_ax(current);
    }
}

void SoftCPU::CMPXCHG_RM32_reg32(const X86::Instruction& insn)
{
    auto current = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    taint_flags_from(current, eax());
    if (current.value() == eax().value()) {
        set_zf(true);
        insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
    } else {
        set_zf(false);
        set_eax(current);
    }
}

void SoftCPU::CMPXCHG_RM8_reg8(const X86::Instruction& insn)
{
    auto current = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    taint_flags_from(current, al());
    if (current.value() == al().value()) {
        set_zf(true);
        insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
    } else {
        set_zf(false);
        set_al(current);
    }
}

void SoftCPU::CPUID(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::CWD(const X86::Instruction&)
{
    set_dx(shadow_wrap_with_taint_from<u16>((ax().value() & 0x8000) ? 0xffff : 0x0000, ax()));
}

void SoftCPU::CWDE(const X86::Instruction&)
{
    set_eax(shadow_wrap_with_taint_from(sign_extended_to<u32>(ax().value()), ax()));
}

void SoftCPU::DAA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::DAS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::DEC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_dec(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn)));
}

void SoftCPU::DEC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_dec(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn)));
}

void SoftCPU::DEC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_dec(*this, insn.modrm().read8<ValueWithShadow<u8>>(*this, insn)));
}

void SoftCPU::DEC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_dec(*this, const_gpr16(insn.reg16()));
}

void SoftCPU::DEC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_dec(*this, const_gpr32(insn.reg32()));
}
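
// Unsigned divides of DX:AX / EDX:EAX / AX by the r/m operand. Real hardware
// raises #DE on divide-by-zero and on quotient overflow; both fault paths are
// still TODO() here.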
void SoftCPU::DIV_RM16(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    if (divisor.value() == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    u32 dividend = ((u32)dx().value() << 16) | ax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u16>::max()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(quotient, original_ax, dx(), divisor));
    set_dx(shadow_wrap_with_taint_from<u16>(remainder, original_ax, dx(), divisor));
}

void SoftCPU::DIV_RM32(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    if (divisor.value() == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    u64 dividend = ((u64)edx().value() << 32) | eax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u32>::max()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(quotient, original_eax, edx(), divisor));
    set_edx(shadow_wrap_with_taint_from<u32>(remainder, original_eax, edx(), divisor));
}

void SoftCPU::DIV_RM8(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    if (divisor.value() == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    u16 dividend = ax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u8>::max()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(quotient, original_ax, divisor));
    set_ah(shadow_wrap_with_taint_from<u8>(remainder, original_ax, divisor));
}

void SoftCPU::ENTER16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ENTER32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::ESCAPE(const X86::Instruction&)
{
    dbg() << "FIXME: x87 floating-point support";
    m_emulator.dump_backtrace();
    TODO();
}

void SoftCPU::FADD_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FMUL_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOM_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSUB_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSUBR_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FDIV_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FDIVR_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLD_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXCH(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FST_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNOP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSTP_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLDENV(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCHS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FABS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FTST(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXAM(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FLDCW(const X86::Instruction& insn)
{
    m_fpu_cw = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
}

void SoftCPU::FLD1(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLDL2T(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLDL2E(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLDPI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLDLG2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLDLN2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLDZ(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTENV(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::F2XM1(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FYL2X(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FPTAN(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FPATAN(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXTRACT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FPREM1(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FDECSTP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FINCSTP(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FNSTCW(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, m_fpu_cw);
}

void SoftCPU::FPREM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FYL2XP1(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSQRT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSINCOS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FRNDINT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSCALE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSIN(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FIADD_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FIMUL_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FICOM_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVBE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FICOMP_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVU(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISUB_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISUBR_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMPP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FIDIV_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FIDIVR_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FILD_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVNB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTTP_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVNE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FIST_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVNBE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTP_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVNU(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNENI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNDISI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNCLEX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNINIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSETPM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLD_RM80(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSTP_RM80(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FADD_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FMUL_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOM_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSUB_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSUBR_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FDIV_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FDIVR_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLD_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FFREE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTTP_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FST_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSTP_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FRSTOR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSAVE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FIADD_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FADDP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FIMUL_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FMULP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FICOM_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FICOMP_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMPP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISUB_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSUBRP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISUBR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FSUBP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FIDIV_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FDIVRP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FIDIVR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FDIVP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FILD_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FFREEP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTTP_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FIST_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTP_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FBLD_M80(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTSW_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FILD_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMIP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FBSTP_M80(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMIP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTP_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::HLT(const X86::Instruction&) { TODO_INSN(); }
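
// Signed counterparts of DIV: the quotient must fit the destination width or
// real hardware raises #DE; as with DIV, both fault paths are still TODO().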
void SoftCPU::IDIV_RM16(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto divisor = (i16)divisor_with_shadow.value();
    if (divisor == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    i32 dividend = (i32)(((u32)dx().value() << 16) | (u32)ax().value());
    i32 result = dividend / divisor;
    if (result > NumericLimits<i16>::max() || result < NumericLimits<i16>::min()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result, original_ax, dx(), divisor_with_shadow));
    set_dx(shadow_wrap_with_taint_from<u16>(dividend % divisor, original_ax, dx(), divisor_with_shadow));
}

void SoftCPU::IDIV_RM32(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto divisor = (i32)divisor_with_shadow.value();
    if (divisor == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    i64 dividend = (i64)(((u64)edx().value() << 32) | (u64)eax().value());
    i64 result = dividend / divisor;
    if (result > NumericLimits<i32>::max() || result < NumericLimits<i32>::min()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, original_eax, edx(), divisor_with_shadow));
    set_edx(shadow_wrap_with_taint_from<u32>(dividend % divisor, original_eax, edx(), divisor_with_shadow));
}

void SoftCPU::IDIV_RM8(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    auto divisor = (i8)divisor_with_shadow.value();
    if (divisor == 0) {
        warn() << "Divide by zero";
        TODO();
    }
    i16 dividend = ax().value();
    i16 result = dividend / divisor;
    if (result > NumericLimits<i8>::max() || result < NumericLimits<i8>::min()) {
        warn() << "Divide overflow";
        TODO();
    }
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(result, divisor_with_shadow, original_ax));
    set_ah(shadow_wrap_with_taint_from<u8>(dividend % divisor, divisor_with_shadow, original_ax));
}
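
// op_imul (defined earlier in this file) computes the full double-width signed
// product. The one-operand forms spread it across DX:AX / EDX:EAX / AH:AL;
// the two- and three-operand forms below keep only the low half.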
void SoftCPU::IMUL_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    op_imul<i16>(*this, src.value(), ax().value(), result_high, result_low);
    gpr16(X86::RegisterDX) = shadow_wrap_with_taint_from<u16>(result_high, src, ax());
    gpr16(X86::RegisterAX) = shadow_wrap_with_taint_from<u16>(result_low, src, ax());
}

void SoftCPU::IMUL_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    op_imul<i32>(*this, src.value(), eax().value(), result_high, result_low);
    gpr32(X86::RegisterEDX) = shadow_wrap_with_taint_from<u32>(result_high, src, eax());
    gpr32(X86::RegisterEAX) = shadow_wrap_with_taint_from<u32>(result_low, src, eax());
}

void SoftCPU::IMUL_RM8(const X86::Instruction& insn)
{
    i8 result_high;
    i8 result_low;
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    op_imul<i8>(*this, src.value(), al().value(), result_high, result_low);
    gpr8(X86::RegisterAH) = shadow_wrap_with_taint_from<u8>(result_high, src, al());
    gpr8(X86::RegisterAL) = shadow_wrap_with_taint_from<u8>(result_low, src, al());
}

void SoftCPU::IMUL_reg16_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    op_imul<i16>(*this, gpr16(insn.reg16()).value(), src.value(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src, gpr16(insn.reg16()));
}

void SoftCPU::IMUL_reg16_RM16_imm16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    op_imul<i16>(*this, src.value(), insn.imm16(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}

void SoftCPU::IMUL_reg16_RM16_imm8(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    op_imul<i16>(*this, src.value(), sign_extended_to<i16>(insn.imm8()), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}

void SoftCPU::IMUL_reg32_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    op_imul<i32>(*this, gpr32(insn.reg32()).value(), src.value(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src, gpr32(insn.reg32()));
}

void SoftCPU::IMUL_reg32_RM32_imm32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    op_imul<i32>(*this, src.value(), insn.imm32(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}

void SoftCPU::IMUL_reg32_RM32_imm8(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    op_imul<i32>(*this, src.value(), sign_extended_to<i32>(insn.imm8()), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}

void SoftCPU::INC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_inc(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn)));
}

void SoftCPU::INC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_inc(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn)));
}

void SoftCPU::INC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_inc(*this, insn.modrm().read8<ValueWithShadow<u8>>(*this, insn)));
}

void SoftCPU::INC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_inc(*this, const_gpr16(insn.reg16()));
}

void SoftCPU::INC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_inc(*this, const_gpr32(insn.reg32()));
}

void SoftCPU::INSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INT3(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INTO(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::INT_imm8(const X86::Instruction& insn)
{
    ASSERT(insn.imm8() == 0x82);
    // FIXME: virt_syscall should take ValueWithShadow and whine about uninitialized arguments
    set_eax(shadow_wrap_as_initialized(m_emulator.virt_syscall(eax().value(), edx().value(), ecx().value(), ebx().value())));
}

void SoftCPU::INVLPG(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IRET(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::JCXZ_imm8(const X86::Instruction& insn)
{
    if (insn.a32()) {
        warn_if_uninitialized(ecx(), "jecxz imm8");
        if (ecx().value() == 0)
            set_eip(eip() + (i8)insn.imm8());
    } else {
        warn_if_uninitialized(cx(), "jcxz imm8");
        if (cx().value() == 0)
            set_eip(eip() + (i8)insn.imm8());
    }
}

void SoftCPU::JMP_FAR_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_RM16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::JMP_RM32(const X86::Instruction& insn)
{
    set_eip(insn.modrm().read32<ValueWithShadow<u32>>(*this, insn).value());
}

void SoftCPU::JMP_imm16(const X86::Instruction& insn)
{
    set_eip(eip() + (i16)insn.imm16());
}

void SoftCPU::JMP_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::JMP_imm32(const X86::Instruction& insn)
{
    set_eip(eip() + (i32)insn.imm32());
}

void SoftCPU::JMP_short_imm8(const X86::Instruction& insn)
{
    set_eip(eip() + (i8)insn.imm8());
}

void SoftCPU::Jcc_NEAR_imm(const X86::Instruction& insn)
{
    warn_if_flags_tainted("jcc near imm32");
    if (evaluate_condition(insn.cc()))
        set_eip(eip() + (i32)insn.imm32());
}

void SoftCPU::Jcc_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("jcc imm8");
    if (evaluate_condition(insn.cc()))
        set_eip(eip() + (i8)insn.imm8());
}

void SoftCPU::LAHF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LEAVE16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::LEAVE32(const X86::Instruction&)
{
    auto new_ebp = read_memory32({ ss(), ebp().value() });
    set_esp({ ebp().value() + 4, ebp().shadow() });
    set_ebp(new_ebp);
}

void SoftCPU::LEA_reg16_mem16(const X86::Instruction& insn)
{
    // FIXME: Respect shadow values
    gpr16(insn.reg16()) = shadow_wrap_as_initialized<u16>(insn.modrm().resolve(*this, insn).offset());
}

void SoftCPU::LEA_reg32_mem32(const X86::Instruction& insn)
{
    // FIXME: Respect shadow values
    gpr32(insn.reg32()) = shadow_wrap_as_initialized<u32>(insn.modrm().resolve(*this, insn).offset());
}

void SoftCPU::LES_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LES_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LMSW_RM16(const X86::Instruction&) { TODO_INSN(); }
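
// Shared body for LODSB/LODSW/LODSD: load the accumulator from [seg:(E)SI]
// and step the source index. gpr<T>(X86::RegisterAL) resolves to AL/AX/EAX
// depending on T, so one template covers all three widths.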
template<typename T>
ALWAYS_INLINE static void do_lods(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.gpr<T>(X86::RegisterAL) = src;
        cpu.step_source_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::LODSB(const X86::Instruction& insn)
{
    do_lods<u8>(*this, insn);
}

void SoftCPU::LODSD(const X86::Instruction& insn)
{
    do_lods<u32>(*this, insn);
}

void SoftCPU::LODSW(const X86::Instruction& insn)
{
    do_lods<u16>(*this, insn);
}
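
// The LOOP family decrements (E)CX without touching any flags, then branches
// while the counter is non-zero (LOOPZ/LOOPNZ additionally test ZF).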
void SoftCPU::LOOPNZ_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("loopnz");
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0 && !zf())
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0 && !zf())
            set_eip(eip() + (i8)insn.imm8());
    }
}

void SoftCPU::LOOPZ_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("loopz");
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0 && zf())
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0 && zf())
            set_eip(eip() + (i8)insn.imm8());
    }
}

void SoftCPU::LOOP_imm8(const X86::Instruction& insn)
{
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0)
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0)
            set_eip(eip() + (i8)insn.imm8());
    }
}

void SoftCPU::LSL_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSL_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LTR_RM16(const X86::Instruction&) { TODO_INSN(); }
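
// Shared body for MOVSB/MOVSW/MOVSD: copy [seg:(E)SI] to [ES:(E)DI] and step
// both indices; shadow bits travel with the data through read_memory<T>'s
// ValueWithShadow return value.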
template<typename T>
ALWAYS_INLINE static void do_movs(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<false>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.write_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() }, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::MOVSB(const X86::Instruction& insn)
{
    do_movs<u8>(*this, insn);
}

void SoftCPU::MOVSD(const X86::Instruction& insn)
{
    do_movs<u32>(*this, insn);
}

void SoftCPU::MOVSW(const X86::Instruction& insn)
{
    do_movs<u16>(*this, insn);
}
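
// For MOVSX/MOVZX the freshly produced upper bytes are always defined, so
// their shadow bytes are set to 0x01 ("initialized") while the low bytes
// keep the source's shadow.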
void SoftCPU::MOVSX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    gpr16(insn.reg16()) = ValueWithShadow<u16>(sign_extended_to<u16>(src.value()), 0x0100 | (src.shadow()));
}

void SoftCPU::MOVSX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(sign_extended_to<u32>(src.value()), 0x01010000 | (src.shadow()));
}

void SoftCPU::MOVSX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(sign_extended_to<u32>(src.value()), 0x01010100 | (src.shadow()));
}

void SoftCPU::MOVZX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    gpr16(insn.reg16()) = ValueWithShadow<u16>(src.value(), 0x0100 | (src.shadow() & 0xff));
}

void SoftCPU::MOVZX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010000 | (src.shadow() & 0xffff));
}

void SoftCPU::MOVZX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010100 | (src.shadow() & 0xff));
}

void SoftCPU::MOV_AL_moff8(const X86::Instruction& insn)
{
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}

void SoftCPU::MOV_AX_moff16(const X86::Instruction& insn)
{
    set_ax(read_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}

void SoftCPU::MOV_CR_reg32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_DR_reg32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::MOV_EAX_moff32(const X86::Instruction& insn)
{
    set_eax(read_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}

void SoftCPU::MOV_RM16_imm16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(insn.imm16()));
}

void SoftCPU::MOV_RM16_reg16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
}

void SoftCPU::MOV_RM16_seg(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::MOV_RM32_imm32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(insn.imm32()));
}

void SoftCPU::MOV_RM32_reg32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
}

void SoftCPU::MOV_RM8_imm8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized(insn.imm8()));
}

void SoftCPU::MOV_RM8_reg8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
}

void SoftCPU::MOV_moff16_AX(const X86::Instruction& insn)
{
    write_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, ax());
}

void SoftCPU::MOV_moff32_EAX(const X86::Instruction& insn)
{
    write_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, eax());
}

void SoftCPU::MOV_moff8_AL(const X86::Instruction& insn)
{
    write_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, al());
}

void SoftCPU::MOV_reg16_RM16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
}

void SoftCPU::MOV_reg16_imm16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = shadow_wrap_as_initialized(insn.imm16());
}

void SoftCPU::MOV_reg32_CR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_reg32_DR(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::MOV_reg32_RM32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
}

void SoftCPU::MOV_reg32_imm32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = shadow_wrap_as_initialized(insn.imm32());
}

void SoftCPU::MOV_reg8_RM8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
}

void SoftCPU::MOV_reg8_imm8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = shadow_wrap_as_initialized(insn.imm8());
}

void SoftCPU::MOV_seg_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_seg_RM32(const X86::Instruction&) { TODO_INSN(); }
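
// Unsigned multiplies: the double-width product lands in DX:AX / EDX:EAX / AX,
// and CF/OF are set exactly when the high half is non-zero.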
void SoftCPU::MUL_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    u32 result = (u32)ax().value() * (u32)src.value();
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result & 0xffff, src, original_ax));
    set_dx(shadow_wrap_with_taint_from<u16>(result >> 16, src, original_ax));
    taint_flags_from(src, original_ax);

    set_cf(dx().value() != 0);
    set_of(dx().value() != 0);
}

void SoftCPU::MUL_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    u64 result = (u64)eax().value() * (u64)src.value();
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, src, original_eax));
    set_edx(shadow_wrap_with_taint_from<u32>(result >> 32, src, original_eax));
    taint_flags_from(src, original_eax);

    set_cf(edx().value() != 0);
    set_of(edx().value() != 0);
}

void SoftCPU::MUL_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    u16 result = (u16)al().value() * src.value();
    auto original_al = al();
    set_ax(shadow_wrap_with_taint_from(result, src, original_al));
    taint_flags_from(src, original_al);

    set_cf((result & 0xff00) != 0);
    set_of((result & 0xff00) != 0);
}

void SoftCPU::NEG_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_sub<ValueWithShadow<u16>>(*this, shadow_wrap_as_initialized<u16>(0), insn.modrm().read16<ValueWithShadow<u16>>(*this, insn)));
}

void SoftCPU::NEG_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_sub<ValueWithShadow<u32>>(*this, shadow_wrap_as_initialized<u32>(0), insn.modrm().read32<ValueWithShadow<u32>>(*this, insn)));
}

void SoftCPU::NEG_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_sub<ValueWithShadow<u8>>(*this, shadow_wrap_as_initialized<u8>(0), insn.modrm().read8<ValueWithShadow<u8>>(*this, insn)));
}

void SoftCPU::NOP(const X86::Instruction&)
{
}

void SoftCPU::NOT_RM16(const X86::Instruction& insn)
{
    auto data = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    insn.modrm().write16(*this, insn, ValueWithShadow<u16>(~data.value(), data.shadow()));
}

void SoftCPU::NOT_RM32(const X86::Instruction& insn)
{
    auto data = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    insn.modrm().write32(*this, insn, ValueWithShadow<u32>(~data.value(), data.shadow()));
}

void SoftCPU::NOT_RM8(const X86::Instruction& insn)
{
    auto data = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    insn.modrm().write8(*this, insn, ValueWithShadow<u8>(~data.value(), data.shadow()));
}

void SoftCPU::OUTSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POPA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POPAD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POPF(const X86::Instruction&) { TODO_INSN(); }
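
// The 0x00fcffff mask lets POPFD/PUSHFD move the arithmetic and system flags
// while leaving RF and VM (bits 16 and 17) alone.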
void SoftCPU::POPFD(const X86::Instruction&)
{
    auto popped_value = pop32();
    m_eflags &= ~0x00fcffff;
    m_eflags |= popped_value.value() & 0x00fcffff;
    taint_flags_from(popped_value);
}

void SoftCPU::POP_DS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_ES(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_FS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_GS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::POP_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, pop16());
}

void SoftCPU::POP_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, pop32());
}

void SoftCPU::POP_SS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::POP_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = pop16();
}

void SoftCPU::POP_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = pop32();
}

void SoftCPU::PUSHA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSHAD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSHF(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::PUSHFD(const X86::Instruction&)
{
    // FIXME: Respect shadow flags when they exist!
    push32(shadow_wrap_as_initialized(m_eflags & 0x00fcffff));
}

void SoftCPU::PUSH_CS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_DS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_ES(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_FS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_GS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_RM16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::PUSH_RM32(const X86::Instruction& insn)
{
    push32(insn.modrm().read32<ValueWithShadow<u32>>(*this, insn));
}

void SoftCPU::PUSH_SP_8086_80186(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_SS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::PUSH_imm16(const X86::Instruction& insn)
{
    push16(shadow_wrap_as_initialized(insn.imm16()));
}

void SoftCPU::PUSH_imm32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(insn.imm32()));
}

void SoftCPU::PUSH_imm8(const X86::Instruction& insn)
{
    ASSERT(!insn.has_operand_size_override_prefix());
    push32(shadow_wrap_as_initialized<u32>(sign_extended_to<i32>(insn.imm8())));
}

void SoftCPU::PUSH_reg16(const X86::Instruction& insn)
{
    push16(gpr16(insn.reg16()));
}
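
// PUSH reg32 doubles as the receive side of the emulator's "secret handshake":
// once SALC (below) has advanced m_secret_handshake_state past 2, the next
// three pushed registers are captured into m_secret_data before
// did_receive_secret_data() fires.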
void SoftCPU::PUSH_reg32(const X86::Instruction& insn)
{
    push32(gpr32(insn.reg32()));

    if (m_secret_handshake_state == 2) {
        m_secret_data[0] = gpr32(insn.reg32()).value();
        ++m_secret_handshake_state;
    } else if (m_secret_handshake_state == 3) {
        m_secret_data[1] = gpr32(insn.reg32()).value();
        ++m_secret_handshake_state;
    } else if (m_secret_handshake_state == 4) {
        m_secret_data[2] = gpr32(insn.reg32()).value();
        m_secret_handshake_state = 0;
        did_receive_secret_data();
    }
}
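
// The rotate/shift helpers below cheat: they execute the real instruction on
// the host (seeding CF with stc/clc where it matters), then recover the
// resulting flags via pushf so they can be copied into the emulated EFLAGS.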
template<typename T, bool cf>
ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rclw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rclb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_rcl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcl");
    if (cpu.cf())
        return op_rcl_impl<T, true>(cpu, data, steps);
    return op_rcl_impl<T, false>(cpu, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCL, op_rcl)

template<typename T, bool cf>
ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcrl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rcrw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rcrb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_rcr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcr");
    if (cpu.cf())
        return op_rcr_impl<T, true>(cpu, data, steps);
    return op_rcr_impl<T, false>(cpu, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCR, op_rcr)

void SoftCPU::RDTSC(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::RET(const X86::Instruction& insn)
{
    ASSERT(!insn.has_operand_size_override_prefix());
    auto ret_address = pop32();
    warn_if_uninitialized(ret_address, "ret");
    set_eip(ret_address.value());
}

void SoftCPU::RETF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::RETF_imm16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::RET_imm16(const X86::Instruction& insn)
{
    ASSERT(!insn.has_operand_size_override_prefix());
    auto ret_address = pop32();
    warn_if_uninitialized(ret_address, "ret imm16");
    set_eip(ret_address.value());
    set_esp({ esp().value() + insn.imm16(), esp().shadow() });
}

template<typename T>
ALWAYS_INLINE static T op_rol(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("roll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rolw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rolb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oc(new_flags);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROL, op_rol)

template<typename T>
ALWAYS_INLINE static T op_ror(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rorl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rorw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oc(new_flags);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROR, op_ror)

void SoftCPU::SAHF(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::SALC(const X86::Instruction&)
{
    // FIXME: Respect shadow flags once they exist!
    set_al(shadow_wrap_as_initialized<u8>(cf() ? 0xff : 0x00));

    if (m_secret_handshake_state < 2)
        ++m_secret_handshake_state;
    else
        m_secret_handshake_state = 0;
}

template<typename T>
static T op_sar(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sarl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sarw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sarb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SAR, op_sar)
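
// Shared body for SCASB/SCASW/SCASD: compare the accumulator against
// [ES:(E)DI] via op_sub (flags only) and step the destination index; repeats
// honor REPZ/REPNZ through do_once_or_repeat<true>.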
template<typename T>
ALWAYS_INLINE static void do_scas(SoftCPU& cpu, const X86::Instruction& insn)
{
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.const_gpr<T>(X86::RegisterAL);
        auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
        op_sub(cpu, dest, src);
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::SCASB(const X86::Instruction& insn)
{
    do_scas<u8>(*this, insn);
}

void SoftCPU::SCASD(const X86::Instruction& insn)
{
    do_scas<u32>(*this, insn);
}

void SoftCPU::SCASW(const X86::Instruction& insn)
{
    do_scas<u16>(*this, insn);
}

void SoftCPU::SETcc_RM8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("setcc");
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized<u8>(evaluate_condition(insn.cc())));
}

void SoftCPU::SGDT(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::SHLD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn), const_gpr16(insn.reg16()), cl()));
}

void SoftCPU::SHLD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}

void SoftCPU::SHLD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn), const_gpr32(insn.reg32()), cl()));
}

void SoftCPU::SHLD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHL, op_shl)

void SoftCPU::SHRD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn), const_gpr16(insn.reg16()), cl()));
}

void SoftCPU::SHRD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16<ValueWithShadow<u16>>(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}

void SoftCPU::SHRD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn), const_gpr32(insn.reg32()), cl()));
}

void SoftCPU::SHRD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32<ValueWithShadow<u32>>(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHR, op_shr)

void SoftCPU::SIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SMSW_RM16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::STC(const X86::Instruction&)
{
    set_cf(true);
}

void SoftCPU::STD(const X86::Instruction&)
{
    set_df(true);
}

void SoftCPU::STI(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::STOSB(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory8({ es(), destination_index(insn.a32()).value() }, al());
        step_destination_index(insn.a32(), 1);
    });
}

void SoftCPU::STOSD(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory32({ es(), destination_index(insn.a32()).value() }, eax());
        step_destination_index(insn.a32(), 4);
    });
}

void SoftCPU::STOSW(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory16({ es(), destination_index(insn.a32()).value() }, ax());
        step_destination_index(insn.a32(), 2);
    });
}

void SoftCPU::STR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD1(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERW_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WAIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WBINVD(const X86::Instruction&) { TODO_INSN(); }
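
// XADD: the r/m operand receives dest + src while the source register gets
// the original destination value, all in one exchange-and-add step.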
void SoftCPU::XADD_RM16_reg16(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    auto src = const_gpr16(insn.reg16());
    auto result = op_add(*this, dest, src);
    gpr16(insn.reg16()) = dest;
    insn.modrm().write16(*this, insn, result);
}

void SoftCPU::XADD_RM32_reg32(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    auto src = const_gpr32(insn.reg32());
    auto result = op_add(*this, dest, src);
    gpr32(insn.reg32()) = dest;
    insn.modrm().write32(*this, insn, result);
}

void SoftCPU::XADD_RM8_reg8(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    auto src = const_gpr8(insn.reg8());
    auto result = op_add(*this, dest, src);
    gpr8(insn.reg8()) = dest;
    insn.modrm().write8(*this, insn, result);
}
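
// XCHG swaps its two operands and affects no flags. The accumulator forms
// (XCHG AX/EAX with a register) are the classic one-byte 0x90+r encodings.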
void SoftCPU::XCHG_AX_reg16(const X86::Instruction& insn)
{
    auto temp = gpr16(insn.reg16());
    gpr16(insn.reg16()) = ax();
    set_ax(temp);
}

void SoftCPU::XCHG_EAX_reg32(const X86::Instruction& insn)
{
    auto temp = gpr32(insn.reg32());
    gpr32(insn.reg32()) = eax();
    set_eax(temp);
}

void SoftCPU::XCHG_reg16_RM16(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read16<ValueWithShadow<u16>>(*this, insn);
    insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
    gpr16(insn.reg16()) = temp;
}

void SoftCPU::XCHG_reg32_RM32(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read32<ValueWithShadow<u32>>(*this, insn);
    insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
    gpr32(insn.reg32()) = temp;
}

void SoftCPU::XCHG_reg8_RM8(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read8<ValueWithShadow<u8>>(*this, insn);
    insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
    gpr8(insn.reg8()) = temp;
}
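
// XLAT loads AL from the table at DS:(E)BX + AL (segment-overridable).
// Warn if the base or index carries uninitialized shadow bits, since the
// lookup address would then depend on uninitialized data.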
void SoftCPU::XLAT(const X86::Instruction& insn)
{
    if (insn.a32())
        warn_if_uninitialized(ebx(), "xlat ebx");
    else
        warn_if_uninitialized(bx(), "xlat bx");
    warn_if_uninitialized(al(), "xlat al");
    u32 offset = (insn.a32() ? ebx().value() : bx().value()) + al().value();
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), offset }));
}
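
// These macros stamp out one handler per operand form of a generic ALU
// instruction. Template flags: `update_dest` controls whether the result is
// written back (false for CMP/TEST, which only set flags);
// `is_zero_idiom_if_both_operands_same` marks ops like XOR/SUB whose result
// is a well-defined zero when both operands are the same register, even if
// that register is uninitialized; `is_or` lets OR with an all-ones operand
// produce a fully-initialized result regardless of the other operand.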
#define DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    void SoftCPU::mnemonic##_AL_imm8(const X86::Instruction& insn) { generic_AL_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_AX_imm16(const X86::Instruction& insn) { generic_AX_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_EAX_imm32(const X86::Instruction& insn) { generic_EAX_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM16_imm16(const X86::Instruction& insn) { generic_RM16_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { generic_RM16_reg16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm32(const X86::Instruction& insn) { generic_RM32_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { generic_RM32_reg32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_RM8_reg8(const X86::Instruction& insn) { generic_RM8_reg8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }

#define DEFINE_GENERIC_INSN_HANDLERS(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_imm8<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_imm8<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_reg16_RM16(const X86::Instruction& insn) { generic_reg16_RM16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_reg32_RM32(const X86::Instruction& insn) { generic_reg32_RM32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_reg8_RM8(const X86::Instruction& insn) { generic_reg8_RM8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }
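
// Instantiate the ALU handlers: (mnemonic, op, update_dest, zero-idiom, is_or).
// CMP reuses op_sub and TEST reuses op_and, both with update_dest = false so
// only the flags are updated.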
DEFINE_GENERIC_INSN_HANDLERS(XOR, op_xor, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(OR, op_or, true, false, true)
DEFINE_GENERIC_INSN_HANDLERS(ADD, op_add, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(ADC, op_adc, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(SUB, op_sub, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(SBB, op_sbb, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(AND, op_and, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(CMP, op_sub, false, false, false)
DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(TEST, op_and, false, false, false)
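
// MMX is not implemented yet; MOVQ/EMMS simply abort via TODO_INSN().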
void SoftCPU::MOVQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::EMMS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOVQ_mm1_m64_mm2(const X86::Instruction&) { TODO_INSN(); }
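
// Opcode bytes 0xC0/0xC1 and 0xD0-0xD3 are the x86 shift/rotate groups, whose
// concrete operation is selected by the ModR/M reg field; these wrapper
// entries are currently unimplemented stubs.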
void SoftCPU::wrap_0xC0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_32(const X86::Instruction&) { TODO_INSN(); }

}