SoftCPU.cpp

/*
 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include "SoftCPU.h"
#include "Emulator.h"
#include <AK/Assertions.h>
#include <AK/BitCast.h>
#include <AK/Debug.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#if defined(__GNUC__) && !defined(__clang__)
#    pragma GCC optimize("O3")
#endif

#define TODO_INSN()                                                                   \
    do {                                                                              \
        reportln("\n=={}== Unimplemented instruction: {}\n", getpid(), __FUNCTION__); \
        m_emulator.dump_backtrace();                                                  \
        _exit(0);                                                                     \
    } while (0)

#define DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(mnemonic, op)                                                                           \
    void SoftCPU::mnemonic##_RM8_1(const X86::Instruction& insn) { generic_RM8_1(op<ValueWithShadow<u8>>, insn); }                        \
    void SoftCPU::mnemonic##_RM8_CL(const X86::Instruction& insn) { generic_RM8_CL(op<ValueWithShadow<u8>>, insn); }                      \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<true, false>(op<ValueWithShadow<u8>>, insn); }     \
    void SoftCPU::mnemonic##_RM16_1(const X86::Instruction& insn) { generic_RM16_1(op<ValueWithShadow<u16>>, insn); }                     \
    void SoftCPU::mnemonic##_RM16_CL(const X86::Instruction& insn) { generic_RM16_CL(op<ValueWithShadow<u16>>, insn); }                   \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_unsigned_imm8<true>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_1(const X86::Instruction& insn) { generic_RM32_1(op<ValueWithShadow<u32>>, insn); }                     \
    void SoftCPU::mnemonic##_RM32_CL(const X86::Instruction& insn) { generic_RM32_CL(op<ValueWithShadow<u32>>, insn); }                   \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_unsigned_imm8<true>(op<ValueWithShadow<u32>>, insn); }

namespace UserspaceEmulator {

template<typename T>
ALWAYS_INLINE void warn_if_uninitialized(T value_with_shadow, const char* message)
{
    if (value_with_shadow.is_uninitialized()) [[unlikely]] {
        reportln("\033[31;1mWarning! Use of uninitialized value: {}\033[0m\n", message);
        Emulator::the().dump_backtrace();
    }
}

ALWAYS_INLINE void SoftCPU::warn_if_flags_tainted(const char* message) const
{
    if (m_flags_tainted) [[unlikely]] {
        reportln("\n=={}== \033[31;1mConditional depends on uninitialized data\033[0m ({})\n", getpid(), message);
        Emulator::the().dump_backtrace();
    }
}

template<typename T, typename U>
constexpr T sign_extended_to(U value)
{
    if (!(value & X86::TypeTrivia<U>::sign_bit))
        return value;
    return (X86::TypeTrivia<T>::mask & ~X86::TypeTrivia<U>::mask) | value;
}

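// For example, sign_extended_to<u32>(u8(0x80)) yields 0xffffff80: the sign
// bit of the narrow type is set, so all bits above the narrow mask are
// filled in. sign_extended_to<u32>(u8(0x7f)) stays 0x0000007f.
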
SoftCPU::SoftCPU(Emulator& emulator)
    : m_emulator(emulator)
{
    memset(m_gpr, 0, sizeof(m_gpr));
    memset(m_gpr_shadow, 1, sizeof(m_gpr_shadow));
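    // Fixed selectors mirroring SerenityOS's userspace GDT layout: ring-3
    // code (0x1b), data/stack (0x23), and the TLS segment loaded into GS
    // (0x2b). These are the same three selectors the memory accessors below
    // VERIFY against.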
    m_segment[(int)X86::SegmentRegister::CS] = 0x1b;
    m_segment[(int)X86::SegmentRegister::DS] = 0x23;
    m_segment[(int)X86::SegmentRegister::ES] = 0x23;
    m_segment[(int)X86::SegmentRegister::SS] = 0x23;
    m_segment[(int)X86::SegmentRegister::GS] = 0x2b;
}

void SoftCPU::dump() const
{
    outln(" eax={:08x} ebx={:08x} ecx={:08x} edx={:08x} ebp={:08x} esp={:08x} esi={:08x} edi={:08x} o={:d} s={:d} z={:d} a={:d} p={:d} c={:d}",
        eax(), ebx(), ecx(), edx(), ebp(), esp(), esi(), edi(), of(), sf(), zf(), af(), pf(), cf());
    outln("#eax={:08x} #ebx={:08x} #ecx={:08x} #edx={:08x} #ebp={:08x} #esp={:08x} #esi={:08x} #edi={:08x} #f={}",
        eax().shadow(), ebx().shadow(), ecx().shadow(), edx().shadow(), ebp().shadow(), esp().shadow(), esi().shadow(), edi().shadow(), m_flags_tainted);
    fflush(stdout);
}

void SoftCPU::update_code_cache()
{
    auto* region = m_emulator.mmu().find_region({ cs(), eip() });
    VERIFY(region);

    if (!region->is_executable()) {
        reportln("SoftCPU::update_code_cache: Non-executable region @ {:p}", eip());
        Emulator::the().dump_backtrace();
        TODO();
    }

    // FIXME: This cache needs to be invalidated if the code region is ever unmapped.
    m_cached_code_region = region;
    m_cached_code_base_ptr = region->data();
}

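// Every emulated load below returns a ValueWithShadow pair: the data itself
// plus shadow bits recording whether it was ever initialized, in the spirit
// of MemorySanitizer. Stores write both halves, so initialization state
// propagates through the emulated program's own memory traffic.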
ValueWithShadow<u8> SoftCPU::read_memory8(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read8(address);
    outln_if(MEMORY_DEBUG, "\033[36;1mread_memory8: @{:04x}:{:08x} -> {:02x} ({:02x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    return value;
}

ValueWithShadow<u16> SoftCPU::read_memory16(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read16(address);
    outln_if(MEMORY_DEBUG, "\033[36;1mread_memory16: @{:04x}:{:08x} -> {:04x} ({:04x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    return value;
}

ValueWithShadow<u32> SoftCPU::read_memory32(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read32(address);
    outln_if(MEMORY_DEBUG, "\033[36;1mread_memory32: @{:04x}:{:08x} -> {:08x} ({:08x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    return value;
}

ValueWithShadow<u64> SoftCPU::read_memory64(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read64(address);
    outln_if(MEMORY_DEBUG, "\033[36;1mread_memory64: @{:04x}:{:08x} -> {:016x} ({:016x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    return value;
}

ValueWithShadow<u128> SoftCPU::read_memory128(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read128(address);
#if MEMORY_DEBUG
    outln("\033[36;1mread_memory128: @{:04x}:{:08x} -> {:032x} ({:032x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    return value;
}

ValueWithShadow<u256> SoftCPU::read_memory256(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read256(address);
#if MEMORY_DEBUG
    outln("\033[36;1mread_memory256: @{:04x}:{:08x} -> {:064x} ({:064x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    return value;
}

void SoftCPU::write_memory8(X86::LogicalAddress address, ValueWithShadow<u8> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
    outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory8: @{:04x}:{:08x} <- {:02x} ({:02x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    m_emulator.mmu().write8(address, value);
}

void SoftCPU::write_memory16(X86::LogicalAddress address, ValueWithShadow<u16> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
    outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory16: @{:04x}:{:08x} <- {:04x} ({:04x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    m_emulator.mmu().write16(address, value);
}

void SoftCPU::write_memory32(X86::LogicalAddress address, ValueWithShadow<u32> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
    outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory32: @{:04x}:{:08x} <- {:08x} ({:08x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    m_emulator.mmu().write32(address, value);
}

void SoftCPU::write_memory64(X86::LogicalAddress address, ValueWithShadow<u64> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
    outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory64: @{:04x}:{:08x} <- {:016x} ({:016x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    m_emulator.mmu().write64(address, value);
}

void SoftCPU::write_memory128(X86::LogicalAddress address, ValueWithShadow<u128> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
#if MEMORY_DEBUG
    outln("\033[36;1mwrite_memory128: @{:04x}:{:08x} <- {:032x} ({:032x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    m_emulator.mmu().write128(address, value);
}

void SoftCPU::write_memory256(X86::LogicalAddress address, ValueWithShadow<u256> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
#if MEMORY_DEBUG
    outln("\033[36;1mwrite_memory256: @{:04x}:{:08x} <- {:064x} ({:064x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    m_emulator.mmu().write256(address, value);
}

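// Stack helpers. Each push adjusts ESP while preserving ESP's own shadow,
// and warns before dereferencing if ESP itself was derived from
// uninitialized data.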
void SoftCPU::push_string(const StringView& string)
{
    size_t space_to_allocate = round_up_to_power_of_two(string.length() + 1, 16);
    set_esp({ esp().value() - space_to_allocate, esp().shadow() });
    m_emulator.mmu().copy_to_vm(esp().value(), string.characters_without_null_termination(), string.length());
    m_emulator.mmu().write8({ 0x23, esp().value() + string.length() }, shadow_wrap_as_initialized((u8)'\0'));
}

void SoftCPU::push_buffer(const u8* data, size_t size)
{
    set_esp({ esp().value() - size, esp().shadow() });
    warn_if_uninitialized(esp(), "push_buffer");
    m_emulator.mmu().copy_to_vm(esp().value(), data, size);
}

void SoftCPU::push32(ValueWithShadow<u32> value)
{
    set_esp({ esp().value() - sizeof(u32), esp().shadow() });
    warn_if_uninitialized(esp(), "push32");
    write_memory32({ ss(), esp().value() }, value);
}

ValueWithShadow<u32> SoftCPU::pop32()
{
    warn_if_uninitialized(esp(), "pop32");
    auto value = read_memory32({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u32), esp().shadow() });
    return value;
}

void SoftCPU::push16(ValueWithShadow<u16> value)
{
    warn_if_uninitialized(esp(), "push16");
    set_esp({ esp().value() - sizeof(u16), esp().shadow() });
    write_memory16({ ss(), esp().value() }, value);
}

ValueWithShadow<u16> SoftCPU::pop16()
{
    warn_if_uninitialized(esp(), "pop16");
    auto value = read_memory16({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u16), esp().shadow() });
    return value;
}

template<bool check_zf, typename Callback>
void SoftCPU::do_once_or_repeat(const X86::Instruction& insn, Callback callback)
{
    if (!insn.has_rep_prefix())
        return callback();

    while (loop_index(insn.a32()).value()) {
        callback();
        decrement_loop_index(insn.a32());
        if constexpr (check_zf) {
            warn_if_flags_tainted("repz/repnz");
            if (insn.rep_prefix() == X86::Prefix::REPZ && !zf())
                break;
            if (insn.rep_prefix() == X86::Prefix::REPNZ && zf())
                break;
        }
    }
}

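// The ALU helpers below lean on the host CPU: each one executes the same x86
// instruction natively on the operand values, then snapshots the resulting
// EFLAGS with a pushf/pop pair so the emulated flags match real hardware
// bit-for-bit. Shadow/taint state is propagated separately from the operands.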
template<typename T>
ALWAYS_INLINE static T op_inc(SoftCPU& cpu, T data)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("incl %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("incw %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("incb %%al\n"
                     : "=a"(result)
                     : "a"(data.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszap(new_flags);
    cpu.taint_flags_from(data);
    return shadow_wrap_with_taint_from(result, data);
}

template<typename T>
ALWAYS_INLINE static T op_dec(SoftCPU& cpu, T data)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("decl %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("decw %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("decb %%al\n"
                     : "=a"(result)
                     : "a"(data.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszap(new_flags);
    cpu.taint_flags_from(data);
    return shadow_wrap_with_taint_from(result, data);
}

template<typename T>
ALWAYS_INLINE static T op_xor(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("xorl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("xor %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("xorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_or(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("orl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("or %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("orb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_sub(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("subl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("subw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("subb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}

template<typename T, bool cf>
ALWAYS_INLINE static T op_sbb_impl(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sbbl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sbbw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sbbb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_sbb(SoftCPU& cpu, T& dest, const T& src)
{
    cpu.warn_if_flags_tainted("sbb");
    if (cpu.cf())
        return op_sbb_impl<T, true>(cpu, dest, src);
    return op_sbb_impl<T, false>(cpu, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_add(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("addl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("addw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("addb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T, bool cf>
ALWAYS_INLINE static T op_adc_impl(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("adcl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("adcw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("adcb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_adc(SoftCPU& cpu, T& dest, const T& src)
{
    cpu.warn_if_flags_tainted("adc");
    if (cpu.cf())
        return op_adc_impl<T, true>(cpu, dest, src);
    return op_adc_impl<T, false>(cpu, dest, src);
}

template<typename T>
ALWAYS_INLINE static T op_and(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("andl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("andw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("andb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}

template<typename T>
ALWAYS_INLINE static void op_imul(SoftCPU& cpu, const T& dest, const T& src, T& result_high, T& result_low)
{
    bool did_overflow = false;
    if constexpr (sizeof(T) == 4) {
        i64 result = (i64)src * (i64)dest;
        result_low = result & 0xffffffff;
        result_high = result >> 32;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    } else if constexpr (sizeof(T) == 2) {
        i32 result = (i32)src * (i32)dest;
        result_low = result & 0xffff;
        result_high = result >> 16;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    } else if constexpr (sizeof(T) == 1) {
        i16 result = (i16)src * (i16)dest;
        result_low = result & 0xff;
        result_high = result >> 8;
        did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
    }

    if (did_overflow) {
        cpu.set_cf(true);
        cpu.set_of(true);
    } else {
        cpu.set_cf(false);
        cpu.set_of(false);
    }
}

template<typename T>
ALWAYS_INLINE static T op_shr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shrl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shrw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("shrb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shlw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("shlb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shrd(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shrd %%cl, %%edx, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shrd %%cl, %%dx, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_shld(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);

    u32 result = 0;
    u32 new_flags = 0;

    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shld %%cl, %%edx, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shld %%cl, %%dx, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    }

    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));

    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

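// The is_or template flag encodes an OR-specific shadow rule: OR-ing with an
// all-ones immediate produces a fully known result (all ones) no matter how
// tainted the destination was, so the result's shadow is marked initialized.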
template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_AL_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = al();
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm8() == 0xff)
        result.set_initialized();
    if (update_dest)
        set_al(result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_AX_imm16(Op op, const X86::Instruction& insn)
{
    auto dest = ax();
    auto src = shadow_wrap_as_initialized(insn.imm16());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm16() == 0xffff)
        result.set_initialized();
    if (update_dest)
        set_ax(result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_EAX_imm32(Op op, const X86::Instruction& insn)
{
    auto dest = eax();
    auto src = shadow_wrap_as_initialized(insn.imm32());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm32() == 0xffffffff)
        result.set_initialized();
    if (update_dest)
        set_eax(result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_imm16(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm16());
    auto result = op(*this, dest, src);
    if (is_or && insn.imm16() == 0xffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16(*this, insn);
    auto src = shadow_wrap_as_initialized<u16>(sign_extended_to<u16>(insn.imm8()));
    auto result = op(*this, dest, src);
    if (is_or && src.value() == 0xffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_unsigned_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

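// dont_taint_for_same_operand handles idioms like "xor eax, eax" and
// "sub eax, eax": when both operands are the same register the result is
// independent of the register's value, so neither the result nor the flags
// should stay tainted.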
template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_reg16(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16(*this, insn);
    auto src = const_gpr16(insn.reg16());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write16(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_imm32(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32(*this, insn);
    auto src = insn.imm32();
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (is_or && src == 0xffffffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32(*this, insn);
    auto src = sign_extended_to<u32>(insn.imm8());
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (is_or && src == 0xffffffff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_unsigned_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32(*this, insn);
    auto src = shadow_wrap_as_initialized(insn.imm8());
    auto result = op(*this, dest, src);
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_reg32(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32(*this, insn);
    auto src = const_gpr32(insn.reg32());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write32(*this, insn, result);
}

template<bool update_dest, bool is_or, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_imm8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8(*this, insn);
    auto src = insn.imm8();
    auto result = op(*this, dest, shadow_wrap_as_initialized(src));
    if (is_or && src == 0xff)
        result.set_initialized();
    if (update_dest)
        insn.modrm().write8(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_reg8(Op op, const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8(*this, insn);
    auto src = const_gpr8(insn.reg8());
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        insn.modrm().write8(*this, insn, result);
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg16_RM16(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr16(insn.reg16());
    auto src = insn.modrm().read16(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr16(insn.reg16()) = result;
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg32_RM32(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr32(insn.reg32());
    auto src = insn.modrm().read32(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr32(insn.reg32()) = result;
}

template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
ALWAYS_INLINE void SoftCPU::generic_reg8_RM8(Op op, const X86::Instruction& insn)
{
    auto dest = const_gpr8(insn.reg8());
    auto src = insn.modrm().read8(*this, insn);
    auto result = op(*this, dest, src);
    if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
        result.set_initialized();
        m_flags_tainted = false;
    }
    if (update_dest)
        gpr8(insn.reg8()) = result;
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, op(*this, data, cl()));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, op(*this, data, cl()));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_1(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}

template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_CL(Op op, const X86::Instruction& insn)
{
    auto data = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, op(*this, data, cl()));
}

void SoftCPU::AAA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ARPL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::BOUND(const X86::Instruction&) { TODO_INSN(); }

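// BSF maps onto __builtin_ctz (count trailing zeros); BSR uses the native
// instruction. Both are undefined for a zero input, which the handlers below
// sidestep by only writing the destination register when the source is
// nonzero, matching the hardware's "destination undefined on zero" behavior.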
template<typename T>
ALWAYS_INLINE static T op_bsf(SoftCPU&, T value)
{
    return { (typename T::ValueType)__builtin_ctz(value.value()), value.shadow() };
}

template<typename T>
ALWAYS_INLINE static T op_bsr(SoftCPU&, T value)
{
    typename T::ValueType bit_index = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("bsrl %%eax, %%edx"
                     : "=d"(bit_index)
                     : "a"(value.value()));
    }
    if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("bsrw %%ax, %%dx"
                     : "=d"(bit_index)
                     : "a"(value.value()));
    }
    return shadow_wrap_with_taint_from(bit_index, value);
}

void SoftCPU::BSF_reg16_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    set_zf(!src.value());
    if (src.value())
        gpr16(insn.reg16()) = op_bsf(*this, src);
    taint_flags_from(src);
}

void SoftCPU::BSF_reg32_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr32(insn.reg32()) = op_bsf(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSR_reg16_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr16(insn.reg16()) = op_bsr(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSR_reg32_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32(*this, insn);
    set_zf(!src.value());
    if (src.value()) {
        gpr32(insn.reg32()) = op_bsr(*this, src);
        taint_flags_from(src);
    }
}

void SoftCPU::BSWAP_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = { __builtin_bswap32(gpr32(insn.reg32()).value()), __builtin_bswap32(gpr32(insn.reg32()).shadow()) };
}

template<typename T>
ALWAYS_INLINE static T op_bt(T value, T)
{
    return value;
}

template<typename T>
ALWAYS_INLINE static T op_bts(T value, T bit_mask)
{
    return value | bit_mask;
}

template<typename T>
ALWAYS_INLINE static T op_btr(T value, T bit_mask)
{
    return value & ~bit_mask;
}

template<typename T>
ALWAYS_INLINE static T op_btc(T value, T bit_mask)
{
    return value ^ bit_mask;
}

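// For the register forms the bit index wraps at the operand width. For the
// memory forms, the index is split into a byte displacement (index / 8) added
// to the resolved address and a bit position within that byte (index % 8);
// only that single byte is read and, for the mutating variants, written back.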
template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_reg16(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    if (insn.modrm().is_register()) {
        unsigned bit_index = cpu.const_gpr16(insn.reg16()).value() & (X86::TypeTrivia<u16>::bits - 1);
        auto original = insn.modrm().read16(cpu, insn);
        u16 bit_mask = 1 << bit_index;
        u16 result = op(original.value(), bit_mask);
        cpu.set_cf((original.value() & bit_mask) != 0);
        cpu.taint_flags_from(cpu.gpr16(insn.reg16()), original);
        if (should_update)
            insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), original));
        return;
    }
    // FIXME: Is this supposed to perform a full 16-bit read/modify/write?
    unsigned bit_offset_in_array = cpu.const_gpr16(insn.reg16()).value() / 8;
    unsigned bit_offset_in_byte = cpu.const_gpr16(insn.reg16()).value() & 7;
    auto address = insn.modrm().resolve(cpu, insn);
    address.set_offset(address.offset() + bit_offset_in_array);
    auto dest = cpu.read_memory8(address);
    u8 bit_mask = 1 << bit_offset_in_byte;
    u8 result = op(dest.value(), bit_mask);
    cpu.set_cf((dest.value() & bit_mask) != 0);
    cpu.taint_flags_from(cpu.gpr16(insn.reg16()), dest);
    if (should_update)
        cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), dest));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM32_reg32(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    if (insn.modrm().is_register()) {
        unsigned bit_index = cpu.const_gpr32(insn.reg32()).value() & (X86::TypeTrivia<u32>::bits - 1);
        auto original = insn.modrm().read32(cpu, insn);
        u32 bit_mask = 1 << bit_index;
        u32 result = op(original.value(), bit_mask);
        cpu.set_cf((original.value() & bit_mask) != 0);
        cpu.taint_flags_from(cpu.gpr32(insn.reg32()), original);
        if (should_update)
            insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), original));
        return;
    }
    // FIXME: Is this supposed to perform a full 32-bit read/modify/write?
    unsigned bit_offset_in_array = cpu.const_gpr32(insn.reg32()).value() / 8;
    unsigned bit_offset_in_byte = cpu.const_gpr32(insn.reg32()).value() & 7;
    auto address = insn.modrm().resolve(cpu, insn);
    address.set_offset(address.offset() + bit_offset_in_array);
    auto dest = cpu.read_memory8(address);
    u8 bit_mask = 1 << bit_offset_in_byte;
    u8 result = op(dest.value(), bit_mask);
    cpu.set_cf((dest.value() & bit_mask) != 0);
    cpu.taint_flags_from(cpu.gpr32(insn.reg32()), dest);
    if (should_update)
        cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), dest));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u16>::mask);
    // FIXME: Support higher bit indices
    VERIFY(bit_index < 16);
    auto original = insn.modrm().read16(cpu, insn);
    u16 bit_mask = 1 << bit_index;
    auto result = op(original.value(), bit_mask);
    cpu.set_cf((original.value() & bit_mask) != 0);
    cpu.taint_flags_from(original);
    if (should_update)
        insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, original));
}

template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM32_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u32>::mask);
    // FIXME: Support higher bit indices
    VERIFY(bit_index < 32);
    auto original = insn.modrm().read32(cpu, insn);
    u32 bit_mask = 1 << bit_index;
    auto result = op(original.value(), bit_mask);
    cpu.set_cf((original.value() & bit_mask) != 0);
    cpu.taint_flags_from(original);
    if (should_update)
        insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, original));
}

#define DEFINE_GENERIC_BTx_INSN_HANDLERS(mnemonic, op, update_dest)                                                          \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { BTx_RM32_reg32<update_dest>(*this, insn, op<u32>); } \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { BTx_RM16_reg16<update_dest>(*this, insn, op<u16>); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { BTx_RM32_imm8<update_dest>(*this, insn, op<u32>); }   \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { BTx_RM16_imm8<update_dest>(*this, insn, op<u16>); }

DEFINE_GENERIC_BTx_INSN_HANDLERS(BTS, op_bts, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTR, op_btr, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTC, op_btc, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BT, op_bt, false);

  1033. void SoftCPU::CALL_FAR_mem16(const X86::Instruction&)
  1034. {
  1035. TODO();
  1036. }
  1037. void SoftCPU::CALL_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
  1038. void SoftCPU::CALL_RM16(const X86::Instruction&) { TODO_INSN(); }
  1039. void SoftCPU::CALL_RM32(const X86::Instruction& insn)
  1040. {
  1041. push32(shadow_wrap_as_initialized(eip()));
  1042. auto address = insn.modrm().read32(*this, insn);
  1043. warn_if_uninitialized(address, "call rm32");
  1044. set_eip(address.value());
  1045. }
  1046. void SoftCPU::CALL_imm16(const X86::Instruction&) { TODO_INSN(); }
  1047. void SoftCPU::CALL_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
  1048. void SoftCPU::CALL_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }
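
// CALL_imm32 below is a near-relative call: EIP already points past the call
// instruction at this point, so it is pushed as the return address before the
// signed 32-bit displacement is added.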
void SoftCPU::CALL_imm32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(eip()));
    set_eip(eip() + (i32)insn.imm32());
}

void SoftCPU::CBW(const X86::Instruction&)
{
    set_ah(shadow_wrap_with_taint_from<u8>((al().value() & 0x80) ? 0xff : 0x00, al()));
}

void SoftCPU::CDQ(const X86::Instruction&)
{
    if (eax().value() & 0x80000000)
        set_edx(shadow_wrap_with_taint_from<u32>(0xffffffff, eax()));
    else
        set_edx(shadow_wrap_with_taint_from<u32>(0, eax()));
}

void SoftCPU::CLC(const X86::Instruction&)
{
    set_cf(false);
}

void SoftCPU::CLD(const X86::Instruction&)
{
    set_df(false);
}

void SoftCPU::CLI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CLTS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::CMC(const X86::Instruction&)
{
    set_cf(!cf());
}

void SoftCPU::CMOVcc_reg16_RM16(const X86::Instruction& insn)
{
    warn_if_flags_tainted("cmovcc reg16, rm16");
    if (evaluate_condition(insn.cc()))
        gpr16(insn.reg16()) = insn.modrm().read16(*this, insn);
}

void SoftCPU::CMOVcc_reg32_RM32(const X86::Instruction& insn)
{
    warn_if_flags_tainted("cmovcc reg32, rm32");
    if (evaluate_condition(insn.cc()))
        gpr32(insn.reg32()) = insn.modrm().read32(*this, insn);
}

template<typename T>
ALWAYS_INLINE static void do_cmps(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
        op_sub(cpu, dest, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::CMPSB(const X86::Instruction& insn)
{
    do_cmps<u8>(*this, insn);
}

void SoftCPU::CMPSD(const X86::Instruction& insn)
{
    do_cmps<u32>(*this, insn);
}

void SoftCPU::CMPSW(const X86::Instruction& insn)
{
    do_cmps<u16>(*this, insn);
}
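
// The CMPXCHG handlers below compare the accumulator (AL/AX/EAX) with the r/m
// operand: on a match, ZF is set and the source register is stored into the
// destination; otherwise ZF is cleared and the destination value is loaded
// into the accumulator.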
void SoftCPU::CMPXCHG_RM16_reg16(const X86::Instruction& insn)
{
    auto current = insn.modrm().read16(*this, insn);
    taint_flags_from(current, ax());
    if (current.value() == ax().value()) {
        set_zf(true);
        insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
    } else {
        set_zf(false);
        set_ax(current);
    }
}

void SoftCPU::CMPXCHG_RM32_reg32(const X86::Instruction& insn)
{
    auto current = insn.modrm().read32(*this, insn);
    taint_flags_from(current, eax());
    if (current.value() == eax().value()) {
        set_zf(true);
        insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
    } else {
        set_zf(false);
        set_eax(current);
    }
}

void SoftCPU::CMPXCHG_RM8_reg8(const X86::Instruction& insn)
{
    auto current = insn.modrm().read8(*this, insn);
    taint_flags_from(current, al());
    if (current.value() == al().value()) {
        set_zf(true);
        insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
    } else {
        set_zf(false);
        set_al(current);
    }
}
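
// CPUID leaf 0 below reports the maximum supported leaf (1) and a custom
// vendor ID: the bytes of EBX, EDX, ECX spell out "HelloFriends" when read in
// order. Leaf 1 reports stepping/model/family/type and advertises only the
// CMOV feature (bit 15 of EDX), matching the CMOVcc handlers implemented
// above.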
void SoftCPU::CPUID(const X86::Instruction&)
{
    if (eax().value() == 0) {
        set_eax(shadow_wrap_as_initialized<u32>(1));
        set_ebx(shadow_wrap_as_initialized<u32>(0x6c6c6548));
        set_edx(shadow_wrap_as_initialized<u32>(0x6972466f));
        set_ecx(shadow_wrap_as_initialized<u32>(0x73646e65));
        return;
    }
    if (eax().value() == 1) {
        u32 stepping = 0;
        u32 model = 1;
        u32 family = 3;
        u32 type = 0;
        set_eax(shadow_wrap_as_initialized<u32>(stepping | (model << 4) | (family << 8) | (type << 12)));
        set_ebx(shadow_wrap_as_initialized<u32>(0));
        set_edx(shadow_wrap_as_initialized<u32>((1 << 15))); // Features (CMOV)
        set_ecx(shadow_wrap_as_initialized<u32>(0));
        return;
    }
    dbgln("Unhandled CPUID with eax={:08x}", eax().value());
}

void SoftCPU::CWD(const X86::Instruction&)
{
    set_dx(shadow_wrap_with_taint_from<u16>((ax().value() & 0x8000) ? 0xffff : 0x0000, ax()));
}

void SoftCPU::CWDE(const X86::Instruction&)
{
    set_eax(shadow_wrap_with_taint_from(sign_extended_to<u32>(ax().value()), ax()));
}

void SoftCPU::DAA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::DAS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::DEC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_dec(*this, insn.modrm().read16(*this, insn)));
}

void SoftCPU::DEC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_dec(*this, insn.modrm().read32(*this, insn)));
}

void SoftCPU::DEC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_dec(*this, insn.modrm().read8(*this, insn)));
}

void SoftCPU::DEC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_dec(*this, const_gpr16(insn.reg16()));
}

void SoftCPU::DEC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_dec(*this, const_gpr32(insn.reg32()));
}
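
// The unsigned DIV handlers below form the wide dividend from DX:AX (or
// EDX:EAX, or just AX for the 8-bit form), then store the quotient in the
// accumulator and the remainder in the high half. Division by zero and
// quotient overflow would raise #DE on real hardware; here they are reported
// and abort via TODO().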
void SoftCPU::DIV_RM16(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read16(*this, insn);
    if (divisor.value() == 0) {
        reportln("Divide by zero");
        TODO();
    }
    u32 dividend = ((u32)dx().value() << 16) | ax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u16>::max()) {
        reportln("Divide overflow");
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(quotient, original_ax, dx()));
    set_dx(shadow_wrap_with_taint_from<u16>(remainder, original_ax, dx()));
}

void SoftCPU::DIV_RM32(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read32(*this, insn);
    if (divisor.value() == 0) {
        reportln("Divide by zero");
        TODO();
    }
    u64 dividend = ((u64)edx().value() << 32) | eax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u32>::max()) {
        reportln("Divide overflow");
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(quotient, original_eax, edx(), divisor));
    set_edx(shadow_wrap_with_taint_from<u32>(remainder, original_eax, edx(), divisor));
}

void SoftCPU::DIV_RM8(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read8(*this, insn);
    if (divisor.value() == 0) {
        reportln("Divide by zero");
        TODO();
    }
    u16 dividend = ax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u8>::max()) {
        reportln("Divide overflow");
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(quotient, original_ax, divisor));
    set_ah(shadow_wrap_with_taint_from<u8>(remainder, original_ax, divisor));
}

void SoftCPU::ENTER16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ENTER32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::ESCAPE(const X86::Instruction&)
{
    reportln("FIXME: x87 floating-point support");
    m_emulator.dump_backtrace();
    TODO();
}

void SoftCPU::FADD_RM32(const X86::Instruction& insn)
{
    // XXX: Look at ::INC_foo for how mem/reg cases are handled, and use that here too to make sure this is only called for mem32 ops
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(insn.modrm().register_index()) + fpu_get(0));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, fpu_get(0) + f32);
    }
}

void SoftCPU::FMUL_RM32(const X86::Instruction& insn)
{
    // XXX: Look at ::INC_foo for how mem/reg cases are handled, and use that here too to make sure this is only called for mem32 ops
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(0) * fpu_get(insn.modrm().register_index()));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, fpu_get(0) * f32);
    }
}

void SoftCPU::FCOM_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FSUB_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(0) - fpu_get(insn.modrm().register_index()));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, fpu_get(0) - f32);
    }
}

void SoftCPU::FSUBR_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, f32 - fpu_get(0));
    }
}

void SoftCPU::FDIV_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(0) / fpu_get(insn.modrm().register_index()));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        // FIXME: Raise IA on +-infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, fpu_get(0) / f32);
    }
}

void SoftCPU::FDIVR_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        // FIXME: Raise IA on +-infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, f32 / fpu_get(0));
    }
}

void SoftCPU::FLD_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_push(fpu_get(insn.modrm().register_index()));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        fpu_push(bit_cast<float>(new_f32.value()));
    }
}

void SoftCPU::FXCH(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    auto tmp = fpu_get(0);
    fpu_set(0, fpu_get(insn.modrm().register_index()));
    fpu_set(insn.modrm().register_index(), tmp);
}

void SoftCPU::FST_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    float f32 = (float)fpu_get(0);
    // FIXME: Respect shadow values
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(f32)));
}

void SoftCPU::FNOP(const X86::Instruction&)
{
}

void SoftCPU::FSTP_RM32(const X86::Instruction& insn)
{
    FST_RM32(insn);
    fpu_pop();
}

void SoftCPU::FLDENV(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FCHS(const X86::Instruction&)
{
    fpu_set(0, -fpu_get(0));
}

void SoftCPU::FABS(const X86::Instruction&)
{
    fpu_set(0, __builtin_fabs(fpu_get(0)));
}

void SoftCPU::FTST(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXAM(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FLDCW(const X86::Instruction& insn)
{
    m_fpu_cw = insn.modrm().read16(*this, insn);
}

void SoftCPU::FLD1(const X86::Instruction&)
{
    fpu_push(1.0);
}

void SoftCPU::FLDL2T(const X86::Instruction&)
{
    fpu_push(log2f(10.0f));
}

void SoftCPU::FLDL2E(const X86::Instruction&)
{
    fpu_push(log2f(M_E));
}

void SoftCPU::FLDPI(const X86::Instruction&)
{
    fpu_push(M_PI);
}

void SoftCPU::FLDLG2(const X86::Instruction&)
{
    fpu_push(log10f(2.0f));
}

void SoftCPU::FLDLN2(const X86::Instruction&)
{
    fpu_push(M_LN2);
}

void SoftCPU::FLDZ(const X86::Instruction&)
{
    fpu_push(0.0);
}

void SoftCPU::FNSTENV(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::F2XM1(const X86::Instruction&)
{
    // FIXME: Validate that ST(0) is in the range -1.0 to +1.0
    auto f32 = fpu_get(0);
    // FIXME: Set C0, C2, C3 in FPU status word.
    fpu_set(0, powf(2, f32) - 1.0f);
}

void SoftCPU::FYL2X(const X86::Instruction&)
{
    // FIXME: Raise IA on +-infinity, +-0, raise Z on +-0
    auto f32 = fpu_get(0);
    // FIXME: Set C0, C2, C3 in FPU status word.
    fpu_set(1, fpu_get(1) * log2f(f32));
    fpu_pop();
}

void SoftCPU::FYL2XP1(const X86::Instruction&)
{
    // FIXME: Validate ST(0) range
    auto f32 = fpu_get(0);
    // FIXME: Set C0, C2, C3 in FPU status word.
    fpu_set(1, (fpu_get(1) * log2f(f32 + 1.0f)));
    fpu_pop();
}
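
// FPTAN computes the "partial tangent": it replaces ST(0) with tan(ST(0)) and
// then pushes 1.0, so callers can form the tangent as ST(1)/ST(0). The push of
// the constant 1.0 below is that architectural quirk, not a bug.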
void SoftCPU::FPTAN(const X86::Instruction&)
{
    // FIXME: set C1 upon stack overflow or if result was rounded
    // FIXME: Set C2 to 1 if ST(0) is outside range of -2^63 to +2^63; else set to 0
    fpu_set(0, tanf(fpu_get(0)));
    fpu_push(1.0f);
}

void SoftCPU::FPATAN(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXTRACT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FPREM1(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FDECSTP(const X86::Instruction&)
{
    m_fpu_top = (m_fpu_top == 0) ? 7 : m_fpu_top - 1;
    set_cf(0);
}

void SoftCPU::FINCSTP(const X86::Instruction&)
{
    m_fpu_top = (m_fpu_top == 7) ? 0 : m_fpu_top + 1;
    set_cf(0);
}

void SoftCPU::FNSTCW(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, m_fpu_cw);
}

void SoftCPU::FPREM(const X86::Instruction&)
{
    fpu_set(0, fmodl(fpu_get(0), fpu_get(1)));
}

void SoftCPU::FSQRT(const X86::Instruction&)
{
    fpu_set(0, sqrt(fpu_get(0)));
}
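
// FSINCOS leaves the sine in ST(1) and the cosine in ST(0): the handler below
// first replaces ST(0) with sin and then pushes cos on top, matching the
// architectural result order.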
void SoftCPU::FSINCOS(const X86::Instruction&)
{
    long double sin = sinl(fpu_get(0));
    long double cos = cosl(fpu_get(0));
    fpu_set(0, sin);
    fpu_push(cos);
}

void SoftCPU::FRNDINT(const X86::Instruction&)
{
    // FIXME: support rounding mode
    fpu_set(0, round(fpu_get(0)));
}

void SoftCPU::FSCALE(const X86::Instruction&)
{
    // FIXME: set C1 upon stack overflow or if result was rounded
    fpu_set(0, fpu_get(0) * powf(2, floorf(fpu_get(1))));
}

void SoftCPU::FSIN(const X86::Instruction&)
{
    fpu_set(0, sin(fpu_get(0)));
}

void SoftCPU::FCOS(const X86::Instruction&)
{
    fpu_set(0, cos(fpu_get(0)));
}

void SoftCPU::FIADD_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) + (long double)m32int);
}

void SoftCPU::FCMOVB(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (cf())
        fpu_set(0, fpu_get(insn.rm() & 7));
}

void SoftCPU::FIMUL_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) * (long double)m32int);
}

void SoftCPU::FCMOVE(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (zf())
        fpu_set(0, fpu_get(insn.rm() & 7));
}

void SoftCPU::FICOM_RM32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FCMOVBE(const X86::Instruction& insn)
{
    if (evaluate_condition(6))
        fpu_set(0, fpu_get(insn.rm() & 7));
}

void SoftCPU::FICOMP_RM32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FCMOVU(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (pf())
        fpu_set(0, fpu_get(insn.modrm().reg_fpu()));
}

void SoftCPU::FISUB_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) - (long double)m32int);
}

void SoftCPU::FISUBR_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, (long double)m32int - fpu_get(0));
}

void SoftCPU::FIDIV_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / +-0, raise Z on finite / +-0
    fpu_set(0, fpu_get(0) / (long double)m32int);
}

void SoftCPU::FIDIVR_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / +-0, raise Z on finite / +-0
    fpu_set(0, (long double)m32int / fpu_get(0));
}

void SoftCPU::FILD_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m32int);
}

void SoftCPU::FCMOVNB(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (!cf())
        fpu_set(0, fpu_get(insn.modrm().reg_fpu()));
}

void SoftCPU::FISTTP_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    i32 value = static_cast<i32>(fpu_pop());
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(value)));
}

void SoftCPU::FCMOVNE(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (!zf())
        fpu_set(0, fpu_get(insn.modrm().reg_fpu()));
}

void SoftCPU::FIST_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_get(0);
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto value = static_cast<i32>(f);
    // FIXME: Respect shadow values
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(value)));
}

void SoftCPU::FCMOVNBE(const X86::Instruction& insn)
{
    if (evaluate_condition(7))
        fpu_set(0, fpu_get(insn.rm() & 7));
}

void SoftCPU::FISTP_RM32(const X86::Instruction& insn)
{
    FIST_RM32(insn);
    fpu_pop();
}

void SoftCPU::FCMOVNU(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    if (!pf())
        fpu_set(0, fpu_get(insn.modrm().reg_fpu()));
}

void SoftCPU::FNENI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNDISI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNCLEX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNINIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSETPM(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FLD_RM80(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    // Long doubles can be up to 128 bits wide in memory for alignment reasons, but only use 80 bits of precision;
    // gcc uses 12 bytes in 32-bit mode and 16 bytes in 64-bit mode.
    // So in the 32-bit case we read a bit too much, but that shouldn't be a problem.
    auto new_f80 = insn.modrm().read128(*this, insn);
    // FIXME: Respect shadow values
    fpu_push(*(long double*)new_f80.value().bytes());
}

void SoftCPU::FUCOMI(const X86::Instruction& insn)
{
    auto i = insn.rm() & 7;
    // FIXME: Unordered comparison checks.
    // FIXME: QNaN / exception handling.
    // FIXME: Set C0, C2, C3 in FPU status word.
    if (__builtin_isnan(fpu_get(0)) || __builtin_isnan(fpu_get(i))) {
        set_zf(true);
        set_pf(true);
        set_cf(true);
    } else {
        set_zf(fpu_get(0) == fpu_get(i));
        set_pf(false);
        set_cf(fpu_get(0) < fpu_get(i));
        set_of(false);
    }
    // FIXME: Taint should be based on ST(0) and ST(i)
    m_flags_tainted = false;
}

void SoftCPU::FCOMI(const X86::Instruction& insn)
{
    auto i = insn.rm() & 7;
    // FIXME: QNaN / exception handling.
    // FIXME: Set C0, C2, C3 in FPU status word.
    set_zf(fpu_get(0) == fpu_get(i));
    set_pf(false);
    set_cf(fpu_get(0) < fpu_get(i));
    set_of(false);
    // FIXME: Taint should be based on ST(0) and ST(i)
    m_flags_tainted = false;
}

void SoftCPU::FSTP_RM80(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_pop());
    } else {
        // FIXME: Respect shadow values
        // Long doubles can be up to 128 bits wide in memory for alignment reasons, but only use 80 bits of precision;
        // gcc uses 12 bytes in 32-bit mode and 16 bytes in 64-bit mode.
        // So in the 32-bit case we have to read first, so that the overly wide write doesn't overwrite neighboring data.
        u128 f80 {};
        if constexpr (sizeof(long double) == 12)
            f80 = insn.modrm().read128(*this, insn).value();
        *(long double*)f80.bytes() = fpu_pop();
        insn.modrm().write128(*this, insn, shadow_wrap_as_initialized(f80));
    }
}

void SoftCPU::FADD_RM64(const X86::Instruction& insn)
{
    // XXX: Look at ::INC_foo for how mem/reg cases are handled, and use that here too to make sure this is only called for mem64 ops
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) + fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) + f64);
    }
}

void SoftCPU::FMUL_RM64(const X86::Instruction& insn)
{
    // XXX: Look at ::INC_foo for how mem/reg cases are handled, and use that here too to make sure this is only called for mem64 ops
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) * fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) * f64);
    }
}

void SoftCPU::FCOM_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM64(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FSUB_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) - f64);
    }
}

void SoftCPU::FSUBR_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, f64 - fpu_get(0));
    }
}

void SoftCPU::FDIV_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        // FIXME: Raise IA on +-infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, fpu_get(0) / f64);
    }
}

void SoftCPU::FDIVR_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        // XXX: This is FDIVR; Instruction decodes this weirdly
        //fpu_set(insn.modrm().register_index(), fpu_get(0) / fpu_get(insn.modrm().register_index()));
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        // FIXME: Raise IA on +-infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, f64 / fpu_get(0));
    }
}

void SoftCPU::FLD_RM64(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto new_f64 = insn.modrm().read64(*this, insn);
    // FIXME: Respect shadow values
    fpu_push(bit_cast<double>(new_f64.value()));
}

void SoftCPU::FFREE(const X86::Instruction&) { TODO_INSN(); }
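
// FISTTP (from SSE3) always converts with truncation toward zero, regardless
// of the rounding mode in the FPU control word, which is why the handlers can
// use a plain static_cast, unlike FIST/FISTP which are supposed to honor
// m_fpu_cw (see the FIXMEs there).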
void SoftCPU::FISTTP_RM64(const X86::Instruction& insn)
{
    // Is this allowed to be a register?
    VERIFY(!insn.modrm().is_register());
    i64 value = static_cast<i64>(fpu_pop());
    insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(value)));
}

void SoftCPU::FST_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(0));
    } else {
        // FIXME: Respect shadow values
        double f64 = (double)fpu_get(0);
        insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(f64)));
    }
}

void SoftCPU::FSTP_RM64(const X86::Instruction& insn)
{
    FST_RM64(insn);
    fpu_pop();
}

void SoftCPU::FRSTOR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMPP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSAVE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTSW(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FIADD_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) + (long double)m16int);
}

void SoftCPU::FADDP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) + fpu_get(0));
    fpu_pop();
}

void SoftCPU::FIMUL_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) * (long double)m16int);
}

void SoftCPU::FMULP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) * fpu_get(0));
    fpu_pop();
}

void SoftCPU::FICOM_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FICOMP_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMPP(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FISUB_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) - (long double)m16int);
}

void SoftCPU::FSUBRP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(0) - fpu_get(insn.modrm().register_index()));
    fpu_pop();
}

void SoftCPU::FISUBR_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, (long double)m16int - fpu_get(0));
}

void SoftCPU::FSUBP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    fpu_pop();
}

void SoftCPU::FIDIV_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / +-0, raise Z on finite / +-0
    fpu_set(0, fpu_get(0) / (long double)m16int);
}

void SoftCPU::FDIVRP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    // FIXME: Raise IA on +-infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
    fpu_set(insn.modrm().register_index(), fpu_get(0) / fpu_get(insn.modrm().register_index()));
    fpu_pop();
}

void SoftCPU::FIDIVR_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / +-0, raise Z on finite / +-0
    fpu_set(0, (long double)m16int / fpu_get(0));
}

void SoftCPU::FDIVP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    // FIXME: Raise IA on +-infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    fpu_pop();
}

void SoftCPU::FILD_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m16int);
}

void SoftCPU::FFREEP(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FISTTP_RM16(const X86::Instruction& insn)
{
    // Is this allowed to be a register?
    VERIFY(!insn.modrm().is_register());
    i16 value = static_cast<i16>(fpu_pop());
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(bit_cast<u16>(value)));
}

void SoftCPU::FIST_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_get(0);
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto value = static_cast<i16>(f);
    // FIXME: Respect shadow values
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(bit_cast<u16>(value)));
}

void SoftCPU::FISTP_RM16(const X86::Instruction& insn)
{
    FIST_RM16(insn);
    fpu_pop();
}

void SoftCPU::FBLD_M80(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTSW_AX(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FILD_RM64(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m64int = (i64)insn.modrm().read64(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m64int);
}

void SoftCPU::FUCOMIP(const X86::Instruction& insn)
{
    FUCOMI(insn);
    fpu_pop();
}

void SoftCPU::FBSTP_M80(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::FCOMIP(const X86::Instruction& insn)
{
    FCOMI(insn);
    fpu_pop();
}

void SoftCPU::FISTP_RM64(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_pop();
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto value = static_cast<i64>(f);
    // FIXME: Respect shadow values
    insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(value)));
}

void SoftCPU::HLT(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::IDIV_RM16(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read16(*this, insn);
    auto divisor = (i16)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i32 dividend = (i32)(((u32)dx().value() << 16) | (u32)ax().value());
    i32 result = dividend / divisor;
    if (result > NumericLimits<i16>::max() || result < NumericLimits<i16>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result, original_ax, dx(), divisor_with_shadow));
    set_dx(shadow_wrap_with_taint_from<u16>(dividend % divisor, original_ax, dx(), divisor_with_shadow));
}

void SoftCPU::IDIV_RM32(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read32(*this, insn);
    auto divisor = (i32)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i64 dividend = (i64)(((u64)edx().value() << 32) | (u64)eax().value());
    i64 result = dividend / divisor;
    if (result > NumericLimits<i32>::max() || result < NumericLimits<i32>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, original_eax, edx(), divisor_with_shadow));
    set_edx(shadow_wrap_with_taint_from<u32>(dividend % divisor, original_eax, edx(), divisor_with_shadow));
}

void SoftCPU::IDIV_RM8(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read8(*this, insn);
    auto divisor = (i8)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i16 dividend = ax().value();
    i16 result = dividend / divisor;
    if (result > NumericLimits<i8>::max() || result < NumericLimits<i8>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(result, divisor_with_shadow, original_ax));
    set_ah(shadow_wrap_with_taint_from<u8>(dividend % divisor, divisor_with_shadow, original_ax));
}
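
// The one-operand IMUL forms below write the full double-width signed product
// to a register pair: AH:AL for the 8-bit form, DX:AX for 16-bit, EDX:EAX for
// 32-bit. The reg, r/m (and reg, r/m, imm) forms further down keep only the
// low half of the product.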
void SoftCPU::IMUL_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), ax().value(), result_high, result_low);
    gpr16(X86::RegisterDX) = shadow_wrap_with_taint_from<u16>(result_high, src, ax());
    gpr16(X86::RegisterAX) = shadow_wrap_with_taint_from<u16>(result_low, src, ax());
}

void SoftCPU::IMUL_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), eax().value(), result_high, result_low);
    gpr32(X86::RegisterEDX) = shadow_wrap_with_taint_from<u32>(result_high, src, eax());
    gpr32(X86::RegisterEAX) = shadow_wrap_with_taint_from<u32>(result_low, src, eax());
}

void SoftCPU::IMUL_RM8(const X86::Instruction& insn)
{
    i8 result_high;
    i8 result_low;
    auto src = insn.modrm().read8(*this, insn);
    op_imul<i8>(*this, src.value(), al().value(), result_high, result_low);
    gpr8(X86::RegisterAH) = shadow_wrap_with_taint_from<u8>(result_high, src, al());
    gpr8(X86::RegisterAL) = shadow_wrap_with_taint_from<u8>(result_low, src, al());
}

void SoftCPU::IMUL_reg16_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, gpr16(insn.reg16()).value(), src.value(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src, gpr16(insn.reg16()));
}

void SoftCPU::IMUL_reg16_RM16_imm16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), insn.imm16(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}

void SoftCPU::IMUL_reg16_RM16_imm8(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), sign_extended_to<i16>(insn.imm8()), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}

void SoftCPU::IMUL_reg32_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, gpr32(insn.reg32()).value(), src.value(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src, gpr32(insn.reg32()));
}

void SoftCPU::IMUL_reg32_RM32_imm32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), insn.imm32(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}

void SoftCPU::IMUL_reg32_RM32_imm8(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), sign_extended_to<i32>(insn.imm8()), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}

void SoftCPU::INC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_inc(*this, insn.modrm().read16(*this, insn)));
}

void SoftCPU::INC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_inc(*this, insn.modrm().read32(*this, insn)));
}

void SoftCPU::INC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_inc(*this, insn.modrm().read8(*this, insn)));
}

void SoftCPU::INC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_inc(*this, const_gpr16(insn.reg16()));
}

void SoftCPU::INC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_inc(*this, const_gpr32(insn.reg32()));
}

void SoftCPU::INSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INT3(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INTO(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::INT_imm8(const X86::Instruction& insn)
{
    VERIFY(insn.imm8() == 0x82);
    // FIXME: virt_syscall should take ValueWithShadow and whine about uninitialized arguments
    set_eax(shadow_wrap_as_initialized(m_emulator.virt_syscall(eax().value(), edx().value(), ecx().value(), ebx().value())));
}

void SoftCPU::INVLPG(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IRET(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::JCXZ_imm8(const X86::Instruction& insn)
{
    if (insn.a32()) {
        warn_if_uninitialized(ecx(), "jecxz imm8");
        if (ecx().value() == 0)
            set_eip(eip() + (i8)insn.imm8());
    } else {
        warn_if_uninitialized(cx(), "jcxz imm8");
        if (cx().value() == 0)
            set_eip(eip() + (i8)insn.imm8());
    }
}

void SoftCPU::JMP_FAR_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_RM16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::JMP_RM32(const X86::Instruction& insn)
{
    set_eip(insn.modrm().read32(*this, insn).value());
}

void SoftCPU::JMP_imm16(const X86::Instruction& insn)
{
    set_eip(eip() + (i16)insn.imm16());
}

void SoftCPU::JMP_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::JMP_imm32(const X86::Instruction& insn)
{
    set_eip(eip() + (i32)insn.imm32());
}

void SoftCPU::JMP_short_imm8(const X86::Instruction& insn)
{
    set_eip(eip() + (i8)insn.imm8());
}

void SoftCPU::Jcc_NEAR_imm(const X86::Instruction& insn)
{
    warn_if_flags_tainted("jcc near imm32");
    if (evaluate_condition(insn.cc()))
        set_eip(eip() + (i32)insn.imm32());
}

void SoftCPU::Jcc_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("jcc imm8");
    if (evaluate_condition(insn.cc()))
        set_eip(eip() + (i8)insn.imm8());
}

void SoftCPU::LAHF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LEAVE16(const X86::Instruction&) { TODO_INSN(); }
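
// LEAVE32 below is the usual epilogue shorthand for "mov esp, ebp; pop ebp":
// the saved EBP is read from [SS:EBP], ESP is pointed just past that slot
// (EBP + 4 accounts for the implicit pop), and EBP is restored.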
void SoftCPU::LEAVE32(const X86::Instruction&)
{
    auto new_ebp = read_memory32({ ss(), ebp().value() });
    set_esp({ ebp().value() + 4, ebp().shadow() });
    set_ebp(new_ebp);
}

void SoftCPU::LEA_reg16_mem16(const X86::Instruction& insn)
{
    // FIXME: Respect shadow values
    gpr16(insn.reg16()) = shadow_wrap_as_initialized<u16>(insn.modrm().resolve(*this, insn).offset());
}

void SoftCPU::LEA_reg32_mem32(const X86::Instruction& insn)
{
    // FIXME: Respect shadow values
    gpr32(insn.reg32()) = shadow_wrap_as_initialized<u32>(insn.modrm().resolve(*this, insn).offset());
}

void SoftCPU::LES_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LES_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LMSW_RM16(const X86::Instruction&) { TODO_INSN(); }

template<typename T>
ALWAYS_INLINE static void do_lods(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.gpr<T>(X86::RegisterAL) = src;
        cpu.step_source_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::LODSB(const X86::Instruction& insn)
{
    do_lods<u8>(*this, insn);
}

void SoftCPU::LODSD(const X86::Instruction& insn)
{
    do_lods<u32>(*this, insn);
}

void SoftCPU::LODSW(const X86::Instruction& insn)
{
    do_lods<u16>(*this, insn);
}
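
// The LOOP family below decrements CX/ECX (picked by the address-size
// attribute) without touching any flags, then branches while the counter is
// nonzero; LOOPZ/LOOPNZ additionally require ZF to be set/clear. Only the
// ZF-reading variants need the taint warning, since plain LOOP never reads
// flags.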
void SoftCPU::LOOPNZ_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("loopnz");
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0 && !zf())
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0 && !zf())
            set_eip(eip() + (i8)insn.imm8());
    }
}

void SoftCPU::LOOPZ_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("loopz");
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0 && zf())
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0 && zf())
            set_eip(eip() + (i8)insn.imm8());
    }
}

void SoftCPU::LOOP_imm8(const X86::Instruction& insn)
{
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0)
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0)
            set_eip(eip() + (i8)insn.imm8());
    }
}

void SoftCPU::LSL_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSL_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LTR_RM16(const X86::Instruction&) { TODO_INSN(); }

template<typename T>
ALWAYS_INLINE static void do_movs(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<false>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.write_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() }, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::MOVSB(const X86::Instruction& insn)
{
    do_movs<u8>(*this, insn);
}

void SoftCPU::MOVSD(const X86::Instruction& insn)
{
    do_movs<u32>(*this, insn);
}

void SoftCPU::MOVSW(const X86::Instruction& insn)
{
    do_movs<u16>(*this, insn);
}

void SoftCPU::MOVSX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(sign_extended_to<u16>(src.value()), src);
}

void SoftCPU::MOVSX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(sign_extended_to<u32>(src.value()), src);
}

void SoftCPU::MOVSX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(sign_extended_to<u32>(src.value()), src);
}
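
// In the MOVZX handlers below the shadow is assembled by hand: each 0x01
// shadow byte appears to mark the corresponding value byte as initialized
// (the same convention shadow_wrap_as_initialized uses), so the zero-extended
// upper bytes are always "initialized" while the low byte(s) keep the shadow
// of the source operand.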
  2277. void SoftCPU::MOVZX_reg16_RM8(const X86::Instruction& insn)
  2278. {
  2279. auto src = insn.modrm().read8(*this, insn);
  2280. gpr16(insn.reg16()) = ValueWithShadow<u16>(src.value(), 0x0100 | (src.shadow() & 0xff));
  2281. }
  2282. void SoftCPU::MOVZX_reg32_RM16(const X86::Instruction& insn)
  2283. {
  2284. auto src = insn.modrm().read16(*this, insn);
  2285. gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010000 | (src.shadow() & 0xffff));
  2286. }
  2287. void SoftCPU::MOVZX_reg32_RM8(const X86::Instruction& insn)
  2288. {
  2289. auto src = insn.modrm().read8(*this, insn);
  2290. gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010100 | (src.shadow() & 0xff));
  2291. }
  2292. void SoftCPU::MOV_AL_moff8(const X86::Instruction& insn)
  2293. {
  2294. set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
  2295. }
  2296. void SoftCPU::MOV_AX_moff16(const X86::Instruction& insn)
  2297. {
  2298. set_ax(read_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
  2299. }
  2300. void SoftCPU::MOV_CR_reg32(const X86::Instruction&) { TODO_INSN(); }
  2301. void SoftCPU::MOV_DR_reg32(const X86::Instruction&) { TODO_INSN(); }
  2302. void SoftCPU::MOV_EAX_moff32(const X86::Instruction& insn)
  2303. {
  2304. set_eax(read_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
  2305. }
  2306. void SoftCPU::MOV_RM16_imm16(const X86::Instruction& insn)
  2307. {
  2308. insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(insn.imm16()));
  2309. }
  2310. void SoftCPU::MOV_RM16_reg16(const X86::Instruction& insn)
  2311. {
  2312. insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
  2313. }
  2314. void SoftCPU::MOV_RM16_seg(const X86::Instruction&) { TODO_INSN(); }
  2315. void SoftCPU::MOV_RM32_imm32(const X86::Instruction& insn)
  2316. {
  2317. insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(insn.imm32()));
  2318. }
  2319. void SoftCPU::MOV_RM32_reg32(const X86::Instruction& insn)
  2320. {
  2321. insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
  2322. }
  2323. void SoftCPU::MOV_RM8_imm8(const X86::Instruction& insn)
  2324. {
  2325. insn.modrm().write8(*this, insn, shadow_wrap_as_initialized(insn.imm8()));
  2326. }
  2327. void SoftCPU::MOV_RM8_reg8(const X86::Instruction& insn)
  2328. {
  2329. insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
  2330. }
  2331. void SoftCPU::MOV_moff16_AX(const X86::Instruction& insn)
  2332. {
  2333. write_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, ax());
  2334. }
  2335. void SoftCPU::MOV_moff32_EAX(const X86::Instruction& insn)
  2336. {
  2337. write_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, eax());
  2338. }
  2339. void SoftCPU::MOV_moff8_AL(const X86::Instruction& insn)
  2340. {
  2341. write_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, al());
  2342. }
  2343. void SoftCPU::MOV_reg16_RM16(const X86::Instruction& insn)
  2344. {
  2345. gpr16(insn.reg16()) = insn.modrm().read16(*this, insn);
  2346. }
  2347. void SoftCPU::MOV_reg16_imm16(const X86::Instruction& insn)
  2348. {
  2349. gpr16(insn.reg16()) = shadow_wrap_as_initialized(insn.imm16());
  2350. }
  2351. void SoftCPU::MOV_reg32_CR(const X86::Instruction&) { TODO_INSN(); }
  2352. void SoftCPU::MOV_reg32_DR(const X86::Instruction&) { TODO_INSN(); }
  2353. void SoftCPU::MOV_reg32_RM32(const X86::Instruction& insn)
  2354. {
  2355. gpr32(insn.reg32()) = insn.modrm().read32(*this, insn);
  2356. }
  2357. void SoftCPU::MOV_reg32_imm32(const X86::Instruction& insn)
  2358. {
  2359. gpr32(insn.reg32()) = shadow_wrap_as_initialized(insn.imm32());
  2360. }
  2361. void SoftCPU::MOV_reg8_RM8(const X86::Instruction& insn)
  2362. {
  2363. gpr8(insn.reg8()) = insn.modrm().read8(*this, insn);
  2364. }
  2365. void SoftCPU::MOV_reg8_imm8(const X86::Instruction& insn)
  2366. {
  2367. gpr8(insn.reg8()) = shadow_wrap_as_initialized(insn.imm8());
  2368. }
  2369. void SoftCPU::MOV_seg_RM16(const X86::Instruction&) { TODO_INSN(); }
  2370. void SoftCPU::MOV_seg_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MUL_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    u32 result = (u32)ax().value() * (u32)src.value();
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result & 0xffff, src, original_ax));
    set_dx(shadow_wrap_with_taint_from<u16>(result >> 16, src, original_ax));
    taint_flags_from(src, original_ax);
    set_cf(dx().value() != 0);
    set_of(dx().value() != 0);
}

void SoftCPU::MUL_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32(*this, insn);
    u64 result = (u64)eax().value() * (u64)src.value();
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, src, original_eax));
    set_edx(shadow_wrap_with_taint_from<u32>(result >> 32, src, original_eax));
    taint_flags_from(src, original_eax);
    set_cf(edx().value() != 0);
    set_of(edx().value() != 0);
}

void SoftCPU::MUL_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    u16 result = (u16)al().value() * src.value();
    auto original_al = al();
    set_ax(shadow_wrap_with_taint_from(result, src, original_al));
    taint_flags_from(src, original_al);
    set_cf((result & 0xff00) != 0);
    set_of((result & 0xff00) != 0);
}

void SoftCPU::NEG_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_sub<ValueWithShadow<u16>>(*this, shadow_wrap_as_initialized<u16>(0), insn.modrm().read16(*this, insn)));
}

void SoftCPU::NEG_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_sub<ValueWithShadow<u32>>(*this, shadow_wrap_as_initialized<u32>(0), insn.modrm().read32(*this, insn)));
}

void SoftCPU::NEG_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_sub<ValueWithShadow<u8>>(*this, shadow_wrap_as_initialized<u8>(0), insn.modrm().read8(*this, insn)));
}

void SoftCPU::NOP(const X86::Instruction&)
{
}

void SoftCPU::NOT_RM16(const X86::Instruction& insn)
{
    auto data = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, ValueWithShadow<u16>(~data.value(), data.shadow()));
}

void SoftCPU::NOT_RM32(const X86::Instruction& insn)
{
    auto data = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, ValueWithShadow<u32>(~data.value(), data.shadow()));
}

void SoftCPU::NOT_RM8(const X86::Instruction& insn)
{
    auto data = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, ValueWithShadow<u8>(~data.value(), data.shadow()));
}

void SoftCPU::OUTSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
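
// POPA/POPAD restore the registers in the reverse of the order PUSHA/PUSHAD
// pushed them; the saved SP/ESP slot is popped and discarded (the bare
// pop16()/pop32() in the middle).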
void SoftCPU::POPA(const X86::Instruction&)
{
    set_di(pop16());
    set_si(pop16());
    set_bp(pop16());
    pop16();
    set_bx(pop16());
    set_dx(pop16());
    set_cx(pop16());
    set_ax(pop16());
}

void SoftCPU::POPAD(const X86::Instruction&)
{
    set_edi(pop32());
    set_esi(pop32());
    set_ebp(pop32());
    pop32();
    set_ebx(pop32());
    set_edx(pop32());
    set_ecx(pop32());
    set_eax(pop32());
}
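
// POPF replaces only the low 16 bits of EFLAGS. POPFD additionally masks the
// popped value with 0x00fcffff, so RF and VM (bits 16 and 17) are never taken
// from the stack.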
void SoftCPU::POPF(const X86::Instruction&)
{
    auto popped_value = pop16();
    m_eflags &= ~0xffff;
    m_eflags |= popped_value.value();
    taint_flags_from(popped_value);
}

void SoftCPU::POPFD(const X86::Instruction&)
{
    auto popped_value = pop32();
    m_eflags &= ~0x00fcffff;
    m_eflags |= popped_value.value() & 0x00fcffff;
    taint_flags_from(popped_value);
}

void SoftCPU::POP_DS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_ES(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_FS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_GS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::POP_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, pop16());
}

void SoftCPU::POP_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, pop32());
}

void SoftCPU::POP_SS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::POP_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = pop16();
}

void SoftCPU::POP_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = pop32();
}
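
// PUSHA/PUSHAD push the value SP/ESP had before the instruction started, so
// it is captured in a temporary before the first push.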
void SoftCPU::PUSHA(const X86::Instruction&)
{
    auto temp = sp();
    push16(ax());
    push16(cx());
    push16(dx());
    push16(bx());
    push16(temp);
    push16(bp());
    push16(si());
    push16(di());
}

void SoftCPU::PUSHAD(const X86::Instruction&)
{
    auto temp = esp();
    push32(eax());
    push32(ecx());
    push32(edx());
    push32(ebx());
    push32(temp);
    push32(ebp());
    push32(esi());
    push32(edi());
}

void SoftCPU::PUSHF(const X86::Instruction&)
{
    // FIXME: Respect shadow flags when they exist!
    push16(shadow_wrap_as_initialized<u16>(m_eflags & 0xffff));
}

void SoftCPU::PUSHFD(const X86::Instruction&)
{
    // FIXME: Respect shadow flags when they exist!
    push32(shadow_wrap_as_initialized(m_eflags & 0x00fcffff));
}

void SoftCPU::PUSH_CS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_DS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_ES(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_FS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_GS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::PUSH_RM16(const X86::Instruction& insn)
{
    push16(insn.modrm().read16(*this, insn));
}

void SoftCPU::PUSH_RM32(const X86::Instruction& insn)
{
    push32(insn.modrm().read32(*this, insn));
}

void SoftCPU::PUSH_SP_8086_80186(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_SS(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::PUSH_imm16(const X86::Instruction& insn)
{
    push16(shadow_wrap_as_initialized(insn.imm16()));
}

void SoftCPU::PUSH_imm32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(insn.imm32()));
}

void SoftCPU::PUSH_imm8(const X86::Instruction& insn)
{
    VERIFY(!insn.has_operand_size_override_prefix());
    push32(shadow_wrap_as_initialized<u32>(sign_extended_to<i32>(insn.imm8())));
}

void SoftCPU::PUSH_reg16(const X86::Instruction& insn)
{
    push16(gpr16(insn.reg16()));
}

void SoftCPU::PUSH_reg32(const X86::Instruction& insn)
{
    push32(gpr32(insn.reg32()));
}
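
// The rotate/shift helpers below don't re-implement the x86 flag rules in
// C++; they execute the corresponding native instruction via inline assembly
// and then harvest the resulting EFLAGS with pushf/pop. This only works when
// the emulator itself is running on x86.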
template<typename T, bool cf>
ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rclw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rclb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_rcl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcl");
    if (cpu.cf())
        return op_rcl_impl<T, true>(cpu, data, steps);
    return op_rcl_impl<T, false>(cpu, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCL, op_rcl)

template<typename T, bool cf>
ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcrl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rcrw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rcrb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    // Taint flags like op_rcl_impl does; the flags here derive from the same inputs.
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

template<typename T>
ALWAYS_INLINE static T op_rcr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcr");
    if (cpu.cf())
        return op_rcr_impl<T, true>(cpu, data, steps);
    return op_rcr_impl<T, false>(cpu, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCR, op_rcr)

void SoftCPU::RDTSC(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::RET(const X86::Instruction& insn)
{
    VERIFY(!insn.has_operand_size_override_prefix());
    auto ret_address = pop32();
    warn_if_uninitialized(ret_address, "ret");
    set_eip(ret_address.value());
}

void SoftCPU::RETF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::RETF_imm16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::RET_imm16(const X86::Instruction& insn)
{
    VERIFY(!insn.has_operand_size_override_prefix());
    auto ret_address = pop32();
    warn_if_uninitialized(ret_address, "ret imm16");
    set_eip(ret_address.value());
    set_esp({ esp().value() + insn.imm16(), esp().shadow() });
}

template<typename T>
ALWAYS_INLINE static T op_rol(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("roll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rolw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rolb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    // Taint flags like op_rcl_impl does; the flags here derive from the same inputs.
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROL, op_rol)

template<typename T>
ALWAYS_INLINE static T op_ror(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rorl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rorw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    // Taint flags like op_rcl_impl does; the flags here derive from the same inputs.
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROR, op_ror)

void SoftCPU::SAHF(const X86::Instruction&)
{
    // FIXME: Respect shadow flags once they exist!
    // SAHF stores AH into the low flag byte: SF, ZF, AF, PF and CF come from
    // bits 7, 6, 4, 2 and 0 of AH (mask 0xd5); the other bits are unaffected.
    m_eflags &= ~0xd5;
    m_eflags |= const_gpr8(X86::RegisterAH).value() & 0xd5;
}

void SoftCPU::SALC(const X86::Instruction&)
{
    // FIXME: Respect shadow flags once they exist!
    set_al(shadow_wrap_as_initialized<u8>(cf() ? 0xff : 0x00));
}

template<typename T>
ALWAYS_INLINE static T op_sar(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sarl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sarw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sarb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    // Taint flags like op_rcl_impl does; the flags here derive from the same inputs.
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SAR, op_sar)
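
// SCAS compares the accumulator (AL/AX/EAX) against ES:[(E)DI] and steps the
// destination index; the <true> argument to do_once_or_repeat selects the
// variant that honors the REPZ/REPNZ termination condition.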
template<typename T>
ALWAYS_INLINE static void do_scas(SoftCPU& cpu, const X86::Instruction& insn)
{
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.const_gpr<T>(X86::RegisterAL);
        auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
        op_sub(cpu, dest, src);
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}

void SoftCPU::SCASB(const X86::Instruction& insn)
{
    do_scas<u8>(*this, insn);
}

void SoftCPU::SCASD(const X86::Instruction& insn)
{
    do_scas<u32>(*this, insn);
}

void SoftCPU::SCASW(const X86::Instruction& insn)
{
    do_scas<u16>(*this, insn);
}

void SoftCPU::SETcc_RM8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("setcc");
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized<u8>(evaluate_condition(insn.cc())));
}

void SoftCPU::SGDT(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::SHLD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), cl()));
}

void SoftCPU::SHLD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}

void SoftCPU::SHLD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), cl()));
}

void SoftCPU::SHLD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHL, op_shl)

void SoftCPU::SHRD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), cl()));
}

void SoftCPU::SHRD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}

void SoftCPU::SHRD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), cl()));
}

void SoftCPU::SHRD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}

DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHR, op_shr)

void SoftCPU::SIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SMSW_RM16(const X86::Instruction&) { TODO_INSN(); }

void SoftCPU::STC(const X86::Instruction&)
{
    set_cf(true);
}

void SoftCPU::STD(const X86::Instruction&)
{
    set_df(true);
}

void SoftCPU::STI(const X86::Instruction&) { TODO_INSN(); }
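
// REP STOSB/STOSD with DF=0 go through the MMU's bulk fill helpers; if the
// fast path declines (e.g. the range isn't simple contiguous memory), we fall
// back to the per-element loop below.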
void SoftCPU::STOSB(const X86::Instruction& insn)
{
    if (insn.has_rep_prefix() && !df()) {
        // Fast path for 8-bit forward memory fill.
        if (m_emulator.mmu().fast_fill_memory8({ es(), destination_index(insn.a32()).value() }, ecx().value(), al())) {
            if (insn.a32()) {
                // FIXME: Should an uninitialized ECX taint EDI here?
                set_edi({ (u32)(edi().value() + ecx().value()), edi().shadow() });
                set_ecx(shadow_wrap_as_initialized<u32>(0));
            } else {
                // FIXME: Should an uninitialized CX taint DI here?
                set_di({ (u16)(di().value() + cx().value()), di().shadow() });
                set_cx(shadow_wrap_as_initialized<u16>(0));
            }
            return;
        }
    }
    do_once_or_repeat<false>(insn, [&] {
        write_memory8({ es(), destination_index(insn.a32()).value() }, al());
        step_destination_index(insn.a32(), 1);
    });
}

void SoftCPU::STOSD(const X86::Instruction& insn)
{
    if (insn.has_rep_prefix() && !df()) {
        // Fast path for 32-bit forward memory fill.
        if (m_emulator.mmu().fast_fill_memory32({ es(), destination_index(insn.a32()).value() }, ecx().value(), eax())) {
            if (insn.a32()) {
                // FIXME: Should an uninitialized ECX taint EDI here?
                set_edi({ (u32)(edi().value() + (ecx().value() * sizeof(u32))), edi().shadow() });
                set_ecx(shadow_wrap_as_initialized<u32>(0));
            } else {
                // FIXME: Should an uninitialized CX taint DI here?
                set_di({ (u16)(di().value() + (cx().value() * sizeof(u32))), di().shadow() });
                set_cx(shadow_wrap_as_initialized<u16>(0));
            }
            return;
        }
    }
    do_once_or_repeat<false>(insn, [&] {
        write_memory32({ es(), destination_index(insn.a32()).value() }, eax());
        step_destination_index(insn.a32(), 4);
    });
}

void SoftCPU::STOSW(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory16({ es(), destination_index(insn.a32()).value() }, ax());
        step_destination_index(insn.a32(), 2);
    });
}

void SoftCPU::STR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD1(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERW_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WAIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WBINVD(const X86::Instruction&) { TODO_INSN(); }
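
// XADD exchanges then adds: the register operand receives the old destination
// value and the destination receives the sum. The register is written before
// the memory operand so the result is still the sum when both operands alias
// the same register.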
void SoftCPU::XADD_RM16_reg16(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read16(*this, insn);
    auto src = const_gpr16(insn.reg16());
    auto result = op_add(*this, dest, src);
    gpr16(insn.reg16()) = dest;
    insn.modrm().write16(*this, insn, result);
}

void SoftCPU::XADD_RM32_reg32(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read32(*this, insn);
    auto src = const_gpr32(insn.reg32());
    auto result = op_add(*this, dest, src);
    gpr32(insn.reg32()) = dest;
    insn.modrm().write32(*this, insn, result);
}

void SoftCPU::XADD_RM8_reg8(const X86::Instruction& insn)
{
    auto dest = insn.modrm().read8(*this, insn);
    auto src = const_gpr8(insn.reg8());
    auto result = op_add(*this, dest, src);
    gpr8(insn.reg8()) = dest;
    insn.modrm().write8(*this, insn, result);
}

void SoftCPU::XCHG_AX_reg16(const X86::Instruction& insn)
{
    auto temp = gpr16(insn.reg16());
    gpr16(insn.reg16()) = ax();
    set_ax(temp);
}

void SoftCPU::XCHG_EAX_reg32(const X86::Instruction& insn)
{
    auto temp = gpr32(insn.reg32());
    gpr32(insn.reg32()) = eax();
    set_eax(temp);
}

void SoftCPU::XCHG_reg16_RM16(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
    gpr16(insn.reg16()) = temp;
}

void SoftCPU::XCHG_reg32_RM32(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
    gpr32(insn.reg32()) = temp;
}

void SoftCPU::XCHG_reg8_RM8(const X86::Instruction& insn)
{
    auto temp = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
    gpr8(insn.reg8()) = temp;
}

void SoftCPU::XLAT(const X86::Instruction& insn)
{
    if (insn.a32())
        warn_if_uninitialized(ebx(), "xlat ebx");
    else
        warn_if_uninitialized(bx(), "xlat bx");
    warn_if_uninitialized(al(), "xlat al");
    u32 offset = (insn.a32() ? ebx().value() : bx().value()) + al().value();
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), offset }));
}
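
// These macros stamp out the handler bodies for the generic two-operand ALU
// instructions in every operand-size/addressing combination. update_dest is
// false for CMP and TEST, which only set flags. is_zero_idiom_if_both_operands_same
// recognizes XOR reg,reg and SUB reg,reg as zeroing idioms whose result is
// fully initialized even if the register wasn't. is_or presumably exists so
// that OR with an all-ones operand can likewise be treated as initializing.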
#define DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    void SoftCPU::mnemonic##_AL_imm8(const X86::Instruction& insn) { generic_AL_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_AX_imm16(const X86::Instruction& insn) { generic_AX_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_EAX_imm32(const X86::Instruction& insn) { generic_EAX_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM16_imm16(const X86::Instruction& insn) { generic_RM16_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { generic_RM16_reg16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm32(const X86::Instruction& insn) { generic_RM32_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { generic_RM32_reg32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_RM8_reg8(const X86::Instruction& insn) { generic_RM8_reg8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }

#define DEFINE_GENERIC_INSN_HANDLERS(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_imm8<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_imm8<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_reg16_RM16(const X86::Instruction& insn) { generic_reg16_RM16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_reg32_RM32(const X86::Instruction& insn) { generic_reg32_RM32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_reg8_RM8(const X86::Instruction& insn) { generic_reg8_RM8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }

DEFINE_GENERIC_INSN_HANDLERS(XOR, op_xor, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(OR, op_or, true, false, true)
DEFINE_GENERIC_INSN_HANDLERS(ADD, op_add, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(ADC, op_adc, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(SUB, op_sub, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(SBB, op_sbb, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(AND, op_and, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(CMP, op_sub, false, false, false)
DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(TEST, op_and, false, false, false)

void SoftCPU::MOVQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::EMMS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOVQ_mm1_m64_mm2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_32(const X86::Instruction&) { TODO_INSN(); }

}