SoftCPU.cpp 119 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
7327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487348834893490349134923493349434953496349734983499350035013502350335043505350635073508350935103511351235133514351535163517351835193520352135223523352435253526352735283529353035313532
  1. /*
  2. * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
  3. * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
  4. *
  5. * SPDX-License-Identifier: BSD-2-Clause
  6. */
  7. #include "SoftCPU.h"
  8. #include "Emulator.h"
  9. #include <AK/Assertions.h>
  10. #include <AK/BitCast.h>
  11. #include <AK/Debug.h>
  12. #include <math.h>
  13. #include <stdio.h>
  14. #include <string.h>
  15. #include <unistd.h>
  16. #if defined(__GNUC__) && !defined(__clang__)
  17. # pragma GCC optimize("O3")
  18. #endif
// Prints an "unimplemented instruction" diagnostic with a backtrace, then
// terminates the emulated process. Used as the body of instruction handlers
// that have not been implemented yet.
#define TODO_INSN() \
do { \
reportln("\n=={}== Unimplemented instruction: {}\n", getpid(), __FUNCTION__); \
m_emulator.dump_backtrace(); \
_exit(0); \
} while (0)
// Stamps out all nine handler variants for a shift/rotate mnemonic:
// {8,16,32}-bit destination x {1, CL, imm8} shift count, each forwarding to
// the matching generic_RM* helper with the correct ValueWithShadow width.
#define DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(mnemonic, op) \
void SoftCPU::mnemonic##_RM8_1(const X86::Instruction& insn) { generic_RM8_1(op<ValueWithShadow<u8>>, insn); } \
void SoftCPU::mnemonic##_RM8_CL(const X86::Instruction& insn) { generic_RM8_CL(op<ValueWithShadow<u8>>, insn); } \
void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<true, false>(op<ValueWithShadow<u8>>, insn); } \
void SoftCPU::mnemonic##_RM16_1(const X86::Instruction& insn) { generic_RM16_1(op<ValueWithShadow<u16>>, insn); } \
void SoftCPU::mnemonic##_RM16_CL(const X86::Instruction& insn) { generic_RM16_CL(op<ValueWithShadow<u16>>, insn); } \
void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_unsigned_imm8<true>(op<ValueWithShadow<u16>>, insn); } \
void SoftCPU::mnemonic##_RM32_1(const X86::Instruction& insn) { generic_RM32_1(op<ValueWithShadow<u32>>, insn); } \
void SoftCPU::mnemonic##_RM32_CL(const X86::Instruction& insn) { generic_RM32_CL(op<ValueWithShadow<u32>>, insn); } \
void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_unsigned_imm8<true>(op<ValueWithShadow<u32>>, insn); }
  35. namespace UserspaceEmulator {
  36. template<typename T>
  37. ALWAYS_INLINE void warn_if_uninitialized(T value_with_shadow, const char* message)
  38. {
  39. if (value_with_shadow.is_uninitialized()) [[unlikely]] {
  40. reportln("\033[31;1mWarning! Use of uninitialized value: {}\033[0m\n", message);
  41. Emulator::the().dump_backtrace();
  42. }
  43. }
  44. ALWAYS_INLINE void SoftCPU::warn_if_flags_tainted(const char* message) const
  45. {
  46. if (m_flags_tainted) [[unlikely]] {
  47. reportln("\n=={}== \033[31;1mConditional depends on uninitialized data\033[0m ({})\n", getpid(), message);
  48. Emulator::the().dump_backtrace();
  49. }
  50. }
  51. template<typename T, typename U>
  52. constexpr T sign_extended_to(U value)
  53. {
  54. if (!(value & X86::TypeTrivia<U>::sign_bit))
  55. return value;
  56. return (X86::TypeTrivia<T>::mask & ~X86::TypeTrivia<U>::mask) | value;
  57. }
// Constructs a SoftCPU bound to its owning Emulator.
// GPRs start at 0; their shadow bytes are set to 1 — presumably marking them
// "uninitialized" for the warn_if_uninitialized machinery (TODO confirm the
// shadow encoding).
SoftCPU::SoftCPU(Emulator& emulator)
    : m_emulator(emulator)
{
    memset(m_gpr, 0, sizeof(m_gpr));
    memset(m_gpr_shadow, 1, sizeof(m_gpr_shadow));
    // Segment selectors matching the VERIFY checks in read_memory*/write_memory*.
    m_segment[(int)X86::SegmentRegister::CS] = 0x1b;
    m_segment[(int)X86::SegmentRegister::DS] = 0x23;
    m_segment[(int)X86::SegmentRegister::ES] = 0x23;
    m_segment[(int)X86::SegmentRegister::SS] = 0x23;
    // NOTE(review): FS is left zero-initialized here — confirm that is intended.
    m_segment[(int)X86::SegmentRegister::GS] = 0x2b;
}
// Dumps all GPRs and status flags, followed by their shadow values and the
// flags-tainted bit, then flushes stdout so the output survives a crash.
void SoftCPU::dump() const
{
    outln(" eax={:08x} ebx={:08x} ecx={:08x} edx={:08x} ebp={:08x} esp={:08x} esi={:08x} edi={:08x} o={:d} s={:d} z={:d} a={:d} p={:d} c={:d}",
        eax(), ebx(), ecx(), edx(), ebp(), esp(), esi(), edi(), of(), sf(), zf(), af(), pf(), cf());
    outln("#eax={:08x} #ebx={:08x} #ecx={:08x} #edx={:08x} #ebp={:08x} #esp={:08x} #esi={:08x} #edi={:08x} #f={}",
        eax().shadow(), ebx().shadow(), ecx().shadow(), edx().shadow(), ebp().shadow(), esp().shadow(), esi().shadow(), edi().shadow(), m_flags_tainted);
    fflush(stdout);
}
// Refreshes the cached pointer to the memory region containing the current
// instruction stream (CS:EIP). Aborts if EIP points into a region that is
// not executable.
void SoftCPU::update_code_cache()
{
    auto* region = m_emulator.mmu().find_region({ cs(), eip() });
    VERIFY(region);
    if (!region->is_executable()) {
        reportln("SoftCPU::update_code_cache: Non-executable region @ {:p}", eip());
        Emulator::the().dump_backtrace();
        TODO();
    }
    // FIXME: This cache needs to be invalidated if the code region is ever unmapped.
    m_cached_code_region = region;
    m_cached_code_base_ptr = region->data();
}
// Reads a shadowed byte from emulated memory. Only the selectors installed by
// the constructor (0x1b, 0x23, 0x2b) are accepted.
ValueWithShadow<u8> SoftCPU::read_memory8(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read8(address);
    outln_if(MEMORY_DEBUG, "\033[36;1mread_memory8: @{:04x}:{:08x} -> {:02x} ({:02x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    return value;
}
// Reads a shadowed 16-bit value from emulated memory (known selectors only).
ValueWithShadow<u16> SoftCPU::read_memory16(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read16(address);
    outln_if(MEMORY_DEBUG, "\033[36;1mread_memory16: @{:04x}:{:08x} -> {:04x} ({:04x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    return value;
}
// Reads a shadowed 32-bit value from emulated memory (known selectors only).
ValueWithShadow<u32> SoftCPU::read_memory32(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read32(address);
    outln_if(MEMORY_DEBUG, "\033[36;1mread_memory32: @{:04x}:{:08x} -> {:08x} ({:08x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    return value;
}
// Reads a shadowed 64-bit value from emulated memory (known selectors only).
ValueWithShadow<u64> SoftCPU::read_memory64(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read64(address);
    outln_if(MEMORY_DEBUG, "\033[36;1mread_memory64: @{:04x}:{:08x} -> {:016x} ({:016x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    return value;
}
// Reads a shadowed 128-bit value from emulated memory (known selectors only).
ValueWithShadow<u128> SoftCPU::read_memory128(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read128(address);
    outln_if(MEMORY_DEBUG, "\033[36;1mread_memory128: @{:04x}:{:08x} -> {:032x} ({:032x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    return value;
}
// Reads a shadowed 256-bit value from emulated memory (known selectors only).
ValueWithShadow<u256> SoftCPU::read_memory256(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read256(address);
    outln_if(MEMORY_DEBUG, "\033[36;1mread_memory256: @{:04x}:{:08x} -> {:064x} ({:064x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    return value;
}
// Writes a shadowed byte to emulated memory. Only data selectors (0x23, 0x2b)
// are accepted — the code selector 0x1b is deliberately rejected.
void SoftCPU::write_memory8(X86::LogicalAddress address, ValueWithShadow<u8> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
    outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory8: @{:04x}:{:08x} <- {:02x} ({:02x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    m_emulator.mmu().write8(address, value);
}
// Writes a shadowed 16-bit value to emulated memory (data selectors only).
void SoftCPU::write_memory16(X86::LogicalAddress address, ValueWithShadow<u16> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
    outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory16: @{:04x}:{:08x} <- {:04x} ({:04x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    m_emulator.mmu().write16(address, value);
}
// Writes a shadowed 32-bit value to emulated memory (data selectors only).
void SoftCPU::write_memory32(X86::LogicalAddress address, ValueWithShadow<u32> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
    outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory32: @{:04x}:{:08x} <- {:08x} ({:08x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    m_emulator.mmu().write32(address, value);
}
// Writes a shadowed 64-bit value to emulated memory (data selectors only).
void SoftCPU::write_memory64(X86::LogicalAddress address, ValueWithShadow<u64> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
    outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory64: @{:04x}:{:08x} <- {:016x} ({:016x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    m_emulator.mmu().write64(address, value);
}
// Writes a shadowed 128-bit value to emulated memory (data selectors only).
void SoftCPU::write_memory128(X86::LogicalAddress address, ValueWithShadow<u128> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
    outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory128: @{:04x}:{:08x} <- {:032x} ({:032x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    m_emulator.mmu().write128(address, value);
}
// Writes a shadowed 256-bit value to emulated memory (data selectors only).
void SoftCPU::write_memory256(X86::LogicalAddress address, ValueWithShadow<u256> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
    outln_if(MEMORY_DEBUG, "\033[36;1mwrite_memory256: @{:04x}:{:08x} <- {:064x} ({:064x})\033[0m", address.selector(), address.offset(), value, value.shadow());
    m_emulator.mmu().write256(address, value);
}
// Pushes a NUL-terminated copy of `string` onto the emulated stack, reserving
// a slot rounded up to a multiple of 16 bytes.
void SoftCPU::push_string(const StringView& string)
{
    size_t space_to_allocate = round_up_to_power_of_two(string.length() + 1, 16);
    set_esp({ esp().value() - space_to_allocate, esp().shadow() });
    m_emulator.mmu().copy_to_vm(esp().value(), string.characters_without_null_termination(), string.length());
    // NOTE(review): selector 0x23 is hard-coded here rather than using ss();
    // they match per the constructor, but ss() would be clearer — confirm.
    m_emulator.mmu().write8({ 0x23, esp().value() + string.length() }, shadow_wrap_as_initialized((u8)'\0'));
}
// Pushes `size` raw bytes onto the emulated stack, warning if ESP's shadow
// marks it as uninitialized.
void SoftCPU::push_buffer(const u8* data, size_t size)
{
    set_esp({ esp().value() - size, esp().shadow() });
    warn_if_uninitialized(esp(), "push_buffer");
    m_emulator.mmu().copy_to_vm(esp().value(), data, size);
}
  181. void SoftCPU::push32(ValueWithShadow<u32> value)
  182. {
  183. set_esp({ esp().value() - sizeof(u32), esp().shadow() });
  184. warn_if_uninitialized(esp(), "push32");
  185. write_memory32({ ss(), esp().value() }, value);
  186. }
// Pops a shadowed 32-bit value off the emulated stack and returns it.
ValueWithShadow<u32> SoftCPU::pop32()
{
    warn_if_uninitialized(esp(), "pop32");
    auto value = read_memory32({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u32), esp().shadow() });
    return value;
}
// Pushes a shadowed 16-bit value onto the emulated stack.
void SoftCPU::push16(ValueWithShadow<u16> value)
{
    warn_if_uninitialized(esp(), "push16");
    set_esp({ esp().value() - sizeof(u16), esp().shadow() });
    write_memory16({ ss(), esp().value() }, value);
}
// Pops a shadowed 16-bit value off the emulated stack and returns it.
ValueWithShadow<u16> SoftCPU::pop16()
{
    warn_if_uninitialized(esp(), "pop16");
    auto value = read_memory16({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u16), esp().shadow() });
    return value;
}
// Runs `callback` once, or — when the instruction carries a REP prefix —
// repeatedly while the loop counter (selected by insn.a32()) is non-zero.
// When check_zf is set, REPZ/REPNZ additionally terminate the loop based on
// ZF after each iteration.
template<bool check_zf, typename Callback>
void SoftCPU::do_once_or_repeat(const X86::Instruction& insn, Callback callback)
{
    if (!insn.has_rep_prefix())
        return callback();
    while (loop_index(insn.a32()).value()) {
        callback();
        decrement_loop_index(insn.a32());
        if constexpr (check_zf) {
            // The loop condition depends on ZF; complain if flags are tainted.
            warn_if_flags_tainted("repz/repnz");
            if (insn.rep_prefix() == X86::Prefix::REPZ && !zf())
                break;
            if (insn.rep_prefix() == X86::Prefix::REPNZ && zf())
                break;
        }
    }
}
  224. template<typename T>
  225. ALWAYS_INLINE static T op_inc(SoftCPU& cpu, T data)
  226. {
  227. typename T::ValueType result;
  228. u32 new_flags = 0;
  229. if constexpr (sizeof(typename T::ValueType) == 4) {
  230. asm volatile("incl %%eax\n"
  231. : "=a"(result)
  232. : "a"(data.value()));
  233. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  234. asm volatile("incw %%ax\n"
  235. : "=a"(result)
  236. : "a"(data.value()));
  237. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  238. asm volatile("incb %%al\n"
  239. : "=a"(result)
  240. : "a"(data.value()));
  241. }
  242. asm volatile(
  243. "pushf\n"
  244. "pop %%ebx"
  245. : "=b"(new_flags));
  246. cpu.set_flags_oszap(new_flags);
  247. cpu.taint_flags_from(data);
  248. return shadow_wrap_with_taint_from(result, data);
  249. }
  250. template<typename T>
  251. ALWAYS_INLINE static T op_dec(SoftCPU& cpu, T data)
  252. {
  253. typename T::ValueType result;
  254. u32 new_flags = 0;
  255. if constexpr (sizeof(typename T::ValueType) == 4) {
  256. asm volatile("decl %%eax\n"
  257. : "=a"(result)
  258. : "a"(data.value()));
  259. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  260. asm volatile("decw %%ax\n"
  261. : "=a"(result)
  262. : "a"(data.value()));
  263. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  264. asm volatile("decb %%al\n"
  265. : "=a"(result)
  266. : "a"(data.value()));
  267. }
  268. asm volatile(
  269. "pushf\n"
  270. "pop %%ebx"
  271. : "=b"(new_flags));
  272. cpu.set_flags_oszap(new_flags);
  273. cpu.taint_flags_from(data);
  274. return shadow_wrap_with_taint_from(result, data);
  275. }
// Emulates XOR via the host's `xor` instruction; the host computes both the
// result and the O/S/Z/P/C flags (set_flags_oszpc below). Taint propagates
// from both operands into the result and the flags.
template<typename T>
ALWAYS_INLINE static T op_xor(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("xorl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("xor %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("xorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // Capture EFLAGS produced by the xor above (separate asm statement; the
    // pattern assumes the compiler does not clobber flags in between).
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}
// Emulates OR via the host's `or` instruction; the host computes both the
// result and the O/S/Z/P/C flags (set_flags_oszpc below). Taint propagates
// from both operands into the result and the flags.
template<typename T>
ALWAYS_INLINE static T op_or(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("orl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("or %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("orb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // Capture EFLAGS produced by the or above.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}
// Emulates SUB via the host's `sub` instruction; the host computes both the
// result and the full O/S/Z/A/P/C flag set (set_flags_oszapc below). Taint
// propagates from both operands into the result and the flags.
template<typename T>
ALWAYS_INLINE static T op_sub(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("subl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("subw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("subb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // Capture EFLAGS produced by the sub above.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}
// Emulates SBB (subtract with borrow) for a compile-time-known incoming CF.
// The CF value is forced on the host with stc/clc before executing `sbb`, so
// the host computes result and O/S/Z/A/P/C flags. Taint propagates from both
// operands into result and flags.
template<typename T, bool cf>
ALWAYS_INLINE static T op_sbb_impl(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    // Seed the host CF with the emulated CF (template parameter).
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    // NOTE(review): stc/clc, the sbb, and the flag capture are separate asm
    // statements — this assumes the compiler leaves EFLAGS untouched between
    // them (the same fragile pattern as the other op_* helpers).
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sbbl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sbbw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sbbb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
  392. template<typename T>
  393. ALWAYS_INLINE static T op_sbb(SoftCPU& cpu, T& dest, const T& src)
  394. {
  395. cpu.warn_if_flags_tainted("sbb");
  396. if (cpu.cf())
  397. return op_sbb_impl<T, true>(cpu, dest, src);
  398. return op_sbb_impl<T, false>(cpu, dest, src);
  399. }
// Emulates ADD via the host's `add` instruction; the host computes both the
// result and the full O/S/Z/A/P/C flag set. Taint propagates from both
// operands into the result and the flags.
// NOTE(review): `dest` is taken by non-const reference here, unlike the
// const T& used by op_sub/op_xor — it is never modified; confirm whether the
// signature asymmetry is intentional.
template<typename T>
ALWAYS_INLINE static T op_add(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("addl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("addw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("addb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // Capture EFLAGS produced by the add above.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
// Emulates ADC (add with carry) for a compile-time-known incoming CF.
// The CF value is forced on the host with stc/clc before executing `adc`, so
// the host computes result and O/S/Z/A/P/C flags. Taint propagates from both
// operands into result and flags.
template<typename T, bool cf>
ALWAYS_INLINE static T op_adc_impl(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    // Seed the host CF with the emulated CF (template parameter).
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    // NOTE(review): stc/clc, the adc, and the flag capture are separate asm
    // statements — this assumes the compiler leaves EFLAGS untouched between
    // them (the same fragile pattern as the other op_* helpers).
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("adcl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("adcw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("adcb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
  460. template<typename T>
  461. ALWAYS_INLINE static T op_adc(SoftCPU& cpu, T& dest, const T& src)
  462. {
  463. cpu.warn_if_flags_tainted("adc");
  464. if (cpu.cf())
  465. return op_adc_impl<T, true>(cpu, dest, src);
  466. return op_adc_impl<T, false>(cpu, dest, src);
  467. }
// Emulate AND using the native instruction, capturing EFLAGS via pushf/pop.
// Note: AND updates OF/SZ/P/C but leaves AF undefined, hence set_flags_oszpc
// (no 'a') below, unlike the arithmetic helpers.
template<typename T>
ALWAYS_INLINE static T op_and(SoftCPU& cpu, const T& dest, const T& src)
{
typename T::ValueType result = 0;
u32 new_flags = 0;
if constexpr (sizeof(typename T::ValueType) == 4) {
asm volatile("andl %%ecx, %%eax\n"
: "=a"(result)
: "a"(dest.value()), "c"(src.value()));
} else if constexpr (sizeof(typename T::ValueType) == 2) {
asm volatile("andw %%cx, %%ax\n"
: "=a"(result)
: "a"(dest.value()), "c"(src.value()));
} else if constexpr (sizeof(typename T::ValueType) == 1) {
asm volatile("andb %%cl, %%al\n"
: "=a"(result)
: "a"(dest.value()), "c"(src.value()));
} else {
VERIFY_NOT_REACHED();
}
asm volatile(
"pushf\n"
"pop %%ebx"
: "=b"(new_flags));
cpu.set_flags_oszpc(new_flags);
cpu.taint_flags_from(dest, src);
return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
  496. template<typename T>
  497. ALWAYS_INLINE static void op_imul(SoftCPU& cpu, const T& dest, const T& src, T& result_high, T& result_low)
  498. {
  499. bool did_overflow = false;
  500. if constexpr (sizeof(T) == 4) {
  501. i64 result = (i64)src * (i64)dest;
  502. result_low = result & 0xffffffff;
  503. result_high = result >> 32;
  504. did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
  505. } else if constexpr (sizeof(T) == 2) {
  506. i32 result = (i32)src * (i32)dest;
  507. result_low = result & 0xffff;
  508. result_high = result >> 16;
  509. did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
  510. } else if constexpr (sizeof(T) == 1) {
  511. i16 result = (i16)src * (i16)dest;
  512. result_low = result & 0xff;
  513. result_high = result >> 8;
  514. did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
  515. }
  516. if (did_overflow) {
  517. cpu.set_cf(true);
  518. cpu.set_of(true);
  519. } else {
  520. cpu.set_cf(false);
  521. cpu.set_of(false);
  522. }
  523. }
// Emulate SHR via the native instruction, then capture EFLAGS.
template<typename T>
ALWAYS_INLINE static T op_shr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
// A shift count of 0 leaves both the value and the flags untouched.
if (steps.value() == 0)
return shadow_wrap_with_taint_from(data.value(), data, steps);
u32 result = 0;
u32 new_flags = 0;
if constexpr (sizeof(typename T::ValueType) == 4) {
asm volatile("shrl %%cl, %%eax\n"
: "=a"(result)
: "a"(data.value()), "c"(steps.value()));
} else if constexpr (sizeof(typename T::ValueType) == 2) {
asm volatile("shrw %%cl, %%ax\n"
: "=a"(result)
: "a"(data.value()), "c"(steps.value()));
} else if constexpr (sizeof(typename T::ValueType) == 1) {
asm volatile("shrb %%cl, %%al\n"
: "=a"(result)
: "a"(data.value()), "c"(steps.value()));
}
asm volatile(
"pushf\n"
"pop %%ebx"
: "=b"(new_flags));
cpu.set_flags_oszapc(new_flags);
cpu.taint_flags_from(data, steps);
return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
// Emulate SHL via the native instruction, then capture EFLAGS.
template<typename T>
ALWAYS_INLINE static T op_shl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
// A shift count of 0 leaves both the value and the flags untouched.
if (steps.value() == 0)
return shadow_wrap_with_taint_from(data.value(), data, steps);
u32 result = 0;
u32 new_flags = 0;
if constexpr (sizeof(typename T::ValueType) == 4) {
asm volatile("shll %%cl, %%eax\n"
: "=a"(result)
: "a"(data.value()), "c"(steps.value()));
} else if constexpr (sizeof(typename T::ValueType) == 2) {
asm volatile("shlw %%cl, %%ax\n"
: "=a"(result)
: "a"(data.value()), "c"(steps.value()));
} else if constexpr (sizeof(typename T::ValueType) == 1) {
asm volatile("shlb %%cl, %%al\n"
: "=a"(result)
: "a"(data.value()), "c"(steps.value()));
}
asm volatile(
"pushf\n"
"pop %%ebx"
: "=b"(new_flags));
cpu.set_flags_oszapc(new_flags);
cpu.taint_flags_from(data, steps);
return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
// Emulate SHRD (double-precision shift right): bits shifted out of `data`
// are filled from `extra_bits`. Only 32- and 16-bit variants exist; the
// x86 ISA has no byte-sized SHRD.
template<typename T>
ALWAYS_INLINE static T op_shrd(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
// A shift count of 0 leaves both the value and the flags untouched.
if (steps.value() == 0)
return shadow_wrap_with_taint_from(data.value(), data, steps);
u32 result = 0;
u32 new_flags = 0;
if constexpr (sizeof(typename T::ValueType) == 4) {
asm volatile("shrd %%cl, %%edx, %%eax\n"
: "=a"(result)
: "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
} else if constexpr (sizeof(typename T::ValueType) == 2) {
asm volatile("shrd %%cl, %%dx, %%ax\n"
: "=a"(result)
: "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
}
asm volatile(
"pushf\n"
"pop %%ebx"
: "=b"(new_flags));
cpu.set_flags_oszapc(new_flags);
cpu.taint_flags_from(data, steps);
return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
// Emulate SHLD (double-precision shift left): bits shifted out of `data`
// are filled from `extra_bits`. Only 32- and 16-bit variants exist; the
// x86 ISA has no byte-sized SHLD.
template<typename T>
ALWAYS_INLINE static T op_shld(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
// A shift count of 0 leaves both the value and the flags untouched.
if (steps.value() == 0)
return shadow_wrap_with_taint_from(data.value(), data, steps);
u32 result = 0;
u32 new_flags = 0;
if constexpr (sizeof(typename T::ValueType) == 4) {
asm volatile("shld %%cl, %%edx, %%eax\n"
: "=a"(result)
: "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
} else if constexpr (sizeof(typename T::ValueType) == 2) {
asm volatile("shld %%cl, %%dx, %%ax\n"
: "=a"(result)
: "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
}
asm volatile(
"pushf\n"
"pop %%ebx"
: "=b"(new_flags));
cpu.set_flags_oszapc(new_flags);
cpu.taint_flags_from(data, steps);
return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
  628. template<bool update_dest, bool is_or, typename Op>
  629. ALWAYS_INLINE void SoftCPU::generic_AL_imm8(Op op, const X86::Instruction& insn)
  630. {
  631. auto dest = al();
  632. auto src = shadow_wrap_as_initialized(insn.imm8());
  633. auto result = op(*this, dest, src);
  634. if (is_or && insn.imm8() == 0xff)
  635. result.set_initialized();
  636. if (update_dest)
  637. set_al(result);
  638. }
  639. template<bool update_dest, bool is_or, typename Op>
  640. ALWAYS_INLINE void SoftCPU::generic_AX_imm16(Op op, const X86::Instruction& insn)
  641. {
  642. auto dest = ax();
  643. auto src = shadow_wrap_as_initialized(insn.imm16());
  644. auto result = op(*this, dest, src);
  645. if (is_or && insn.imm16() == 0xffff)
  646. result.set_initialized();
  647. if (update_dest)
  648. set_ax(result);
  649. }
  650. template<bool update_dest, bool is_or, typename Op>
  651. ALWAYS_INLINE void SoftCPU::generic_EAX_imm32(Op op, const X86::Instruction& insn)
  652. {
  653. auto dest = eax();
  654. auto src = shadow_wrap_as_initialized(insn.imm32());
  655. auto result = op(*this, dest, src);
  656. if (is_or && insn.imm32() == 0xffffffff)
  657. result.set_initialized();
  658. if (update_dest)
  659. set_eax(result);
  660. }
  661. template<bool update_dest, bool is_or, typename Op>
  662. ALWAYS_INLINE void SoftCPU::generic_RM16_imm16(Op op, const X86::Instruction& insn)
  663. {
  664. auto dest = insn.modrm().read16(*this, insn);
  665. auto src = shadow_wrap_as_initialized(insn.imm16());
  666. auto result = op(*this, dest, src);
  667. if (is_or && insn.imm16() == 0xffff)
  668. result.set_initialized();
  669. if (update_dest)
  670. insn.modrm().write16(*this, insn, result);
  671. }
  672. template<bool update_dest, bool is_or, typename Op>
  673. ALWAYS_INLINE void SoftCPU::generic_RM16_imm8(Op op, const X86::Instruction& insn)
  674. {
  675. auto dest = insn.modrm().read16(*this, insn);
  676. auto src = shadow_wrap_as_initialized<u16>(sign_extended_to<u16>(insn.imm8()));
  677. auto result = op(*this, dest, src);
  678. if (is_or && src.value() == 0xffff)
  679. result.set_initialized();
  680. if (update_dest)
  681. insn.modrm().write16(*this, insn, result);
  682. }
  683. template<bool update_dest, typename Op>
  684. ALWAYS_INLINE void SoftCPU::generic_RM16_unsigned_imm8(Op op, const X86::Instruction& insn)
  685. {
  686. auto dest = insn.modrm().read16(*this, insn);
  687. auto src = shadow_wrap_as_initialized(insn.imm8());
  688. auto result = op(*this, dest, src);
  689. if (update_dest)
  690. insn.modrm().write16(*this, insn, result);
  691. }
  692. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  693. ALWAYS_INLINE void SoftCPU::generic_RM16_reg16(Op op, const X86::Instruction& insn)
  694. {
  695. auto dest = insn.modrm().read16(*this, insn);
  696. auto src = const_gpr16(insn.reg16());
  697. auto result = op(*this, dest, src);
  698. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  699. result.set_initialized();
  700. m_flags_tainted = false;
  701. }
  702. if (update_dest)
  703. insn.modrm().write16(*this, insn, result);
  704. }
  705. template<bool update_dest, bool is_or, typename Op>
  706. ALWAYS_INLINE void SoftCPU::generic_RM32_imm32(Op op, const X86::Instruction& insn)
  707. {
  708. auto dest = insn.modrm().read32(*this, insn);
  709. auto src = insn.imm32();
  710. auto result = op(*this, dest, shadow_wrap_as_initialized(src));
  711. if (is_or && src == 0xffffffff)
  712. result.set_initialized();
  713. if (update_dest)
  714. insn.modrm().write32(*this, insn, result);
  715. }
  716. template<bool update_dest, bool is_or, typename Op>
  717. ALWAYS_INLINE void SoftCPU::generic_RM32_imm8(Op op, const X86::Instruction& insn)
  718. {
  719. auto dest = insn.modrm().read32(*this, insn);
  720. auto src = sign_extended_to<u32>(insn.imm8());
  721. auto result = op(*this, dest, shadow_wrap_as_initialized(src));
  722. if (is_or && src == 0xffffffff)
  723. result.set_initialized();
  724. if (update_dest)
  725. insn.modrm().write32(*this, insn, result);
  726. }
  727. template<bool update_dest, typename Op>
  728. ALWAYS_INLINE void SoftCPU::generic_RM32_unsigned_imm8(Op op, const X86::Instruction& insn)
  729. {
  730. auto dest = insn.modrm().read32(*this, insn);
  731. auto src = shadow_wrap_as_initialized(insn.imm8());
  732. auto result = op(*this, dest, src);
  733. if (update_dest)
  734. insn.modrm().write32(*this, insn, result);
  735. }
  736. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  737. ALWAYS_INLINE void SoftCPU::generic_RM32_reg32(Op op, const X86::Instruction& insn)
  738. {
  739. auto dest = insn.modrm().read32(*this, insn);
  740. auto src = const_gpr32(insn.reg32());
  741. auto result = op(*this, dest, src);
  742. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  743. result.set_initialized();
  744. m_flags_tainted = false;
  745. }
  746. if (update_dest)
  747. insn.modrm().write32(*this, insn, result);
  748. }
  749. template<bool update_dest, bool is_or, typename Op>
  750. ALWAYS_INLINE void SoftCPU::generic_RM8_imm8(Op op, const X86::Instruction& insn)
  751. {
  752. auto dest = insn.modrm().read8(*this, insn);
  753. auto src = insn.imm8();
  754. auto result = op(*this, dest, shadow_wrap_as_initialized(src));
  755. if (is_or && src == 0xff)
  756. result.set_initialized();
  757. if (update_dest)
  758. insn.modrm().write8(*this, insn, result);
  759. }
  760. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  761. ALWAYS_INLINE void SoftCPU::generic_RM8_reg8(Op op, const X86::Instruction& insn)
  762. {
  763. auto dest = insn.modrm().read8(*this, insn);
  764. auto src = const_gpr8(insn.reg8());
  765. auto result = op(*this, dest, src);
  766. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  767. result.set_initialized();
  768. m_flags_tainted = false;
  769. }
  770. if (update_dest)
  771. insn.modrm().write8(*this, insn, result);
  772. }
  773. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  774. ALWAYS_INLINE void SoftCPU::generic_reg16_RM16(Op op, const X86::Instruction& insn)
  775. {
  776. auto dest = const_gpr16(insn.reg16());
  777. auto src = insn.modrm().read16(*this, insn);
  778. auto result = op(*this, dest, src);
  779. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  780. result.set_initialized();
  781. m_flags_tainted = false;
  782. }
  783. if (update_dest)
  784. gpr16(insn.reg16()) = result;
  785. }
  786. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  787. ALWAYS_INLINE void SoftCPU::generic_reg32_RM32(Op op, const X86::Instruction& insn)
  788. {
  789. auto dest = const_gpr32(insn.reg32());
  790. auto src = insn.modrm().read32(*this, insn);
  791. auto result = op(*this, dest, src);
  792. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  793. result.set_initialized();
  794. m_flags_tainted = false;
  795. }
  796. if (update_dest)
  797. gpr32(insn.reg32()) = result;
  798. }
  799. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  800. ALWAYS_INLINE void SoftCPU::generic_reg8_RM8(Op op, const X86::Instruction& insn)
  801. {
  802. auto dest = const_gpr8(insn.reg8());
  803. auto src = insn.modrm().read8(*this, insn);
  804. auto result = op(*this, dest, src);
  805. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  806. result.set_initialized();
  807. m_flags_tainted = false;
  808. }
  809. if (update_dest)
  810. gpr8(insn.reg8()) = result;
  811. }
// The six helpers below feed shift/rotate-style ops that take a count
// operand: the *_1 variants pass a constant count of 1, the *_CL variants
// pass the CL register, at 8/16/32-bit operand widths.
template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_1(Op op, const X86::Instruction& insn)
{
auto data = insn.modrm().read8(*this, insn);
insn.modrm().write8(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}
template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM8_CL(Op op, const X86::Instruction& insn)
{
auto data = insn.modrm().read8(*this, insn);
insn.modrm().write8(*this, insn, op(*this, data, cl()));
}
template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_1(Op op, const X86::Instruction& insn)
{
auto data = insn.modrm().read16(*this, insn);
insn.modrm().write16(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}
template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM16_CL(Op op, const X86::Instruction& insn)
{
auto data = insn.modrm().read16(*this, insn);
insn.modrm().write16(*this, insn, op(*this, data, cl()));
}
template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_1(Op op, const X86::Instruction& insn)
{
auto data = insn.modrm().read32(*this, insn);
insn.modrm().write32(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
}
template<typename Op>
ALWAYS_INLINE void SoftCPU::generic_RM32_CL(Op op, const X86::Instruction& insn)
{
auto data = insn.modrm().read32(*this, insn);
insn.modrm().write32(*this, insn, op(*this, data, cl()));
}
// Unimplemented instructions; TODO_INSN() aborts emulation when one of
// these is executed.
void SoftCPU::AAA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ARPL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::BOUND(const X86::Instruction&) { TODO_INSN(); }
// Bit-scan-forward: index of the lowest set bit (caller must ensure the
// value is nonzero; __builtin_ctz(0) is undefined).
// NOTE(review): the shadow is passed through unchanged rather than derived
// via shadow_wrap_with_taint_from as op_bsr does — confirm this asymmetry
// is intentional.
template<typename T>
ALWAYS_INLINE static T op_bsf(SoftCPU&, T value)
{
return { (typename T::ValueType)__builtin_ctz(value.value()), value.shadow() };
}
// Bit-scan-reverse: index of the highest set bit, computed with the native
// bsr instruction (32- and 16-bit widths only; no byte variant exists).
// Caller must ensure the value is nonzero, otherwise bit_index stays 0.
template<typename T>
ALWAYS_INLINE static T op_bsr(SoftCPU&, T value)
{
typename T::ValueType bit_index = 0;
if constexpr (sizeof(typename T::ValueType) == 4) {
asm volatile("bsrl %%eax, %%edx"
: "=d"(bit_index)
: "a"(value.value()));
}
if constexpr (sizeof(typename T::ValueType) == 2) {
asm volatile("bsrw %%ax, %%dx"
: "=d"(bit_index)
: "a"(value.value()));
}
return shadow_wrap_with_taint_from(bit_index, value);
}
// BSF r16, r/m16: ZF reflects whether the source is zero; the destination
// is only written for a nonzero source. The flag taint is applied
// unconditionally because ZF is derived from src either way.
void SoftCPU::BSF_reg16_RM16(const X86::Instruction& insn)
{
auto src = insn.modrm().read16(*this, insn);
set_zf(!src.value());
if (src.value())
gpr16(insn.reg16()) = op_bsf(*this, src);
taint_flags_from(src);
}
  883. void SoftCPU::BSF_reg32_RM32(const X86::Instruction& insn)
  884. {
  885. auto src = insn.modrm().read32(*this, insn);
  886. set_zf(!src.value());
  887. if (src.value()) {
  888. gpr32(insn.reg32()) = op_bsf(*this, src);
  889. taint_flags_from(src);
  890. }
  891. }
  892. void SoftCPU::BSR_reg16_RM16(const X86::Instruction& insn)
  893. {
  894. auto src = insn.modrm().read16(*this, insn);
  895. set_zf(!src.value());
  896. if (src.value()) {
  897. gpr16(insn.reg16()) = op_bsr(*this, src);
  898. taint_flags_from(src);
  899. }
  900. }
  901. void SoftCPU::BSR_reg32_RM32(const X86::Instruction& insn)
  902. {
  903. auto src = insn.modrm().read32(*this, insn);
  904. set_zf(!src.value());
  905. if (src.value()) {
  906. gpr32(insn.reg32()) = op_bsr(*this, src);
  907. taint_flags_from(src);
  908. }
  909. }
  910. void SoftCPU::BSWAP_reg32(const X86::Instruction& insn)
  911. {
  912. gpr32(insn.reg32()) = { __builtin_bswap32(gpr32(insn.reg32()).value()), __builtin_bswap32(gpr32(insn.reg32()).shadow()) };
  913. }
  914. template<typename T>
  915. ALWAYS_INLINE static T op_bt(T value, T)
  916. {
  917. return value;
  918. }
  919. template<typename T>
  920. ALWAYS_INLINE static T op_bts(T value, T bit_mask)
  921. {
  922. return value | bit_mask;
  923. }
  924. template<typename T>
  925. ALWAYS_INLINE static T op_btr(T value, T bit_mask)
  926. {
  927. return value & ~bit_mask;
  928. }
  929. template<typename T>
  930. ALWAYS_INLINE static T op_btc(T value, T bit_mask)
  931. {
  932. return value ^ bit_mask;
  933. }
// Shared implementation for BT/BTS/BTR/BTC with a 16-bit register bit index.
// CF receives the original bit; `op` computes the (possibly) updated value,
// written back only when should_update is set (BT leaves memory untouched).
template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_reg16(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
if (insn.modrm().is_register()) {
// Register destination: the bit index wraps modulo 16.
unsigned bit_index = cpu.const_gpr16(insn.reg16()).value() & (X86::TypeTrivia<u16>::bits - 1);
auto original = insn.modrm().read16(cpu, insn);
u16 bit_mask = 1 << bit_index;
u16 result = op(original.value(), bit_mask);
cpu.set_cf((original.value() & bit_mask) != 0);
cpu.taint_flags_from(cpu.gpr16(insn.reg16()), original);
if (should_update)
insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), original));
return;
}
// Memory destination: the bit index extends past the operand, addressing
// the containing byte; only that single byte is read and written.
// FIXME: Is this supposed to perform a full 16-bit read/modify/write?
unsigned bit_offset_in_array = cpu.const_gpr16(insn.reg16()).value() / 8;
unsigned bit_offset_in_byte = cpu.const_gpr16(insn.reg16()).value() & 7;
auto address = insn.modrm().resolve(cpu, insn);
address.set_offset(address.offset() + bit_offset_in_array);
auto dest = cpu.read_memory8(address);
u8 bit_mask = 1 << bit_offset_in_byte;
u8 result = op(dest.value(), bit_mask);
cpu.set_cf((dest.value() & bit_mask) != 0);
cpu.taint_flags_from(cpu.gpr16(insn.reg16()), dest);
if (should_update)
cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), dest));
}
// Shared implementation for BT/BTS/BTR/BTC with a 32-bit register bit index.
// CF receives the original bit; `op` computes the (possibly) updated value,
// written back only when should_update is set (BT leaves memory untouched).
template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM32_reg32(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
if (insn.modrm().is_register()) {
// Register destination: the bit index wraps modulo 32.
unsigned bit_index = cpu.const_gpr32(insn.reg32()).value() & (X86::TypeTrivia<u32>::bits - 1);
auto original = insn.modrm().read32(cpu, insn);
u32 bit_mask = 1 << bit_index;
u32 result = op(original.value(), bit_mask);
cpu.set_cf((original.value() & bit_mask) != 0);
cpu.taint_flags_from(cpu.gpr32(insn.reg32()), original);
if (should_update)
insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), original));
return;
}
// Memory destination: the bit index extends past the operand, addressing
// the containing byte; only that single byte is read and written.
// FIXME: Is this supposed to perform a full 32-bit read/modify/write?
unsigned bit_offset_in_array = cpu.const_gpr32(insn.reg32()).value() / 8;
unsigned bit_offset_in_byte = cpu.const_gpr32(insn.reg32()).value() & 7;
auto address = insn.modrm().resolve(cpu, insn);
address.set_offset(address.offset() + bit_offset_in_array);
auto dest = cpu.read_memory8(address);
u8 bit_mask = 1 << bit_offset_in_byte;
u8 result = op(dest.value(), bit_mask);
cpu.set_cf((dest.value() & bit_mask) != 0);
cpu.taint_flags_from(cpu.gpr32(insn.reg32()), dest);
if (should_update)
cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), dest));
}
// Shared implementation for BT/BTS/BTR/BTC r/m16, imm8.
// NOTE(review): masking with TypeTrivia<u16>::mask appears to keep the full
// imm8, so any immediate >= 16 trips the VERIFY below rather than wrapping —
// the FIXME acknowledges this limitation.
template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u16>::mask);
// FIXME: Support higher bit indices
VERIFY(bit_index < 16);
auto original = insn.modrm().read16(cpu, insn);
u16 bit_mask = 1 << bit_index;
auto result = op(original.value(), bit_mask);
// CF receives the original value of the selected bit.
cpu.set_cf((original.value() & bit_mask) != 0);
cpu.taint_flags_from(original);
if (should_update)
insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, original));
}
// Shared implementation for BT/BTS/BTR/BTC r/m32, imm8.
// NOTE(review): masking with TypeTrivia<u32>::mask appears to keep the full
// imm8, so any immediate >= 32 trips the VERIFY below rather than wrapping —
// the FIXME acknowledges this limitation.
template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM32_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u32>::mask);
// FIXME: Support higher bit indices
VERIFY(bit_index < 32);
auto original = insn.modrm().read32(cpu, insn);
u32 bit_mask = 1 << bit_index;
auto result = op(original.value(), bit_mask);
// CF receives the original value of the selected bit.
cpu.set_cf((original.value() & bit_mask) != 0);
cpu.taint_flags_from(original);
if (should_update)
insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, original));
}
// Stamp out the four addressing-mode handlers for each BT-family mnemonic;
// update_dest is false only for plain BT, which never writes its operand.
#define DEFINE_GENERIC_BTx_INSN_HANDLERS(mnemonic, op, update_dest) \
void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { BTx_RM32_reg32<update_dest>(*this, insn, op<u32>); } \
void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { BTx_RM16_reg16<update_dest>(*this, insn, op<u16>); } \
void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { BTx_RM32_imm8<update_dest>(*this, insn, op<u32>); } \
void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { BTx_RM16_imm8<update_dest>(*this, insn, op<u16>); }
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTS, op_bts, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTR, op_btr, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTC, op_btc, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BT, op_bt, false);
  1025. void SoftCPU::CALL_FAR_mem16(const X86::Instruction&)
  1026. {
  1027. TODO();
  1028. }
// Unimplemented call variants; abort via TODO_INSN() when executed.
void SoftCPU::CALL_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_RM16(const X86::Instruction&) { TODO_INSN(); }
  1031. void SoftCPU::CALL_RM32(const X86::Instruction& insn)
  1032. {
  1033. push32(shadow_wrap_as_initialized(eip()));
  1034. auto address = insn.modrm().read32(*this, insn);
  1035. warn_if_uninitialized(address, "call rm32");
  1036. set_eip(address.value());
  1037. }
// Unimplemented 16-bit and far immediate call variants.
void SoftCPU::CALL_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }
  1041. void SoftCPU::CALL_imm32(const X86::Instruction& insn)
  1042. {
  1043. push32(shadow_wrap_as_initialized(eip()));
  1044. set_eip(eip() + (i32)insn.imm32());
  1045. }
  1046. void SoftCPU::CBW(const X86::Instruction&)
  1047. {
  1048. set_ah(shadow_wrap_with_taint_from<u8>((al().value() & 0x80) ? 0xff : 0x00, al()));
  1049. }
  1050. void SoftCPU::CDQ(const X86::Instruction&)
  1051. {
  1052. if (eax().value() & 0x80000000)
  1053. set_edx(shadow_wrap_with_taint_from<u32>(0xffffffff, eax()));
  1054. else
  1055. set_edx(shadow_wrap_with_taint_from<u32>(0, eax()));
  1056. }
// Clear the carry flag.
void SoftCPU::CLC(const X86::Instruction&)
{
set_cf(false);
}
// Clear the direction flag (string ops move forward).
void SoftCPU::CLD(const X86::Instruction&)
{
set_df(false);
}
// Privileged instructions; not available to emulated userspace.
void SoftCPU::CLI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CLTS(const X86::Instruction&) { TODO_INSN(); }
// Complement (toggle) the carry flag.
void SoftCPU::CMC(const X86::Instruction&)
{
set_cf(!cf());
}
  1071. void SoftCPU::CMOVcc_reg16_RM16(const X86::Instruction& insn)
  1072. {
  1073. warn_if_flags_tainted("cmovcc reg16, rm16");
  1074. if (evaluate_condition(insn.cc()))
  1075. gpr16(insn.reg16()) = insn.modrm().read16(*this, insn);
  1076. }
  1077. void SoftCPU::CMOVcc_reg32_RM32(const X86::Instruction& insn)
  1078. {
  1079. warn_if_flags_tainted("cmovcc reg32, rm32");
  1080. if (evaluate_condition(insn.cc()))
  1081. gpr32(insn.reg32()) = insn.modrm().read32(*this, insn);
  1082. }
// Shared implementation for CMPSB/CMPSW/CMPSD: compare the element at the
// (possibly segment-overridden, default DS) source index with the element
// at ES:destination index by running op_sub for its flag effects, then step
// both indices. do_once_or_repeat handles any REP prefix.
template<typename T>
ALWAYS_INLINE static void do_cmps(SoftCPU& cpu, const X86::Instruction& insn)
{
auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
cpu.do_once_or_repeat<true>(insn, [&] {
auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
// Only the flags matter; op_sub's result is discarded.
op_sub(cpu, dest, src);
cpu.step_source_index(insn.a32(), sizeof(T));
cpu.step_destination_index(insn.a32(), sizeof(T));
});
}
// Byte / dword / word string comparison, all delegating to do_cmps.
void SoftCPU::CMPSB(const X86::Instruction& insn)
{
do_cmps<u8>(*this, insn);
}
void SoftCPU::CMPSD(const X86::Instruction& insn)
{
do_cmps<u32>(*this, insn);
}
void SoftCPU::CMPSW(const X86::Instruction& insn)
{
do_cmps<u16>(*this, insn);
}
  1107. void SoftCPU::CMPXCHG_RM16_reg16(const X86::Instruction& insn)
  1108. {
  1109. auto current = insn.modrm().read16(*this, insn);
  1110. taint_flags_from(current, ax());
  1111. if (current.value() == ax().value()) {
  1112. set_zf(true);
  1113. insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
  1114. } else {
  1115. set_zf(false);
  1116. set_ax(current);
  1117. }
  1118. }
  1119. void SoftCPU::CMPXCHG_RM32_reg32(const X86::Instruction& insn)
  1120. {
  1121. auto current = insn.modrm().read32(*this, insn);
  1122. taint_flags_from(current, eax());
  1123. if (current.value() == eax().value()) {
  1124. set_zf(true);
  1125. insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
  1126. } else {
  1127. set_zf(false);
  1128. set_eax(current);
  1129. }
  1130. }
  1131. void SoftCPU::CMPXCHG_RM8_reg8(const X86::Instruction& insn)
  1132. {
  1133. auto current = insn.modrm().read8(*this, insn);
  1134. taint_flags_from(current, al());
  1135. if (current.value() == al().value()) {
  1136. set_zf(true);
  1137. insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
  1138. } else {
  1139. set_zf(false);
  1140. set_al(current);
  1141. }
  1142. }
// Minimal CPUID: leaf 0 reports max leaf 1 and the vendor string, leaf 1
// reports a synthetic family/model and the CMOV feature bit.
void SoftCPU::CPUID(const X86::Instruction&)
{
if (eax().value() == 0) {
// Vendor string in EBX,EDX,ECX order: "Hell" "oFri" "ends" -> "HelloFriends".
set_eax(shadow_wrap_as_initialized<u32>(1));
set_ebx(shadow_wrap_as_initialized<u32>(0x6c6c6548));
set_edx(shadow_wrap_as_initialized<u32>(0x6972466f));
set_ecx(shadow_wrap_as_initialized<u32>(0x73646e65));
return;
}
if (eax().value() == 1) {
u32 stepping = 0;
u32 model = 1;
u32 family = 3;
u32 type = 0;
set_eax(shadow_wrap_as_initialized<u32>(stepping | (model << 4) | (family << 8) | (type << 12)));
set_ebx(shadow_wrap_as_initialized<u32>(0));
set_edx(shadow_wrap_as_initialized<u32>((1 << 15))); // Features (CMOV)
set_ecx(shadow_wrap_as_initialized<u32>(0));
return;
}
// Unhandled leaves are logged and leave the registers untouched.
dbgln("Unhandled CPUID with eax={:08x}", eax().value());
}
// CWD: sign-extend AX into DX:AX by filling DX with AX's sign bit.
void SoftCPU::CWD(const X86::Instruction&)
{
set_dx(shadow_wrap_with_taint_from<u16>((ax().value() & 0x8000) ? 0xffff : 0x0000, ax()));
}
// CWDE: sign-extend AX into EAX.
void SoftCPU::CWDE(const X86::Instruction&)
{
set_eax(shadow_wrap_with_taint_from(sign_extended_to<u32>(ax().value()), ax()));
}
// BCD adjust instructions are unimplemented.
void SoftCPU::DAA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::DAS(const X86::Instruction&) { TODO_INSN(); }
// DEC handlers: read the operand, decrement via op_dec (which handles
// flags and taint), and write it back.
void SoftCPU::DEC_RM16(const X86::Instruction& insn)
{
insn.modrm().write16(*this, insn, op_dec(*this, insn.modrm().read16(*this, insn)));
}
void SoftCPU::DEC_RM32(const X86::Instruction& insn)
{
insn.modrm().write32(*this, insn, op_dec(*this, insn.modrm().read32(*this, insn)));
}
void SoftCPU::DEC_RM8(const X86::Instruction& insn)
{
insn.modrm().write8(*this, insn, op_dec(*this, insn.modrm().read8(*this, insn)));
}
void SoftCPU::DEC_reg16(const X86::Instruction& insn)
{
gpr16(insn.reg16()) = op_dec(*this, const_gpr16(insn.reg16()));
}
void SoftCPU::DEC_reg32(const X86::Instruction& insn)
{
gpr32(insn.reg32()) = op_dec(*this, const_gpr32(insn.reg32()));
}
  1195. void SoftCPU::DIV_RM16(const X86::Instruction& insn)
  1196. {
  1197. auto divisor = insn.modrm().read16(*this, insn);
  1198. if (divisor.value() == 0) {
  1199. reportln("Divide by zero");
  1200. TODO();
  1201. }
  1202. u32 dividend = ((u32)dx().value() << 16) | ax().value();
  1203. auto quotient = dividend / divisor.value();
  1204. if (quotient > NumericLimits<u16>::max()) {
  1205. reportln("Divide overflow");
  1206. TODO();
  1207. }
  1208. auto remainder = dividend % divisor.value();
  1209. auto original_ax = ax();
  1210. set_ax(shadow_wrap_with_taint_from<u16>(quotient, original_ax, dx()));
  1211. set_dx(shadow_wrap_with_taint_from<u16>(remainder, original_ax, dx()));
  1212. }
  1213. void SoftCPU::DIV_RM32(const X86::Instruction& insn)
  1214. {
  1215. auto divisor = insn.modrm().read32(*this, insn);
  1216. if (divisor.value() == 0) {
  1217. reportln("Divide by zero");
  1218. TODO();
  1219. }
  1220. u64 dividend = ((u64)edx().value() << 32) | eax().value();
  1221. auto quotient = dividend / divisor.value();
  1222. if (quotient > NumericLimits<u32>::max()) {
  1223. reportln("Divide overflow");
  1224. TODO();
  1225. }
  1226. auto remainder = dividend % divisor.value();
  1227. auto original_eax = eax();
  1228. set_eax(shadow_wrap_with_taint_from<u32>(quotient, original_eax, edx(), divisor));
  1229. set_edx(shadow_wrap_with_taint_from<u32>(remainder, original_eax, edx(), divisor));
  1230. }
  1231. void SoftCPU::DIV_RM8(const X86::Instruction& insn)
  1232. {
  1233. auto divisor = insn.modrm().read8(*this, insn);
  1234. if (divisor.value() == 0) {
  1235. reportln("Divide by zero");
  1236. TODO();
  1237. }
  1238. u16 dividend = ax().value();
  1239. auto quotient = dividend / divisor.value();
  1240. if (quotient > NumericLimits<u8>::max()) {
  1241. reportln("Divide overflow");
  1242. TODO();
  1243. }
  1244. auto remainder = dividend % divisor.value();
  1245. auto original_ax = ax();
  1246. set_al(shadow_wrap_with_taint_from<u8>(quotient, original_ax, divisor));
  1247. set_ah(shadow_wrap_with_taint_from<u8>(remainder, original_ax, divisor));
  1248. }
// ENTER: create a nested stack frame. Not implemented yet.
void SoftCPU::ENTER16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ENTER32(const X86::Instruction&) { TODO_INSN(); }
// ESCAPE: an x87 escape opcode the decoder didn't map to a specific handler.
// Dumps a backtrace so the missing instruction can be identified, then aborts.
void SoftCPU::ESCAPE(const X86::Instruction&)
{
    reportln("FIXME: x87 floating-point support");
    m_emulator.dump_backtrace();
    TODO();
}
  1257. void SoftCPU::FADD_RM32(const X86::Instruction& insn)
  1258. {
  1259. // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem32 ops
  1260. if (insn.modrm().is_register()) {
  1261. fpu_set(0, fpu_get(insn.modrm().register_index()) + fpu_get(0));
  1262. } else {
  1263. auto new_f32 = insn.modrm().read32(*this, insn);
  1264. // FIXME: Respect shadow values
  1265. auto f32 = bit_cast<float>(new_f32.value());
  1266. fpu_set(0, fpu_get(0) + f32);
  1267. }
  1268. }
  1269. void SoftCPU::FMUL_RM32(const X86::Instruction& insn)
  1270. {
  1271. // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem32 ops
  1272. if (insn.modrm().is_register()) {
  1273. fpu_set(0, fpu_get(0) * fpu_get(insn.modrm().register_index()));
  1274. } else {
  1275. auto new_f32 = insn.modrm().read32(*this, insn);
  1276. // FIXME: Respect shadow values
  1277. auto f32 = bit_cast<float>(new_f32.value());
  1278. fpu_set(0, fpu_get(0) * f32);
  1279. }
  1280. }
// FCOM/FCOMP m32fp: compare ST(0) with a single-precision memory operand. Not implemented yet.
void SoftCPU::FCOM_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM32(const X86::Instruction&) { TODO_INSN(); }
  1283. void SoftCPU::FSUB_RM32(const X86::Instruction& insn)
  1284. {
  1285. if (insn.modrm().is_register()) {
  1286. fpu_set(0, fpu_get(0) - fpu_get(insn.modrm().register_index()));
  1287. } else {
  1288. auto new_f32 = insn.modrm().read32(*this, insn);
  1289. // FIXME: Respect shadow values
  1290. auto f32 = bit_cast<float>(new_f32.value());
  1291. fpu_set(0, fpu_get(0) - f32);
  1292. }
  1293. }
  1294. void SoftCPU::FSUBR_RM32(const X86::Instruction& insn)
  1295. {
  1296. if (insn.modrm().is_register()) {
  1297. fpu_set(0, fpu_get(insn.modrm().register_index()) - fpu_get(0));
  1298. } else {
  1299. auto new_f32 = insn.modrm().read32(*this, insn);
  1300. // FIXME: Respect shadow values
  1301. auto f32 = bit_cast<float>(new_f32.value());
  1302. fpu_set(0, f32 - fpu_get(0));
  1303. }
  1304. }
  1305. void SoftCPU::FDIV_RM32(const X86::Instruction& insn)
  1306. {
  1307. if (insn.modrm().is_register()) {
  1308. fpu_set(0, fpu_get(0) / fpu_get(insn.modrm().register_index()));
  1309. } else {
  1310. auto new_f32 = insn.modrm().read32(*this, insn);
  1311. // FIXME: Respect shadow values
  1312. auto f32 = bit_cast<float>(new_f32.value());
  1313. // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
  1314. fpu_set(0, fpu_get(0) / f32);
  1315. }
  1316. }
  1317. void SoftCPU::FDIVR_RM32(const X86::Instruction& insn)
  1318. {
  1319. if (insn.modrm().is_register()) {
  1320. fpu_set(0, fpu_get(insn.modrm().register_index()) / fpu_get(0));
  1321. } else {
  1322. auto new_f32 = insn.modrm().read32(*this, insn);
  1323. // FIXME: Respect shadow values
  1324. auto f32 = bit_cast<float>(new_f32.value());
  1325. // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
  1326. fpu_set(0, f32 / fpu_get(0));
  1327. }
  1328. }
  1329. void SoftCPU::FLD_RM32(const X86::Instruction& insn)
  1330. {
  1331. if (insn.modrm().is_register()) {
  1332. fpu_push(fpu_get(insn.modrm().register_index()));
  1333. } else {
  1334. auto new_f32 = insn.modrm().read32(*this, insn);
  1335. // FIXME: Respect shadow values
  1336. fpu_push(bit_cast<float>(new_f32.value()));
  1337. }
  1338. }
  1339. void SoftCPU::FXCH(const X86::Instruction& insn)
  1340. {
  1341. VERIFY(insn.modrm().is_register());
  1342. auto tmp = fpu_get(0);
  1343. fpu_set(0, fpu_get(insn.modrm().register_index()));
  1344. fpu_set(insn.modrm().register_index(), tmp);
  1345. }
  1346. void SoftCPU::FST_RM32(const X86::Instruction& insn)
  1347. {
  1348. VERIFY(!insn.modrm().is_register());
  1349. float f32 = (float)fpu_get(0);
  1350. // FIXME: Respect shadow values
  1351. insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(f32)));
  1352. }
// FNOP: x87 no-operation.
void SoftCPU::FNOP(const X86::Instruction&)
{
}
// FSTP m32fp: store ST(0) as single precision, then pop the FPU stack.
void SoftCPU::FSTP_RM32(const X86::Instruction& insn)
{
    FST_RM32(insn);
    fpu_pop();
}
// FLDENV: load the x87 environment from memory. Not implemented yet.
void SoftCPU::FLDENV(const X86::Instruction&) { TODO_INSN(); }
  1362. void SoftCPU::FCHS(const X86::Instruction&)
  1363. {
  1364. fpu_set(0, -fpu_get(0));
  1365. }
  1366. void SoftCPU::FABS(const X86::Instruction&)
  1367. {
  1368. fpu_set(0, __builtin_fabs(fpu_get(0)));
  1369. }
// FTST/FXAM: examine ST(0). Not implemented yet.
void SoftCPU::FTST(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXAM(const X86::Instruction&) { TODO_INSN(); }
// FLDCW m16: load the x87 control word from memory.
void SoftCPU::FLDCW(const X86::Instruction& insn)
{
    m_fpu_cw = insn.modrm().read16(*this, insn);
}
  1376. void SoftCPU::FLD1(const X86::Instruction&)
  1377. {
  1378. fpu_push(1.0);
  1379. }
  1380. void SoftCPU::FLDL2T(const X86::Instruction&)
  1381. {
  1382. fpu_push(log2f(10.0f));
  1383. }
  1384. void SoftCPU::FLDL2E(const X86::Instruction&)
  1385. {
  1386. fpu_push(log2f(M_E));
  1387. }
  1388. void SoftCPU::FLDPI(const X86::Instruction&)
  1389. {
  1390. fpu_push(M_PI);
  1391. }
  1392. void SoftCPU::FLDLG2(const X86::Instruction&)
  1393. {
  1394. fpu_push(log10f(2.0f));
  1395. }
  1396. void SoftCPU::FLDLN2(const X86::Instruction&)
  1397. {
  1398. fpu_push(M_LN2);
  1399. }
  1400. void SoftCPU::FLDZ(const X86::Instruction&)
  1401. {
  1402. fpu_push(0.0);
  1403. }
// FNSTENV: store the x87 environment to memory. Not implemented yet.
void SoftCPU::FNSTENV(const X86::Instruction&) { TODO_INSN(); }
  1405. void SoftCPU::F2XM1(const X86::Instruction&)
  1406. {
  1407. // FIXME: validate ST(0) is in range –1.0 to +1.0
  1408. auto f32 = fpu_get(0);
  1409. // FIXME: Set C0, C2, C3 in FPU status word.
  1410. fpu_set(0, powf(2, f32) - 1.0f);
  1411. }
  1412. void SoftCPU::FYL2X(const X86::Instruction&)
  1413. {
  1414. // FIXME: Raise IA on +-infinity, +-0, raise Z on +-0
  1415. auto f32 = fpu_get(0);
  1416. // FIXME: Set C0, C2, C3 in FPU status word.
  1417. fpu_set(1, fpu_get(1) * log2f(f32));
  1418. fpu_pop();
  1419. }
  1420. void SoftCPU::FYL2XP1(const X86::Instruction&)
  1421. {
  1422. // FIXME: validate ST(0) range
  1423. auto f32 = fpu_get(0);
  1424. // FIXME: Set C0, C2, C3 in FPU status word.
  1425. fpu_set(1, (fpu_get(1) * log2f(f32 + 1.0f)));
  1426. fpu_pop();
  1427. }
  1428. void SoftCPU::FPTAN(const X86::Instruction&)
  1429. {
  1430. // FIXME: set C1 upon stack overflow or if result was rounded
  1431. // FIXME: Set C2 to 1 if ST(0) is outside range of -2^63 to +2^63; else set to 0
  1432. fpu_set(0, tanf(fpu_get(0)));
  1433. fpu_push(1.0f);
  1434. }
// Remaining x87 transcendental / stack-manipulation opcodes. Not implemented yet.
void SoftCPU::FPATAN(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXTRACT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FPREM1(const X86::Instruction&) { TODO_INSN(); }
// FDECSTP: rotate the FPU top-of-stack pointer down by one (wraps 0 -> 7).
void SoftCPU::FDECSTP(const X86::Instruction&)
{
    m_fpu_top = (m_fpu_top == 0) ? 7 : m_fpu_top - 1;
    // NOTE(review): the x87 spec clears FPU status-word flag C1 here, not the
    // CPU carry flag — this looks like it should touch the FPU status word
    // once that is modeled (FNSTSW is still TODO). Confirm before changing.
    set_cf(0);
}
// FINCSTP: rotate the FPU top-of-stack pointer up by one (wraps 7 -> 0).
void SoftCPU::FINCSTP(const X86::Instruction&)
{
    m_fpu_top = (m_fpu_top == 7) ? 0 : m_fpu_top + 1;
    // NOTE(review): same C1-vs-CF question as FDECSTP above.
    set_cf(0);
}
// FNSTCW m16: store the x87 control word to memory.
void SoftCPU::FNSTCW(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, m_fpu_cw);
}
// FPREM: partial remainder ST(0) = ST(0) mod ST(1).
// fmodl matches FPREM's truncating (round-toward-zero) semantics.
void SoftCPU::FPREM(const X86::Instruction&)
{
    fpu_set(0,
        fmodl(fpu_get(0), fpu_get(1)));
}
  1457. void SoftCPU::FSQRT(const X86::Instruction&)
  1458. {
  1459. fpu_set(0, sqrt(fpu_get(0)));
  1460. }
  1461. void SoftCPU::FSINCOS(const X86::Instruction&)
  1462. {
  1463. long double sin = sinl(fpu_get(0));
  1464. long double cos = cosl(fpu_get(0));
  1465. fpu_set(0, sin);
  1466. fpu_push(cos);
  1467. }
  1468. void SoftCPU::FRNDINT(const X86::Instruction&)
  1469. {
  1470. // FIXME: support rounding mode
  1471. fpu_set(0, round(fpu_get(0)));
  1472. }
  1473. void SoftCPU::FSCALE(const X86::Instruction&)
  1474. {
  1475. // FIXME: set C1 upon stack overflow or if result was rounded
  1476. fpu_set(0, fpu_get(0) * powf(2, floorf(fpu_get(1))));
  1477. }
  1478. void SoftCPU::FSIN(const X86::Instruction&)
  1479. {
  1480. fpu_set(0, sin(fpu_get(0)));
  1481. }
  1482. void SoftCPU::FCOS(const X86::Instruction&)
  1483. {
  1484. fpu_set(0, cos(fpu_get(0)));
  1485. }
  1486. void SoftCPU::FIADD_RM32(const X86::Instruction& insn)
  1487. {
  1488. VERIFY(!insn.modrm().is_register());
  1489. auto m32int = (i32)insn.modrm().read32(*this, insn).value();
  1490. // FIXME: Respect shadow values
  1491. fpu_set(0, fpu_get(0) + (long double)m32int);
  1492. }
  1493. void SoftCPU::FCMOVB(const X86::Instruction& insn)
  1494. {
  1495. VERIFY(insn.modrm().is_register());
  1496. if (cf())
  1497. fpu_set(0, fpu_get(insn.rm() & 7));
  1498. }
  1499. void SoftCPU::FIMUL_RM32(const X86::Instruction& insn)
  1500. {
  1501. VERIFY(!insn.modrm().is_register());
  1502. auto m32int = (i32)insn.modrm().read32(*this, insn).value();
  1503. // FIXME: Respect shadow values
  1504. fpu_set(0, fpu_get(0) * (long double)m32int);
  1505. }
  1506. void SoftCPU::FCMOVE(const X86::Instruction& insn)
  1507. {
  1508. VERIFY(insn.modrm().is_register());
  1509. if (zf())
  1510. fpu_set(0, fpu_get(insn.rm() & 7));
  1511. }
  1512. void SoftCPU::FICOM_RM32(const X86::Instruction&) { TODO_INSN(); }
  1513. void SoftCPU::FCMOVBE(const X86::Instruction& insn)
  1514. {
  1515. if (evaluate_condition(6))
  1516. fpu_set(0, fpu_get(insn.rm() & 7));
  1517. }
  1518. void SoftCPU::FICOMP_RM32(const X86::Instruction&) { TODO_INSN(); }
  1519. void SoftCPU::FCMOVU(const X86::Instruction& insn)
  1520. {
  1521. VERIFY(insn.modrm().is_register());
  1522. if (pf())
  1523. fpu_set(0, fpu_get((insn.modrm().reg_fpu())));
  1524. }
  1525. void SoftCPU::FISUB_RM32(const X86::Instruction& insn)
  1526. {
  1527. VERIFY(!insn.modrm().is_register());
  1528. auto m32int = (i32)insn.modrm().read32(*this, insn).value();
  1529. // FIXME: Respect shadow values
  1530. fpu_set(0, fpu_get(0) - (long double)m32int);
  1531. }
  1532. void SoftCPU::FISUBR_RM32(const X86::Instruction& insn)
  1533. {
  1534. VERIFY(!insn.modrm().is_register());
  1535. auto m32int = (i32)insn.modrm().read32(*this, insn).value();
  1536. // FIXME: Respect shadow values
  1537. fpu_set(0, (long double)m32int - fpu_get(0));
  1538. }
  1539. void SoftCPU::FIDIV_RM32(const X86::Instruction& insn)
  1540. {
  1541. VERIFY(!insn.modrm().is_register());
  1542. auto m32int = (i32)insn.modrm().read32(*this, insn).value();
  1543. // FIXME: Respect shadow values
  1544. // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
  1545. fpu_set(0, fpu_get(0) / (long double)m32int);
  1546. }
  1547. void SoftCPU::FIDIVR_RM32(const X86::Instruction& insn)
  1548. {
  1549. VERIFY(!insn.modrm().is_register());
  1550. auto m32int = (i32)insn.modrm().read32(*this, insn).value();
  1551. // FIXME: Respect shadow values
  1552. // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
  1553. fpu_set(0, (long double)m32int / fpu_get(0));
  1554. }
  1555. void SoftCPU::FILD_RM32(const X86::Instruction& insn)
  1556. {
  1557. VERIFY(!insn.modrm().is_register());
  1558. auto m32int = (i32)insn.modrm().read32(*this, insn).value();
  1559. // FIXME: Respect shadow values
  1560. fpu_push((long double)m32int);
  1561. }
  1562. void SoftCPU::FCMOVNB(const X86::Instruction& insn)
  1563. {
  1564. VERIFY(insn.modrm().is_register());
  1565. if (!cf())
  1566. fpu_set(0, fpu_get((insn.modrm().reg_fpu())));
  1567. }
  1568. void SoftCPU::FISTTP_RM32(const X86::Instruction& insn)
  1569. {
  1570. VERIFY(!insn.modrm().is_register());
  1571. i32 value = static_cast<i32>(fpu_pop());
  1572. insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(value)));
  1573. }
  1574. void SoftCPU::FCMOVNE(const X86::Instruction& insn)
  1575. {
  1576. VERIFY(insn.modrm().is_register());
  1577. if (!zf())
  1578. fpu_set(0, fpu_get((insn.modrm().reg_fpu())));
  1579. }
  1580. void SoftCPU::FIST_RM32(const X86::Instruction& insn)
  1581. {
  1582. VERIFY(!insn.modrm().is_register());
  1583. auto f = fpu_get(0);
  1584. // FIXME: Respect rounding mode in m_fpu_cw.
  1585. auto value = static_cast<i32>(f);
  1586. // FIXME: Respect shadow values
  1587. insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(value)));
  1588. }
  1589. void SoftCPU::FCMOVNBE(const X86::Instruction& insn)
  1590. {
  1591. if (evaluate_condition(7))
  1592. fpu_set(0, fpu_get(insn.rm() & 7));
  1593. }
  1594. void SoftCPU::FISTP_RM32(const X86::Instruction& insn)
  1595. {
  1596. FIST_RM32(insn);
  1597. fpu_pop();
  1598. }
  1599. void SoftCPU::FCMOVNU(const X86::Instruction& insn)
  1600. {
  1601. VERIFY(insn.modrm().is_register());
  1602. if (!pf())
  1603. fpu_set(0, fpu_get((insn.modrm().reg_fpu())));
  1604. }
// Legacy/obsolete x87 control opcodes. Not implemented yet.
void SoftCPU::FNENI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNDISI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNCLEX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNINIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSETPM(const X86::Instruction&) { TODO_INSN(); }
// FLD m80fp: push an 80-bit extended-precision value from memory.
void SoftCPU::FLD_RM80(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    // long doubles can be up to 128 bits wide in memory for reasons (alignment) and only uses 80 bits of precision
    // GCC uses 12 bytes in 32 bit and 16 bytes in 64 bit mode
    // so in the 32 bit case we read a bit to much, but that shouldn't be an issue.
    // FIXME: Respect shadow values
    auto new_f80 = insn.modrm().read128(*this, insn).value();
    // Reinterpret the raw bytes as the host long double representation.
    fpu_push(*(long double*)new_f80.bytes().data());
}
  1620. void SoftCPU::FUCOMI(const X86::Instruction& insn)
  1621. {
  1622. auto i = insn.rm() & 7;
  1623. // FIXME: Unordered comparison checks.
  1624. // FIXME: QNaN / exception handling.
  1625. // FIXME: Set C0, C2, C3 in FPU status word.
  1626. if (__builtin_isnan(fpu_get(0)) || __builtin_isnan(fpu_get(i))) {
  1627. set_zf(true);
  1628. set_pf(true);
  1629. set_cf(true);
  1630. } else {
  1631. set_zf(fpu_get(0) == fpu_get(i));
  1632. set_pf(false);
  1633. set_cf(fpu_get(0) < fpu_get(i));
  1634. set_of(false);
  1635. }
  1636. // FIXME: Taint should be based on ST(0) and ST(i)
  1637. m_flags_tainted = false;
  1638. }
  1639. void SoftCPU::FCOMI(const X86::Instruction& insn)
  1640. {
  1641. auto i = insn.rm() & 7;
  1642. // FIXME: QNaN / exception handling.
  1643. // FIXME: Set C0, C2, C3 in FPU status word.
  1644. set_zf(fpu_get(0) == fpu_get(i));
  1645. set_pf(false);
  1646. set_cf(fpu_get(0) < fpu_get(i));
  1647. set_of(false);
  1648. // FIXME: Taint should be based on ST(0) and ST(i)
  1649. m_flags_tainted = false;
  1650. }
// FSTP m80fp / FSTP ST(i): pop ST(0) and store it as 80-bit extended precision.
void SoftCPU::FSTP_RM80(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_pop());
    } else {
        // FIXME: Respect shadow values
        // long doubles can be up to 128 bits wide in memory for reasons (alignment) and only uses 80 bits of precision
        // gcc uses 12 byte in 32 bit and 16 byte in 64 bit mode
        // so in the 32 bit case we have to read first, to not override data on the overly big write
        u128 f80 {};
        if constexpr (sizeof(long double) == 12)
            f80 = insn.modrm().read128(*this, insn).value();
        // Overlay the host long double representation onto the buffer, then
        // write the (possibly partially preserved) 128 bits back.
        *(long double*)f80.bytes().data() = fpu_pop();
        insn.modrm().write128(*this, insn, shadow_wrap_as_initialized(f80));
    }
}
  1667. void SoftCPU::FADD_RM64(const X86::Instruction& insn)
  1668. {
  1669. // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem64 ops
  1670. if (insn.modrm().is_register()) {
  1671. fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) + fpu_get(0));
  1672. } else {
  1673. auto new_f64 = insn.modrm().read64(*this, insn);
  1674. // FIXME: Respect shadow values
  1675. auto f64 = bit_cast<double>(new_f64.value());
  1676. fpu_set(0, fpu_get(0) + f64);
  1677. }
  1678. }
  1679. void SoftCPU::FMUL_RM64(const X86::Instruction& insn)
  1680. {
  1681. // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem64 ops
  1682. if (insn.modrm().is_register()) {
  1683. fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) * fpu_get(0));
  1684. } else {
  1685. auto new_f64 = insn.modrm().read64(*this, insn);
  1686. // FIXME: Respect shadow values
  1687. auto f64 = bit_cast<double>(new_f64.value());
  1688. fpu_set(0, fpu_get(0) * f64);
  1689. }
  1690. }
  1691. void SoftCPU::FCOM_RM64(const X86::Instruction&) { TODO_INSN(); }
  1692. void SoftCPU::FCOMP_RM64(const X86::Instruction&) { TODO_INSN(); }
// FSUB m64fp / register form targeting ST(i).
void SoftCPU::FSUB_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) - f64);
    }
}
// FSUBR m64fp: memory form computes operand - ST(0).
// NOTE(review): the register path is identical to FSUB_RM64's — presumably
// deliberate, mirroring the decoder quirk documented in FDIVR_RM64 below;
// confirm against the instruction decoder before changing.
void SoftCPU::FSUBR_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, f64 - fpu_get(0));
    }
}
// FDIV m64fp / register form targeting ST(i).
void SoftCPU::FDIV_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, fpu_get(0) / f64);
    }
}
// FDIVR m64fp: memory form computes operand / ST(0).
void SoftCPU::FDIVR_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        // XXX this is FDIVR, Instruction decodes this weirdly
        //fpu_set(insn.modrm().register_index(), fpu_get(0) / fpu_get(insn.modrm().register_index()));
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, f64 / fpu_get(0));
    }
}
  1741. void SoftCPU::FLD_RM64(const X86::Instruction& insn)
  1742. {
  1743. VERIFY(!insn.modrm().is_register());
  1744. auto new_f64 = insn.modrm().read64(*this, insn);
  1745. // FIXME: Respect shadow values
  1746. fpu_push(bit_cast<double>(new_f64.value()));
  1747. }
  1748. void SoftCPU::FFREE(const X86::Instruction&) { TODO_INSN(); }
  1749. void SoftCPU::FISTTP_RM64(const X86::Instruction& insn)
  1750. {
  1751. // is this allowed to be a register?
  1752. VERIFY(!insn.modrm().is_register());
  1753. i64 value = static_cast<i64>(fpu_pop());
  1754. insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(value)));
  1755. }
  1756. void SoftCPU::FST_RM64(const X86::Instruction& insn)
  1757. {
  1758. if (insn.modrm().is_register()) {
  1759. fpu_set(insn.modrm().register_index(), fpu_get(0));
  1760. } else {
  1761. // FIXME: Respect shadow values
  1762. double f64 = (double)fpu_get(0);
  1763. insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(f64)));
  1764. }
  1765. }
  1766. void SoftCPU::FSTP_RM64(const X86::Instruction& insn)
  1767. {
  1768. FST_RM64(insn);
  1769. fpu_pop();
  1770. }
  1771. void SoftCPU::FRSTOR(const X86::Instruction&) { TODO_INSN(); }
  1772. void SoftCPU::FUCOM(const X86::Instruction&) { TODO_INSN(); }
  1773. void SoftCPU::FUCOMP(const X86::Instruction&) { TODO_INSN(); }
  1774. void SoftCPU::FUCOMPP(const X86::Instruction&) { TODO_INSN(); }
  1775. void SoftCPU::FNSAVE(const X86::Instruction&) { TODO_INSN(); }
  1776. void SoftCPU::FNSTSW(const X86::Instruction&) { TODO_INSN(); }
  1777. void SoftCPU::FIADD_RM16(const X86::Instruction& insn)
  1778. {
  1779. VERIFY(!insn.modrm().is_register());
  1780. auto m16int = (i16)insn.modrm().read16(*this, insn).value();
  1781. // FIXME: Respect shadow values
  1782. fpu_set(0, fpu_get(0) + (long double)m16int);
  1783. }
  1784. void SoftCPU::FADDP(const X86::Instruction& insn)
  1785. {
  1786. VERIFY(insn.modrm().is_register());
  1787. fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) + fpu_get(0));
  1788. fpu_pop();
  1789. }
  1790. void SoftCPU::FIMUL_RM16(const X86::Instruction& insn)
  1791. {
  1792. VERIFY(!insn.modrm().is_register());
  1793. auto m16int = (i16)insn.modrm().read16(*this, insn).value();
  1794. // FIXME: Respect shadow values
  1795. fpu_set(0, fpu_get(0) * (long double)m16int);
  1796. }
  1797. void SoftCPU::FMULP(const X86::Instruction& insn)
  1798. {
  1799. VERIFY(insn.modrm().is_register());
  1800. fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) * fpu_get(0));
  1801. fpu_pop();
  1802. }
  1803. void SoftCPU::FICOM_RM16(const X86::Instruction&) { TODO_INSN(); }
  1804. void SoftCPU::FICOMP_RM16(const X86::Instruction&) { TODO_INSN(); }
  1805. void SoftCPU::FCOMPP(const X86::Instruction&) { TODO_INSN(); }
  1806. void SoftCPU::FISUB_RM16(const X86::Instruction& insn)
  1807. {
  1808. VERIFY(!insn.modrm().is_register());
  1809. auto m16int = (i16)insn.modrm().read16(*this, insn).value();
  1810. // FIXME: Respect shadow values
  1811. fpu_set(0, fpu_get(0) - (long double)m16int);
  1812. }
  1813. void SoftCPU::FSUBRP(const X86::Instruction& insn)
  1814. {
  1815. VERIFY(insn.modrm().is_register());
  1816. fpu_set(insn.modrm().register_index(), fpu_get(0) - fpu_get(insn.modrm().register_index()));
  1817. fpu_pop();
  1818. }
  1819. void SoftCPU::FISUBR_RM16(const X86::Instruction& insn)
  1820. {
  1821. VERIFY(!insn.modrm().is_register());
  1822. auto m16int = (i16)insn.modrm().read16(*this, insn).value();
  1823. // FIXME: Respect shadow values
  1824. fpu_set(0, (long double)m16int - fpu_get(0));
  1825. }
  1826. void SoftCPU::FSUBP(const X86::Instruction& insn)
  1827. {
  1828. VERIFY(insn.modrm().is_register());
  1829. fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
  1830. fpu_pop();
  1831. }
  1832. void SoftCPU::FIDIV_RM16(const X86::Instruction& insn)
  1833. {
  1834. VERIFY(!insn.modrm().is_register());
  1835. auto m16int = (i16)insn.modrm().read16(*this, insn).value();
  1836. // FIXME: Respect shadow values
  1837. // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
  1838. fpu_set(0, fpu_get(0) / (long double)m16int);
  1839. }
  1840. void SoftCPU::FDIVRP(const X86::Instruction& insn)
  1841. {
  1842. VERIFY(insn.modrm().is_register());
  1843. // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
  1844. fpu_set(insn.modrm().register_index(), fpu_get(0) / fpu_get(insn.modrm().register_index()));
  1845. fpu_pop();
  1846. }
  1847. void SoftCPU::FIDIVR_RM16(const X86::Instruction& insn)
  1848. {
  1849. VERIFY(!insn.modrm().is_register());
  1850. auto m16int = (i16)insn.modrm().read16(*this, insn).value();
  1851. // FIXME: Respect shadow values
  1852. // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
  1853. fpu_set(0, (long double)m16int / fpu_get(0));
  1854. }
  1855. void SoftCPU::FDIVP(const X86::Instruction& insn)
  1856. {
  1857. VERIFY(insn.modrm().is_register());
  1858. // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
  1859. fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
  1860. fpu_pop();
  1861. }
  1862. void SoftCPU::FILD_RM16(const X86::Instruction& insn)
  1863. {
  1864. VERIFY(!insn.modrm().is_register());
  1865. auto m16int = (i16)insn.modrm().read16(*this, insn).value();
  1866. // FIXME: Respect shadow values
  1867. fpu_push((long double)m16int);
  1868. }
  1869. void SoftCPU::FFREEP(const X86::Instruction&) { TODO_INSN(); }
  1870. void SoftCPU::FISTTP_RM16(const X86::Instruction& insn)
  1871. {
  1872. // is this allowed to be a register?
  1873. VERIFY(!insn.modrm().is_register());
  1874. i16 value = static_cast<i16>(fpu_pop());
  1875. insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(bit_cast<u16>(value)));
  1876. }
  1877. void SoftCPU::FIST_RM16(const X86::Instruction& insn)
  1878. {
  1879. VERIFY(!insn.modrm().is_register());
  1880. auto f = fpu_get(0);
  1881. // FIXME: Respect rounding mode in m_fpu_cw.
  1882. auto value = static_cast<i16>(f);
  1883. // FIXME: Respect shadow values
  1884. insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(bit_cast<u16>(value)));
  1885. }
  1886. void SoftCPU::FISTP_RM16(const X86::Instruction& insn)
  1887. {
  1888. FIST_RM16(insn);
  1889. fpu_pop();
  1890. }
  1891. void SoftCPU::FBLD_M80(const X86::Instruction&) { TODO_INSN(); }
  1892. void SoftCPU::FNSTSW_AX(const X86::Instruction&) { TODO_INSN(); }
  1893. void SoftCPU::FILD_RM64(const X86::Instruction& insn)
  1894. {
  1895. VERIFY(!insn.modrm().is_register());
  1896. auto m64int = (i64)insn.modrm().read64(*this, insn).value();
  1897. // FIXME: Respect shadow values
  1898. fpu_push((long double)m64int);
  1899. }
  1900. void SoftCPU::FUCOMIP(const X86::Instruction& insn)
  1901. {
  1902. FUCOMI(insn);
  1903. fpu_pop();
  1904. }
  1905. void SoftCPU::FBSTP_M80(const X86::Instruction&) { TODO_INSN(); }
  1906. void SoftCPU::FCOMIP(const X86::Instruction& insn)
  1907. {
  1908. FCOMI(insn);
  1909. fpu_pop();
  1910. }
  1911. void SoftCPU::FISTP_RM64(const X86::Instruction& insn)
  1912. {
  1913. VERIFY(!insn.modrm().is_register());
  1914. auto f = fpu_pop();
  1915. // FIXME: Respect rounding mode in m_fpu_cw.
  1916. auto value = static_cast<i64>(f);
  1917. // FIXME: Respect shadow values
  1918. insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(value)));
  1919. }
  1920. void SoftCPU::HLT(const X86::Instruction&) { TODO_INSN(); }
  1921. void SoftCPU::IDIV_RM16(const X86::Instruction& insn)
  1922. {
  1923. auto divisor_with_shadow = insn.modrm().read16(*this, insn);
  1924. auto divisor = (i16)divisor_with_shadow.value();
  1925. if (divisor == 0) {
  1926. reportln("Divide by zero");
  1927. TODO();
  1928. }
  1929. i32 dividend = (i32)(((u32)dx().value() << 16) | (u32)ax().value());
  1930. i32 result = dividend / divisor;
  1931. if (result > NumericLimits<i16>::max() || result < NumericLimits<i16>::min()) {
  1932. reportln("Divide overflow");
  1933. TODO();
  1934. }
  1935. auto original_ax = ax();
  1936. set_ax(shadow_wrap_with_taint_from<u16>(result, original_ax, dx(), divisor_with_shadow));
  1937. set_dx(shadow_wrap_with_taint_from<u16>(dividend % divisor, original_ax, dx(), divisor_with_shadow));
  1938. }
  1939. void SoftCPU::IDIV_RM32(const X86::Instruction& insn)
  1940. {
  1941. auto divisor_with_shadow = insn.modrm().read32(*this, insn);
  1942. auto divisor = (i32)divisor_with_shadow.value();
  1943. if (divisor == 0) {
  1944. reportln("Divide by zero");
  1945. TODO();
  1946. }
  1947. i64 dividend = (i64)(((u64)edx().value() << 32) | (u64)eax().value());
  1948. i64 result = dividend / divisor;
  1949. if (result > NumericLimits<i32>::max() || result < NumericLimits<i32>::min()) {
  1950. reportln("Divide overflow");
  1951. TODO();
  1952. }
  1953. auto original_eax = eax();
  1954. set_eax(shadow_wrap_with_taint_from<u32>(result, original_eax, edx(), divisor_with_shadow));
  1955. set_edx(shadow_wrap_with_taint_from<u32>(dividend % divisor, original_eax, edx(), divisor_with_shadow));
  1956. }
  1957. void SoftCPU::IDIV_RM8(const X86::Instruction& insn)
  1958. {
  1959. auto divisor_with_shadow = insn.modrm().read8(*this, insn);
  1960. auto divisor = (i8)divisor_with_shadow.value();
  1961. if (divisor == 0) {
  1962. reportln("Divide by zero");
  1963. TODO();
  1964. }
  1965. i16 dividend = ax().value();
  1966. i16 result = dividend / divisor;
  1967. if (result > NumericLimits<i8>::max() || result < NumericLimits<i8>::min()) {
  1968. reportln("Divide overflow");
  1969. TODO();
  1970. }
  1971. auto original_ax = ax();
  1972. set_al(shadow_wrap_with_taint_from<u8>(result, divisor_with_shadow, original_ax));
  1973. set_ah(shadow_wrap_with_taint_from<u8>(dividend % divisor, divisor_with_shadow, original_ax));
  1974. }
// IMUL (signed multiply) handlers. op_imul() produces the signed product split
// into high/low halves. The one-operand forms spread the product across
// DX:AX / EDX:EAX / AH:AL; the two- and three-operand forms keep only the low
// half in the destination register. Results are tainted from their inputs
// (immediates contribute no taint).
void SoftCPU::IMUL_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), ax().value(), result_high, result_low);
    gpr16(X86::RegisterDX) = shadow_wrap_with_taint_from<u16>(result_high, src, ax());
    gpr16(X86::RegisterAX) = shadow_wrap_with_taint_from<u16>(result_low, src, ax());
}
void SoftCPU::IMUL_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), eax().value(), result_high, result_low);
    gpr32(X86::RegisterEDX) = shadow_wrap_with_taint_from<u32>(result_high, src, eax());
    gpr32(X86::RegisterEAX) = shadow_wrap_with_taint_from<u32>(result_low, src, eax());
}
void SoftCPU::IMUL_RM8(const X86::Instruction& insn)
{
    i8 result_high;
    i8 result_low;
    auto src = insn.modrm().read8(*this, insn);
    op_imul<i8>(*this, src.value(), al().value(), result_high, result_low);
    gpr8(X86::RegisterAH) = shadow_wrap_with_taint_from<u8>(result_high, src, al());
    gpr8(X86::RegisterAL) = shadow_wrap_with_taint_from<u8>(result_low, src, al());
}
void SoftCPU::IMUL_reg16_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, gpr16(insn.reg16()).value(), src.value(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src, gpr16(insn.reg16()));
}
void SoftCPU::IMUL_reg16_RM16_imm16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), insn.imm16(), result_high, result_low);
    // Only src carries taint; the immediate is a constant.
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}
void SoftCPU::IMUL_reg16_RM16_imm8(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), sign_extended_to<i16>(insn.imm8()), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}
void SoftCPU::IMUL_reg32_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, gpr32(insn.reg32()).value(), src.value(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src, gpr32(insn.reg32()));
}
void SoftCPU::IMUL_reg32_RM32_imm32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), insn.imm32(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}
void SoftCPU::IMUL_reg32_RM32_imm8(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), sign_extended_to<i32>(insn.imm8()), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}
// INC handlers: op_inc() computes the incremented value (and any flag
// effects); these routines just route the operand through it and write back.
void SoftCPU::INC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_inc(*this, insn.modrm().read16(*this, insn)));
}
void SoftCPU::INC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_inc(*this, insn.modrm().read32(*this, insn)));
}
void SoftCPU::INC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_inc(*this, insn.modrm().read8(*this, insn)));
}
void SoftCPU::INC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_inc(*this, const_gpr16(insn.reg16()));
}
void SoftCPU::INC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_inc(*this, const_gpr32(insn.reg32()));
}
// Not yet implemented instruction handlers.
void SoftCPU::INSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INT3(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INTO(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INT_imm8(const X86::Instruction& insn)
{
    // Only interrupt vector 0x82 is supported: it is routed to the
    // emulator's syscall handler with EAX as the syscall number and
    // EDX/ECX/EBX as arguments.
    VERIFY(insn.imm8() == 0x82);
    // FIXME: virt_syscall should take ValueWithShadow and whine about uninitialized arguments
    set_eax(shadow_wrap_as_initialized(m_emulator.virt_syscall(eax().value(), edx().value(), ecx().value(), ebx().value())));
}
// Not yet implemented instruction handlers (port I/O, paging, IRET).
void SoftCPU::INVLPG(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IRET(const X86::Instruction&) { TODO_INSN(); }
  2089. void SoftCPU::JCXZ_imm8(const X86::Instruction& insn)
  2090. {
  2091. if (insn.a32()) {
  2092. warn_if_uninitialized(ecx(), "jecxz imm8");
  2093. if (ecx().value() == 0)
  2094. set_eip(eip() + (i8)insn.imm8());
  2095. } else {
  2096. warn_if_uninitialized(cx(), "jcxz imm8");
  2097. if (cx().value() == 0)
  2098. set_eip(eip() + (i8)insn.imm8());
  2099. }
  2100. }
// JMP handlers. Relative forms add the sign-extended displacement to EIP;
// the far and 16-bit register/memory forms are not implemented yet.
void SoftCPU::JMP_FAR_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_RM32(const X86::Instruction& insn)
{
    // Indirect absolute jump through a 32-bit register/memory operand.
    set_eip(insn.modrm().read32(*this, insn).value());
}
void SoftCPU::JMP_imm16(const X86::Instruction& insn)
{
    set_eip(eip() + (i16)insn.imm16());
}
void SoftCPU::JMP_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_imm32(const X86::Instruction& insn)
{
    set_eip(eip() + (i32)insn.imm32());
}
void SoftCPU::JMP_short_imm8(const X86::Instruction& insn)
{
    set_eip(eip() + (i8)insn.imm8());
}
// Conditional jumps. Branching on tainted flags is reported, since the
// branch outcome would then depend on uninitialized data.
void SoftCPU::Jcc_NEAR_imm(const X86::Instruction& insn)
{
    warn_if_flags_tainted("jcc near imm32");
    if (evaluate_condition(insn.cc()))
        set_eip(eip() + (i32)insn.imm32());
}
void SoftCPU::Jcc_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("jcc imm8");
    if (evaluate_condition(insn.cc()))
        set_eip(eip() + (i8)insn.imm8());
}
// Not yet implemented instruction handlers.
void SoftCPU::LAHF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LEAVE16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LEAVE32(const X86::Instruction&)
{
    // LEAVE tears down the current stack frame: the saved EBP is read from
    // [SS:EBP] first, then ESP is pointed just past that slot, and finally
    // EBP is restored. The order matters: set_esp() still reads EBP.
    auto new_ebp = read_memory32({ ss(), ebp().value() });
    set_esp({ ebp().value() + 4, ebp().shadow() });
    set_ebp(new_ebp);
}
// LEA stores the computed effective address (not the memory contents) into
// the destination register.
void SoftCPU::LEA_reg16_mem16(const X86::Instruction& insn)
{
    // FIXME: Respect shadow values
    gpr16(insn.reg16()) = shadow_wrap_as_initialized<u16>(insn.modrm().resolve(*this, insn).offset());
}
void SoftCPU::LEA_reg32_mem32(const X86::Instruction& insn)
{
    // FIXME: Respect shadow values
    gpr32(insn.reg32()) = shadow_wrap_as_initialized<u32>(insn.modrm().resolve(*this, insn).offset());
}
// Not yet implemented instruction handlers (far pointer loads, descriptor
// table and machine status word instructions).
void SoftCPU::LES_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LES_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LMSW_RM16(const X86::Instruction&) { TODO_INSN(); }
// Shared implementation for LODSB/LODSW/LODSD: loads the element at
// [segment:SI/ESI] into AL/AX/EAX and advances the source index, honoring
// segment-override prefixes (default DS) and the REP prefix.
template<typename T>
ALWAYS_INLINE static void do_lods(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.gpr<T>(X86::RegisterAL) = src;
        cpu.step_source_index(insn.a32(), sizeof(T));
    });
}
// Thin size-specific wrappers over do_lods<T>().
void SoftCPU::LODSB(const X86::Instruction& insn)
{
    do_lods<u8>(*this, insn);
}
void SoftCPU::LODSD(const X86::Instruction& insn)
{
    do_lods<u32>(*this, insn);
}
void SoftCPU::LODSW(const X86::Instruction& insn)
{
    do_lods<u16>(*this, insn);
}
// LOOPNZ/LOOPZ: decrement (E)CX and branch while it is non-zero and ZF is
// clear/set respectively. The decrement keeps the register's existing shadow.
void SoftCPU::LOOPNZ_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("loopnz");
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0 && !zf())
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0 && !zf())
            set_eip(eip() + (i8)insn.imm8());
    }
}
void SoftCPU::LOOPZ_imm8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("loopz");
    if (insn.a32()) {
        set_ecx({ ecx().value() - 1, ecx().shadow() });
        if (ecx().value() != 0 && zf())
            set_eip(eip() + (i8)insn.imm8());
    } else {
        set_cx({ (u16)(cx().value() - 1), cx().shadow() });
        if (cx().value() != 0 && zf())
            set_eip(eip() + (i8)insn.imm8());
    }
}
  2214. void SoftCPU::LOOP_imm8(const X86::Instruction& insn)
  2215. {
  2216. if (insn.a32()) {
  2217. set_ecx({ ecx().value() - 1, ecx().shadow() });
  2218. if (ecx().value() != 0)
  2219. set_eip(eip() + (i8)insn.imm8());
  2220. } else {
  2221. set_cx({ (u16)(cx().value() - 1), cx().shadow() });
  2222. if (cx().value() != 0)
  2223. set_eip(eip() + (i8)insn.imm8());
  2224. }
  2225. }
// Not yet implemented instruction handlers.
void SoftCPU::LSL_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSL_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LTR_RM16(const X86::Instruction&) { TODO_INSN(); }
// Shared implementation for MOVSB/MOVSW/MOVSD: copies one element from
// [segment:SI/ESI] (default DS, override honored) to [ES:DI/EDI], then
// advances both indices. REP prefixes are handled by do_once_or_repeat.
template<typename T>
ALWAYS_INLINE static void do_movs(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<false>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.write_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() }, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}
// Thin size-specific wrappers over do_movs<T>().
void SoftCPU::MOVSB(const X86::Instruction& insn)
{
    do_movs<u8>(*this, insn);
}
void SoftCPU::MOVSD(const X86::Instruction& insn)
{
    do_movs<u32>(*this, insn);
}
void SoftCPU::MOVSW(const X86::Instruction& insn)
{
    do_movs<u16>(*this, insn);
}
// MOVSX: sign-extend the source into the wider destination, tainting the
// whole result from the source.
void SoftCPU::MOVSX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(sign_extended_to<u16>(src.value()), src);
}
void SoftCPU::MOVSX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(sign_extended_to<u32>(src.value()), src);
}
void SoftCPU::MOVSX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(sign_extended_to<u32>(src.value()), src);
}
// MOVZX: zero-extend the source. The shadow is built per byte: the low bytes
// keep the source's shadow, while the zeroed upper bytes get 0x01 — which
// appears to be the per-byte "initialized" marker (cf. shadow_wrap_as_initialized);
// they are constant zeros, hence always initialized.
void SoftCPU::MOVZX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr16(insn.reg16()) = ValueWithShadow<u16>(src.value(), 0x0100 | (src.shadow() & 0xff));
}
void SoftCPU::MOVZX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010000 | (src.shadow() & 0xffff));
}
void SoftCPU::MOVZX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010100 | (src.shadow() & 0xff));
}
// MOV family. moff forms address memory with the instruction's absolute
// address and an optional segment-override prefix (default DS). Immediate
// sources are wrapped as fully-initialized shadow values; register/memory
// moves propagate the source's shadow unchanged.
void SoftCPU::MOV_AL_moff8(const X86::Instruction& insn)
{
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
void SoftCPU::MOV_AX_moff16(const X86::Instruction& insn)
{
    set_ax(read_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
void SoftCPU::MOV_CR_reg32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_DR_reg32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_EAX_moff32(const X86::Instruction& insn)
{
    set_eax(read_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
void SoftCPU::MOV_RM16_imm16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(insn.imm16()));
}
void SoftCPU::MOV_RM16_reg16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
}
void SoftCPU::MOV_RM16_seg(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_RM32_imm32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(insn.imm32()));
}
void SoftCPU::MOV_RM32_reg32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
}
void SoftCPU::MOV_RM8_imm8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized(insn.imm8()));
}
void SoftCPU::MOV_RM8_reg8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
}
void SoftCPU::MOV_moff16_AX(const X86::Instruction& insn)
{
    write_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, ax());
}
void SoftCPU::MOV_moff32_EAX(const X86::Instruction& insn)
{
    write_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, eax());
}
void SoftCPU::MOV_moff8_AL(const X86::Instruction& insn)
{
    write_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, al());
}
void SoftCPU::MOV_reg16_RM16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = insn.modrm().read16(*this, insn);
}
void SoftCPU::MOV_reg16_imm16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = shadow_wrap_as_initialized(insn.imm16());
}
void SoftCPU::MOV_reg32_CR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_reg32_DR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_reg32_RM32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = insn.modrm().read32(*this, insn);
}
void SoftCPU::MOV_reg32_imm32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = shadow_wrap_as_initialized(insn.imm32());
}
void SoftCPU::MOV_reg8_RM8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = insn.modrm().read8(*this, insn);
}
void SoftCPU::MOV_reg8_imm8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = shadow_wrap_as_initialized(insn.imm8());
}
void SoftCPU::MOV_seg_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_seg_RM32(const X86::Instruction&) { TODO_INSN(); }
// MUL (unsigned multiply) handlers. The double-width product is split across
// DX:AX / EDX:EAX (or placed whole in AX for the 8-bit form). CF and OF are
// set when the upper half of the product is non-zero.
void SoftCPU::MUL_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    u32 result = (u32)ax().value() * (u32)src.value();
    // AX is captured before being overwritten so taint derives from the
    // original operand.
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result & 0xffff, src, original_ax));
    set_dx(shadow_wrap_with_taint_from<u16>(result >> 16, src, original_ax));
    taint_flags_from(src, original_ax);
    set_cf(dx().value() != 0);
    set_of(dx().value() != 0);
}
void SoftCPU::MUL_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32(*this, insn);
    u64 result = (u64)eax().value() * (u64)src.value();
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, src, original_eax));
    set_edx(shadow_wrap_with_taint_from<u32>(result >> 32, src, original_eax));
    taint_flags_from(src, original_eax);
    set_cf(edx().value() != 0);
    set_of(edx().value() != 0);
}
void SoftCPU::MUL_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    u16 result = (u16)al().value() * src.value();
    auto original_al = al();
    // The full 16-bit product lands in AX; CF/OF reflect a non-zero AH.
    set_ax(shadow_wrap_with_taint_from(result, src, original_al));
    taint_flags_from(src, original_al);
    set_cf((result & 0xff00) != 0);
    set_of((result & 0xff00) != 0);
}
// NEG is implemented as (0 - operand) through op_sub so it produces the same
// flag effects as a subtraction from an initialized zero.
void SoftCPU::NEG_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_sub<ValueWithShadow<u16>>(*this, shadow_wrap_as_initialized<u16>(0), insn.modrm().read16(*this, insn)));
}
void SoftCPU::NEG_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_sub<ValueWithShadow<u32>>(*this, shadow_wrap_as_initialized<u32>(0), insn.modrm().read32(*this, insn)));
}
void SoftCPU::NEG_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_sub<ValueWithShadow<u8>>(*this, shadow_wrap_as_initialized<u8>(0), insn.modrm().read8(*this, insn)));
}
void SoftCPU::NOP(const X86::Instruction&)
{
}
// NOT flips every bit of the operand; flags are unaffected and the operand's
// shadow is carried through unchanged.
void SoftCPU::NOT_RM16(const X86::Instruction& insn)
{
    auto data = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, ValueWithShadow<u16>(~data.value(), data.shadow()));
}
void SoftCPU::NOT_RM32(const X86::Instruction& insn)
{
    auto data = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, ValueWithShadow<u32>(~data.value(), data.shadow()));
}
void SoftCPU::NOT_RM8(const X86::Instruction& insn)
{
    auto data = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, ValueWithShadow<u8>(~data.value(), data.shadow()));
}
// Not yet implemented instruction handlers (port output and MMX packed
// arithmetic/compare instructions).
void SoftCPU::OUTSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PACKSSDW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PACKSSWB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PACKUSWB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDSB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDSW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDUSB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDUSW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PAND_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PANDN_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPEQB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPEQW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPEQD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPGTB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPGTW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PCMPGTD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PMADDWD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PMULHW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PMULLW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
// POPA/POPAD: restore all general-purpose registers in reverse push order.
// The slot that held (E)SP is popped but its value is discarded.
void SoftCPU::POPA(const X86::Instruction&)
{
    set_di(pop16());
    set_si(pop16());
    set_bp(pop16());
    pop16(); // Discard the saved SP slot.
    set_bx(pop16());
    set_dx(pop16());
    set_cx(pop16());
    set_ax(pop16());
}
void SoftCPU::POPAD(const X86::Instruction&)
{
    set_edi(pop32());
    set_esi(pop32());
    set_ebp(pop32());
    pop32(); // Discard the saved ESP slot.
    set_ebx(pop32());
    set_edx(pop32());
    set_ecx(pop32());
    set_eax(pop32());
}
// POPF/POPFD: load EFLAGS from the stack. POPF replaces only the low 16
// bits; POPFD replaces only the bits covered by the 0x00fcffff mask. In both
// cases the flags' taint is updated from the popped value's shadow.
void SoftCPU::POPF(const X86::Instruction&)
{
    auto popped_value = pop16();
    m_eflags &= ~0xffff;
    m_eflags |= popped_value.value();
    taint_flags_from(popped_value);
}
void SoftCPU::POPFD(const X86::Instruction&)
{
    auto popped_value = pop32();
    m_eflags &= ~0x00fcffff;
    m_eflags |= popped_value.value() & 0x00fcffff;
    taint_flags_from(popped_value);
}
// POP handlers; segment-register pops are not implemented yet.
void SoftCPU::POP_DS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_ES(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_FS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_GS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, pop16());
}
void SoftCPU::POP_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, pop32());
}
void SoftCPU::POP_SS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = pop16();
}
void SoftCPU::POP_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = pop32();
}
  2512. void SoftCPU::POR_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2513. void SoftCPU::PSLLW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2514. void SoftCPU::PSLLW_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2515. void SoftCPU::PSLLD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2516. void SoftCPU::PSLLD_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2517. void SoftCPU::PSLLQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2518. void SoftCPU::PSLLQ_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2519. void SoftCPU::PSRAW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2520. void SoftCPU::PSRAW_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2521. void SoftCPU::PSRAD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2522. void SoftCPU::PSRAD_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2523. void SoftCPU::PSRLW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2524. void SoftCPU::PSRLW_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2525. void SoftCPU::PSRLD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2526. void SoftCPU::PSRLD_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2527. void SoftCPU::PSRLQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2528. void SoftCPU::PSRLQ_mm1_imm8(const X86::Instruction&) { TODO_INSN(); };
  2529. void SoftCPU::PSUBB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2530. void SoftCPU::PSUBW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2531. void SoftCPU::PSUBD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2532. void SoftCPU::PSUBSB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2533. void SoftCPU::PSUBSW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2534. void SoftCPU::PSUBUSB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2535. void SoftCPU::PSUBUSW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2536. void SoftCPU::PUNPCKHBW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2537. void SoftCPU::PUNPCKHWD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2538. void SoftCPU::PUNPCKHDQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
  2539. void SoftCPU::PUNPCKLBW_mm1_mm2m32(const X86::Instruction&) { TODO_INSN(); };
  2540. void SoftCPU::PUNPCKLWD_mm1_mm2m32(const X86::Instruction&) { TODO_INSN(); };
  2541. void SoftCPU::PUNPCKLDQ_mm1_mm2m32(const X86::Instruction&) { TODO_INSN(); };
// PUSHA/PUSHAD: push all general-purpose registers. The stack pointer is
// captured first so the value pushed for (E)SP is the one from before any of
// the pushes.
void SoftCPU::PUSHA(const X86::Instruction&)
{
    auto temp = sp();
    push16(ax());
    push16(cx());
    push16(dx());
    push16(bx());
    push16(temp); // Original SP, captured before the pushes above.
    push16(bp());
    push16(si());
    push16(di());
}
void SoftCPU::PUSHAD(const X86::Instruction&)
{
    auto temp = esp();
    push32(eax());
    push32(ecx());
    push32(edx());
    push32(ebx());
    push32(temp); // Original ESP, captured before the pushes above.
    push32(ebp());
    push32(esi());
    push32(edi());
}
// PUSHF/PUSHFD: push (part of) EFLAGS. PUSHF pushes the low 16 bits; PUSHFD
// pushes the bits covered by the 0x00fcffff mask (mirroring POPFD).
void SoftCPU::PUSHF(const X86::Instruction&)
{
    // FIXME: Respect shadow flags when they exist!
    push16(shadow_wrap_as_initialized<u16>(m_eflags & 0xffff));
}
void SoftCPU::PUSHFD(const X86::Instruction&)
{
    // FIXME: Respect shadow flags when they exist!
    push32(shadow_wrap_as_initialized(m_eflags & 0x00fcffff));
}
// PUSH handlers; segment-register pushes are not implemented yet.
void SoftCPU::PUSH_CS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_DS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_ES(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_FS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_GS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_RM16(const X86::Instruction& insn)
{
    push16(insn.modrm().read16(*this, insn));
}
void SoftCPU::PUSH_RM32(const X86::Instruction& insn)
{
    push32(insn.modrm().read32(*this, insn));
}
void SoftCPU::PUSH_SP_8086_80186(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_SS(const X86::Instruction&) { TODO_INSN(); }
// Immediate and register PUSH forms. Immediates are pushed as fully
// initialized shadow values.
void SoftCPU::PUSH_imm16(const X86::Instruction& insn)
{
    push16(shadow_wrap_as_initialized(insn.imm16()));
}
void SoftCPU::PUSH_imm32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(insn.imm32()));
}
void SoftCPU::PUSH_imm8(const X86::Instruction& insn)
{
    // Only the 32-bit operand size is handled: the byte immediate is
    // sign-extended to 32 bits before being pushed.
    VERIFY(!insn.has_operand_size_override_prefix());
    push32(shadow_wrap_as_initialized<u32>(sign_extended_to<i32>(insn.imm8())));
}
void SoftCPU::PUSH_reg16(const X86::Instruction& insn)
{
    push16(gpr16(insn.reg16()));
}
void SoftCPU::PUSH_reg32(const X86::Instruction& insn)
{
    push32(gpr32(insn.reg32()));
}
  2612. void SoftCPU::PXOR_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); };
// Rotate-through-carry left. The rotate is executed on the host CPU via
// inline asm (with the carry flag seeded from the `cf` template parameter);
// the host EFLAGS are then captured with pushf/pop and fed to set_flags_oc().
// A zero step count leaves the value unchanged and does not touch the flags.
template<typename T, bool cf>
ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    // Pick the host rotate width matching the emulated operand size.
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rclw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rclb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    // Harvest the host flags produced by the rotate.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
// Dispatch on the current emulated carry flag, which seeds the host rotate.
template<typename T>
ALWAYS_INLINE static T op_rcl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcl");
    if (cpu.cf())
        return op_rcl_impl<T, true>(cpu, data, steps);
    return op_rcl_impl<T, false>(cpu, data, steps);
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCL, op_rcl)
  2654. template<typename T, bool cf>
  2655. ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2656. {
  2657. if (steps.value() == 0)
  2658. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2659. u32 result = 0;
  2660. u32 new_flags = 0;
  2661. if constexpr (cf)
  2662. asm volatile("stc");
  2663. else
  2664. asm volatile("clc");
  2665. if constexpr (sizeof(typename T::ValueType) == 4) {
  2666. asm volatile("rcrl %%cl, %%eax\n"
  2667. : "=a"(result)
  2668. : "a"(data.value()), "c"(steps.value()));
  2669. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2670. asm volatile("rcrw %%cl, %%ax\n"
  2671. : "=a"(result)
  2672. : "a"(data.value()), "c"(steps.value()));
  2673. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2674. asm volatile("rcrb %%cl, %%al\n"
  2675. : "=a"(result)
  2676. : "a"(data.value()), "c"(steps.value()));
  2677. }
  2678. asm volatile(
  2679. "pushf\n"
  2680. "pop %%ebx"
  2681. : "=b"(new_flags));
  2682. cpu.set_flags_oc(new_flags);
  2683. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2684. }
  2685. template<typename T>
  2686. ALWAYS_INLINE static T op_rcr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2687. {
  2688. cpu.warn_if_flags_tainted("rcr");
  2689. if (cpu.cf())
  2690. return op_rcr_impl<T, true>(cpu, data, steps);
  2691. return op_rcr_impl<T, false>(cpu, data, steps);
  2692. }
  2693. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCR, op_rcr)
// RDTSC is not yet implemented in the emulator.
void SoftCPU::RDTSC(const X86::Instruction&) { TODO_INSN(); }
  2695. void SoftCPU::RET(const X86::Instruction& insn)
  2696. {
  2697. VERIFY(!insn.has_operand_size_override_prefix());
  2698. auto ret_address = pop32();
  2699. warn_if_uninitialized(ret_address, "ret");
  2700. set_eip(ret_address.value());
  2701. }
// Far returns are not yet implemented.
void SoftCPU::RETF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::RETF_imm16(const X86::Instruction&) { TODO_INSN(); }
  2704. void SoftCPU::RET_imm16(const X86::Instruction& insn)
  2705. {
  2706. VERIFY(!insn.has_operand_size_override_prefix());
  2707. auto ret_address = pop32();
  2708. warn_if_uninitialized(ret_address, "ret imm16");
  2709. set_eip(ret_address.value());
  2710. set_esp({ esp().value() + insn.imm16(), esp().shadow() });
  2711. }
  2712. template<typename T>
  2713. ALWAYS_INLINE static T op_rol(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2714. {
  2715. if (steps.value() == 0)
  2716. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2717. u32 result = 0;
  2718. u32 new_flags = 0;
  2719. if constexpr (sizeof(typename T::ValueType) == 4) {
  2720. asm volatile("roll %%cl, %%eax\n"
  2721. : "=a"(result)
  2722. : "a"(data.value()), "c"(steps.value()));
  2723. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2724. asm volatile("rolw %%cl, %%ax\n"
  2725. : "=a"(result)
  2726. : "a"(data.value()), "c"(steps.value()));
  2727. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2728. asm volatile("rolb %%cl, %%al\n"
  2729. : "=a"(result)
  2730. : "a"(data.value()), "c"(steps.value()));
  2731. }
  2732. asm volatile(
  2733. "pushf\n"
  2734. "pop %%ebx"
  2735. : "=b"(new_flags));
  2736. cpu.set_flags_oc(new_flags);
  2737. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2738. }
  2739. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROL, op_rol)
  2740. template<typename T>
  2741. ALWAYS_INLINE static T op_ror(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2742. {
  2743. if (steps.value() == 0)
  2744. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2745. u32 result = 0;
  2746. u32 new_flags = 0;
  2747. if constexpr (sizeof(typename T::ValueType) == 4) {
  2748. asm volatile("rorl %%cl, %%eax\n"
  2749. : "=a"(result)
  2750. : "a"(data.value()), "c"(steps.value()));
  2751. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2752. asm volatile("rorw %%cl, %%ax\n"
  2753. : "=a"(result)
  2754. : "a"(data.value()), "c"(steps.value()));
  2755. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2756. asm volatile("rorb %%cl, %%al\n"
  2757. : "=a"(result)
  2758. : "a"(data.value()), "c"(steps.value()));
  2759. }
  2760. asm volatile(
  2761. "pushf\n"
  2762. "pop %%ebx"
  2763. : "=b"(new_flags));
  2764. cpu.set_flags_oc(new_flags);
  2765. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2766. }
  2767. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROR, op_ror)
  2768. void SoftCPU::SAHF(const X86::Instruction&)
  2769. {
  2770. // FIXME: Respect shadow flags once they exists!
  2771. set_al(shadow_wrap_as_initialized<u8>(eflags() & 0xff));
  2772. }
  2773. void SoftCPU::SALC(const X86::Instruction&)
  2774. {
  2775. // FIXME: Respect shadow flags once they exists!
  2776. set_al(shadow_wrap_as_initialized<u8>(cf() ? 0xff : 0x00));
  2777. }
  2778. template<typename T>
  2779. static T op_sar(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2780. {
  2781. if (steps.value() == 0)
  2782. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2783. u32 result = 0;
  2784. u32 new_flags = 0;
  2785. if constexpr (sizeof(typename T::ValueType) == 4) {
  2786. asm volatile("sarl %%cl, %%eax\n"
  2787. : "=a"(result)
  2788. : "a"(data.value()), "c"(steps.value()));
  2789. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2790. asm volatile("sarw %%cl, %%ax\n"
  2791. : "=a"(result)
  2792. : "a"(data.value()), "c"(steps.value()));
  2793. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2794. asm volatile("sarb %%cl, %%al\n"
  2795. : "=a"(result)
  2796. : "a"(data.value()), "c"(steps.value()));
  2797. }
  2798. asm volatile(
  2799. "pushf\n"
  2800. "pop %%ebx"
  2801. : "=b"(new_flags));
  2802. cpu.set_flags_oszapc(new_flags);
  2803. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2804. }
  2805. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SAR, op_sar)
  2806. template<typename T>
  2807. ALWAYS_INLINE static void do_scas(SoftCPU& cpu, const X86::Instruction& insn)
  2808. {
  2809. cpu.do_once_or_repeat<true>(insn, [&] {
  2810. auto src = cpu.const_gpr<T>(X86::RegisterAL);
  2811. auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
  2812. op_sub(cpu, dest, src);
  2813. cpu.step_destination_index(insn.a32(), sizeof(T));
  2814. });
  2815. }
// SCASB/SCASD/SCASW: width-specific wrappers around do_scas.
void SoftCPU::SCASB(const X86::Instruction& insn)
{
    do_scas<u8>(*this, insn);
}
void SoftCPU::SCASD(const X86::Instruction& insn)
{
    do_scas<u32>(*this, insn);
}
void SoftCPU::SCASW(const X86::Instruction& insn)
{
    do_scas<u16>(*this, insn);
}
// SETcc r/m8: write 1 to the byte operand if the condition holds, else 0.
// Evaluating the condition reads the emulated flags, so warn if tainted.
void SoftCPU::SETcc_RM8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("setcc");
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized<u8>(evaluate_condition(insn.cc())));
}
// SGDT is not yet implemented.
void SoftCPU::SGDT(const X86::Instruction&) { TODO_INSN(); }
// SHLD: double-precision shift left of the r/m operand, filling from the
// register operand; the count comes from CL or an imm8.
void SoftCPU::SHLD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), cl()));
}
void SoftCPU::SHLD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}
void SoftCPU::SHLD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), cl()));
}
void SoftCPU::SHLD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHL, op_shl)
// SHRD: double-precision shift right of the r/m operand, filling from the
// register operand; the count comes from CL or an imm8.
void SoftCPU::SHRD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), cl()));
}
void SoftCPU::SHRD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}
void SoftCPU::SHRD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), cl()));
}
void SoftCPU::SHRD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHR, op_shr)
// Descriptor-table / machine-status instructions: not yet implemented.
void SoftCPU::SIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SMSW_RM16(const X86::Instruction&) { TODO_INSN(); }
// STC: set the carry flag.
void SoftCPU::STC(const X86::Instruction&)
{
    set_cf(true);
}
// STD: set the direction flag (string instructions walk downward).
void SoftCPU::STD(const X86::Instruction&)
{
    set_df(true);
}
// STI is not yet implemented.
void SoftCPU::STI(const X86::Instruction&) { TODO_INSN(); }
  2880. void SoftCPU::STOSB(const X86::Instruction& insn)
  2881. {
  2882. if (insn.has_rep_prefix() && !df()) {
  2883. // Fast path for 8-bit forward memory fill.
  2884. if (m_emulator.mmu().fast_fill_memory8({ es(), destination_index(insn.a32()).value() }, ecx().value(), al())) {
  2885. if (insn.a32()) {
  2886. // FIXME: Should an uninitialized ECX taint EDI here?
  2887. set_edi({ (u32)(edi().value() + ecx().value()), edi().shadow() });
  2888. set_ecx(shadow_wrap_as_initialized<u32>(0));
  2889. } else {
  2890. // FIXME: Should an uninitialized CX taint DI here?
  2891. set_di({ (u16)(di().value() + cx().value()), di().shadow() });
  2892. set_cx(shadow_wrap_as_initialized<u16>(0));
  2893. }
  2894. return;
  2895. }
  2896. }
  2897. do_once_or_repeat<false>(insn, [&] {
  2898. write_memory8({ es(), destination_index(insn.a32()).value() }, al());
  2899. step_destination_index(insn.a32(), 1);
  2900. });
  2901. }
  2902. void SoftCPU::STOSD(const X86::Instruction& insn)
  2903. {
  2904. if (insn.has_rep_prefix() && !df()) {
  2905. // Fast path for 32-bit forward memory fill.
  2906. if (m_emulator.mmu().fast_fill_memory32({ es(), destination_index(insn.a32()).value() }, ecx().value(), eax())) {
  2907. if (insn.a32()) {
  2908. // FIXME: Should an uninitialized ECX taint EDI here?
  2909. set_edi({ (u32)(edi().value() + (ecx().value() * sizeof(u32))), edi().shadow() });
  2910. set_ecx(shadow_wrap_as_initialized<u32>(0));
  2911. } else {
  2912. // FIXME: Should an uninitialized CX taint DI here?
  2913. set_di({ (u16)(di().value() + (cx().value() * sizeof(u32))), di().shadow() });
  2914. set_cx(shadow_wrap_as_initialized<u16>(0));
  2915. }
  2916. return;
  2917. }
  2918. }
  2919. do_once_or_repeat<false>(insn, [&] {
  2920. write_memory32({ es(), destination_index(insn.a32()).value() }, eax());
  2921. step_destination_index(insn.a32(), 4);
  2922. });
  2923. }
// STOSW: store AX at ES:(E)DI, honoring REP and the direction flag.
// NOTE(review): unlike STOSB/STOSD there is no fast_fill fast path here —
// presumably no 16-bit bulk-fill helper exists; confirm against SoftMMU.
void SoftCPU::STOSW(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory16({ es(), destination_index(insn.a32()).value() }, ax());
        step_destination_index(insn.a32(), 2);
    });
}
// System / invalid-opcode / cache-control instructions: not yet implemented.
void SoftCPU::STR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD1(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERW_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WAIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WBINVD(const X86::Instruction&) { TODO_INSN(); }
  2939. void SoftCPU::XADD_RM16_reg16(const X86::Instruction& insn)
  2940. {
  2941. auto dest = insn.modrm().read16(*this, insn);
  2942. auto src = const_gpr16(insn.reg16());
  2943. auto result = op_add(*this, dest, src);
  2944. gpr16(insn.reg16()) = dest;
  2945. insn.modrm().write16(*this, insn, result);
  2946. }
  2947. void SoftCPU::XADD_RM32_reg32(const X86::Instruction& insn)
  2948. {
  2949. auto dest = insn.modrm().read32(*this, insn);
  2950. auto src = const_gpr32(insn.reg32());
  2951. auto result = op_add(*this, dest, src);
  2952. gpr32(insn.reg32()) = dest;
  2953. insn.modrm().write32(*this, insn, result);
  2954. }
  2955. void SoftCPU::XADD_RM8_reg8(const X86::Instruction& insn)
  2956. {
  2957. auto dest = insn.modrm().read8(*this, insn);
  2958. auto src = const_gpr8(insn.reg8());
  2959. auto result = op_add(*this, dest, src);
  2960. gpr8(insn.reg8()) = dest;
  2961. insn.modrm().write8(*this, insn, result);
  2962. }
  2963. void SoftCPU::XCHG_AX_reg16(const X86::Instruction& insn)
  2964. {
  2965. auto temp = gpr16(insn.reg16());
  2966. gpr16(insn.reg16()) = ax();
  2967. set_ax(temp);
  2968. }
  2969. void SoftCPU::XCHG_EAX_reg32(const X86::Instruction& insn)
  2970. {
  2971. auto temp = gpr32(insn.reg32());
  2972. gpr32(insn.reg32()) = eax();
  2973. set_eax(temp);
  2974. }
  2975. void SoftCPU::XCHG_reg16_RM16(const X86::Instruction& insn)
  2976. {
  2977. auto temp = insn.modrm().read16(*this, insn);
  2978. insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
  2979. gpr16(insn.reg16()) = temp;
  2980. }
  2981. void SoftCPU::XCHG_reg32_RM32(const X86::Instruction& insn)
  2982. {
  2983. auto temp = insn.modrm().read32(*this, insn);
  2984. insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
  2985. gpr32(insn.reg32()) = temp;
  2986. }
  2987. void SoftCPU::XCHG_reg8_RM8(const X86::Instruction& insn)
  2988. {
  2989. auto temp = insn.modrm().read8(*this, insn);
  2990. insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
  2991. gpr8(insn.reg8()) = temp;
  2992. }
  2993. void SoftCPU::XLAT(const X86::Instruction& insn)
  2994. {
  2995. if (insn.a32())
  2996. warn_if_uninitialized(ebx(), "xlat ebx");
  2997. else
  2998. warn_if_uninitialized(bx(), "xlat bx");
  2999. warn_if_uninitialized(al(), "xlat al");
  3000. u32 offset = (insn.a32() ? ebx().value() : bx().value()) + al().value();
  3001. set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), offset }));
  3002. }
// Stamp out the per-encoding handlers for a generic two-operand ALU
// instruction. The "PARTIAL" macro covers the encodings shared by every such
// instruction; the full macro additionally defines the sign-extended imm8 and
// reg<-r/m forms. (No comments inside the macro bodies: a '//' before a
// line-continuation backslash would swallow the next line.)
#define DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    void SoftCPU::mnemonic##_AL_imm8(const X86::Instruction& insn) { generic_AL_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_AX_imm16(const X86::Instruction& insn) { generic_AX_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_EAX_imm32(const X86::Instruction& insn) { generic_EAX_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM16_imm16(const X86::Instruction& insn) { generic_RM16_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { generic_RM16_reg16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm32(const X86::Instruction& insn) { generic_RM32_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { generic_RM32_reg32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); } \
    void SoftCPU::mnemonic##_RM8_reg8(const X86::Instruction& insn) { generic_RM8_reg8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }
#define DEFINE_GENERIC_INSN_HANDLERS(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_imm8<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_imm8<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_reg16_RM16(const X86::Instruction& insn) { generic_reg16_RM16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
    void SoftCPU::mnemonic##_reg32_RM32(const X86::Instruction& insn) { generic_reg32_RM32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
    void SoftCPU::mnemonic##_reg8_RM8(const X86::Instruction& insn) { generic_reg8_RM8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }
// update_dest=false for CMP and TEST (they only set flags). XOR and SUB pass
// is_zero_idiom_if_both_operands_same=true (xor/sub of a register with itself
// yields a known zero); OR passes is_or=true — exact taint semantics live in
// the generic_* helpers (defined elsewhere in this file).
DEFINE_GENERIC_INSN_HANDLERS(XOR, op_xor, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(OR, op_or, true, false, true)
DEFINE_GENERIC_INSN_HANDLERS(ADD, op_add, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(ADC, op_adc, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(SUB, op_sub, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(SBB, op_sbb, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(AND, op_and, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(CMP, op_sub, false, false, false)
DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(TEST, op_and, false, false, false)
// MMX and legacy opcode-table wrappers: not yet implemented.
void SoftCPU::MOVQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::EMMS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOVQ_mm1_m64_mm2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_32(const X86::Instruction&) { TODO_INSN(); }
  3041. }