SoftCPU.cpp 112 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
7327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313
  1. /*
  2. * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright notice, this
  9. * list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright notice,
  12. * this list of conditions and the following disclaimer in the documentation
  13. * and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  16. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  21. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  22. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  23. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include "SoftCPU.h"
  27. #include "Emulator.h"
  28. #include <AK/Assertions.h>
  29. #include <AK/Debug.h>
  30. #include <math.h>
  31. #include <stdio.h>
  32. #include <string.h>
  33. #if defined(__GNUC__) && !defined(__clang__)
  34. # pragma GCC optimize("O3")
  35. #endif
// Handler body for instructions the emulator does not implement yet:
// reports the handler's name (and our PID), dumps a backtrace of the
// emulated program, then exits the process.
#define TODO_INSN()                                                                   \
    do {                                                                              \
        reportln("\n=={}== Unimplemented instruction: {}\n", getpid(), __FUNCTION__); \
        m_emulator.dump_backtrace();                                                  \
        _exit(0);                                                                     \
    } while (0)
// Stamps out the nine SoftCPU handler methods for one shift/rotate mnemonic:
// {RM8, RM16, RM32} x {shift-by-1, shift-by-CL, shift-by-imm8}. Each handler
// forwards to the matching generic_* helper with `op` instantiated at the
// operand width's ValueWithShadow type.
#define DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(mnemonic, op)                                                                             \
    void SoftCPU::mnemonic##_RM8_1(const X86::Instruction& insn) { generic_RM8_1(op<ValueWithShadow<u8>>, insn); }                          \
    void SoftCPU::mnemonic##_RM8_CL(const X86::Instruction& insn) { generic_RM8_CL(op<ValueWithShadow<u8>>, insn); }                        \
    void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<true, false>(op<ValueWithShadow<u8>>, insn); }       \
    void SoftCPU::mnemonic##_RM16_1(const X86::Instruction& insn) { generic_RM16_1(op<ValueWithShadow<u16>>, insn); }                       \
    void SoftCPU::mnemonic##_RM16_CL(const X86::Instruction& insn) { generic_RM16_CL(op<ValueWithShadow<u16>>, insn); }                     \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_unsigned_imm8<true>(op<ValueWithShadow<u16>>, insn); }  \
    void SoftCPU::mnemonic##_RM32_1(const X86::Instruction& insn) { generic_RM32_1(op<ValueWithShadow<u32>>, insn); }                       \
    void SoftCPU::mnemonic##_RM32_CL(const X86::Instruction& insn) { generic_RM32_CL(op<ValueWithShadow<u32>>, insn); }                     \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_unsigned_imm8<true>(op<ValueWithShadow<u32>>, insn); }
  52. namespace UserspaceEmulator {
  53. template<class Dest, class Source>
  54. static inline Dest bit_cast(Source source)
  55. {
  56. static_assert(sizeof(Dest) == sizeof(Source));
  57. Dest dest;
  58. memcpy(&dest, &source, sizeof(dest));
  59. return dest;
  60. }
// Emits a red warning (with a backtrace of the emulated program) if the
// value's shadow bits indicate it was never initialized. `message` names the
// operation being performed, for the diagnostic.
template<typename T>
ALWAYS_INLINE void warn_if_uninitialized(T value_with_shadow, const char* message)
{
    if (value_with_shadow.is_uninitialized()) [[unlikely]] {
        reportln("\033[31;1mWarning! Use of uninitialized value: {}\033[0m\n", message);
        Emulator::the().dump_backtrace();
    }
}
// Emits a diagnostic (with backtrace) if the arithmetic flags were last
// computed from uninitialized data, i.e. a conditional is about to depend on
// garbage. `message` names the instruction for the diagnostic.
ALWAYS_INLINE void SoftCPU::warn_if_flags_tainted(const char* message) const
{
    if (m_flags_tainted) [[unlikely]] {
        reportln("\n=={}== \033[31;1mConditional depends on uninitialized data\033[0m ({})\n", getpid(), message);
        Emulator::the().dump_backtrace();
    }
}
  76. template<typename T, typename U>
  77. constexpr T sign_extended_to(U value)
  78. {
  79. if (!(value & X86::TypeTrivia<U>::sign_bit))
  80. return value;
  81. return (X86::TypeTrivia<T>::mask & ~X86::TypeTrivia<U>::mask) | value;
  82. }
SoftCPU::SoftCPU(Emulator& emulator)
    : m_emulator(emulator)
{
    // GPRs start at zero, but their shadow bytes start non-zero ("uninitialized")
    // so that reads-before-writes can be flagged by the taint machinery.
    memset(m_gpr, 0, sizeof(m_gpr));
    memset(m_gpr_shadow, 1, sizeof(m_gpr_shadow));
    // Fixed segment selector values; read_memory*/write_memory* VERIFY against
    // exactly these constants.
    // NOTE(review): FS is never initialized here — presumably unused by the
    // emulated code; confirm m_segment is zero-initialized elsewhere.
    m_segment[(int)X86::SegmentRegister::CS] = 0x1b;
    m_segment[(int)X86::SegmentRegister::DS] = 0x23;
    m_segment[(int)X86::SegmentRegister::ES] = 0x23;
    m_segment[(int)X86::SegmentRegister::SS] = 0x23;
    m_segment[(int)X86::SegmentRegister::GS] = 0x2b;
}
// Dumps all GPRs and arithmetic flags, then a second line with the shadow
// (taint) state of each register and of the flags.
void SoftCPU::dump() const
{
    outln(" eax={:08x} ebx={:08x} ecx={:08x} edx={:08x} ebp={:08x} esp={:08x} esi={:08x} edi={:08x} o={:d} s={:d} z={:d} a={:d} p={:d} c={:d}",
        eax(), ebx(), ecx(), edx(), ebp(), esp(), esi(), edi(), of(), sf(), zf(), af(), pf(), cf());
    outln("#eax={:08x} #ebx={:08x} #ecx={:08x} #edx={:08x} #ebp={:08x} #esp={:08x} #esi={:08x} #edi={:08x} #f={}",
        eax().shadow(), ebx().shadow(), ecx().shadow(), edx().shadow(), ebp().shadow(), esp().shadow(), esi().shadow(), edi().shadow(), m_flags_tainted);
    // Flush so the dump is visible immediately (e.g. right before a crash).
    fflush(stdout);
}
// Re-resolves the MMU region containing CS:EIP and caches the region and its
// base pointer for fast instruction fetch. Aborts if EIP points at a
// non-executable region.
void SoftCPU::update_code_cache()
{
    auto* region = m_emulator.mmu().find_region({ cs(), eip() });
    VERIFY(region);
    if (!region->is_executable()) {
        reportln("SoftCPU::update_code_cache: Non-executable region @ {:p}", eip());
        Emulator::the().dump_backtrace();
        TODO();
    }
    // FIXME: This cache needs to be invalidated if the code region is ever unmapped.
    m_cached_code_region = region;
    m_cached_code_base_ptr = region->data();
}
// Reads one byte (plus its shadow bits) from emulated memory. Only the
// selectors installed by the constructor (0x1b, 0x23, 0x2b) are legal.
ValueWithShadow<u8> SoftCPU::read_memory8(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read8(address);
#if MEMORY_DEBUG
    outln("\033[36;1mread_memory8: @{:04x}:{:08x} -> {:02x} ({:02x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    return value;
}
// Reads a 16-bit value (plus shadow) from emulated memory; selector must be
// one of the constructor-installed values (0x1b, 0x23, 0x2b).
ValueWithShadow<u16> SoftCPU::read_memory16(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read16(address);
#if MEMORY_DEBUG
    outln("\033[36;1mread_memory16: @{:04x}:{:08x} -> {:04x} ({:04x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    return value;
}
// Reads a 32-bit value (plus shadow) from emulated memory; selector must be
// one of the constructor-installed values (0x1b, 0x23, 0x2b).
ValueWithShadow<u32> SoftCPU::read_memory32(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read32(address);
#if MEMORY_DEBUG
    outln("\033[36;1mread_memory32: @{:04x}:{:08x} -> {:08x} ({:08x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    return value;
}
// Reads a 64-bit value (plus shadow) from emulated memory; selector must be
// one of the constructor-installed values (0x1b, 0x23, 0x2b).
ValueWithShadow<u64> SoftCPU::read_memory64(X86::LogicalAddress address)
{
    VERIFY(address.selector() == 0x1b || address.selector() == 0x23 || address.selector() == 0x2b);
    auto value = m_emulator.mmu().read64(address);
#if MEMORY_DEBUG
    outln("\033[36;1mread_memory64: @{:04x}:{:08x} -> {:016x} ({:016x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    return value;
}
// Writes one byte (plus its shadow bits) to emulated memory. Writes are only
// permitted through the data selectors (0x23, 0x2b) — never through 0x1b.
void SoftCPU::write_memory8(X86::LogicalAddress address, ValueWithShadow<u8> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
#if MEMORY_DEBUG
    outln("\033[36;1mwrite_memory8: @{:04x}:{:08x} <- {:02x} ({:02x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    m_emulator.mmu().write8(address, value);
}
// Writes a 16-bit value (plus shadow) to emulated memory; data selectors
// (0x23, 0x2b) only.
void SoftCPU::write_memory16(X86::LogicalAddress address, ValueWithShadow<u16> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
#if MEMORY_DEBUG
    outln("\033[36;1mwrite_memory16: @{:04x}:{:08x} <- {:04x} ({:04x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    m_emulator.mmu().write16(address, value);
}
// Writes a 32-bit value (plus shadow) to emulated memory; data selectors
// (0x23, 0x2b) only.
void SoftCPU::write_memory32(X86::LogicalAddress address, ValueWithShadow<u32> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
#if MEMORY_DEBUG
    outln("\033[36;1mwrite_memory32: @{:04x}:{:08x} <- {:08x} ({:08x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    m_emulator.mmu().write32(address, value);
}
// Writes a 64-bit value (plus shadow) to emulated memory; data selectors
// (0x23, 0x2b) only.
void SoftCPU::write_memory64(X86::LogicalAddress address, ValueWithShadow<u64> value)
{
    VERIFY(address.selector() == 0x23 || address.selector() == 0x2b);
#if MEMORY_DEBUG
    outln("\033[36;1mwrite_memory64: @{:04x}:{:08x} <- {:016x} ({:016x})\033[0m", address.selector(), address.offset(), value, value.shadow());
#endif
    m_emulator.mmu().write64(address, value);
}
  183. void SoftCPU::push_string(const StringView& string)
  184. {
  185. size_t space_to_allocate = round_up_to_power_of_two(string.length() + 1, 16);
  186. set_esp({ esp().value() - space_to_allocate, esp().shadow() });
  187. m_emulator.mmu().copy_to_vm(esp().value(), string.characters_without_null_termination(), string.length());
  188. m_emulator.mmu().write8({ 0x23, esp().value() + string.length() }, shadow_wrap_as_initialized((u8)'\0'));
  189. }
// Moves ESP down by `size` and copies `data` onto the emulated stack.
// Warns if ESP itself is tainted (uninitialized).
void SoftCPU::push_buffer(const u8* data, size_t size)
{
    set_esp({ esp().value() - size, esp().shadow() });
    warn_if_uninitialized(esp(), "push_buffer");
    m_emulator.mmu().copy_to_vm(esp().value(), data, size);
}
  196. void SoftCPU::push32(ValueWithShadow<u32> value)
  197. {
  198. set_esp({ esp().value() - sizeof(u32), esp().shadow() });
  199. warn_if_uninitialized(esp(), "push32");
  200. write_memory32({ ss(), esp().value() }, value);
  201. }
// Pops a 32-bit value (with shadow) off the emulated stack and returns it.
ValueWithShadow<u32> SoftCPU::pop32()
{
    warn_if_uninitialized(esp(), "pop32");
    auto value = read_memory32({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u32), esp().shadow() });
    return value;
}
// Pushes a 16-bit value (with shadow) onto the emulated stack.
void SoftCPU::push16(ValueWithShadow<u16> value)
{
    warn_if_uninitialized(esp(), "push16");
    set_esp({ esp().value() - sizeof(u16), esp().shadow() });
    write_memory16({ ss(), esp().value() }, value);
}
// Pops a 16-bit value (with shadow) off the emulated stack and returns it.
ValueWithShadow<u16> SoftCPU::pop16()
{
    warn_if_uninitialized(esp(), "pop16");
    auto value = read_memory16({ ss(), esp().value() });
    set_esp({ esp().value() + sizeof(u16), esp().shadow() });
    return value;
}
// Runs `callback` once, or repeatedly when the instruction carries a
// REP/REPZ/REPNZ prefix: loops while the counter register (selected by
// insn.a32()) is non-zero, decrementing it each iteration.
// `check_zf` enables the ZF-based early exit used by string compare/scan ops.
template<bool check_zf, typename Callback>
void SoftCPU::do_once_or_repeat(const X86::Instruction& insn, Callback callback)
{
    if (!insn.has_rep_prefix())
        return callback();
    while (loop_index(insn.a32()).value()) {
        callback();
        decrement_loop_index(insn.a32());
        if constexpr (check_zf) {
            // The loop continuation is about to depend on ZF; flag tainted flags.
            warn_if_flags_tainted("repz/repnz");
            if (insn.rep_prefix() == X86::Prefix::REPZ && !zf())
                break;
            if (insn.rep_prefix() == X86::Prefix::REPNZ && zf())
                break;
        }
    }
}
  239. template<typename T>
  240. ALWAYS_INLINE static T op_inc(SoftCPU& cpu, T data)
  241. {
  242. typename T::ValueType result;
  243. u32 new_flags = 0;
  244. if constexpr (sizeof(typename T::ValueType) == 4) {
  245. asm volatile("incl %%eax\n"
  246. : "=a"(result)
  247. : "a"(data.value()));
  248. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  249. asm volatile("incw %%ax\n"
  250. : "=a"(result)
  251. : "a"(data.value()));
  252. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  253. asm volatile("incb %%al\n"
  254. : "=a"(result)
  255. : "a"(data.value()));
  256. }
  257. asm volatile(
  258. "pushf\n"
  259. "pop %%ebx"
  260. : "=b"(new_flags));
  261. cpu.set_flags_oszap(new_flags);
  262. cpu.taint_flags_from(data);
  263. return shadow_wrap_with_taint_from(result, data);
  264. }
  265. template<typename T>
  266. ALWAYS_INLINE static T op_dec(SoftCPU& cpu, T data)
  267. {
  268. typename T::ValueType result;
  269. u32 new_flags = 0;
  270. if constexpr (sizeof(typename T::ValueType) == 4) {
  271. asm volatile("decl %%eax\n"
  272. : "=a"(result)
  273. : "a"(data.value()));
  274. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  275. asm volatile("decw %%ax\n"
  276. : "=a"(result)
  277. : "a"(data.value()));
  278. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  279. asm volatile("decb %%al\n"
  280. : "=a"(result)
  281. : "a"(data.value()));
  282. }
  283. asm volatile(
  284. "pushf\n"
  285. "pop %%ebx"
  286. : "=b"(new_flags));
  287. cpu.set_flags_oszap(new_flags);
  288. cpu.taint_flags_from(data);
  289. return shadow_wrap_with_taint_from(result, data);
  290. }
// Emulates XOR: computes dest ^ src on the host so the hardware produces the
// flags, then captures them via pushf. Updates OSZPC and propagates taint
// from both operands into the result and the flags.
template<typename T>
ALWAYS_INLINE static T op_xor(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("xorl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("xor %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("xorb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // NOTE(review): flags are captured by a separate asm statement; this
    // relies on the compiler not clobbering them in between — confirm.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}
// Emulates OR: computes dest | src on the host and captures the hardware
// flags via pushf. Updates OSZPC and propagates taint from both operands.
template<typename T>
ALWAYS_INLINE static T op_or(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("orl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("or %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("orb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // NOTE(review): flags are captured by a separate asm statement; this
    // relies on the compiler not clobbering them in between — confirm.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}
// Emulates SUB: computes dest - src on the host and captures the hardware
// flags via pushf. Updates OSZAPC and propagates taint from both operands.
template<typename T>
ALWAYS_INLINE static T op_sub(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("subl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("subw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("subb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // NOTE(review): flags are captured by a separate asm statement; this
    // relies on the compiler not clobbering them in between — confirm.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from(result, dest, src);
}
// Emulates SBB with a statically-known carry-in: `cf` is baked in as a
// template parameter so the host carry flag can be set with stc/clc right
// before the sbb. Updates OSZAPC and propagates taint from both operands.
template<typename T, bool cf>
ALWAYS_INLINE static T op_sbb_impl(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    // NOTE(review): stc/clc, the sbb, and the pushf live in three separate asm
    // statements; correctness relies on the compiler not emitting
    // flag-clobbering code between them — confirm.
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("sbbl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("sbbw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("sbbb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
  407. template<typename T>
  408. ALWAYS_INLINE static T op_sbb(SoftCPU& cpu, T& dest, const T& src)
  409. {
  410. cpu.warn_if_flags_tainted("sbb");
  411. if (cpu.cf())
  412. return op_sbb_impl<T, true>(cpu, dest, src);
  413. return op_sbb_impl<T, false>(cpu, dest, src);
  414. }
// Emulates ADD: computes dest + src on the host and captures the hardware
// flags via pushf. Updates OSZAPC and propagates taint from both operands.
template<typename T>
ALWAYS_INLINE static T op_add(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("addl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("addw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("addb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    // NOTE(review): flags are captured by a separate asm statement; this
    // relies on the compiler not clobbering them in between — confirm.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
// Emulates ADC with a statically-known carry-in: `cf` is baked in as a
// template parameter so the host carry flag can be set with stc/clc right
// before the adc. Updates OSZAPC and propagates taint from both operands.
template<typename T, bool cf>
ALWAYS_INLINE static T op_adc_impl(SoftCPU& cpu, T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    // NOTE(review): stc/clc, the adc, and the pushf live in three separate asm
    // statements; correctness relies on the compiler not emitting
    // flag-clobbering code between them — confirm.
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("adcl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("adcw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("adcb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
  475. template<typename T>
  476. ALWAYS_INLINE static T op_adc(SoftCPU& cpu, T& dest, const T& src)
  477. {
  478. cpu.warn_if_flags_tainted("adc");
  479. if (cpu.cf())
  480. return op_adc_impl<T, true>(cpu, dest, src);
  481. return op_adc_impl<T, false>(cpu, dest, src);
  482. }
// AND: bitwise and, performed on the host CPU at the guest operand width.
template<typename T>
ALWAYS_INLINE static T op_and(SoftCPU& cpu, const T& dest, const T& src)
{
    typename T::ValueType result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("andl %%ecx, %%eax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("andw %%cx, %%ax\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("andb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(dest.value()), "c"(src.value()));
    } else {
        VERIFY_NOT_REACHED();
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    // Note: oszpc (not oszapc) — AF is deliberately not copied here.
    cpu.set_flags_oszpc(new_flags);
    cpu.taint_flags_from(dest, src);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, dest, src);
}
  511. template<typename T>
  512. ALWAYS_INLINE static void op_imul(SoftCPU& cpu, const T& dest, const T& src, T& result_high, T& result_low)
  513. {
  514. bool did_overflow = false;
  515. if constexpr (sizeof(T) == 4) {
  516. i64 result = (i64)src * (i64)dest;
  517. result_low = result & 0xffffffff;
  518. result_high = result >> 32;
  519. did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
  520. } else if constexpr (sizeof(T) == 2) {
  521. i32 result = (i32)src * (i32)dest;
  522. result_low = result & 0xffff;
  523. result_high = result >> 16;
  524. did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
  525. } else if constexpr (sizeof(T) == 1) {
  526. i16 result = (i16)src * (i16)dest;
  527. result_low = result & 0xff;
  528. result_high = result >> 8;
  529. did_overflow = (result > NumericLimits<T>::max() || result < NumericLimits<T>::min());
  530. }
  531. if (did_overflow) {
  532. cpu.set_cf(true);
  533. cpu.set_of(true);
  534. } else {
  535. cpu.set_cf(false);
  536. cpu.set_of(false);
  537. }
  538. }
// SHR: logical right shift. A zero count is special-cased so neither the value
// nor the flags change (the pushf below would otherwise capture stale flags).
template<typename T>
ALWAYS_INLINE static T op_shr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    // Shift on the host CPU at the guest operand width.
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shrl %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shrw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("shrb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
// SHL: logical left shift; zero counts leave value and flags untouched,
// mirroring op_shr above.
template<typename T>
ALWAYS_INLINE static T op_shl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shlw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("shlb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
// SHRD: double-precision shift right — bits shifted out of 'data' are filled
// from 'extra_bits'. Only 32- and 16-bit forms exist (x86 has no 8-bit SHRD).
template<typename T>
ALWAYS_INLINE static T op_shrd(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shrd %%cl, %%edx, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shrd %%cl, %%dx, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
// SHLD: double-precision shift left — bits shifted out of 'data' are filled
// from 'extra_bits'. Only 32- and 16-bit forms exist, matching op_shrd.
template<typename T>
ALWAYS_INLINE static T op_shld(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow<u8> steps)
{
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("shld %%cl, %%edx, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("shld %%cl, %%dx, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value()));
    }
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oszapc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
  643. template<bool update_dest, bool is_or, typename Op>
  644. ALWAYS_INLINE void SoftCPU::generic_AL_imm8(Op op, const X86::Instruction& insn)
  645. {
  646. auto dest = al();
  647. auto src = shadow_wrap_as_initialized(insn.imm8());
  648. auto result = op(*this, dest, src);
  649. if (is_or && insn.imm8() == 0xff)
  650. result.set_initialized();
  651. if (update_dest)
  652. set_al(result);
  653. }
  654. template<bool update_dest, bool is_or, typename Op>
  655. ALWAYS_INLINE void SoftCPU::generic_AX_imm16(Op op, const X86::Instruction& insn)
  656. {
  657. auto dest = ax();
  658. auto src = shadow_wrap_as_initialized(insn.imm16());
  659. auto result = op(*this, dest, src);
  660. if (is_or && insn.imm16() == 0xffff)
  661. result.set_initialized();
  662. if (update_dest)
  663. set_ax(result);
  664. }
  665. template<bool update_dest, bool is_or, typename Op>
  666. ALWAYS_INLINE void SoftCPU::generic_EAX_imm32(Op op, const X86::Instruction& insn)
  667. {
  668. auto dest = eax();
  669. auto src = shadow_wrap_as_initialized(insn.imm32());
  670. auto result = op(*this, dest, src);
  671. if (is_or && insn.imm32() == 0xffffffff)
  672. result.set_initialized();
  673. if (update_dest)
  674. set_eax(result);
  675. }
  676. template<bool update_dest, bool is_or, typename Op>
  677. ALWAYS_INLINE void SoftCPU::generic_RM16_imm16(Op op, const X86::Instruction& insn)
  678. {
  679. auto dest = insn.modrm().read16(*this, insn);
  680. auto src = shadow_wrap_as_initialized(insn.imm16());
  681. auto result = op(*this, dest, src);
  682. if (is_or && insn.imm16() == 0xffff)
  683. result.set_initialized();
  684. if (update_dest)
  685. insn.modrm().write16(*this, insn, result);
  686. }
  687. template<bool update_dest, bool is_or, typename Op>
  688. ALWAYS_INLINE void SoftCPU::generic_RM16_imm8(Op op, const X86::Instruction& insn)
  689. {
  690. auto dest = insn.modrm().read16(*this, insn);
  691. auto src = shadow_wrap_as_initialized<u16>(sign_extended_to<u16>(insn.imm8()));
  692. auto result = op(*this, dest, src);
  693. if (is_or && src.value() == 0xffff)
  694. result.set_initialized();
  695. if (update_dest)
  696. insn.modrm().write16(*this, insn, result);
  697. }
  698. template<bool update_dest, typename Op>
  699. ALWAYS_INLINE void SoftCPU::generic_RM16_unsigned_imm8(Op op, const X86::Instruction& insn)
  700. {
  701. auto dest = insn.modrm().read16(*this, insn);
  702. auto src = shadow_wrap_as_initialized(insn.imm8());
  703. auto result = op(*this, dest, src);
  704. if (update_dest)
  705. insn.modrm().write16(*this, insn, result);
  706. }
  707. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  708. ALWAYS_INLINE void SoftCPU::generic_RM16_reg16(Op op, const X86::Instruction& insn)
  709. {
  710. auto dest = insn.modrm().read16(*this, insn);
  711. auto src = const_gpr16(insn.reg16());
  712. auto result = op(*this, dest, src);
  713. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  714. result.set_initialized();
  715. m_flags_tainted = false;
  716. }
  717. if (update_dest)
  718. insn.modrm().write16(*this, insn, result);
  719. }
  720. template<bool update_dest, bool is_or, typename Op>
  721. ALWAYS_INLINE void SoftCPU::generic_RM32_imm32(Op op, const X86::Instruction& insn)
  722. {
  723. auto dest = insn.modrm().read32(*this, insn);
  724. auto src = insn.imm32();
  725. auto result = op(*this, dest, shadow_wrap_as_initialized(src));
  726. if (is_or && src == 0xffffffff)
  727. result.set_initialized();
  728. if (update_dest)
  729. insn.modrm().write32(*this, insn, result);
  730. }
  731. template<bool update_dest, bool is_or, typename Op>
  732. ALWAYS_INLINE void SoftCPU::generic_RM32_imm8(Op op, const X86::Instruction& insn)
  733. {
  734. auto dest = insn.modrm().read32(*this, insn);
  735. auto src = sign_extended_to<u32>(insn.imm8());
  736. auto result = op(*this, dest, shadow_wrap_as_initialized(src));
  737. if (is_or && src == 0xffffffff)
  738. result.set_initialized();
  739. if (update_dest)
  740. insn.modrm().write32(*this, insn, result);
  741. }
  742. template<bool update_dest, typename Op>
  743. ALWAYS_INLINE void SoftCPU::generic_RM32_unsigned_imm8(Op op, const X86::Instruction& insn)
  744. {
  745. auto dest = insn.modrm().read32(*this, insn);
  746. auto src = shadow_wrap_as_initialized(insn.imm8());
  747. auto result = op(*this, dest, src);
  748. if (update_dest)
  749. insn.modrm().write32(*this, insn, result);
  750. }
  751. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  752. ALWAYS_INLINE void SoftCPU::generic_RM32_reg32(Op op, const X86::Instruction& insn)
  753. {
  754. auto dest = insn.modrm().read32(*this, insn);
  755. auto src = const_gpr32(insn.reg32());
  756. auto result = op(*this, dest, src);
  757. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  758. result.set_initialized();
  759. m_flags_tainted = false;
  760. }
  761. if (update_dest)
  762. insn.modrm().write32(*this, insn, result);
  763. }
  764. template<bool update_dest, bool is_or, typename Op>
  765. ALWAYS_INLINE void SoftCPU::generic_RM8_imm8(Op op, const X86::Instruction& insn)
  766. {
  767. auto dest = insn.modrm().read8(*this, insn);
  768. auto src = insn.imm8();
  769. auto result = op(*this, dest, shadow_wrap_as_initialized(src));
  770. if (is_or && src == 0xff)
  771. result.set_initialized();
  772. if (update_dest)
  773. insn.modrm().write8(*this, insn, result);
  774. }
  775. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  776. ALWAYS_INLINE void SoftCPU::generic_RM8_reg8(Op op, const X86::Instruction& insn)
  777. {
  778. auto dest = insn.modrm().read8(*this, insn);
  779. auto src = const_gpr8(insn.reg8());
  780. auto result = op(*this, dest, src);
  781. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  782. result.set_initialized();
  783. m_flags_tainted = false;
  784. }
  785. if (update_dest)
  786. insn.modrm().write8(*this, insn, result);
  787. }
  788. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  789. ALWAYS_INLINE void SoftCPU::generic_reg16_RM16(Op op, const X86::Instruction& insn)
  790. {
  791. auto dest = const_gpr16(insn.reg16());
  792. auto src = insn.modrm().read16(*this, insn);
  793. auto result = op(*this, dest, src);
  794. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  795. result.set_initialized();
  796. m_flags_tainted = false;
  797. }
  798. if (update_dest)
  799. gpr16(insn.reg16()) = result;
  800. }
  801. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  802. ALWAYS_INLINE void SoftCPU::generic_reg32_RM32(Op op, const X86::Instruction& insn)
  803. {
  804. auto dest = const_gpr32(insn.reg32());
  805. auto src = insn.modrm().read32(*this, insn);
  806. auto result = op(*this, dest, src);
  807. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  808. result.set_initialized();
  809. m_flags_tainted = false;
  810. }
  811. if (update_dest)
  812. gpr32(insn.reg32()) = result;
  813. }
  814. template<bool update_dest, bool dont_taint_for_same_operand, typename Op>
  815. ALWAYS_INLINE void SoftCPU::generic_reg8_RM8(Op op, const X86::Instruction& insn)
  816. {
  817. auto dest = const_gpr8(insn.reg8());
  818. auto src = insn.modrm().read8(*this, insn);
  819. auto result = op(*this, dest, src);
  820. if (dont_taint_for_same_operand && insn.modrm().is_register() && insn.modrm().register_index() == insn.register_index()) {
  821. result.set_initialized();
  822. m_flags_tainted = false;
  823. }
  824. if (update_dest)
  825. gpr8(insn.reg8()) = result;
  826. }
  827. template<typename Op>
  828. ALWAYS_INLINE void SoftCPU::generic_RM8_1(Op op, const X86::Instruction& insn)
  829. {
  830. auto data = insn.modrm().read8(*this, insn);
  831. insn.modrm().write8(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
  832. }
  833. template<typename Op>
  834. ALWAYS_INLINE void SoftCPU::generic_RM8_CL(Op op, const X86::Instruction& insn)
  835. {
  836. auto data = insn.modrm().read8(*this, insn);
  837. insn.modrm().write8(*this, insn, op(*this, data, cl()));
  838. }
  839. template<typename Op>
  840. ALWAYS_INLINE void SoftCPU::generic_RM16_1(Op op, const X86::Instruction& insn)
  841. {
  842. auto data = insn.modrm().read16(*this, insn);
  843. insn.modrm().write16(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
  844. }
  845. template<typename Op>
  846. ALWAYS_INLINE void SoftCPU::generic_RM16_CL(Op op, const X86::Instruction& insn)
  847. {
  848. auto data = insn.modrm().read16(*this, insn);
  849. insn.modrm().write16(*this, insn, op(*this, data, cl()));
  850. }
  851. template<typename Op>
  852. ALWAYS_INLINE void SoftCPU::generic_RM32_1(Op op, const X86::Instruction& insn)
  853. {
  854. auto data = insn.modrm().read32(*this, insn);
  855. insn.modrm().write32(*this, insn, op(*this, data, shadow_wrap_as_initialized<u8>(1)));
  856. }
  857. template<typename Op>
  858. ALWAYS_INLINE void SoftCPU::generic_RM32_CL(Op op, const X86::Instruction& insn)
  859. {
  860. auto data = insn.modrm().read32(*this, insn);
  861. insn.modrm().write32(*this, insn, op(*this, data, cl()));
  862. }
// Unimplemented BCD-adjust and protected-mode instructions.
void SoftCPU::AAA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::AAS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ARPL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::BOUND(const X86::Instruction&) { TODO_INSN(); }
  869. template<typename T>
  870. ALWAYS_INLINE static T op_bsf(SoftCPU&, T value)
  871. {
  872. return { (typename T::ValueType)__builtin_ctz(value.value()), value.shadow() };
  873. }
// BSR core: index of the highest set bit, computed with the host "bsr"
// instruction. Callers guard against a zero input (bsr leaves the destination
// undefined for zero).
template<typename T>
ALWAYS_INLINE static T op_bsr(SoftCPU&, T value)
{
    typename T::ValueType bit_index = 0;
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("bsrl %%eax, %%edx"
                     : "=d"(bit_index)
                     : "a"(value.value()));
    }
    if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("bsrw %%ax, %%dx"
                     : "=d"(bit_index)
                     : "a"(value.value()));
    }
    return shadow_wrap_with_taint_from(bit_index, value);
}
// BSF r16, r/m16: ZF is set from the source; the destination is only written
// for a non-zero source.
void SoftCPU::BSF_reg16_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    set_zf(!src.value());
    if (src.value())
        gpr16(insn.reg16()) = op_bsf(*this, src);
    // NOTE(review): flags are tainted unconditionally here, while the sibling
    // handlers (BSF_reg32_RM32, BSR_*) only taint when src is non-zero. Since
    // ZF above depends on src in both cases, the unconditional form looks more
    // correct — but the four handlers disagree; confirm and unify.
    taint_flags_from(src);
}
  898. void SoftCPU::BSF_reg32_RM32(const X86::Instruction& insn)
  899. {
  900. auto src = insn.modrm().read32(*this, insn);
  901. set_zf(!src.value());
  902. if (src.value()) {
  903. gpr32(insn.reg32()) = op_bsf(*this, src);
  904. taint_flags_from(src);
  905. }
  906. }
  907. void SoftCPU::BSR_reg16_RM16(const X86::Instruction& insn)
  908. {
  909. auto src = insn.modrm().read16(*this, insn);
  910. set_zf(!src.value());
  911. if (src.value()) {
  912. gpr16(insn.reg16()) = op_bsr(*this, src);
  913. taint_flags_from(src);
  914. }
  915. }
  916. void SoftCPU::BSR_reg32_RM32(const X86::Instruction& insn)
  917. {
  918. auto src = insn.modrm().read32(*this, insn);
  919. set_zf(!src.value());
  920. if (src.value()) {
  921. gpr32(insn.reg32()) = op_bsr(*this, src);
  922. taint_flags_from(src);
  923. }
  924. }
  925. void SoftCPU::BSWAP_reg32(const X86::Instruction& insn)
  926. {
  927. gpr32(insn.reg32()) = { __builtin_bswap32(gpr32(insn.reg32()).value()), __builtin_bswap32(gpr32(insn.reg32()).shadow()) };
  928. }
  929. template<typename T>
  930. ALWAYS_INLINE static T op_bt(T value, T)
  931. {
  932. return value;
  933. }
  934. template<typename T>
  935. ALWAYS_INLINE static T op_bts(T value, T bit_mask)
  936. {
  937. return value | bit_mask;
  938. }
  939. template<typename T>
  940. ALWAYS_INLINE static T op_btr(T value, T bit_mask)
  941. {
  942. return value & ~bit_mask;
  943. }
  944. template<typename T>
  945. ALWAYS_INLINE static T op_btc(T value, T bit_mask)
  946. {
  947. return value ^ bit_mask;
  948. }
// Shared BT/BTS/BTR/BTC driver, 16-bit form with a register bit index.
// 'op' maps (original, bit_mask) to the new value; should_update is false for
// plain BT, which only sets CF.
template<bool should_update, typename Op>
ALWAYS_INLINE void BTx_RM16_reg16(SoftCPU& cpu, const X86::Instruction& insn, Op op)
{
    if (insn.modrm().is_register()) {
        // Register operand: the bit index wraps at the operand width (mod 16).
        unsigned bit_index = cpu.const_gpr16(insn.reg16()).value() & (X86::TypeTrivia<u16>::bits - 1);
        auto original = insn.modrm().read16(cpu, insn);
        u16 bit_mask = 1 << bit_index;
        u16 result = op(original.value(), bit_mask);
        cpu.set_cf((original.value() & bit_mask) != 0);
        cpu.taint_flags_from(cpu.gpr16(insn.reg16()), original);
        if (should_update)
            insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), original));
        return;
    }
    // Memory operand: bit-string addressing — the index may reach past the
    // addressed word, so locate the byte containing the bit and operate on it.
    // FIXME: Is this supposed to perform a full 16-bit read/modify/write?
    unsigned bit_offset_in_array = cpu.const_gpr16(insn.reg16()).value() / 8;
    unsigned bit_offset_in_byte = cpu.const_gpr16(insn.reg16()).value() & 7;
    auto address = insn.modrm().resolve(cpu, insn);
    address.set_offset(address.offset() + bit_offset_in_array);
    auto dest = cpu.read_memory8(address);
    u8 bit_mask = 1 << bit_offset_in_byte;
    u8 result = op(dest.value(), bit_mask);
    cpu.set_cf((dest.value() & bit_mask) != 0);
    cpu.taint_flags_from(cpu.gpr16(insn.reg16()), dest);
    if (should_update)
        cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), dest));
}
  976. template<bool should_update, typename Op>
  977. ALWAYS_INLINE void BTx_RM32_reg32(SoftCPU& cpu, const X86::Instruction& insn, Op op)
  978. {
  979. if (insn.modrm().is_register()) {
  980. unsigned bit_index = cpu.const_gpr32(insn.reg32()).value() & (X86::TypeTrivia<u32>::bits - 1);
  981. auto original = insn.modrm().read32(cpu, insn);
  982. u32 bit_mask = 1 << bit_index;
  983. u32 result = op(original.value(), bit_mask);
  984. cpu.set_cf((original.value() & bit_mask) != 0);
  985. cpu.taint_flags_from(cpu.gpr32(insn.reg32()), original);
  986. if (should_update)
  987. insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), original));
  988. return;
  989. }
  990. // FIXME: Is this supposed to perform a full 32-bit read/modify/write?
  991. unsigned bit_offset_in_array = cpu.const_gpr32(insn.reg32()).value() / 8;
  992. unsigned bit_offset_in_byte = cpu.const_gpr32(insn.reg32()).value() & 7;
  993. auto address = insn.modrm().resolve(cpu, insn);
  994. address.set_offset(address.offset() + bit_offset_in_array);
  995. auto dest = cpu.read_memory8(address);
  996. u8 bit_mask = 1 << bit_offset_in_byte;
  997. u8 result = op(dest.value(), bit_mask);
  998. cpu.set_cf((dest.value() & bit_mask) != 0);
  999. cpu.taint_flags_from(cpu.gpr32(insn.reg32()), dest);
  1000. if (should_update)
  1001. cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), dest));
  1002. }
  1003. template<bool should_update, typename Op>
  1004. ALWAYS_INLINE void BTx_RM16_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
  1005. {
  1006. unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u16>::mask);
  1007. // FIXME: Support higher bit indices
  1008. VERIFY(bit_index < 16);
  1009. auto original = insn.modrm().read16(cpu, insn);
  1010. u16 bit_mask = 1 << bit_index;
  1011. auto result = op(original.value(), bit_mask);
  1012. cpu.set_cf((original.value() & bit_mask) != 0);
  1013. cpu.taint_flags_from(original);
  1014. if (should_update)
  1015. insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, original));
  1016. }
  1017. template<bool should_update, typename Op>
  1018. ALWAYS_INLINE void BTx_RM32_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op op)
  1019. {
  1020. unsigned bit_index = insn.imm8() & (X86::TypeTrivia<u32>::mask);
  1021. // FIXME: Support higher bit indices
  1022. VERIFY(bit_index < 32);
  1023. auto original = insn.modrm().read32(cpu, insn);
  1024. u32 bit_mask = 1 << bit_index;
  1025. auto result = op(original.value(), bit_mask);
  1026. cpu.set_cf((original.value() & bit_mask) != 0);
  1027. cpu.taint_flags_from(original);
  1028. if (should_update)
  1029. insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, original));
  1030. }
// Stamps out the four operand-form handlers (RM32/reg32, RM16/reg16,
// RM32/imm8, RM16/imm8) for one BT-family mnemonic. update_dest is false for
// plain BT, which only reads the bit.
#define DEFINE_GENERIC_BTx_INSN_HANDLERS(mnemonic, op, update_dest)                                                       \
    void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { BTx_RM32_reg32<update_dest>(*this, insn, op<u32>); } \
    void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { BTx_RM16_reg16<update_dest>(*this, insn, op<u16>); } \
    void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { BTx_RM32_imm8<update_dest>(*this, insn, op<u32>); }   \
    void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { BTx_RM16_imm8<update_dest>(*this, insn, op<u16>); }

DEFINE_GENERIC_BTx_INSN_HANDLERS(BTS, op_bts, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTR, op_btr, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BTC, op_btc, true);
DEFINE_GENERIC_BTx_INSN_HANDLERS(BT, op_bt, false);
  1040. void SoftCPU::CALL_FAR_mem16(const X86::Instruction&)
  1041. {
  1042. TODO();
  1043. }
// Unimplemented far/indirect 16-bit call forms.
void SoftCPU::CALL_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_RM16(const X86::Instruction&) { TODO_INSN(); }
  1046. void SoftCPU::CALL_RM32(const X86::Instruction& insn)
  1047. {
  1048. push32(shadow_wrap_as_initialized(eip()));
  1049. auto address = insn.modrm().read32(*this, insn);
  1050. warn_if_uninitialized(address, "call rm32");
  1051. set_eip(address.value());
  1052. }
// Unimplemented 16-bit and far-immediate call forms.
void SoftCPU::CALL_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CALL_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }
  1056. void SoftCPU::CALL_imm32(const X86::Instruction& insn)
  1057. {
  1058. push32(shadow_wrap_as_initialized(eip()));
  1059. set_eip(eip() + (i32)insn.imm32());
  1060. }
  1061. void SoftCPU::CBW(const X86::Instruction&)
  1062. {
  1063. set_ah(shadow_wrap_with_taint_from<u8>((al().value() & 0x80) ? 0xff : 0x00, al()));
  1064. }
  1065. void SoftCPU::CDQ(const X86::Instruction&)
  1066. {
  1067. if (eax().value() & 0x80000000)
  1068. set_edx(shadow_wrap_with_taint_from<u32>(0xffffffff, eax()));
  1069. else
  1070. set_edx(shadow_wrap_with_taint_from<u32>(0, eax()));
  1071. }
// CLC: clear the carry flag.
void SoftCPU::CLC(const X86::Instruction&)
{
    set_cf(false);
}
// CLD: clear the direction flag (string ops will auto-increment).
void SoftCPU::CLD(const X86::Instruction&)
{
    set_df(false);
}
// Unimplemented flag/system instructions.
void SoftCPU::CLI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CLTS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::CMC(const X86::Instruction&) { TODO_INSN(); }
  1083. void SoftCPU::CMOVcc_reg16_RM16(const X86::Instruction& insn)
  1084. {
  1085. warn_if_flags_tainted("cmovcc reg16, rm16");
  1086. if (evaluate_condition(insn.cc()))
  1087. gpr16(insn.reg16()) = insn.modrm().read16(*this, insn);
  1088. }
  1089. void SoftCPU::CMOVcc_reg32_RM32(const X86::Instruction& insn)
  1090. {
  1091. warn_if_flags_tainted("cmovcc reg32, rm32");
  1092. if (evaluate_condition(insn.cc()))
  1093. gpr32(insn.reg32()) = insn.modrm().read32(*this, insn);
  1094. }
// Shared CMPS implementation: compares the element at [seg:(E)SI] (segment
// overridable via prefix, defaulting to DS) against [ES:(E)DI] using op_sub
// for flag effects, then steps both indices. REP prefixes are handled by
// do_once_or_repeat.
template<typename T>
ALWAYS_INLINE static void do_cmps(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
        op_sub(cpu, dest, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}
// CMPSB/CMPSD/CMPSW: element-width front-ends for do_cmps above.
void SoftCPU::CMPSB(const X86::Instruction& insn)
{
    do_cmps<u8>(*this, insn);
}
void SoftCPU::CMPSD(const X86::Instruction& insn)
{
    do_cmps<u32>(*this, insn);
}
void SoftCPU::CMPSW(const X86::Instruction& insn)
{
    do_cmps<u16>(*this, insn);
}
  1119. void SoftCPU::CMPXCHG_RM16_reg16(const X86::Instruction& insn)
  1120. {
  1121. auto current = insn.modrm().read16(*this, insn);
  1122. taint_flags_from(current, ax());
  1123. if (current.value() == ax().value()) {
  1124. set_zf(true);
  1125. insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
  1126. } else {
  1127. set_zf(false);
  1128. set_ax(current);
  1129. }
  1130. }
  1131. void SoftCPU::CMPXCHG_RM32_reg32(const X86::Instruction& insn)
  1132. {
  1133. auto current = insn.modrm().read32(*this, insn);
  1134. taint_flags_from(current, eax());
  1135. if (current.value() == eax().value()) {
  1136. set_zf(true);
  1137. insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
  1138. } else {
  1139. set_zf(false);
  1140. set_eax(current);
  1141. }
  1142. }
  1143. void SoftCPU::CMPXCHG_RM8_reg8(const X86::Instruction& insn)
  1144. {
  1145. auto current = insn.modrm().read8(*this, insn);
  1146. taint_flags_from(current, al());
  1147. if (current.value() == al().value()) {
  1148. set_zf(true);
  1149. insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
  1150. } else {
  1151. set_zf(false);
  1152. set_al(current);
  1153. }
  1154. }
// CPUID: minimal emulation.
// Leaf 0: EAX = highest supported leaf (1); EBX/EDX/ECX spell the vendor
//         string "Hell" "oFri" "ends" ("HelloFriends") in little-endian bytes.
// Leaf 1: synthetic stepping/model/family/type in EAX, and a feature mask in
//         EDX advertising only CMOV (bit 15).
void SoftCPU::CPUID(const X86::Instruction&)
{
    if (eax().value() == 0) {
        set_eax(shadow_wrap_as_initialized<u32>(1));
        set_ebx(shadow_wrap_as_initialized<u32>(0x6c6c6548));
        set_edx(shadow_wrap_as_initialized<u32>(0x6972466f));
        set_ecx(shadow_wrap_as_initialized<u32>(0x73646e65));
        return;
    }
    if (eax().value() == 1) {
        u32 stepping = 0;
        u32 model = 1;
        u32 family = 3;
        u32 type = 0;
        set_eax(shadow_wrap_as_initialized<u32>(stepping | (model << 4) | (family << 8) | (type << 12)));
        set_ebx(shadow_wrap_as_initialized<u32>(0));
        set_edx(shadow_wrap_as_initialized<u32>((1 << 15))); // Features (CMOV)
        set_ecx(shadow_wrap_as_initialized<u32>(0));
        return;
    }
    dbgln("Unhandled CPUID with eax={:08x}", eax().value());
}
  1177. void SoftCPU::CWD(const X86::Instruction&)
  1178. {
  1179. set_dx(shadow_wrap_with_taint_from<u16>((ax().value() & 0x8000) ? 0xffff : 0x0000, ax()));
  1180. }
// CWDE: sign-extend AX into EAX.
void SoftCPU::CWDE(const X86::Instruction&)
{
    set_eax(shadow_wrap_with_taint_from(sign_extended_to<u32>(ax().value()), ax()));
}
// Unimplemented decimal-adjust instructions.
void SoftCPU::DAA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::DAS(const X86::Instruction&) { TODO_INSN(); }
// DEC handlers: decrement r/m or register operands via op_dec (defined
// earlier in this file), which also updates the arithmetic flags.
void SoftCPU::DEC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_dec(*this, insn.modrm().read16(*this, insn)));
}
void SoftCPU::DEC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_dec(*this, insn.modrm().read32(*this, insn)));
}
void SoftCPU::DEC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_dec(*this, insn.modrm().read8(*this, insn)));
}
void SoftCPU::DEC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_dec(*this, const_gpr16(insn.reg16()));
}
void SoftCPU::DEC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_dec(*this, const_gpr32(insn.reg32()));
}
  1207. void SoftCPU::DIV_RM16(const X86::Instruction& insn)
  1208. {
  1209. auto divisor = insn.modrm().read16(*this, insn);
  1210. if (divisor.value() == 0) {
  1211. reportln("Divide by zero");
  1212. TODO();
  1213. }
  1214. u32 dividend = ((u32)dx().value() << 16) | ax().value();
  1215. auto quotient = dividend / divisor.value();
  1216. if (quotient > NumericLimits<u16>::max()) {
  1217. reportln("Divide overflow");
  1218. TODO();
  1219. }
  1220. auto remainder = dividend % divisor.value();
  1221. auto original_ax = ax();
  1222. set_ax(shadow_wrap_with_taint_from<u16>(quotient, original_ax, dx()));
  1223. set_dx(shadow_wrap_with_taint_from<u16>(remainder, original_ax, dx()));
  1224. }
// DIV r/m32: unsigned divide EDX:EAX by the 32-bit operand.
// Quotient goes to EAX, remainder to EDX.
void SoftCPU::DIV_RM32(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read32(*this, insn);
    if (divisor.value() == 0) {
        reportln("Divide by zero");
        TODO();
    }
    u64 dividend = ((u64)edx().value() << 32) | eax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u32>::max()) {
        reportln("Divide overflow");
        TODO();
    }
    auto remainder = dividend % divisor.value();
    // Capture EAX before overwriting it so both results taint from the inputs.
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(quotient, original_eax, edx(), divisor));
    set_edx(shadow_wrap_with_taint_from<u32>(remainder, original_eax, edx(), divisor));
}
// DIV r/m8: unsigned divide AX by the 8-bit operand.
// Quotient goes to AL, remainder to AH.
void SoftCPU::DIV_RM8(const X86::Instruction& insn)
{
    auto divisor = insn.modrm().read8(*this, insn);
    if (divisor.value() == 0) {
        reportln("Divide by zero");
        TODO();
    }
    u16 dividend = ax().value();
    auto quotient = dividend / divisor.value();
    if (quotient > NumericLimits<u8>::max()) {
        reportln("Divide overflow");
        TODO();
    }
    auto remainder = dividend % divisor.value();
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(quotient, original_ax, divisor));
    set_ah(shadow_wrap_with_taint_from<u8>(remainder, original_ax, divisor));
}
// ENTER: stack-frame setup — not implemented yet.
void SoftCPU::ENTER16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::ENTER32(const X86::Instruction&) { TODO_INSN(); }
// Catch-all for x87 escape opcodes with no dedicated handler:
// report, dump a backtrace for diagnosis, and bail.
void SoftCPU::ESCAPE(const X86::Instruction&)
{
    reportln("FIXME: x87 floating-point support");
    m_emulator.dump_backtrace();
    TODO();
}
  1269. void SoftCPU::FADD_RM32(const X86::Instruction& insn)
  1270. {
  1271. // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem32 ops
  1272. if (insn.modrm().is_register()) {
  1273. fpu_set(0, fpu_get(insn.modrm().register_index()) + fpu_get(0));
  1274. } else {
  1275. auto new_f32 = insn.modrm().read32(*this, insn);
  1276. // FIXME: Respect shadow values
  1277. auto f32 = bit_cast<float>(new_f32.value());
  1278. fpu_set(0, fpu_get(0) + f32);
  1279. }
  1280. }
  1281. void SoftCPU::FMUL_RM32(const X86::Instruction& insn)
  1282. {
  1283. // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem32 ops
  1284. if (insn.modrm().is_register()) {
  1285. fpu_set(0, fpu_get(0) * fpu_get(insn.modrm().register_index()));
  1286. } else {
  1287. auto new_f32 = insn.modrm().read32(*this, insn);
  1288. // FIXME: Respect shadow values
  1289. auto f32 = bit_cast<float>(new_f32.value());
  1290. fpu_set(0, fpu_get(0) * f32);
  1291. }
  1292. }
// x87 compare with m32fp — not implemented yet.
void SoftCPU::FCOM_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM32(const X86::Instruction&) { TODO_INSN(); }
  1295. void SoftCPU::FSUB_RM32(const X86::Instruction& insn)
  1296. {
  1297. if (insn.modrm().is_register()) {
  1298. fpu_set(0, fpu_get(0) - fpu_get(insn.modrm().register_index()));
  1299. } else {
  1300. auto new_f32 = insn.modrm().read32(*this, insn);
  1301. // FIXME: Respect shadow values
  1302. auto f32 = bit_cast<float>(new_f32.value());
  1303. fpu_set(0, fpu_get(0) - f32);
  1304. }
  1305. }
// FSUBR (D8 form): reversed subtract — ST(0) = src - ST(0).
void SoftCPU::FSUBR_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        fpu_set(0, f32 - fpu_get(0));
    }
}
  1317. void SoftCPU::FDIV_RM32(const X86::Instruction& insn)
  1318. {
  1319. if (insn.modrm().is_register()) {
  1320. fpu_set(0, fpu_get(0) / fpu_get(insn.modrm().register_index()));
  1321. } else {
  1322. auto new_f32 = insn.modrm().read32(*this, insn);
  1323. // FIXME: Respect shadow values
  1324. auto f32 = bit_cast<float>(new_f32.value());
  1325. // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
  1326. fpu_set(0, fpu_get(0) / f32);
  1327. }
  1328. }
// FDIVR (D8 form): reversed divide — ST(0) = src / ST(0).
void SoftCPU::FDIVR_RM32(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(0, fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f32 = insn.modrm().read32(*this, insn);
        // FIXME: Respect shadow values
        auto f32 = bit_cast<float>(new_f32.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, f32 / fpu_get(0));
    }
}
  1341. void SoftCPU::FLD_RM32(const X86::Instruction& insn)
  1342. {
  1343. if (insn.modrm().is_register()) {
  1344. fpu_push(fpu_get(insn.modrm().register_index()));
  1345. } else {
  1346. auto new_f32 = insn.modrm().read32(*this, insn);
  1347. // FIXME: Respect shadow values
  1348. fpu_push(bit_cast<float>(new_f32.value()));
  1349. }
  1350. }
  1351. void SoftCPU::FXCH(const X86::Instruction& insn)
  1352. {
  1353. VERIFY(insn.modrm().is_register());
  1354. auto tmp = fpu_get(0);
  1355. fpu_set(0, fpu_get(insn.modrm().register_index()));
  1356. fpu_set(insn.modrm().register_index(), tmp);
  1357. }
// FST m32fp: store ST(0) to memory as a single-precision float.
void SoftCPU::FST_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    // Narrow the internal long double representation to 32-bit float.
    float f32 = (float)fpu_get(0);
    // FIXME: Respect shadow values
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(f32)));
}
  1365. void SoftCPU::FNOP(const X86::Instruction&) { TODO_INSN(); }
// FSTP m32fp: store ST(0) as float, then pop the FPU stack.
void SoftCPU::FSTP_RM32(const X86::Instruction& insn)
{
    FST_RM32(insn);
    fpu_pop();
}
void SoftCPU::FLDENV(const X86::Instruction&) { TODO_INSN(); }
// FCHS: negate ST(0).
void SoftCPU::FCHS(const X86::Instruction&)
{
    fpu_set(0, -fpu_get(0));
}
// FABS: ST(0) = |ST(0)|.
void SoftCPU::FABS(const X86::Instruction&)
{
    fpu_set(0, __builtin_fabs(fpu_get(0)));
}
void SoftCPU::FTST(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXAM(const X86::Instruction&) { TODO_INSN(); }
// FLDCW: load the FPU control word from memory.
void SoftCPU::FLDCW(const X86::Instruction& insn)
{
    m_fpu_cw = insn.modrm().read16(*this, insn);
}
// FLD1: push +1.0 onto the FPU stack.
void SoftCPU::FLD1(const X86::Instruction&)
{
    fpu_push(1.0);
}
  1390. void SoftCPU::FLDL2T(const X86::Instruction&)
  1391. {
  1392. fpu_push(log2f(10.0f));
  1393. }
  1394. void SoftCPU::FLDL2E(const X86::Instruction&)
  1395. {
  1396. fpu_push(log2f(M_E));
  1397. }
  1398. void SoftCPU::FLDPI(const X86::Instruction&)
  1399. {
  1400. fpu_push(M_PI);
  1401. }
  1402. void SoftCPU::FLDLG2(const X86::Instruction&)
  1403. {
  1404. fpu_push(log10f(2.0f));
  1405. }
  1406. void SoftCPU::FLDLN2(const X86::Instruction&)
  1407. {
  1408. fpu_push(M_LN2);
  1409. }
// FLDZ: push +0.0 onto the FPU stack.
void SoftCPU::FLDZ(const X86::Instruction&)
{
    fpu_push(0.0);
}
void SoftCPU::FNSTENV(const X86::Instruction&) { TODO_INSN(); }
  1415. void SoftCPU::F2XM1(const X86::Instruction&)
  1416. {
  1417. // FIXME: validate ST(0) is in range –1.0 to +1.0
  1418. auto f32 = fpu_get(0);
  1419. // FIXME: Set C0, C2, C3 in FPU status word.
  1420. fpu_set(0, powf(2, f32) - 1.0f);
  1421. }
  1422. void SoftCPU::FYL2X(const X86::Instruction&)
  1423. {
  1424. // FIXME: Raise IA on +-infinity, +-0, raise Z on +-0
  1425. auto f32 = fpu_get(0);
  1426. // FIXME: Set C0, C2, C3 in FPU status word.
  1427. fpu_set(1, fpu_get(1) * log2f(f32));
  1428. fpu_pop();
  1429. }
  1430. void SoftCPU::FYL2XP1(const X86::Instruction&)
  1431. {
  1432. // FIXME: validate ST(0) range
  1433. auto f32 = fpu_get(0);
  1434. // FIXME: Set C0, C2, C3 in FPU status word.
  1435. fpu_set(1, (fpu_get(1) * log2f(f32 + 1.0f)));
  1436. fpu_pop();
  1437. }
void SoftCPU::FPTAN(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FPATAN(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FXTRACT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FPREM1(const X86::Instruction&) { TODO_INSN(); }
// FDECSTP: rotate the FPU stack-top pointer down by one (wraps 0 -> 7).
void SoftCPU::FDECSTP(const X86::Instruction&)
{
    m_fpu_top = (m_fpu_top == 0) ? 7 : m_fpu_top - 1;
    // NOTE(review): the ISA clears C1 in the x87 status word here, not
    // EFLAGS.CF — confirm set_cf() is the intended stand-in.
    set_cf(0);
}
// FINCSTP: rotate the FPU stack-top pointer up by one (wraps 7 -> 0).
void SoftCPU::FINCSTP(const X86::Instruction&)
{
    m_fpu_top = (m_fpu_top == 7) ? 0 : m_fpu_top + 1;
    // NOTE(review): same C1-vs-CF question as FDECSTP above.
    set_cf(0);
}
// FNSTCW: store the FPU control word to memory.
void SoftCPU::FNSTCW(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, m_fpu_cw);
}
void SoftCPU::FPREM(const X86::Instruction&) { TODO_INSN(); }
  1457. void SoftCPU::FSQRT(const X86::Instruction&)
  1458. {
  1459. fpu_set(0, sqrt(fpu_get(0)));
  1460. }
// FSINCOS: not implemented yet.
void SoftCPU::FSINCOS(const X86::Instruction&) { TODO_INSN(); }
  1462. void SoftCPU::FRNDINT(const X86::Instruction&)
  1463. {
  1464. // FIXME: support rounding mode
  1465. fpu_set(0, round(fpu_get(0)));
  1466. }
  1467. void SoftCPU::FSCALE(const X86::Instruction&)
  1468. {
  1469. // FIXME: set C1 upon stack overflow or if result was rounded
  1470. fpu_set(0, fpu_get(0) * powf(2, floorf(fpu_get(1))));
  1471. }
  1472. void SoftCPU::FSIN(const X86::Instruction&)
  1473. {
  1474. fpu_set(0, sin(fpu_get(0)));
  1475. }
  1476. void SoftCPU::FCOS(const X86::Instruction&)
  1477. {
  1478. fpu_set(0, cos(fpu_get(0)));
  1479. }
// FIADD m32int: ST(0) += (signed 32-bit integer from memory).
void SoftCPU::FIADD_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) + (long double)m32int);
}
void SoftCPU::FCMOVB(const X86::Instruction&) { TODO_INSN(); }
// FIMUL m32int: ST(0) *= (signed 32-bit integer from memory).
void SoftCPU::FIMUL_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) * (long double)m32int);
}
void SoftCPU::FCMOVE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FICOM_RM32(const X86::Instruction&) { TODO_INSN(); }
// FCMOVBE: copy ST(i) to ST(0) if the BE condition (CF or ZF) holds.
void SoftCPU::FCMOVBE(const X86::Instruction& insn)
{
    // Condition code 6 is "below or equal".
    if (evaluate_condition(6))
        fpu_set(0, fpu_get(insn.rm() & 7));
}
void SoftCPU::FICOMP_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVU(const X86::Instruction&) { TODO_INSN(); }
// FISUB m32int: ST(0) -= (signed 32-bit integer from memory).
void SoftCPU::FISUB_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) - (long double)m32int);
}
// FISUBR m32int: reversed — ST(0) = m32int - ST(0).
void SoftCPU::FISUBR_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, (long double)m32int - fpu_get(0));
}
// FIDIV m32int: ST(0) /= (signed 32-bit integer from memory).
void SoftCPU::FIDIV_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
    fpu_set(0, fpu_get(0) / (long double)m32int);
}
// FIDIVR m32int: reversed — ST(0) = m32int / ST(0).
void SoftCPU::FIDIVR_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
    fpu_set(0, (long double)m32int / fpu_get(0));
}
// FILD m32int: push a signed 32-bit integer onto the FPU stack.
void SoftCPU::FILD_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m32int = (i32)insn.modrm().read32(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m32int);
}
void SoftCPU::FCMOVNB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTTP_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCMOVNE(const X86::Instruction&) { TODO_INSN(); }
// FIST m32int: store ST(0) to memory as a signed 32-bit integer.
void SoftCPU::FIST_RM32(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_get(0);
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto i32 = static_cast<int32_t>(f);
    // FIXME: Respect shadow values
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(bit_cast<u32>(i32)));
}
// FCMOVNBE: copy ST(i) to ST(0) if the NBE condition (above) holds.
void SoftCPU::FCMOVNBE(const X86::Instruction& insn)
{
    // Condition code 7 is "not below or equal".
    if (evaluate_condition(7))
        fpu_set(0, fpu_get(insn.rm() & 7));
}
// FISTP m32int: store ST(0) as i32, then pop the FPU stack.
void SoftCPU::FISTP_RM32(const X86::Instruction& insn)
{
    FIST_RM32(insn);
    fpu_pop();
}
void SoftCPU::FCMOVNU(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNENI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNDISI(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNCLEX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNINIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSETPM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FLD_RM80(const X86::Instruction&) { TODO_INSN(); }
  1570. void SoftCPU::FUCOMI(const X86::Instruction& insn)
  1571. {
  1572. auto i = insn.rm() & 7;
  1573. // FIXME: Unordered comparison checks.
  1574. // FIXME: QNaN / exception handling.
  1575. // FIXME: Set C0, C2, C3 in FPU status word.
  1576. if (__builtin_isnan(fpu_get(0)) || __builtin_isnan(fpu_get(i))) {
  1577. set_zf(true);
  1578. set_pf(true);
  1579. set_cf(true);
  1580. } else {
  1581. set_zf(fpu_get(0) == fpu_get(i));
  1582. set_pf(false);
  1583. set_cf(fpu_get(0) < fpu_get(i));
  1584. set_of(false);
  1585. }
  1586. // FIXME: Taint should be based on ST(0) and ST(i)
  1587. m_flags_tainted = false;
  1588. }
// FCOMI: ordered compare ST(0) with ST(i), setting EFLAGS directly.
void SoftCPU::FCOMI(const X86::Instruction& insn)
{
    auto i = insn.rm() & 7;
    // FIXME: QNaN / exception handling.
    // FIXME: Set C0, C2, C3 in FPU status word.
    set_zf(fpu_get(0) == fpu_get(i));
    set_pf(false);
    set_cf(fpu_get(0) < fpu_get(i));
    set_of(false);
    // FIXME: Taint should be based on ST(0) and ST(i)
    m_flags_tainted = false;
}
void SoftCPU::FSTP_RM80(const X86::Instruction&) { TODO_INSN(); }
  1602. void SoftCPU::FADD_RM64(const X86::Instruction& insn)
  1603. {
  1604. // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem64 ops
  1605. if (insn.modrm().is_register()) {
  1606. fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) + fpu_get(0));
  1607. } else {
  1608. auto new_f64 = insn.modrm().read64(*this, insn);
  1609. // FIXME: Respect shadow values
  1610. auto f64 = bit_cast<double>(new_f64.value());
  1611. fpu_set(0, fpu_get(0) + f64);
  1612. }
  1613. }
// FMUL (DC form): ST(i) *= ST(0), or ST(0) *= m64fp.
void SoftCPU::FMUL_RM64(const X86::Instruction& insn)
{
    // XXX look at ::INC_foo for how mem/reg stuff is handled, and use that here too to make sure this is only called for mem64 ops
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) * fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) * f64);
    }
}
void SoftCPU::FCOM_RM64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMP_RM64(const X86::Instruction&) { TODO_INSN(); }
// FSUB (DC form): ST(i) -= ST(0), or ST(0) -= m64fp.
void SoftCPU::FSUB_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, fpu_get(0) - f64);
    }
}
// FSUBR (DC form): reversed subtract from memory; for registers see note.
void SoftCPU::FSUBR_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        // NOTE(review): this computes ST(i) - ST(0), identical to
        // FSUB_RM64's register path; likely tied to the FSUB/FSUBR
        // operand-swap decoding quirk mentioned at FDIVR_RM64 — confirm
        // against the instruction decoder.
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        fpu_set(0, f64 - fpu_get(0));
    }
}
// FDIV (DC form): ST(i) /= ST(0), or ST(0) /= m64fp.
void SoftCPU::FDIV_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, fpu_get(0) / f64);
    }
}
// FDIVR (DC form): reversed divide from memory; register path follows
// the decoder's operand-swap quirk (see comment below).
void SoftCPU::FDIVR_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        // XXX this is FDIVR, Instruction decodes this weirdly
        //fpu_set(insn.modrm().register_index(), fpu_get(0) / fpu_get(insn.modrm().register_index()));
        fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    } else {
        auto new_f64 = insn.modrm().read64(*this, insn);
        // FIXME: Respect shadow values
        auto f64 = bit_cast<double>(new_f64.value());
        // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
        fpu_set(0, f64 / fpu_get(0));
    }
}
  1676. void SoftCPU::FLD_RM64(const X86::Instruction& insn)
  1677. {
  1678. VERIFY(!insn.modrm().is_register());
  1679. auto new_f64 = insn.modrm().read64(*this, insn);
  1680. // FIXME: Respect shadow values
  1681. fpu_push(bit_cast<double>(new_f64.value()));
  1682. }
void SoftCPU::FFREE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTTP_RM64(const X86::Instruction&) { TODO_INSN(); }
// FST (DD form): copy ST(0) into ST(i), or store it to memory as double.
void SoftCPU::FST_RM64(const X86::Instruction& insn)
{
    if (insn.modrm().is_register()) {
        fpu_set(insn.modrm().register_index(), fpu_get(0));
    } else {
        // FIXME: Respect shadow values
        // Narrow the internal long double representation to 64-bit double.
        double f64 = (double)fpu_get(0);
        insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(f64)));
    }
}
// FSTP: store like FST, then pop the FPU stack.
void SoftCPU::FSTP_RM64(const X86::Instruction& insn)
{
    FST_RM64(insn);
    fpu_pop();
}
void SoftCPU::FRSTOR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOM(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FUCOMPP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSAVE(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTSW(const X86::Instruction&) { TODO_INSN(); }
// FIADD m16int: ST(0) += (signed 16-bit integer from memory).
void SoftCPU::FIADD_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) + (long double)m16int);
}
// FADDP: ST(i) += ST(0), then pop.
void SoftCPU::FADDP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) + fpu_get(0));
    fpu_pop();
}
// FIMUL m16int: ST(0) *= (signed 16-bit integer from memory).
void SoftCPU::FIMUL_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) * (long double)m16int);
}
// FMULP: ST(i) *= ST(0), then pop.
void SoftCPU::FMULP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) * fpu_get(0));
    fpu_pop();
}
void SoftCPU::FICOM_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FICOMP_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FCOMPP(const X86::Instruction&) { TODO_INSN(); }
// FISUB m16int: ST(0) -= (signed 16-bit integer from memory).
void SoftCPU::FISUB_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, fpu_get(0) - (long double)m16int);
}
// FSUBRP: ST(i) = ST(0) - ST(i), then pop.
void SoftCPU::FSUBRP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(0) - fpu_get(insn.modrm().register_index()));
    fpu_pop();
}
// FISUBR m16int: reversed — ST(0) = m16int - ST(0).
void SoftCPU::FISUBR_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_set(0, (long double)m16int - fpu_get(0));
}
// FSUBP: ST(i) -= ST(0), then pop.
void SoftCPU::FSUBP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) - fpu_get(0));
    fpu_pop();
}
// FIDIV m16int: ST(0) /= (signed 16-bit integer from memory).
void SoftCPU::FIDIV_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
    fpu_set(0, fpu_get(0) / (long double)m16int);
}
// FDIVRP: ST(i) = ST(0) / ST(i), then pop.
void SoftCPU::FDIVRP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
    fpu_set(insn.modrm().register_index(), fpu_get(0) / fpu_get(insn.modrm().register_index()));
    fpu_pop();
}
// FIDIVR m16int: reversed — ST(0) = m16int / ST(0).
void SoftCPU::FIDIVR_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    // FIXME: Raise IA on 0 / _=0, raise Z on finite / +-0
    fpu_set(0, (long double)m16int / fpu_get(0));
}
// FDIVP: ST(i) /= ST(0), then pop.
void SoftCPU::FDIVP(const X86::Instruction& insn)
{
    VERIFY(insn.modrm().is_register());
    // FIXME: Raise IA on + infinity / +-infinity, +-0 / +-0, raise Z on finite / +-0
    fpu_set(insn.modrm().register_index(), fpu_get(insn.modrm().register_index()) / fpu_get(0));
    fpu_pop();
}
// FILD m16int: push a signed 16-bit integer onto the FPU stack.
void SoftCPU::FILD_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m16int = (i16)insn.modrm().read16(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m16int);
}
void SoftCPU::FFREEP(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FISTTP_RM16(const X86::Instruction&) { TODO_INSN(); }
// FIST m16int: store ST(0) to memory as a signed 16-bit integer.
void SoftCPU::FIST_RM16(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto f = fpu_get(0);
    // FIXME: Respect rounding mode in m_fpu_cw.
    auto i16 = static_cast<int16_t>(f);
    // FIXME: Respect shadow values
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(bit_cast<u16>(i16)));
}
// FISTP m16int: store ST(0) as i16, then pop the FPU stack.
void SoftCPU::FISTP_RM16(const X86::Instruction& insn)
{
    FIST_RM16(insn);
    fpu_pop();
}
void SoftCPU::FBLD_M80(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::FNSTSW_AX(const X86::Instruction&) { TODO_INSN(); }
// FILD m64int: push a signed 64-bit integer onto the FPU stack.
void SoftCPU::FILD_RM64(const X86::Instruction& insn)
{
    VERIFY(!insn.modrm().is_register());
    auto m64int = (i64)insn.modrm().read64(*this, insn).value();
    // FIXME: Respect shadow values
    fpu_push((long double)m64int);
}
// FUCOMIP: unordered compare + set EFLAGS, then pop.
void SoftCPU::FUCOMIP(const X86::Instruction& insn)
{
    FUCOMI(insn);
    fpu_pop();
}
void SoftCPU::FBSTP_M80(const X86::Instruction&) { TODO_INSN(); }
// FCOMIP: ordered compare + set EFLAGS, then pop.
void SoftCPU::FCOMIP(const X86::Instruction& insn)
{
    FCOMI(insn);
    fpu_pop();
}
  1834. void SoftCPU::FISTP_RM64(const X86::Instruction& insn)
  1835. {
  1836. VERIFY(!insn.modrm().is_register());
  1837. auto f = fpu_pop();
  1838. // FIXME: Respect rounding mode in m_fpu_cw.
  1839. auto i64 = static_cast<int64_t>(f);
  1840. // FIXME: Respect shadow values
  1841. insn.modrm().write64(*this, insn, shadow_wrap_as_initialized(bit_cast<u64>(i64)));
  1842. }
// HLT is privileged; userspace should never reach it — not implemented.
void SoftCPU::HLT(const X86::Instruction&) { TODO_INSN(); }
// IDIV r/m16: signed divide DX:AX; quotient -> AX, remainder -> DX.
void SoftCPU::IDIV_RM16(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read16(*this, insn);
    auto divisor = (i16)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i32 dividend = (i32)(((u32)dx().value() << 16) | (u32)ax().value());
    i32 result = dividend / divisor;
    if (result > NumericLimits<i16>::max() || result < NumericLimits<i16>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    // Capture AX before overwriting so the remainder taints from the inputs.
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result, original_ax, dx(), divisor_with_shadow));
    set_dx(shadow_wrap_with_taint_from<u16>(dividend % divisor, original_ax, dx(), divisor_with_shadow));
}
// IDIV r/m32: signed divide EDX:EAX; quotient -> EAX, remainder -> EDX.
void SoftCPU::IDIV_RM32(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read32(*this, insn);
    auto divisor = (i32)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i64 dividend = (i64)(((u64)edx().value() << 32) | (u64)eax().value());
    i64 result = dividend / divisor;
    if (result > NumericLimits<i32>::max() || result < NumericLimits<i32>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    auto original_eax = eax();
    set_eax(shadow_wrap_with_taint_from<u32>(result, original_eax, edx(), divisor_with_shadow));
    set_edx(shadow_wrap_with_taint_from<u32>(dividend % divisor, original_eax, edx(), divisor_with_shadow));
}
// IDIV r/m8: signed divide AX; quotient -> AL, remainder -> AH.
void SoftCPU::IDIV_RM8(const X86::Instruction& insn)
{
    auto divisor_with_shadow = insn.modrm().read8(*this, insn);
    auto divisor = (i8)divisor_with_shadow.value();
    if (divisor == 0) {
        reportln("Divide by zero");
        TODO();
    }
    i16 dividend = ax().value();
    i16 result = dividend / divisor;
    if (result > NumericLimits<i8>::max() || result < NumericLimits<i8>::min()) {
        reportln("Divide overflow");
        TODO();
    }
    auto original_ax = ax();
    set_al(shadow_wrap_with_taint_from<u8>(result, divisor_with_shadow, original_ax));
    set_ah(shadow_wrap_with_taint_from<u8>(dividend % divisor, divisor_with_shadow, original_ax));
}
// IMUL r/m16 (one-operand form): DX:AX = AX * r/m16.
void SoftCPU::IMUL_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), ax().value(), result_high, result_low);
    gpr16(X86::RegisterDX) = shadow_wrap_with_taint_from<u16>(result_high, src, ax());
    gpr16(X86::RegisterAX) = shadow_wrap_with_taint_from<u16>(result_low, src, ax());
}
// IMUL r/m32 (one-operand form): EDX:EAX = EAX * r/m32.
void SoftCPU::IMUL_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), eax().value(), result_high, result_low);
    gpr32(X86::RegisterEDX) = shadow_wrap_with_taint_from<u32>(result_high, src, eax());
    gpr32(X86::RegisterEAX) = shadow_wrap_with_taint_from<u32>(result_low, src, eax());
}
// IMUL r/m8 (one-operand form): AX (AH:AL) = AL * r/m8.
void SoftCPU::IMUL_RM8(const X86::Instruction& insn)
{
    i8 result_high;
    i8 result_low;
    auto src = insn.modrm().read8(*this, insn);
    op_imul<i8>(*this, src.value(), al().value(), result_high, result_low);
    gpr8(X86::RegisterAH) = shadow_wrap_with_taint_from<u8>(result_high, src, al());
    gpr8(X86::RegisterAL) = shadow_wrap_with_taint_from<u8>(result_low, src, al());
}
// IMUL r16, r/m16 (two-operand form): reg = reg * r/m16 (low half kept).
void SoftCPU::IMUL_reg16_RM16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, gpr16(insn.reg16()).value(), src.value(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src, gpr16(insn.reg16()));
}
// IMUL r16, r/m16, imm16 (three-operand form): reg = r/m16 * imm16.
void SoftCPU::IMUL_reg16_RM16_imm16(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), insn.imm16(), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}
// IMUL r16, r/m16, imm8: reg = r/m16 * sign-extended imm8.
void SoftCPU::IMUL_reg16_RM16_imm8(const X86::Instruction& insn)
{
    i16 result_high;
    i16 result_low;
    auto src = insn.modrm().read16(*this, insn);
    op_imul<i16>(*this, src.value(), sign_extended_to<i16>(insn.imm8()), result_high, result_low);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(result_low, src);
}
// IMUL r32, r/m32 (two-operand form): reg = reg * r/m32 (low half kept).
void SoftCPU::IMUL_reg32_RM32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, gpr32(insn.reg32()).value(), src.value(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src, gpr32(insn.reg32()));
}
// IMUL r32, r/m32, imm32 (three-operand form): reg = r/m32 * imm32.
void SoftCPU::IMUL_reg32_RM32_imm32(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), insn.imm32(), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}
// IMUL r32, r/m32, imm8: reg = r/m32 * sign-extended imm8.
void SoftCPU::IMUL_reg32_RM32_imm8(const X86::Instruction& insn)
{
    i32 result_high;
    i32 result_low;
    auto src = insn.modrm().read32(*this, insn);
    op_imul<i32>(*this, src.value(), sign_extended_to<i32>(insn.imm8()), result_high, result_low);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(result_low, src);
}
// INC r/m16: increment the 16-bit memory-or-register operand in place.
void SoftCPU::INC_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_inc(*this, insn.modrm().read16(*this, insn)));
}
// INC r/m32: increment the 32-bit memory-or-register operand in place.
void SoftCPU::INC_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_inc(*this, insn.modrm().read32(*this, insn)));
}
// INC r/m8: increment the 8-bit memory-or-register operand in place.
void SoftCPU::INC_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_inc(*this, insn.modrm().read8(*this, insn)));
}
// INC r16 (short-form opcode): increment the encoded 16-bit register.
void SoftCPU::INC_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = op_inc(*this, const_gpr16(insn.reg16()));
}
// INC r32 (short-form opcode): increment the encoded 32-bit register.
void SoftCPU::INC_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = op_inc(*this, const_gpr32(insn.reg32()));
}
void SoftCPU::INSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INT3(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::INTO(const X86::Instruction&) { TODO_INSN(); }
// Software interrupt: only vector 0x82 (the Serenity syscall gate) is
// supported. Arguments are passed in EAX/EDX/ECX/EBX and the result is
// returned in EAX, marked as initialized.
void SoftCPU::INT_imm8(const X86::Instruction& insn)
{
    VERIFY(insn.imm8() == 0x82);
    // FIXME: virt_syscall should take ValueWithShadow and whine about uninitialized arguments
    set_eax(shadow_wrap_as_initialized(m_emulator.virt_syscall(eax().value(), edx().value(), ecx().value(), ebx().value())));
}
// Privileged / port-I/O instructions: not implemented in userspace emulation.
void SoftCPU::INVLPG(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AL_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_AX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_DX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IN_EAX_imm8(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::IRET(const X86::Instruction&) { TODO_INSN(); }
  2012. void SoftCPU::JCXZ_imm8(const X86::Instruction& insn)
  2013. {
  2014. if (insn.a32()) {
  2015. warn_if_uninitialized(ecx(), "jecxz imm8");
  2016. if (ecx().value() == 0)
  2017. set_eip(eip() + (i8)insn.imm8());
  2018. } else {
  2019. warn_if_uninitialized(cx(), "jcxz imm8");
  2020. if (cx().value() == 0)
  2021. set_eip(eip() + (i8)insn.imm8());
  2022. }
  2023. }
// Far jumps are not supported; near jumps adjust EIP directly.
void SoftCPU::JMP_FAR_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_FAR_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_RM16(const X86::Instruction&) { TODO_INSN(); }
// Indirect near jump: target comes from a register or memory operand.
void SoftCPU::JMP_RM32(const X86::Instruction& insn)
{
    set_eip(insn.modrm().read32(*this, insn).value());
}
// Relative near jump with a sign-extended 16-bit displacement.
void SoftCPU::JMP_imm16(const X86::Instruction& insn)
{
    set_eip(eip() + (i16)insn.imm16());
}
void SoftCPU::JMP_imm16_imm16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::JMP_imm16_imm32(const X86::Instruction&) { TODO_INSN(); }
// Relative near jump with a 32-bit displacement.
void SoftCPU::JMP_imm32(const X86::Instruction& insn)
{
    set_eip(eip() + (i32)insn.imm32());
}
// Relative short jump with a sign-extended 8-bit displacement.
void SoftCPU::JMP_short_imm8(const X86::Instruction& insn)
{
    set_eip(eip() + (i8)insn.imm8());
}
  2045. void SoftCPU::Jcc_NEAR_imm(const X86::Instruction& insn)
  2046. {
  2047. warn_if_flags_tainted("jcc near imm32");
  2048. if (evaluate_condition(insn.cc()))
  2049. set_eip(eip() + (i32)insn.imm32());
  2050. }
  2051. void SoftCPU::Jcc_imm8(const X86::Instruction& insn)
  2052. {
  2053. warn_if_flags_tainted("jcc imm8");
  2054. if (evaluate_condition(insn.cc()))
  2055. set_eip(eip() + (i8)insn.imm8());
  2056. }
// Not yet implemented by the emulator.
void SoftCPU::LAHF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LAR_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LDS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LEAVE16(const X86::Instruction&) { TODO_INSN(); }
// LEAVE (32-bit): ESP = EBP + 4, EBP = pop. The saved EBP must be read from
// [SS:EBP] *before* ESP/EBP are rewritten, hence the temporary.
void SoftCPU::LEAVE32(const X86::Instruction&)
{
    auto new_ebp = read_memory32({ ss(), ebp().value() });
    // +4 accounts for the saved EBP slot that the read above consumed.
    set_esp({ ebp().value() + 4, ebp().shadow() });
    set_ebp(new_ebp);
}
  2069. void SoftCPU::LEA_reg16_mem16(const X86::Instruction& insn)
  2070. {
  2071. // FIXME: Respect shadow values
  2072. gpr16(insn.reg16()) = shadow_wrap_as_initialized<u16>(insn.modrm().resolve(*this, insn).offset());
  2073. }
  2074. void SoftCPU::LEA_reg32_mem32(const X86::Instruction& insn)
  2075. {
  2076. // FIXME: Respect shadow values
  2077. gpr32(insn.reg32()) = shadow_wrap_as_initialized<u32>(insn.modrm().resolve(*this, insn).offset());
  2078. }
// Segment-load and system-table instructions: not implemented in userspace
// emulation.
void SoftCPU::LES_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LES_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LFS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LGS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LMSW_RM16(const X86::Instruction&) { TODO_INSN(); }
// Shared LODS implementation: load from [seg:(E)SI] into AL/AX/EAX and step
// the source index, honoring any segment override and REP prefix.
template<typename T>
ALWAYS_INLINE static void do_lods(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.gpr<T>(X86::RegisterAL) = src;
        // Step by the element size; direction depends on DF (handled inside).
        cpu.step_source_index(insn.a32(), sizeof(T));
    });
}
// LODSB/LODSD/LODSW: thin wrappers selecting the element width.
void SoftCPU::LODSB(const X86::Instruction& insn)
{
    do_lods<u8>(*this, insn);
}
void SoftCPU::LODSD(const X86::Instruction& insn)
{
    do_lods<u32>(*this, insn);
}
void SoftCPU::LODSW(const X86::Instruction& insn)
{
    do_lods<u16>(*this, insn);
}
  2111. void SoftCPU::LOOPNZ_imm8(const X86::Instruction& insn)
  2112. {
  2113. warn_if_flags_tainted("loopnz");
  2114. if (insn.a32()) {
  2115. set_ecx({ ecx().value() - 1, ecx().shadow() });
  2116. if (ecx().value() != 0 && !zf())
  2117. set_eip(eip() + (i8)insn.imm8());
  2118. } else {
  2119. set_cx({ (u16)(cx().value() - 1), cx().shadow() });
  2120. if (cx().value() != 0 && !zf())
  2121. set_eip(eip() + (i8)insn.imm8());
  2122. }
  2123. }
  2124. void SoftCPU::LOOPZ_imm8(const X86::Instruction& insn)
  2125. {
  2126. warn_if_flags_tainted("loopz");
  2127. if (insn.a32()) {
  2128. set_ecx({ ecx().value() - 1, ecx().shadow() });
  2129. if (ecx().value() != 0 && zf())
  2130. set_eip(eip() + (i8)insn.imm8());
  2131. } else {
  2132. set_cx({ (u16)(cx().value() - 1), cx().shadow() });
  2133. if (cx().value() != 0 && zf())
  2134. set_eip(eip() + (i8)insn.imm8());
  2135. }
  2136. }
  2137. void SoftCPU::LOOP_imm8(const X86::Instruction& insn)
  2138. {
  2139. if (insn.a32()) {
  2140. set_ecx({ ecx().value() - 1, ecx().shadow() });
  2141. if (ecx().value() != 0)
  2142. set_eip(eip() + (i8)insn.imm8());
  2143. } else {
  2144. set_cx({ (u16)(cx().value() - 1), cx().shadow() });
  2145. if (cx().value() != 0)
  2146. set_eip(eip() + (i8)insn.imm8());
  2147. }
  2148. }
// Not yet implemented by the emulator.
void SoftCPU::LSL_reg16_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSL_reg32_RM32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg16_mem16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LSS_reg32_mem32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::LTR_RM16(const X86::Instruction&) { TODO_INSN(); }
// Shared MOVS implementation: copy one element from [seg:(E)SI] to
// [ES:(E)DI] and step both indices, honoring segment override and REP.
template<typename T>
ALWAYS_INLINE static void do_movs(SoftCPU& cpu, const X86::Instruction& insn)
{
    auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS));
    cpu.do_once_or_repeat<false>(insn, [&] {
        auto src = cpu.read_memory<T>({ src_segment, cpu.source_index(insn.a32()).value() });
        cpu.write_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() }, src);
        cpu.step_source_index(insn.a32(), sizeof(T));
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}
// MOVSB/MOVSD/MOVSW: thin wrappers selecting the element width.
void SoftCPU::MOVSB(const X86::Instruction& insn)
{
    do_movs<u8>(*this, insn);
}
void SoftCPU::MOVSD(const X86::Instruction& insn)
{
    do_movs<u32>(*this, insn);
}
void SoftCPU::MOVSW(const X86::Instruction& insn)
{
    do_movs<u16>(*this, insn);
}
// MOVSX: sign-extend the source into the wider destination; the destination's
// taint is derived from the source operand.
void SoftCPU::MOVSX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr16(insn.reg16()) = shadow_wrap_with_taint_from<u16>(sign_extended_to<u16>(src.value()), src);
}
void SoftCPU::MOVSX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(sign_extended_to<u32>(src.value()), src);
}
void SoftCPU::MOVSX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr32(insn.reg32()) = shadow_wrap_with_taint_from<u32>(sign_extended_to<u32>(src.value()), src);
}
// MOVZX: zero-extend into the wider destination. The shadow is built by hand:
// the high (zero-filled) bytes are marked initialized (0x01 per byte pattern,
// matching shadow_wrap_as_initialized elsewhere) while the low bytes keep the
// source's shadow.
void SoftCPU::MOVZX_reg16_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr16(insn.reg16()) = ValueWithShadow<u16>(src.value(), 0x0100 | (src.shadow() & 0xff));
}
void SoftCPU::MOVZX_reg32_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010000 | (src.shadow() & 0xffff));
}
void SoftCPU::MOVZX_reg32_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    gpr32(insn.reg32()) = ValueWithShadow<u32>(src.value(), 0x01010100 | (src.shadow() & 0xff));
}
// MOV accumulator <- moff: load from an absolute address (with optional
// segment override, defaulting to DS).
void SoftCPU::MOV_AL_moff8(const X86::Instruction& insn)
{
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
void SoftCPU::MOV_AX_moff16(const X86::Instruction& insn)
{
    set_ax(read_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
void SoftCPU::MOV_CR_reg32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_DR_reg32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_EAX_moff32(const X86::Instruction& insn)
{
    set_eax(read_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }));
}
// MOV r/m <- imm/reg: immediates are wrapped as fully initialized; register
// sources carry their existing shadow along.
void SoftCPU::MOV_RM16_imm16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(insn.imm16()));
}
void SoftCPU::MOV_RM16_reg16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
}
void SoftCPU::MOV_RM16_seg(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_RM32_imm32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(insn.imm32()));
}
void SoftCPU::MOV_RM32_reg32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
}
void SoftCPU::MOV_RM8_imm8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized(insn.imm8()));
}
void SoftCPU::MOV_RM8_reg8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
}
// MOV moff <- accumulator: store to an absolute address (with optional
// segment override, defaulting to DS).
void SoftCPU::MOV_moff16_AX(const X86::Instruction& insn)
{
    write_memory16({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, ax());
}
void SoftCPU::MOV_moff32_EAX(const X86::Instruction& insn)
{
    write_memory32({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, eax());
}
void SoftCPU::MOV_moff8_AL(const X86::Instruction& insn)
{
    write_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), insn.imm_address() }, al());
}
// MOV reg <- r/m or imm: r/m sources carry their shadow; immediates are
// wrapped as fully initialized.
void SoftCPU::MOV_reg16_RM16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = insn.modrm().read16(*this, insn);
}
void SoftCPU::MOV_reg16_imm16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = shadow_wrap_as_initialized(insn.imm16());
}
void SoftCPU::MOV_reg32_CR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_reg32_DR(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_reg32_RM32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = insn.modrm().read32(*this, insn);
}
void SoftCPU::MOV_reg32_imm32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = shadow_wrap_as_initialized(insn.imm32());
}
void SoftCPU::MOV_reg8_RM8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = insn.modrm().read8(*this, insn);
}
void SoftCPU::MOV_reg8_imm8(const X86::Instruction& insn)
{
    gpr8(insn.reg8()) = shadow_wrap_as_initialized(insn.imm8());
}
void SoftCPU::MOV_seg_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOV_seg_RM32(const X86::Instruction&) { TODO_INSN(); }
// MUL r/m16: unsigned AX * src -> DX:AX. Flags and both result halves are
// tainted from the source and the original AX; CF/OF are set when the high
// half (DX) is nonzero.
void SoftCPU::MUL_RM16(const X86::Instruction& insn)
{
    auto src = insn.modrm().read16(*this, insn);
    u32 result = (u32)ax().value() * (u32)src.value();
    // Capture AX before overwriting it, so taint can still be derived from it.
    auto original_ax = ax();
    set_ax(shadow_wrap_with_taint_from<u16>(result & 0xffff, src, original_ax));
    set_dx(shadow_wrap_with_taint_from<u16>(result >> 16, src, original_ax));
    taint_flags_from(src, original_ax);
    set_cf(dx().value() != 0);
    set_of(dx().value() != 0);
}
// MUL r/m32: unsigned EAX * src -> EDX:EAX. Flags and both result halves are
// tainted from the source and the original EAX; CF/OF are set when the high
// half (EDX) is nonzero.
void SoftCPU::MUL_RM32(const X86::Instruction& insn)
{
    auto src = insn.modrm().read32(*this, insn);
    u64 result = (u64)eax().value() * (u64)src.value();
    // Capture EAX before overwriting it, so taint can still be derived from it.
    auto original_eax = eax();
    // The low half truncates implicitly on assignment to u32.
    set_eax(shadow_wrap_with_taint_from<u32>(result, src, original_eax));
    set_edx(shadow_wrap_with_taint_from<u32>(result >> 32, src, original_eax));
    taint_flags_from(src, original_eax);
    set_cf(edx().value() != 0);
    set_of(edx().value() != 0);
}
// MUL r/m8: unsigned AL * src -> AX. Flags and the result are tainted from
// the source and the original AL; CF/OF are set when the high byte is nonzero.
void SoftCPU::MUL_RM8(const X86::Instruction& insn)
{
    auto src = insn.modrm().read8(*this, insn);
    u16 result = (u16)al().value() * src.value();
    // Capture AL before overwriting AX, so taint can still be derived from it.
    auto original_al = al();
    set_ax(shadow_wrap_with_taint_from(result, src, original_al));
    taint_flags_from(src, original_al);
    set_cf((result & 0xff00) != 0);
    set_of((result & 0xff00) != 0);
}
// NEG: implemented as (0 - operand) via op_sub, which also updates the
// arithmetic flags exactly like a subtraction from zero.
void SoftCPU::NEG_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_sub<ValueWithShadow<u16>>(*this, shadow_wrap_as_initialized<u16>(0), insn.modrm().read16(*this, insn)));
}
void SoftCPU::NEG_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_sub<ValueWithShadow<u32>>(*this, shadow_wrap_as_initialized<u32>(0), insn.modrm().read32(*this, insn)));
}
void SoftCPU::NEG_RM8(const X86::Instruction& insn)
{
    insn.modrm().write8(*this, insn, op_sub<ValueWithShadow<u8>>(*this, shadow_wrap_as_initialized<u8>(0), insn.modrm().read8(*this, insn)));
}
// NOP: intentionally does nothing.
void SoftCPU::NOP(const X86::Instruction&)
{
}
// NOT: bitwise complement; NOT affects no flags, and the per-byte shadow is
// preserved since every output byte depends only on the same input byte.
void SoftCPU::NOT_RM16(const X86::Instruction& insn)
{
    auto data = insn.modrm().read16(*this, insn);
    insn.modrm().write16(*this, insn, ValueWithShadow<u16>(~data.value(), data.shadow()));
}
void SoftCPU::NOT_RM32(const X86::Instruction& insn)
{
    auto data = insn.modrm().read32(*this, insn);
    insn.modrm().write32(*this, insn, ValueWithShadow<u32>(~data.value(), data.shadow()));
}
void SoftCPU::NOT_RM8(const X86::Instruction& insn)
{
    auto data = insn.modrm().read8(*this, insn);
    insn.modrm().write8(*this, insn, ValueWithShadow<u8>(~data.value(), data.shadow()));
}
// Port I/O, MMX and 16-bit stack-group instructions: not yet implemented.
void SoftCPU::OUTSB(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUTSW(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_DX_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AL(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_AX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::OUT_imm8_EAX(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDB_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDW_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PADDD_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POPA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POPAD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POPF(const X86::Instruction&) { TODO_INSN(); }
// POPFD: pop a dword into EFLAGS. Only the bits inside the 0x00fcffff mask
// are replaced from the popped value; the remaining bits of m_eflags are
// preserved. Flag taint is derived from the popped value.
void SoftCPU::POPFD(const X86::Instruction&)
{
    auto popped_value = pop32();
    m_eflags &= ~0x00fcffff;
    m_eflags |= popped_value.value() & 0x00fcffff;
    taint_flags_from(popped_value);
}
// Segment-register pops are unimplemented; register/memory pops forward the
// popped value (including its shadow) unchanged.
void SoftCPU::POP_DS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_ES(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_FS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_GS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_RM16(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, pop16());
}
void SoftCPU::POP_RM32(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, pop32());
}
void SoftCPU::POP_SS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::POP_reg16(const X86::Instruction& insn)
{
    gpr16(insn.reg16()) = pop16();
}
void SoftCPU::POP_reg32(const X86::Instruction& insn)
{
    gpr32(insn.reg32()) = pop32();
}
// Stack pushes. Immediates are wrapped as fully initialized; register pushes
// carry the register's shadow along.
void SoftCPU::PUSHA(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSHAD(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSHF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSHFD(const X86::Instruction&)
{
    // FIXME: Respect shadow flags when they exist!
    // Only the bits within the 0x00fcffff mask are exposed.
    push32(shadow_wrap_as_initialized(m_eflags & 0x00fcffff));
}
void SoftCPU::PUSH_CS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_DS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_ES(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_FS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_GS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_RM32(const X86::Instruction& insn)
{
    push32(insn.modrm().read32(*this, insn));
}
void SoftCPU::PUSH_SP_8086_80186(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_SS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::PUSH_imm16(const X86::Instruction& insn)
{
    push16(shadow_wrap_as_initialized(insn.imm16()));
}
void SoftCPU::PUSH_imm32(const X86::Instruction& insn)
{
    push32(shadow_wrap_as_initialized(insn.imm32()));
}
void SoftCPU::PUSH_imm8(const X86::Instruction& insn)
{
    VERIFY(!insn.has_operand_size_override_prefix());
    // The 8-bit immediate is sign-extended to the full stack width.
    push32(shadow_wrap_as_initialized<u32>(sign_extended_to<i32>(insn.imm8())));
}
void SoftCPU::PUSH_reg16(const X86::Instruction& insn)
{
    push16(gpr16(insn.reg16()));
}
void SoftCPU::PUSH_reg32(const X86::Instruction& insn)
{
    push32(gpr32(insn.reg32()));
}
// Rotate-through-carry left. The emulated CF is baked in as the 'cf' template
// argument; the rotate itself is delegated to the host CPU via inline asm.
template<typename T, bool cf>
ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    // A zero-count rotate changes neither the value nor the flags.
    if (steps.value() == 0)
        return shadow_wrap_with_taint_from(data.value(), data, steps);
    u32 result = 0;
    u32 new_flags = 0;
    // Seed the host carry flag with the emulated CF before rotating.
    if constexpr (cf)
        asm volatile("stc");
    else
        asm volatile("clc");
    if constexpr (sizeof(typename T::ValueType) == 4) {
        asm volatile("rcll %%cl, %%eax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 2) {
        asm volatile("rclw %%cl, %%ax\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    } else if constexpr (sizeof(typename T::ValueType) == 1) {
        asm volatile("rclb %%cl, %%al\n"
                     : "=a"(result)
                     : "a"(data.value()), "c"(steps.value()));
    }
    // Harvest the host flags, then copy OF/CF into the emulated flags.
    asm volatile(
        "pushf\n"
        "pop %%ebx"
        : "=b"(new_flags));
    cpu.set_flags_oc(new_flags);
    cpu.taint_flags_from(data, steps);
    return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
}
// RCL entry point: the emulated CF feeds the rotate, so read it (warning on
// taint) and dispatch to the matching compile-time specialization.
template<typename T>
ALWAYS_INLINE static T op_rcl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcl");
    if (cpu.cf())
        return op_rcl_impl<T, true>(cpu, data, steps);
    return op_rcl_impl<T, false>(cpu, data, steps);
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCL, op_rcl)
  2473. template<typename T, bool cf>
  2474. ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2475. {
  2476. if (steps.value() == 0)
  2477. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2478. u32 result = 0;
  2479. u32 new_flags = 0;
  2480. if constexpr (cf)
  2481. asm volatile("stc");
  2482. else
  2483. asm volatile("clc");
  2484. if constexpr (sizeof(typename T::ValueType) == 4) {
  2485. asm volatile("rcrl %%cl, %%eax\n"
  2486. : "=a"(result)
  2487. : "a"(data.value()), "c"(steps.value()));
  2488. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2489. asm volatile("rcrw %%cl, %%ax\n"
  2490. : "=a"(result)
  2491. : "a"(data.value()), "c"(steps.value()));
  2492. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2493. asm volatile("rcrb %%cl, %%al\n"
  2494. : "=a"(result)
  2495. : "a"(data.value()), "c"(steps.value()));
  2496. }
  2497. asm volatile(
  2498. "pushf\n"
  2499. "pop %%ebx"
  2500. : "=b"(new_flags));
  2501. cpu.set_flags_oc(new_flags);
  2502. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2503. }
// RCR entry point: the emulated CF feeds the rotate, so read it (warning on
// taint) and dispatch to the matching compile-time specialization.
template<typename T>
ALWAYS_INLINE static T op_rcr(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
{
    cpu.warn_if_flags_tainted("rcr");
    if (cpu.cf())
        return op_rcr_impl<T, true>(cpu, data, steps);
    return op_rcr_impl<T, false>(cpu, data, steps);
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCR, op_rcr)
void SoftCPU::RDTSC(const X86::Instruction&) { TODO_INSN(); }
// RET: pop the return address and jump to it, warning if the address bytes
// were never initialized (a classic use-after-free / stack-smash symptom).
void SoftCPU::RET(const X86::Instruction& insn)
{
    VERIFY(!insn.has_operand_size_override_prefix());
    auto ret_address = pop32();
    warn_if_uninitialized(ret_address, "ret");
    set_eip(ret_address.value());
}
void SoftCPU::RETF(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::RETF_imm16(const X86::Instruction&) { TODO_INSN(); }
// RET imm16: like RET, then release imm16 extra bytes of stack (callee-pops
// calling conventions); ESP's shadow is preserved.
void SoftCPU::RET_imm16(const X86::Instruction& insn)
{
    VERIFY(!insn.has_operand_size_override_prefix());
    auto ret_address = pop32();
    warn_if_uninitialized(ret_address, "ret imm16");
    set_eip(ret_address.value());
    set_esp({ esp().value() + insn.imm16(), esp().shadow() });
}
  2531. template<typename T>
  2532. ALWAYS_INLINE static T op_rol(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2533. {
  2534. if (steps.value() == 0)
  2535. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2536. u32 result = 0;
  2537. u32 new_flags = 0;
  2538. if constexpr (sizeof(typename T::ValueType) == 4) {
  2539. asm volatile("roll %%cl, %%eax\n"
  2540. : "=a"(result)
  2541. : "a"(data.value()), "c"(steps.value()));
  2542. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2543. asm volatile("rolw %%cl, %%ax\n"
  2544. : "=a"(result)
  2545. : "a"(data.value()), "c"(steps.value()));
  2546. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2547. asm volatile("rolb %%cl, %%al\n"
  2548. : "=a"(result)
  2549. : "a"(data.value()), "c"(steps.value()));
  2550. }
  2551. asm volatile(
  2552. "pushf\n"
  2553. "pop %%ebx"
  2554. : "=b"(new_flags));
  2555. cpu.set_flags_oc(new_flags);
  2556. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2557. }
  2558. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROL, op_rol)
  2559. template<typename T>
  2560. ALWAYS_INLINE static T op_ror(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2561. {
  2562. if (steps.value() == 0)
  2563. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2564. u32 result = 0;
  2565. u32 new_flags = 0;
  2566. if constexpr (sizeof(typename T::ValueType) == 4) {
  2567. asm volatile("rorl %%cl, %%eax\n"
  2568. : "=a"(result)
  2569. : "a"(data.value()), "c"(steps.value()));
  2570. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2571. asm volatile("rorw %%cl, %%ax\n"
  2572. : "=a"(result)
  2573. : "a"(data.value()), "c"(steps.value()));
  2574. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2575. asm volatile("rorb %%cl, %%al\n"
  2576. : "=a"(result)
  2577. : "a"(data.value()), "c"(steps.value()));
  2578. }
  2579. asm volatile(
  2580. "pushf\n"
  2581. "pop %%ebx"
  2582. : "=b"(new_flags));
  2583. cpu.set_flags_oc(new_flags);
  2584. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2585. }
  2586. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROR, op_ror)
void SoftCPU::SAHF(const X86::Instruction&) { TODO_INSN(); }
// SALC (undocumented x86): AL = CF ? 0xff : 0x00.
void SoftCPU::SALC(const X86::Instruction&)
{
    // FIXME: Respect shadow flags once they exists!
    set_al(shadow_wrap_as_initialized<u8>(cf() ? 0xff : 0x00));
}
  2593. template<typename T>
  2594. static T op_sar(SoftCPU& cpu, T data, ValueWithShadow<u8> steps)
  2595. {
  2596. if (steps.value() == 0)
  2597. return shadow_wrap_with_taint_from(data.value(), data, steps);
  2598. u32 result = 0;
  2599. u32 new_flags = 0;
  2600. if constexpr (sizeof(typename T::ValueType) == 4) {
  2601. asm volatile("sarl %%cl, %%eax\n"
  2602. : "=a"(result)
  2603. : "a"(data.value()), "c"(steps.value()));
  2604. } else if constexpr (sizeof(typename T::ValueType) == 2) {
  2605. asm volatile("sarw %%cl, %%ax\n"
  2606. : "=a"(result)
  2607. : "a"(data.value()), "c"(steps.value()));
  2608. } else if constexpr (sizeof(typename T::ValueType) == 1) {
  2609. asm volatile("sarb %%cl, %%al\n"
  2610. : "=a"(result)
  2611. : "a"(data.value()), "c"(steps.value()));
  2612. }
  2613. asm volatile(
  2614. "pushf\n"
  2615. "pop %%ebx"
  2616. : "=b"(new_flags));
  2617. cpu.set_flags_oszapc(new_flags);
  2618. return shadow_wrap_with_taint_from<typename T::ValueType>(result, data, steps);
  2619. }
  2620. DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SAR, op_sar)
// Shared SCAS implementation: compare the accumulator against [ES:(E)DI]
// (op_sub sets the flags; the result is discarded) and step the destination
// index, honoring REP/REPNE.
template<typename T>
ALWAYS_INLINE static void do_scas(SoftCPU& cpu, const X86::Instruction& insn)
{
    cpu.do_once_or_repeat<true>(insn, [&] {
        auto src = cpu.const_gpr<T>(X86::RegisterAL);
        auto dest = cpu.read_memory<T>({ cpu.es(), cpu.destination_index(insn.a32()).value() });
        op_sub(cpu, dest, src);
        cpu.step_destination_index(insn.a32(), sizeof(T));
    });
}
// SCASB/SCASD/SCASW: thin wrappers selecting the element width.
void SoftCPU::SCASB(const X86::Instruction& insn)
{
    do_scas<u8>(*this, insn);
}
void SoftCPU::SCASD(const X86::Instruction& insn)
{
    do_scas<u32>(*this, insn);
}
void SoftCPU::SCASW(const X86::Instruction& insn)
{
    do_scas<u16>(*this, insn);
}
// SETcc: write 1 or 0 depending on the condition; warns if the flags the
// condition reads carry taint from uninitialized data.
void SoftCPU::SETcc_RM8(const X86::Instruction& insn)
{
    warn_if_flags_tainted("setcc");
    insn.modrm().write8(*this, insn, shadow_wrap_as_initialized<u8>(evaluate_condition(insn.cc())));
}
void SoftCPU::SGDT(const X86::Instruction&) { TODO_INSN(); }
// SHLD: double-precision shift left; the count comes from CL or an imm8
// (immediates wrapped as initialized), op_shld does the work and flags.
void SoftCPU::SHLD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), cl()));
}
void SoftCPU::SHLD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}
void SoftCPU::SHLD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), cl()));
}
void SoftCPU::SHLD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHL, op_shl)
// SHRD: double-precision shift right; the count comes from CL or an imm8
// (immediates wrapped as initialized), op_shrd does the work and flags.
void SoftCPU::SHRD_RM16_reg16_CL(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), cl()));
}
void SoftCPU::SHRD_RM16_reg16_imm8(const X86::Instruction& insn)
{
    insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8())));
}
void SoftCPU::SHRD_RM32_reg32_CL(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), cl()));
}
void SoftCPU::SHRD_RM32_reg32_imm8(const X86::Instruction& insn)
{
    insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8())));
}
DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHR, op_shr)
void SoftCPU::SIDT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SLDT_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::SMSW_RM16(const X86::Instruction&) { TODO_INSN(); }
// STC: set the carry flag.
void SoftCPU::STC(const X86::Instruction&)
{
    set_cf(true);
}
// STD: set the direction flag (string ops then walk backwards).
void SoftCPU::STD(const X86::Instruction&)
{
    set_df(true);
}
void SoftCPU::STI(const X86::Instruction&) { TODO_INSN(); }
// STOSB: store AL at ES:(E)DI repeatedly. With REP and DF clear, try the
// MMU's bulk byte-fill first; on success, advance (E)DI by the count and
// zero (E)CX, matching what the per-byte loop would have produced. If the
// fast path declines (e.g. the range is not plain memory), fall back to the
// generic one-byte-at-a-time loop.
void SoftCPU::STOSB(const X86::Instruction& insn)
{
    if (insn.has_rep_prefix() && !df()) {
        // Fast path for 8-bit forward memory fill.
        if (m_emulator.mmu().fast_fill_memory8({ es(), destination_index(insn.a32()).value() }, ecx().value(), al())) {
            if (insn.a32()) {
                // FIXME: Should an uninitialized ECX taint EDI here?
                set_edi({ (u32)(edi().value() + ecx().value()), edi().shadow() });
                set_ecx(shadow_wrap_as_initialized<u32>(0));
            } else {
                // FIXME: Should an uninitialized CX taint DI here?
                set_di({ (u16)(di().value() + cx().value()), di().shadow() });
                set_cx(shadow_wrap_as_initialized<u16>(0));
            }
            return;
        }
    }
    do_once_or_repeat<false>(insn, [&] {
        write_memory8({ es(), destination_index(insn.a32()).value() }, al());
        step_destination_index(insn.a32(), 1);
    });
}
  2717. void SoftCPU::STOSD(const X86::Instruction& insn)
  2718. {
  2719. if (insn.has_rep_prefix() && !df()) {
  2720. // Fast path for 32-bit forward memory fill.
  2721. if (m_emulator.mmu().fast_fill_memory32({ es(), destination_index(insn.a32()).value() }, ecx().value(), eax())) {
  2722. if (insn.a32()) {
  2723. // FIXME: Should an uninitialized ECX taint EDI here?
  2724. set_edi({ (u32)(edi().value() + (ecx().value() * sizeof(u32))), edi().shadow() });
  2725. set_ecx(shadow_wrap_as_initialized<u32>(0));
  2726. } else {
  2727. // FIXME: Should an uninitialized CX taint DI here?
  2728. set_di({ (u16)(di().value() + (cx().value() * sizeof(u32))), di().shadow() });
  2729. set_cx(shadow_wrap_as_initialized<u16>(0));
  2730. }
  2731. return;
  2732. }
  2733. }
  2734. do_once_or_repeat<false>(insn, [&] {
  2735. write_memory32({ es(), destination_index(insn.a32()).value() }, eax());
  2736. step_destination_index(insn.a32(), 4);
  2737. });
  2738. }
// STOSW: store AX to ES:[(E)DI], stepping the index by 2 per iteration.
// NOTE(review): unlike STOSB/STOSD there is no fast-fill path here —
// presumably the MMU lacks a 16-bit fast-fill helper; confirm before adding one.
void SoftCPU::STOSW(const X86::Instruction& insn)
{
    do_once_or_repeat<false>(insn, [&] {
        write_memory16({ es(), destination_index(insn.a32()).value() }, ax());
        step_destination_index(insn.a32(), 2);
    });
}
// Unimplemented instructions: executing any of these aborts via TODO_INSN().
void SoftCPU::STR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD1(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::UD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERR_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::VERW_RM16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WAIT(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::WBINVD(const X86::Instruction&) { TODO_INSN(); }
  2754. void SoftCPU::XADD_RM16_reg16(const X86::Instruction& insn)
  2755. {
  2756. auto dest = insn.modrm().read16(*this, insn);
  2757. auto src = const_gpr16(insn.reg16());
  2758. auto result = op_add(*this, dest, src);
  2759. gpr16(insn.reg16()) = dest;
  2760. insn.modrm().write16(*this, insn, result);
  2761. }
  2762. void SoftCPU::XADD_RM32_reg32(const X86::Instruction& insn)
  2763. {
  2764. auto dest = insn.modrm().read32(*this, insn);
  2765. auto src = const_gpr32(insn.reg32());
  2766. auto result = op_add(*this, dest, src);
  2767. gpr32(insn.reg32()) = dest;
  2768. insn.modrm().write32(*this, insn, result);
  2769. }
  2770. void SoftCPU::XADD_RM8_reg8(const X86::Instruction& insn)
  2771. {
  2772. auto dest = insn.modrm().read8(*this, insn);
  2773. auto src = const_gpr8(insn.reg8());
  2774. auto result = op_add(*this, dest, src);
  2775. gpr8(insn.reg8()) = dest;
  2776. insn.modrm().write8(*this, insn, result);
  2777. }
  2778. void SoftCPU::XCHG_AX_reg16(const X86::Instruction& insn)
  2779. {
  2780. auto temp = gpr16(insn.reg16());
  2781. gpr16(insn.reg16()) = ax();
  2782. set_ax(temp);
  2783. }
  2784. void SoftCPU::XCHG_EAX_reg32(const X86::Instruction& insn)
  2785. {
  2786. auto temp = gpr32(insn.reg32());
  2787. gpr32(insn.reg32()) = eax();
  2788. set_eax(temp);
  2789. }
  2790. void SoftCPU::XCHG_reg16_RM16(const X86::Instruction& insn)
  2791. {
  2792. auto temp = insn.modrm().read16(*this, insn);
  2793. insn.modrm().write16(*this, insn, const_gpr16(insn.reg16()));
  2794. gpr16(insn.reg16()) = temp;
  2795. }
  2796. void SoftCPU::XCHG_reg32_RM32(const X86::Instruction& insn)
  2797. {
  2798. auto temp = insn.modrm().read32(*this, insn);
  2799. insn.modrm().write32(*this, insn, const_gpr32(insn.reg32()));
  2800. gpr32(insn.reg32()) = temp;
  2801. }
  2802. void SoftCPU::XCHG_reg8_RM8(const X86::Instruction& insn)
  2803. {
  2804. auto temp = insn.modrm().read8(*this, insn);
  2805. insn.modrm().write8(*this, insn, const_gpr8(insn.reg8()));
  2806. gpr8(insn.reg8()) = temp;
  2807. }
// XLAT: AL <- byte at seg:[(E)BX + AL]; the segment defaults to DS but
// honors an instruction segment-override prefix.
void SoftCPU::XLAT(const X86::Instruction& insn)
{
    // Emit a taint warning if the table base or the index carries
    // uninitialized shadow bits before dereferencing.
    if (insn.a32())
        warn_if_uninitialized(ebx(), "xlat ebx");
    else
        warn_if_uninitialized(bx(), "xlat bx");
    warn_if_uninitialized(al(), "xlat al");
    u32 offset = (insn.a32() ? ebx().value() : bx().value()) + al().value();
    set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), offset }));
}
// Defines the handler subset shared by all two-operand ALU instructions:
// the accumulator-with-immediate forms (AL/AX/EAX) and the RM-with-immediate
// and RM-with-register forms, each delegating to a generic_* helper.
//   update_dest: whether the result is written back (false for CMP/TEST).
//   is_zero_idiom_if_both_operands_same: flags ops like XOR/SUB reg,reg whose
//     result is a known constant (see the generic_RM*_reg* helpers).
//   is_or: flag forwarded only to the immediate-form helpers — presumably for
//     OR-specific shadow handling; verify against the generic_* definitions.
#define DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
void SoftCPU::mnemonic##_AL_imm8(const X86::Instruction& insn) { generic_AL_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); } \
void SoftCPU::mnemonic##_AX_imm16(const X86::Instruction& insn) { generic_AX_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
void SoftCPU::mnemonic##_EAX_imm32(const X86::Instruction& insn) { generic_EAX_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
void SoftCPU::mnemonic##_RM16_imm16(const X86::Instruction& insn) { generic_RM16_imm16<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { generic_RM16_reg16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
void SoftCPU::mnemonic##_RM32_imm32(const X86::Instruction& insn) { generic_RM32_imm32<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { generic_RM32_reg32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8<update_dest, is_or>(op<ValueWithShadow<u8>>, insn); } \
void SoftCPU::mnemonic##_RM8_reg8(const X86::Instruction& insn) { generic_RM8_reg8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }
// Full handler set: everything in the PARTIAL macro plus the RM16/RM32-with-imm8
// forms and the reg,RM direction forms. TEST (below) uses only PARTIAL because
// it lacks these encodings.
#define DEFINE_GENERIC_INSN_HANDLERS(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or) \
void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_imm8<update_dest, is_or>(op<ValueWithShadow<u16>>, insn); } \
void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_imm8<update_dest, is_or>(op<ValueWithShadow<u32>>, insn); } \
void SoftCPU::mnemonic##_reg16_RM16(const X86::Instruction& insn) { generic_reg16_RM16<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u16>>, insn); } \
void SoftCPU::mnemonic##_reg32_RM32(const X86::Instruction& insn) { generic_reg32_RM32<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u32>>, insn); } \
void SoftCPU::mnemonic##_reg8_RM8(const X86::Instruction& insn) { generic_reg8_RM8<update_dest, is_zero_idiom_if_both_operands_same>(op<ValueWithShadow<u8>>, insn); }
// Two-operand ALU instruction handlers. Arguments are
// (mnemonic, op, update_dest, is_zero_idiom_if_both_operands_same, is_or):
// CMP and TEST compute flags only (update_dest = false); XOR and SUB are
// flagged as zero idioms when both operands are the same register.
DEFINE_GENERIC_INSN_HANDLERS(XOR, op_xor, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(OR, op_or, true, false, true)
DEFINE_GENERIC_INSN_HANDLERS(ADD, op_add, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(ADC, op_adc, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(SUB, op_sub, true, true, false)
DEFINE_GENERIC_INSN_HANDLERS(SBB, op_sbb, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(AND, op_and, true, false, false)
DEFINE_GENERIC_INSN_HANDLERS(CMP, op_sub, false, false, false)
// TEST only has the PARTIAL encodings (no imm8 or reg,RM forms).
DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(TEST, op_and, false, false, false)
// MMX MOVQ/EMMS and the opcode-table wrap_* entries are unimplemented;
// executing any of them aborts via TODO_INSN().
void SoftCPU::MOVQ_mm1_mm2m64(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::EMMS(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::MOVQ_mm1_m64_mm2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xC1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD0(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD1_32(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD2(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_16(const X86::Instruction&) { TODO_INSN(); }
void SoftCPU::wrap_0xD3_32(const X86::Instruction&) { TODO_INSN(); }
  2856. }