Timestamp:
    Dec 11, 2023 3:20:48 PM
Location:
    trunk/src/VBox/VMM
Files:
    10 edited
    - VMMAll/IEMAllInstOneByte.cpp.h (modified) (4 diffs)
    - VMMAll/IEMAllInstPython.py (modified) (1 diff)
    - VMMAll/IEMAllMemRWTmpl.cpp.h (modified) (4 diffs)
    - VMMAll/IEMAllMemRWTmplInline.cpp.h (modified) (12 diffs)
    - VMMAll/IEMAllN8vePython.py (modified) (1 diff)
    - VMMAll/IEMAllN8veRecompiler.cpp (modified) (1 diff)
    - VMMAll/IEMAllThrdPython.py (modified) (1 diff)
    - include/IEMInternal.h (modified) (1 diff)
    - include/IEMMc.h (modified) (5 diffs)
    - testcase/tstIEMCheckMc.cpp (modified) (1 diff)
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
r102522 -> r102572

@@ -2352 +2352 @@
     {
         case IEMMODE_16BIT:
-            IEM_MC_BEGIN(0, 1, 0, 0);
+            IEM_MC_BEGIN(0, 0, 0, 0);
             IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-            IEM_MC_LOCAL(uint16_t *, pu16Dst);
-            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
-            IEM_MC_POP_U16(pu16Dst);
+            IEM_MC_POP_GREG_U16(iReg);
             IEM_MC_ADVANCE_RIP_AND_FINISH();
             IEM_MC_END();
@@ -2362 +2360 @@
 
         case IEMMODE_32BIT:
-            IEM_MC_BEGIN(0, 1, IEM_MC_F_MIN_386 | IEM_MC_F_NOT_64BIT, 0);
+            IEM_MC_BEGIN(0, 0, IEM_MC_F_MIN_386 | IEM_MC_F_NOT_64BIT, 0);
             IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-            IEM_MC_LOCAL(uint32_t *, pu32Dst);
-            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
-            IEM_MC_POP_U32(pu32Dst);
-            IEM_MC_CLEAR_HIGH_GREG_U64(iReg); /** @todo testcase*/
+            IEM_MC_POP_GREG_U32(iReg);
             IEM_MC_ADVANCE_RIP_AND_FINISH();
             IEM_MC_END();
@@ -2373 +2368 @@
 
         case IEMMODE_64BIT:
-            IEM_MC_BEGIN(0, 1, IEM_MC_F_64BIT, 0);
+            IEM_MC_BEGIN(0, 0, IEM_MC_F_64BIT, 0);
             IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-            IEM_MC_LOCAL(uint64_t *, pu64Dst);
-            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
-            IEM_MC_POP_U64(pu64Dst);
+            IEM_MC_POP_GREG_U64(iReg);
             IEM_MC_ADVANCE_RIP_AND_FINISH();
             IEM_MC_END();
@@ -2433 +2426 @@
 {
     IEMOP_MNEMONIC(pop_rSP, "pop rSP");
-    if (IEM_IS_64BIT_CODE(pVCpu))
-    {
-        if (pVCpu->iem.s.uRexB)
-            return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
-        pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
-        pVCpu->iem.s.enmEffOpSize = !(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
-    }
-
-    /** @todo add testcase for this instruction. */
-    switch (pVCpu->iem.s.enmEffOpSize)
-    {
-        case IEMMODE_16BIT:
-            IEM_MC_BEGIN(0, 2, 0, 0);
-            IEMOP_HLP_DECODED_NL_1(OP_POP, IEMOPFORM_FIXED, OP_PARM_REG_ESP,
-                                   DISOPTYPE_HARMLESS | DISOPTYPE_X86_DEFAULT_64_OP_SIZE | DISOPTYPE_X86_REXB_EXTENDS_OPREG);
-            IEM_MC_LOCAL(uint16_t, u16Dst);
-            IEM_MC_LOCAL(uint16_t *, pu16Dst);
-            IEM_MC_REF_LOCAL(pu16Dst, u16Dst);
-            IEM_MC_POP_U16(pu16Dst); /** @todo not correct MC, fix later. */
-            IEM_MC_STORE_GREG_U16(X86_GREG_xSP, u16Dst);
-            IEM_MC_ADVANCE_RIP_AND_FINISH();
-            IEM_MC_END();
-            break;
-
-        case IEMMODE_32BIT:
-            IEM_MC_BEGIN(0, 2, IEM_MC_F_MIN_386, 0);
-            IEMOP_HLP_DECODED_NL_1(OP_POP, IEMOPFORM_FIXED, OP_PARM_REG_ESP,
-                                   DISOPTYPE_HARMLESS | DISOPTYPE_X86_DEFAULT_64_OP_SIZE | DISOPTYPE_X86_REXB_EXTENDS_OPREG);
-            IEM_MC_LOCAL(uint32_t, u32Dst);
-            IEM_MC_LOCAL(uint32_t *, pu32Dst);
-            IEM_MC_REF_LOCAL(pu32Dst, u32Dst);
-            IEM_MC_POP_U32(pu32Dst);
-            IEM_MC_STORE_GREG_U32(X86_GREG_xSP, u32Dst);
-            IEM_MC_ADVANCE_RIP_AND_FINISH();
-            IEM_MC_END();
-            break;
-
-        case IEMMODE_64BIT:
-            IEM_MC_BEGIN(0, 2, IEM_MC_F_64BIT, 0);
-            IEMOP_HLP_DECODED_NL_1(OP_POP, IEMOPFORM_FIXED, OP_PARM_REG_ESP,
-                                   DISOPTYPE_HARMLESS | DISOPTYPE_X86_DEFAULT_64_OP_SIZE | DISOPTYPE_X86_REXB_EXTENDS_OPREG);
-            IEM_MC_LOCAL(uint64_t, u64Dst);
-            IEM_MC_LOCAL(uint64_t *, pu64Dst);
-            IEM_MC_REF_LOCAL(pu64Dst, u64Dst);
-            IEM_MC_POP_U64(pu64Dst);
-            IEM_MC_STORE_GREG_U64(X86_GREG_xSP, u64Dst);
-            IEM_MC_ADVANCE_RIP_AND_FINISH();
-            IEM_MC_END();
-            break;
-
-        IEM_NOT_REACHED_DEFAULT_CASE_RET();
-    }
+    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
 }
 
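As a reading aid (not part of the changeset itself): the hunks above replace the old pop-through-a-register-pointer microcode sequence with a single macro that names the destination general register directly; the stack template code now performs the register write. A condensed contrast of the two shapes, taken from the 16-bit case above:

    /* Old microcode shape (r102522): declare a local pointer, bind it to the
       general register, then pop through the pointer. */
    IEM_MC_LOCAL(uint16_t *, pu16Dst);
    IEM_MC_REF_GREG_U16(pu16Dst, iReg);
    IEM_MC_POP_U16(pu16Dst);

    /* New microcode shape (r102572): pop straight into the register by index. */
    IEM_MC_POP_GREG_U16(iReg);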
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
r102557 -> r102572

@@ -3061 +3061 @@
     'IEM_MC_OR_LOCAL_U32':          (McBlock.parseMcGeneric,    False, False, ),
     'IEM_MC_OR_LOCAL_U8':           (McBlock.parseMcGeneric,    False, False, ),
-    'IEM_MC_POP_U16':               (McBlock.parseMcGeneric,    True,  False, ),
-    'IEM_MC_POP_U32':               (McBlock.parseMcGeneric,    True,  False, ),
-    'IEM_MC_POP_U64':               (McBlock.parseMcGeneric,    True,  False, ),
+    'IEM_MC_POP_GREG_U16':          (McBlock.parseMcGeneric,    True,  False, ),
+    'IEM_MC_POP_GREG_U32':          (McBlock.parseMcGeneric,    True,  False, ),
+    'IEM_MC_POP_GREG_U64':          (McBlock.parseMcGeneric,    True,  False, ),
     'IEM_MC_PREPARE_AVX_USAGE':     (McBlock.parseMcGeneric,    False, True),
     'IEM_MC_PREPARE_FPU_USAGE':     (McBlock.parseMcGeneric,    False, True),
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h
r102430 -> r102572

@@ -258 +258 @@
 
 #ifdef TMPL_MEM_WITH_STACK
+
+/**
+ * Pops a general purpose register off the stack.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu   The cross context virtual CPU structure of the
+ *                  calling thread.
+ * @param   iGReg   The GREG to load the popped value into.
+ */
+VBOXSTRICTRC RT_CONCAT(iemMemStackPopGReg,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iGReg) RT_NOEXCEPT
+{
+    Assert(iGReg < 16);
+
+    /* Increment the stack pointer. */
+    uint64_t uNewRsp;
+    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
+
+    /* Load the word the lazy way. */
+    uint8_t              bUnmapInfo;
+    TMPL_MEM_TYPE const *puSrc;
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
+    if (rc == VINF_SUCCESS)
+    {
+        TMPL_MEM_TYPE const uValue = *puSrc;
+        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
+
+        /* Commit the register and new RSP values. */
+        if (rc == VINF_SUCCESS)
+        {
+            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
+                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
+            pVCpu->cpum.GstCtx.rsp = uNewRsp;
+            if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
+                pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
+            else
+                pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
+            return VINF_SUCCESS;
+        }
+    }
+    return rc;
+}
+
 
 /**
@@ -298 +341 @@
 
 /**
- * Pops an item off the stack.
+ * Pops a generic item off the stack, regular version.
+ *
+ * This is used by C-implementation code.
  *
  * @returns Strict VBox status code.
@@ -441 +486 @@
 
 /**
- * Safe/fallback stack pop function that longjmps on error.
- */
-TMPL_MEM_TYPE RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
+ * Safe/fallback stack pop greg function that longjmps on error.
+ */
+void RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
 {
 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
@@ -457 +502 @@
     TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
                                                                      GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
-    TMPL_MEM_TYPE const uRet = *puSrc;
+    TMPL_MEM_TYPE const uValue = *puSrc;
     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 
-    /* Commit the RSP change and return the popped value. */
-    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
-           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uRet));
+    /* Commit the register and RSP values. */
+    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
+           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
     pVCpu->cpum.GstCtx.rsp = uNewRsp;
-
-    return uRet;
-}
+    if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
+        pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = *puSrc;
+    else
+        pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = *puSrc;
+}
+
 
 # ifdef TMPL_WITH_PUSH_SREG
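The file above is a size-generic template; the including code instantiates it once per access width by defining TMPL_MEM_TYPE, TMPL_MEM_FN_SUFF and the related format/alignment macros. Purely as an illustration, here is a hand-expanded sketch of roughly what the new pop-to-GREG function comes out as for a 64-bit instantiation; it assumes TMPL_MEM_TYPE is uint64_t and TMPL_MEM_FN_SUFF is U64, and it substitutes a placeholder for the TMPL_MEM_TYPE_ALIGN constant:

    /* Illustrative hand-expansion of the template above -- not literal generated code. */
    VBOXSTRICTRC iemMemStackPopGRegU64(PVMCPUCC pVCpu, uint8_t iGReg) RT_NOEXCEPT
    {
        Assert(iGReg < 16);

        /* Work out where the current top of stack is and what RSP becomes after the pop. */
        uint64_t uNewRsp;
        RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(uint64_t), &uNewRsp);

        /* Map the stack slot read-only, fetch the value, and only commit RSP and the
           destination register once both the mapping and the unmap/commit succeeded. */
        uint8_t         bUnmapInfo;
        uint64_t const *puSrc;
        VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(uint64_t), X86_SREG_SS, GCPtrTop,
                                    IEM_ACCESS_STACK_R, sizeof(uint64_t) - 1 /* TMPL_MEM_TYPE_ALIGN placeholder */);
        if (rc == VINF_SUCCESS)
        {
            uint64_t const uValue = *puSrc;
            rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
            if (rc == VINF_SUCCESS)
            {
                pVCpu->cpum.GstCtx.rsp = uNewRsp;             /* new stack pointer */
                pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;  /* wide pops write the full register */
            }
        }
        return rc;
    }

The 16-bit instantiation differs only in that it writes aGRegs[iGReg].u16 and leaves the upper register bits untouched, which is what the sizeof() check in the template selects.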
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h
r102424 -> r102572

@@ -785 +785 @@
 
 /**
- * Stack pop function that longjmps on error.
- */
-DECL_INLINE_THROW(TMPL_MEM_TYPE)
-RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
-{
+ * Stack pop greg function that longjmps on error.
+ */
+DECL_INLINE_THROW(void)
+RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
+{
+    Assert(iGReg < 16);
+
 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     /*
@@ -820 +822 @@
         {
             /*
-             * Do the push and return.
-             */
-            STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
-            Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
-            Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
-            TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
-            Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
-                                      GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uRet));
-            pVCpu->cpum.GstCtx.rsp = uNewRsp;
-            return uRet;
+             * Do the pop.
+             */
+            STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+            Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+            Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+            TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
+            Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
+                                      GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
+            pVCpu->cpum.GstCtx.rsp = uNewRsp; /* must be first for 16-bit */
+#  if TMPL_MEM_TYPE_SIZE == 2
+            pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
+#  elif TMPL_MEM_TYPE_SIZE == 4 || TMPL_MEM_TYPE_SIZE == 8
+            pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
+#  else
+#   error "TMPL_MEM_TYPE_SIZE"
+#  endif
+            return;
         }
     }
@@ -838 +847 @@
     Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
 # endif
-    return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
+    RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
 }
 
@@ -968 +977 @@
 
 /**
- * 32-bit flat stack pop function that longjmps on error.
- */
-DECL_INLINE_THROW(TMPL_MEM_TYPE)
-RT_CONCAT3(iemMemFlat32StackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
-{
+ * 32-bit flat stack greg pop function that longjmps on error.
+ */
+DECL_INLINE_THROW(void)
+RT_CONCAT3(iemMemFlat32StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
+{
+    Assert(iGReg < 16);
 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     /*
@@ -978 +988 @@
      */
     uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
 #  if TMPL_MEM_TYPE_SIZE > 1
     if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldEsp)))
 #  endif
     {
         /*
@@ -1000 +1010 @@
         {
             /*
-             * Do the push and return.
-             */
-            STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
-            Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
-            Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
-            TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
-            pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE);
-            Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE "\n",
-                                      uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uRet));
-            return uRet;
+             * Do the pop and update the register values.
+             */
+            STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+            Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+            Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+            TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
+            pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
+#  if TMPL_MEM_TYPE_SIZE == 2
+            pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
+#  elif TMPL_MEM_TYPE_SIZE == 4
+            pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
+#  else
+#   error "TMPL_MEM_TYPE_SIZE"
+#  endif
+            Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE " (r%u)\n",
+                                      uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
+            return;
         }
     }
@@ -1018 +1035 @@
     Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
 # endif
-    return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
+    RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
 }
 
@@ -1089 +1106 @@
 RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
 {
 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     /*
      * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
      */
     uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
 #  if TMPL_MEM_TYPE_SIZE > 1
     if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewRsp)))
 #  endif
     {
         /*
@@ -1134 +1151 @@
        outdated page pointer, or other troubles. (This will do a TLB load.) */
     Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uNewRsp));
 # endif
     RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
 }
@@ -1142 +1159 @@
  * 64-bit flat stack pop function that longjmps on error.
  */
-DECL_INLINE_THROW(TMPL_MEM_TYPE)
-RT_CONCAT3(iemMemFlat64StackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
-{
+DECL_INLINE_THROW(void)
+RT_CONCAT3(iemMemFlat64StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
+{
+    Assert(iGReg < 16);
 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     /*
      * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
      */
     uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
 #  if TMPL_MEM_TYPE_SIZE > 1
     if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldRsp)))
 #  endif
     {
         /*
@@ -1177 +1195 @@
             Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
             Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
-            TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
-            pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE);
-            Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE "\n",
-                                      uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uRet));
-            return uRet;
+            TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
+            pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
+#  if TMPL_MEM_TYPE_SIZE == 2
+            pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
+#  elif TMPL_MEM_TYPE_SIZE == 8
+            pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
+#  else
+#   error "TMPL_MEM_TYPE_SIZE"
+#  endif
+            Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
+                                      uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
+            return;
         }
     }
@@ -1189 +1214 @@
        outdated page pointer, or other troubles. (This will do a TLB load.) */
     Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));
 # endif
-    return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
+    RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
 }
 
 # endif /* TMPL_MEM_TYPE_SIZE != 4 */
 
 # endif /* IEM_WITH_SETJMP */
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
r102569 -> r102572

@@ -176 +176 @@
     'IEM_MC_FLAT64_PUSH_U16':           (None, True,  False, ),
     'IEM_MC_FLAT64_PUSH_U64':           (None, True,  False, ),
-    'IEM_MC_FLAT64_POP_U16':            (None, True,  False, ),
-    'IEM_MC_FLAT64_POP_U64':            (None, True,  False, ),
+    'IEM_MC_FLAT64_POP_GREG_U16':       (None, True,  False, ),
+    'IEM_MC_FLAT64_POP_GREG_U64':       (None, True,  False, ),
     'IEM_MC_FLAT32_PUSH_U16':           (None, True,  False, ),
     'IEM_MC_FLAT32_PUSH_U32':           (None, True,  False, ),
-    'IEM_MC_FLAT32_POP_U16':            (None, True,  False, ),
-    'IEM_MC_FLAT32_POP_U32':            (None, True,  False, ),
+    'IEM_MC_FLAT32_POP_GREG_U16':       (None, True,  False, ),
+    'IEM_MC_FLAT32_POP_GREG_U32':       (None, True,  False, ),
 };
 
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
r102569 -> r102572

@@ -10250 +10250 @@
 }
 
+
+
+/*********************************************************************************************************************************
+*   Stack Accesses.                                                                                                              *
+*********************************************************************************************************************************/
+#define IEM_MC_PUSH_U16(a_u16Value)             iemMemStackPushU16Jmp(pVCpu, (a_u16Value))
+#define IEM_MC_PUSH_U32(a_u32Value)             iemMemStackPushU32Jmp(pVCpu, (a_u32Value))
+#define IEM_MC_PUSH_U32_SREG(a_uSegVal)         iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal))
+#define IEM_MC_PUSH_U64(a_u64Value)             iemMemStackPushU64Jmp(pVCpu, (a_u64Value))
+#define IEM_MC_FLAT32_PUSH_U16(a_u16Value)
+#define IEM_MC_FLAT32_PUSH_U32(a_u32Value)
+#define IEM_MC_FLAT64_PUSH_U16(a_u16Value)
+#define IEM_MC_FLAT64_PUSH_U64(a_u64Value)
+
+#define IEM_MC_POP_GREG_U16(a_pu16Value)        (*(a_pu16Value) = iemMemStackPopU16Jmp(pVCpu))
+#define IEM_MC_POP_U32(a_pu32Value)             (*(a_pu32Value) = iemMemStackPopU32Jmp(pVCpu))
+#define IEM_MC_POP_U64(a_pu64Value)             (*(a_pu64Value) = iemMemStackPopU64Jmp(pVCpu))
+#define IEM_MC_FLAT64_POP_U16(a_pu16Value)
+#define IEM_MC_FLAT64_POP_U64(a_pu32Value)
+#define IEM_MC_FLAT32_POP_U16(a_pu16Value)
+#define IEM_MC_FLAT32_POP_U32(a_pu64Value)
 
 
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
r102429 -> r102572

@@ -734 +734 @@
     'IEM_MC_PUSH_U64':          ( 'IEM_MC_PUSH_U64',             'IEM_MC_FLAT64_PUSH_U64', ),
     'IEM_MC_PUSH_U32_SREG':     ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
-    'IEM_MC_POP_U16':           ( 'IEM_MC_FLAT32_POP_U16',       'IEM_MC_FLAT64_POP_U16', ),
-    'IEM_MC_POP_U32':           ( 'IEM_MC_FLAT32_POP_U32',       'IEM_MC_POP_U32', ),
-    'IEM_MC_POP_U64':           ( 'IEM_MC_POP_U64',              'IEM_MC_FLAT64_POP_U64', ),
+    'IEM_MC_POP_GREG_U16':      ( 'IEM_MC_FLAT32_POP_GREG_U16',  'IEM_MC_FLAT64_POP_GREG_U16', ),
+    'IEM_MC_POP_GREG_U32':      ( 'IEM_MC_FLAT32_POP_GREG_U32',  'IEM_MC_POP_GREG_U32', ),
+    'IEM_MC_POP_GREG_U64':      ( 'IEM_MC_POP_GREG_U64',         'IEM_MC_FLAT64_POP_GREG_U64', ),
 };
 
trunk/src/VBox/VMM/include/IEMInternal.h
r102557 -> r102572

@@ -5221 +5221 @@
 void            iemMemStackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
 void            iemMemStackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
-uint16_t        iemMemStackPopU16SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
-uint32_t        iemMemStackPopU32SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
-uint64_t        iemMemStackPopU64SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
+void            iemMemStackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
+void            iemMemStackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
+void            iemMemStackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
 
 void            iemMemFlat32StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
 void            iemMemFlat32StackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
 void            iemMemFlat32StackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
-uint16_t        iemMemFlat32StackPopU16SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
-uint32_t        iemMemFlat32StackPopU32SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
+void            iemMemFlat32StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
+void            iemMemFlat32StackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
 
 void            iemMemFlat64StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
 void            iemMemFlat64StackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
-uint16_t        iemMemFlat64StackPopU16SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
-uint64_t        iemMemFlat64StackPopU64SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
+void            iemMemFlat64StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
+void            iemMemFlat64StackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
 #endif
 
trunk/src/VBox/VMM/include/IEMMc.h
r102569 -> r102572

@@ -1455 +1455 @@
 # define IEM_MC_PUSH_U64(a_u64Value)        IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
 
-# define IEM_MC_POP_U16(a_pu16Value)        IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
-# define IEM_MC_POP_U32(a_pu32Value)        IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
-# define IEM_MC_POP_U64(a_pu64Value)        IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
+# define IEM_MC_POP_GREG_U16(a_iGReg)       IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
+# define IEM_MC_POP_GREG_U32(a_iGReg)       IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg)))
+# define IEM_MC_POP_GREG_U64(a_iGReg)       IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg)))
 #else
 # define IEM_MC_PUSH_U16(a_u16Value)        iemMemStackPushU16Jmp(pVCpu, (a_u16Value))
@@ -1464 +1464 @@
 # define IEM_MC_PUSH_U64(a_u64Value)        iemMemStackPushU64Jmp(pVCpu, (a_u64Value))
 
-# define IEM_MC_POP_U16(a_pu16Value)        (*(a_pu16Value) = iemMemStackPopU16Jmp(pVCpu))
-# define IEM_MC_POP_U32(a_pu32Value)        (*(a_pu32Value) = iemMemStackPopU32Jmp(pVCpu))
-# define IEM_MC_POP_U64(a_pu64Value)        (*(a_pu64Value) = iemMemStackPopU64Jmp(pVCpu))
+# define IEM_MC_POP_GREG_U16(a_iGReg)       iemMemStackPopGRegU16Jmp(pVCpu, (a_iGReg))
+# define IEM_MC_POP_GREG_U32(a_iGReg)       iemMemStackPopGRegU32Jmp(pVCpu, (a_iGReg))
+# define IEM_MC_POP_GREG_U64(a_iGReg)       iemMemStackPopGRegU64Jmp(pVCpu, (a_iGReg))
 #endif
 
@@ -1475 +1475 @@
 # define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal)))
 
-# define IEM_MC_FLAT32_POP_U16(a_pu16Value)     IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
-# define IEM_MC_FLAT32_POP_U32(a_pu32Value)     IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
+# define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg)    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
+# define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg)    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg)))
 #else
 # define IEM_MC_FLAT32_PUSH_U16(a_u16Value)     iemMemFlat32StackPushU16Jmp(pVCpu, (a_u16Value))
@@ -1482 +1482 @@
 # define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) iemMemFlat32StackPushU32SRegJmp(pVCpu, (a_uSegVal))
 
-# define IEM_MC_FLAT32_POP_U16(a_pu16Value)     (*(a_pu16Value) = iemMemFlat32StackPopU16Jmp(pVCpu))
-# define IEM_MC_FLAT32_POP_U32(a_pu32Value)     (*(a_pu32Value) = iemMemFlat32StackPopU32Jmp(pVCpu))
+# define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg)    iemMemFlat32StackPopGRegU16Jmp(pVCpu, a_iGReg))
+# define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg)    iemMemFlat32StackPopGRegU32Jmp(pVCpu, a_iGReg))
 #endif
 
@@ -1491 +1491 @@
 # define IEM_MC_FLAT64_PUSH_U64(a_u64Value)     IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
 
-# define IEM_MC_FLAT64_POP_U16(a_pu16Value)     IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
-# define IEM_MC_FLAT64_POP_U64(a_pu64Value)     IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
+# define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg)    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
+# define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg)    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg)))
 #else
 # define IEM_MC_FLAT64_PUSH_U16(a_u16Value)     iemMemFlat64StackPushU16Jmp(pVCpu, (a_u16Value))
 # define IEM_MC_FLAT64_PUSH_U64(a_u64Value)     iemMemFlat64StackPushU64Jmp(pVCpu, (a_u64Value))
 
-# define IEM_MC_FLAT64_POP_U16(a_pu16Value)     (*(a_pu16Value) = iemMemFlat64StackPopU16Jmp(pVCpu))
-# define IEM_MC_FLAT64_POP_U64(a_pu64Value)     (*(a_pu64Value) = iemMemFlat64StackPopU64Jmp(pVCpu))
+# define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg)    iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg))
+# define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg)    iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg))
 #endif
 
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r102448 -> r102572

@@ -925 +925 @@
 #define IEM_MC_PUSH_U32_SREG(a_u32Value)        do { CHK_VAR(a_u32Value); (void)fMcBegin; } while (0)
 #define IEM_MC_PUSH_U64(a_u64Value)             do { CHK_VAR(a_u64Value); (void)fMcBegin; } while (0)
-#define IEM_MC_POP_U16(a_pu16Value)             do { CHK_VAR(a_pu16Value); (void)fMcBegin; } while (0)
-#define IEM_MC_POP_U32(a_pu32Value)             do { CHK_VAR(a_pu32Value); (void)fMcBegin; } while (0)
-#define IEM_MC_POP_U64(a_pu64Value)             do { CHK_VAR(a_pu64Value); (void)fMcBegin; } while (0)
+#define IEM_MC_POP_GREG_U16(a_iGReg)            do { CHK_GREG_IDX(a_iGReg); (void)fMcBegin; } while (0)
+#define IEM_MC_POP_GREG_U32(a_iGReg)            do { CHK_GREG_IDX(a_iGReg); (void)fMcBegin; } while (0)
+#define IEM_MC_POP_GREG_U64(a_iGReg)            do { CHK_GREG_IDX(a_iGReg); (void)fMcBegin; } while (0)
 
 #define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pd80Mem); (a_pd80Mem) = NULL; CHK_PTYPE(RTPBCD80U *, a_pd80Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0)