Timestamp:
    May 25, 2023 7:59:54 PM
Location:
    trunk/src/VBox/VMM
Files:
    4 edited
    - VMMAll/IEMAll.cpp (modified) (6 diffs)
    - VMMAll/IEMAllInstructionsThreadedRecompiler.cpp (modified) (4 diffs)
    - VMMR3/EM.cpp (modified) (1 diff)
    - include/IEMInternal.h (modified) (9 diffs)
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r99930 → r99982

@@ -278,4 +278,5 @@
     pVCpu->iem.s.offCurInstrStart = 0;
 # ifdef VBOX_STRICT
+    pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
     pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
     pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
@@ -384,4 +385,5 @@
         pVCpu->iem.s.cbInstrBuf = 0;
         pVCpu->iem.s.cbInstrBufTotal = 0;
+        pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
     }
 }
@@ -392,4 +394,7 @@
         pVCpu->iem.s.cbInstrBuf = 0;
         pVCpu->iem.s.cbInstrBufTotal = 0;
+# ifdef VBOX_STRICT
+        pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
+# endif
     }
 #else
@@ -956,4 +961,5 @@
     pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
     pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
+    pVCpu->iem.s.GCPhysInstrBuf   = pTlbe->GCPhys;
     pVCpu->iem.s.pbInstrBuf       = pTlbe->pbMappingR3;
     memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
@@ -10335,4 +10341,5 @@
     pVCpu->iem.s.offCurInstrStart = 0;
     pVCpu->iem.s.offInstrNextByte = 0;
+    pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
 #else
     pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
@@ -10383,4 +10390,5 @@
     pVCpu->iem.s.offCurInstrStart = 0;
     pVCpu->iem.s.offInstrNextByte = 0;
+    pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
 #else
     pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
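All six hunks make the same change: a new GCPhysInstrBuf field shadows pbInstrBuf, loaded from the code-TLB entry on a hit and reset to NIL_RTGCPHYS wherever the instruction buffer is invalidated (only under VBOX_STRICT in some paths). A minimal sketch of the invariant this buys, using simplified stand-in types rather than the real IEMCPU state (DECODERSTATE and both helpers are hypothetical):

#include <stdint.h>

#define NIL_RTGCPHYS (~(uint64_t)0)        /* invalidation sentinel, mirroring VBox */

typedef struct DECODERSTATE
{
    const uint8_t *pbInstrBuf;             /* host mapping of the current code page */
    uint64_t       uInstrBufPc;            /* guest-linear address of pbInstrBuf[0] */
    uint64_t       GCPhysInstrBuf;         /* guest-physical address of pbInstrBuf[0] */
    uint16_t       cbInstrBufTotal;        /* valid bytes in the buffer */
} DECODERSTATE;

/* Invalidation keeps the physical base in sync with the mapping, as the
 * strict-mode poisoning in the hunks above does. */
static void decoderInvalidate(DECODERSTATE *pState)
{
    pState->pbInstrBuf      = NULL;
    pState->GCPhysInstrBuf  = NIL_RTGCPHYS;
    pState->cbInstrBufTotal = 0;
}

/* Because the physical base is captured together with the mapping, any PC
 * inside the buffer translates to a physical PC with plain arithmetic: no
 * page-table or TLB walk on the hot path. */
static int decoderQueryPhysPc(const DECODERSTATE *pState, uint64_t uPc, uint64_t *pGCPhysPc)
{
    uint64_t const off = uPc - pState->uInstrBufPc;
    if (pState->pbInstrBuf && off < pState->cbInstrBufTotal)
    {
        *pGCPhysPc = pState->GCPhysInstrBuf + off;
        return 0;                          /* hit */
    }
    return -1;                             /* miss: buffer invalid or PC outside it */
}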
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThreadedRecompiler.cpp
r99932 → r99982

@@ -75,4 +75,21 @@
 
 #include "IEMThreadedFunctions.h"
+
+
+/*
+ * Narrow down configs here to avoid wasting time on unused configs here.
+ */
+
+#ifndef IEM_WITH_CODE_TLB
+# error The code TLB must be enabled for the recompiler.
+#endif
+
+#ifndef IEM_WITH_DATA_TLB
+# error The data TLB must be enabled for the recompiler.
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# error The setjmp approach must be enabled for the recompiler.
+#endif
 
 
@@ -275,7 +292,7 @@
 
 
-static PIEMTB iemThreadedTbLookup(PVMCC pVM, PVMCPUCC pVCpu)
-{
-    RT_NOREF(pVM, pVCpu);
+static PIEMTB iemThreadedTbLookup(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPC, uint64_t uPc)
+{
+    RT_NOREF(pVM, pVCpu, GCPhysPC, uPc);
     return NULL;
 }
@@ -286,3 +303,43 @@
     RT_NOREF(pVM, pVCpu, pTb);
     return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * This is called when the PC doesn't match the current pbInstrBuf.
+ */
+static uint64_t iemGetPcWithPhysAndCodeMissed(PVMCPUCC pVCpu, uint64_t const uPc, PRTGCPHYS pPhys)
+{
+    /** @todo see iemOpcodeFetchBytesJmp */
+    pVCpu->iem.s.pbInstrBuf = NULL;
+
+    pVCpu->iem.s.offInstrNextByte = 0;
+    pVCpu->iem.s.offCurInstrStart = 0;
+    pVCpu->iem.s.cbInstrBuf = 0;
+    pVCpu->iem.s.cbInstrBufTotal = 0;
+
+}
+
+/** @todo need private inline decl for throw/nothrow matching IEM_WITH_SETJMP? */
+DECL_INLINE_THROW(uint64_t) iemGetPcWithPhysAndCode(PVMCPUCC pVCpu, PRTGCPHYS pPhys)
+{
+    Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
+    uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
+    if (pVCpu->iem.s.pbInstrBuf)
+    {
+        uint64_t off = uPc - pVCpu->iem.s.uInstrBufPc;
+        if (off < pVCpu->iem.s.cbInstrBufTotal)
+        {
+            pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
+            pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
+            if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
+                pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
+            else
+                pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
+
+            *pPhys = pVCpu->iem.s.GCPhysInstrBuf + off;
+            return uPc;
+        }
+    }
+    return iemGetPcWithPhysAndCodeMissed(pVCpu, uPc, pPhys);
 }
@@ -307,9 +364,13 @@
     for (;;)
     {
-        pTb = iemThreadedTbLookup(pVM, pVCpu);
+        /* Translate PC to physical address, we'll need this for both lookup and compilation. */
+        RTGCPHYS GCPhysPC;
+        uint64_t const uPc = iemGetPcWithPhysAndCode(pVCpu, &GCPhysPC);
+
+        pTb = iemThreadedTbLookup(pVM, pVCpu, GCPhysPC, uPc);
         if (pTb)
            rcStrict = iemThreadedTbExec(pVM, pVCpu, pTb);
        else
-            rcStrict = iemThreadedCompile(pVM, pVCpu);
+            rcStrict = iemThreadedCompile(pVM, pVCpu, GCPhysPC, uPc);
         if (rcStrict == VINF_SUCCESS)
         { /* likely */ }
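iemThreadedTbLookup() and iemThreadedCompile() are still stubs at this revision, but the execution loop already hands both of them a (GCPhysPC, uPc) pair, which points toward a translation-block cache keyed primarily on the physical PC. A hypothetical sketch of what such a lookup could look like (TBSKETCH, the hash size, and the bucket function are illustrative assumptions, not VirtualBox's actual structures):

#include <stdint.h>
#include <stddef.h>

typedef struct TBSKETCH
{
    struct TBSKETCH *pNext;        /* hash bucket chain */
    uint64_t         GCPhysPc;     /* guest-physical PC the block was compiled at */
    uint64_t         uPc;          /* guest-linear PC, re-checked to catch remappings */
} TBSKETCH;

#define TB_HASH_BUCKETS 4096       /* power of two so masking works as modulo */

static TBSKETCH *g_apTbHash[TB_HASH_BUCKETS];

static TBSKETCH *tbLookupSketch(uint64_t GCPhysPc, uint64_t uPc)
{
    /* Keying on the physical address lets aliased mappings of one page share
     * blocks and lets physical writes drive invalidation. */
    uint32_t const idxBucket = (uint32_t)(GCPhysPc ^ (GCPhysPc >> 12)) & (TB_HASH_BUCKETS - 1);
    for (TBSKETCH *pTb = g_apTbHash[idxBucket]; pTb; pTb = pTb->pNext)
        if (pTb->GCPhysPc == GCPhysPc && pTb->uPc == uPc)
            return pTb;
    return NULL;                   /* caller falls back to the compiler */
}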
trunk/src/VBox/VMM/VMMR3/EM.cpp
r99930 → r99982

@@ -1082,5 +1082,5 @@
 #ifdef VBOX_WITH_IEM_RECOMPILER
             if (pVM->em.s.fIemRecompiled)
-                rcStrict = IEMExecRecompilerThreaded(pVCpu);
+                rcStrict = IEMExecRecompilerThreaded(pVM, pVCpu);
             else
 #endif
trunk/src/VBox/VMM/include/IEMInternal.h
r99930 → r99982

@@ -600,26 +600,28 @@
      * This is set to a non-canonical address when we need to invalidate it. */
     uint64_t        uInstrBufPc;                    /* 0x18 */
+    /** The guest physical address corresponding to pbInstrBuf. */
+    RTGCPHYS        GCPhysInstrBuf;                 /* 0x20 */
     /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
      * This takes the CS segment limit into account. */
-    uint16_t        cbInstrBufTotal;                /* 0x20 */
+    uint16_t        cbInstrBufTotal;                /* 0x28 */
     /** Offset into pbInstrBuf of the first byte of the current instruction.
      * Can be negative to efficiently handle cross page instructions. */
-    int16_t         offCurInstrStart;               /* 0x22 */
+    int16_t         offCurInstrStart;               /* 0x2a */
 
     /** The prefix mask (IEM_OP_PRF_XXX). */
-    uint32_t        fPrefixes;                      /* 0x24 */
+    uint32_t        fPrefixes;                      /* 0x2c */
     /** The extra REX ModR/M register field bit (REX.R << 3). */
-    uint8_t         uRexReg;                        /* 0x28 */
+    uint8_t         uRexReg;                        /* 0x30 */
     /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
      * (REX.B << 3). */
-    uint8_t         uRexB;                          /* 0x29 */
+    uint8_t         uRexB;                          /* 0x31 */
     /** The extra REX SIB index field bit (REX.X << 3). */
-    uint8_t         uRexIndex;                      /* 0x2a */
+    uint8_t         uRexIndex;                      /* 0x32 */
 
     /** The effective segment register (X86_SREG_XXX). */
-    uint8_t         iEffSeg;                        /* 0x2b */
+    uint8_t         iEffSeg;                        /* 0x33 */
 
     /** The offset of the ModR/M byte relative to the start of the instruction. */
-    uint8_t         offModRm;                       /* 0x2c */
+    uint8_t         offModRm;                       /* 0x34 */
 # else  /* !IEM_WITH_CODE_TLB */
     /** The size of what has currently been fetched into abOpcode. */
@@ -646,26 +648,26 @@
 
     /** The effective operand mode. */
-    IEMMODE         enmEffOpSize;                   /* 0x2d, 0x13 */
+    IEMMODE         enmEffOpSize;                   /* 0x35, 0x13 */
     /** The default addressing mode. */
-    IEMMODE         enmDefAddrMode;                 /* 0x2e, 0x14 */
+    IEMMODE         enmDefAddrMode;                 /* 0x36, 0x14 */
     /** The effective addressing mode. */
-    IEMMODE         enmEffAddrMode;                 /* 0x2f, 0x15 */
+    IEMMODE         enmEffAddrMode;                 /* 0x37, 0x15 */
     /** The default operand mode. */
-    IEMMODE         enmDefOpSize;                   /* 0x30, 0x16 */
+    IEMMODE         enmDefOpSize;                   /* 0x38, 0x16 */
 
     /** Prefix index (VEX.pp) for two byte and three byte tables. */
-    uint8_t         idxPrefix;                      /* 0x31, 0x17 */
+    uint8_t         idxPrefix;                      /* 0x39, 0x17 */
     /** 3rd VEX/EVEX/XOP register.
      * Please use IEM_GET_EFFECTIVE_VVVV to access. */
-    uint8_t         uVex3rdReg;                     /* 0x32, 0x18 */
+    uint8_t         uVex3rdReg;                     /* 0x3a, 0x18 */
     /** The VEX/EVEX/XOP length field. */
-    uint8_t         uVexLength;                     /* 0x33, 0x19 */
+    uint8_t         uVexLength;                     /* 0x3b, 0x19 */
     /** Additional EVEX stuff. */
-    uint8_t         fEvexStuff;                     /* 0x34, 0x1a */
+    uint8_t         fEvexStuff;                     /* 0x3c, 0x1a */
 
     /** Explicit alignment padding. */
-    uint8_t         abAlignment2a[1];               /* 0x35, 0x1b */
+    uint8_t         abAlignment2a[1];               /* 0x3d, 0x1b */
     /** The FPU opcode (FOP). */
-    uint16_t        uFpuOpcode;                     /* 0x36, 0x1c */
+    uint16_t        uFpuOpcode;                     /* 0x3e, 0x1c */
 # ifndef IEM_WITH_CODE_TLB
     /** Explicit alignment padding. */
@@ -674,33 +676,25 @@
 
     /** The opcode bytes. */
-    uint8_t         abOpcode[15];                   /* 0x48, 0x20 */
+    uint8_t         abOpcode[15];                   /* 0x40, 0x20 */
     /** Explicit alignment padding.  */
 # ifdef IEM_WITH_CODE_TLB
-    uint8_t         abAlignment2c[0x48 - 0x47];     /* 0x37 */
+    //uint8_t         abAlignment2c[0x4f - 0x4f];   /* 0x4f */
 # else
-    uint8_t         abAlignment2c[0x48 - 0x2f];     /* 0x2f */
+    uint8_t         abAlignment2c[0x4f - 0x2f];     /* 0x2f */
 # endif
 #else  /* IEM_WITH_OPAQUE_DECODER_STATE */
-    uint8_t         abOpaqueDecoder[0x48 - 0x8];
+    uint8_t         abOpaqueDecoder[0x4f - 0x8];
 #endif /* IEM_WITH_OPAQUE_DECODER_STATE */
     /** @} */
 
 
-    /** The flags of the current exception / interrupt. */
-    uint32_t        fCurXcpt;                       /* 0x48, 0x48 */
-    /** The current exception / interrupt. */
-    uint8_t         uCurXcpt;
-    /** Exception / interrupt recursion depth. */
-    int8_t          cXcptRecursions;
-
     /** The number of active guest memory mappings. */
-    uint8_t         cActiveMappings;
-    /** The next unused mapping index. */
-    uint8_t         iNextMapping;
+    uint8_t         cActiveMappings;                /* 0x4f, 0x4f */
+
     /** Records for tracking guest memory mappings. */
     struct
     {
         /** The address of the mapped bytes. */
-        void       *pv;
+        R3R0PTRTYPE(void *) pv;
         /** The access flags (IEM_ACCESS_XXX).
          * IEM_ACCESS_INVALID if the entry is unused. */
@@ -709,5 +703,5 @@
         uint32_t    u32Alignment4; /**< Alignment padding. */
 #endif
-    } aMemMappings[3];
+    } aMemMappings[3];                              /* 0x50 LB 0x30 */
 
     /** Locking records for the mapped memory. */
@@ -716,5 +710,5 @@
         PGMPAGEMAPLOCK  Lock;
         uint64_t        au64Padding[2];
-    } aMemMappingLocks[3];
+    } aMemMappingLocks[3];                          /* 0x80 LB 0x30 */
 
     /** Bounce buffer info.
@@ -734,8 +728,17 @@
         /** Explicit alignment padding. */
         bool        afAlignment5[3];
-    } aMemBbMappings[3];
-
-    /* Ensure that aBounceBuffers are aligned at a 32 byte boundrary. */
-    uint64_t        abAlignment7[1];
+    } aMemBbMappings[3];                            /* 0xb0 LB 0x48 */
+
+    /** The flags of the current exception / interrupt. */
+    uint32_t        fCurXcpt;                       /* 0xf8 */
+    /** The current exception / interrupt. */
+    uint8_t         uCurXcpt;                       /* 0xfc */
+    /** Exception / interrupt recursion depth. */
+    int8_t          cXcptRecursions;                /* 0xfb */
+
+    /** The next unused mapping index.
+     * @todo try find room for this up with cActiveMappings. */
+    uint8_t         iNextMapping;                   /* 0xfd */
+    uint8_t         abAlignment7[1];
 
     /** Bounce buffer storage.
@@ -744,5 +747,5 @@
     {
         uint8_t     ab[512];
-    } aBounceBuffers[3];
+    } aBounceBuffers[3];                            /* 0x100 LB 0x600 */
 
 
@@ -816,5 +819,5 @@
     uint8_t         cLogRelWrMsr;
     /** Alignment padding. */
-    uint8_t         abAlignment8[42];
+    uint8_t         abAlignment9[46];
 
     /** @name Recompilation
@@ -846,8 +849,7 @@
 #endif
 } IEMCPU;
-AssertCompileMemberOffset(IEMCPU, fCurXcpt, 0x48);
-AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 8);
-AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 16);
-AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 32);
+AssertCompileMemberOffset(IEMCPU, cActiveMappings, 0x4f);
+AssertCompileMemberAlignment(IEMCPU, aMemMappings, 16);
+AssertCompileMemberAlignment(IEMCPU, aMemMappingLocks, 16);
 AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
 AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
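Most of the IEMInternal.h churn is mechanical: inserting the 8-byte RTGCPHYS at 0x20 pushes every later decoder member up by 8 (0x20 to 0x28, 0x22 to 0x2a, and so on), the exception and mapping bookkeeping is repacked, and the compile-time asserts are retargeted (cActiveMappings pinned at 0x4f, aBounceBuffers still 64-byte aligned). A small self-contained sketch of how such offset comments are kept honest at compile time, in the spirit of AssertCompileMemberOffset (MOCKDECODER is a mock struct, not IEMCPU, and its offsets assume a 64-bit host):

#include <stdint.h>
#include <stddef.h>
#include <assert.h>   /* C11: defines static_assert */

typedef struct MOCKDECODER
{
    uint32_t        offInstrNextByte;   /* 0x00 */
    uint32_t        cbInstrBuf;         /* 0x04 */
    const uint8_t  *pbInstrBuf;         /* 0x08 (8-byte pointer assumed) */
    uint64_t        uInstrBufPc;        /* 0x10 */
    uint64_t        GCPhysInstrBuf;     /* 0x18: adding an 8-byte field like this
                                         * is what shifted every later member by 8 */
    uint16_t        cbInstrBufTotal;    /* 0x20 */
    int16_t         offCurInstrStart;   /* 0x22 */
} MOCKDECODER;

/* If the layout ever drifts from the documented offsets, the build breaks
 * here instead of the comments silently going stale. */
static_assert(offsetof(MOCKDECODER, GCPhysInstrBuf)   == 0x18, "layout drift");
static_assert(offsetof(MOCKDECODER, cbInstrBufTotal)  == 0x20, "layout drift");
static_assert(offsetof(MOCKDECODER, offCurInstrStart) == 0x22, "layout drift");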