Timestamp:
    May 31, 2017 9:10:12 AM (7 years ago)
Location:
    trunk
Files:
    3 edited
    - include/VBox/vmm/hm.h (modified) (1 diff)
    - src/VBox/VMM/VMMAll/HMSVMAll.cpp (modified) (19 diffs)
    - src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (2 diffs)
Legend (diff markers used below):
    ' '  Unmodified
    '+'  Added
    '-'  Removed
trunk/include/VBox/vmm/hm.h
(r66751 → r67156)

@@ -156 +156 @@
 VMM_INT_DECL(void)          HMVmxNstGstVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason);
 VMM_INT_DECL(VBOXSTRICTRC)  HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
-VMM_INT_DECL(VBOXSTRICTRC)  HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb);
+VMM_INT_DECL(VBOXSTRICTRC)  HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr, RTGCPHYS GCPhysVmcb);
 VMM_INT_DECL(uint8_t)       HMSvmNstGstGetInterrupt(PCCPUMCTX pCtx);
 VMM_INT_DECL(bool)          HMSvmNstGstCanTakeInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx);
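The new cbInstr parameter is consumed further down in HMSVMAll.cpp, where the guest hypervisor's host state is saved with uRip = rip + cbInstr. As a rough illustration of why the instruction length matters (hypothetical names and structures, not the VirtualBox ones): the resume point recorded at VMRUN time has to point past the VMRUN instruction itself, otherwise the eventual #VMEXIT would land back on VMRUN and loop.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, trimmed-down host-state area; the real one is the VMCB host
     * save area and holds much more state. */
    typedef struct SKETCHHOSTSTATE
    {
        uint64_t uRip;  /* where the outer hypervisor resumes on #VMEXIT */
        uint64_t uRsp;
        uint64_t uRax;
    } SKETCHHOSTSTATE;

    /* Record the resume point: RIP of the VMRUN instruction plus its length. */
    static void sketchSaveHostState(SKETCHHOSTSTATE *pHostState, uint64_t uRip, uint8_t cbInstr,
                                    uint64_t uRsp, uint64_t uRax)
    {
        pHostState->uRip = uRip + cbInstr;  /* skip over VMRUN itself */
        pHostState->uRsp = uRsp;
        pHostState->uRax = uRax;
    }

    int main(void)
    {
        SKETCHHOSTSTATE HostState;
        /* VMRUN encodes as 0F 01 D8, i.e. 3 bytes without prefixes. */
        sketchSaveHostState(&HostState, 0x401000 /* RIP of VMRUN */, 3, 0x7ffe0000, 0);
        printf("resume at %#llx\n", (unsigned long long)HostState.uRip); /* prints 0x401003 */
        return 0;
    }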
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
(r66751 → r67156)

@@ -249 +249 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   cbInstr     The length of the VMRUN instruction.
  * @param   GCPhysVmcb  Guest physical address of the VMCB to run.
  */
 /** @todo move this to IEM and make the VMRUN version that can execute under
  *        hardware SVM here instead. */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb)
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr, RTGCPHYS GCPhysVmcb)
 {
     Assert(pVCpu);
     Assert(pCtx);
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+    Log3(("HMSvmVmrun\n"));
 
     /*

@@ -283 +285 @@
     pHostState->uCr4   = pCtx->cr4;
     pHostState->rflags = pCtx->rflags;
-    pHostState->uRip   = pCtx->rip;
+    pHostState->uRip   = pCtx->rip + cbInstr;
     pHostState->uRsp   = pCtx->rsp;
     pHostState->uRax   = pCtx->rax;

@@ -337 +339 @@
 
     /* IO permission bitmap. */
-    RTGCPHYS GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
+    RTGCPHYS const GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
     if (   (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
-        || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap))
+        || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)
+        || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + X86_PAGE_4K_SIZE)
+        || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + (X86_PAGE_4K_SIZE << 1)))
     {
         Log(("HMSvmVmRun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));

@@ -346 +350 @@
 
     /* MSR permission bitmap. */
-    RTGCPHYS GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
+    RTGCPHYS const GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
     if (   (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
-        || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap))
+        || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)
+        || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap + X86_PAGE_4K_SIZE))
     {
         Log(("HMSvmVmRun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));

@@ -378 +383 @@
 
     /** @todo gPAT MSR validation? */
+
+    /*
+     * Copy the IO permission bitmap into the cache.
+     */
+    Assert(pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap));
+    rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,
+                                 SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
+    if (RT_FAILURE(rc))
+    {
+        Log(("HMSvmVmRun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    /*
+     * Copy the MSR permission bitmap into the cache.
+     */
+    Assert(pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));
+    rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,
+                                 SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
+    if (RT_FAILURE(rc))
+    {
+        Log(("HMSvmVmRun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
 
     /*

@@ -468 +497 @@
     /*
      * TLB flush control.
-     */
+     * Currently disabled since it's redundant as we unconditionally flush the TLB below.
+     */
+#if 0
     /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
     if (   pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE

@@ -474 +505 @@
         || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
         PGMFlushTLB(pVCpu, VmcbNstGst.u64CR3, true /* fGlobal */);
+#endif
 
     /** @todo @bugref{7243}: SVM TSC offset, see tmCpuTickGetInternal. */

@@ -501 +533 @@
     pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
     pCtx->dr[7] |= X86_DR7_RA1_MASK;
+
+    /*
+     * Ask PGM to flush the TLB as if we continue to interpret the nested-guest
+     * instructions from guest memory we'd be in trouble otherwise.
+     */
+    PGMFlushTLB(pVCpu, pCtx->cr3, true);
+    PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
+    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
 
     /*

@@ -558 +598 @@
          *        NRIP for the nested-guest to calculate the instruction length
          *        below. */
+        Log3(("HMSvmVmRun: InjectingEvent: uVector=%u enmType=%d uErrorCode=%u cr2=%#RX64\n", uVector, enmType,
+              uErrorCode, pCtx->cr2));
         VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */);
         if (   rcStrict == VINF_SVM_VMEXIT

@@ -564 +606 @@
     }
 
+    Log3(("HMSvmVmRun: Entered nested-guest at CS:RIP=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
     return VINF_SUCCESS;
 }

@@ -597 +640 @@
                                uint64_t uExitInfo2)
 {
-    if (   CPUMIsGuestInNestedHwVirtMode(pCtx)
+    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
         || uExitCode == SVM_EXIT_INVALID)
     {
+        Log3(("HMSvmNstGstVmExit: uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode, uExitInfo1, uExitInfo2));
+
         /*
          * Disable the global interrupt flag to prevent interrupts during the 'atomic' world switch.

@@ -734 +779 @@
         pCtx->dr[7] |= X86_DR7_RA1_MASK;
 
+        PGMFlushTLB(pVCpu, pCtx->cr3, true);
+        PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
+        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
+
         /** @todo if RIP is not canonical or outside the CS segment limit, we need to
          *        raise \#GP(0) in the guest. */

@@ -745 +794 @@
         {
             Log(("HMNstGstSvmVmExit: Writing VMCB at %#RGp failed\n", pCtx->hwvirt.svm.GCPhysVmcb));
-            Assert(!CPUMIsGuestInNestedHwVirtMode(pCtx));
+            Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
             rc = VERR_SVM_VMEXIT_FAILED;
         }
 
+        Log3(("HMSvmNstGstVmExit: returns %Rrc\n", rc));
         return rc;
     }

@@ -771 +821 @@
     {
         PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;
-        Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
+        Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
 
         X86RFLAGS RFlags;

@@ -836 +886 @@
     } while (0)
 
-    if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
+    if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
         return VINF_HM_INTERCEPT_NOT_ACTIVE;
 

@@ -989 +1039 @@
      * Check if any IO accesses are being intercepted.
      */
-    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
+    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
     Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT));
+    Log(("HMSvmNstGstHandleIOIntercept: u16Port=%u\n", pIoExitInfo->n.u16Port));
 
     /*

@@ -1005 +1056 @@
     static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
     uint8_t const *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
+    Assert(pbIopm);
 
     uint16_t const u16Port = pIoExitInfo->n.u16Port;

@@ -1015 +1067 @@
     uint16_t const u16Iopm = *(uint16_t *)pbIopm;
     if (u16Iopm & fIopmMask)
+    {
+        Log(("HMSvmNstGstHandleIOIntercept: u16Port=%u offIoPm=%u fSizeMask=%#x cShift=%u fIopmMask=%#x\n", u16Port, offIopm,
+             fSizeMask, cShift, fIopmMask));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip);
+    }
 
+    Log(("HMSvmNstGstHandleIOIntercept: huh!?\n"));
+    AssertMsgFailed(("We expect an IO intercept here!\n"));
     return VINF_HM_INTERCEPT_NOT_ACTIVE;
 }

@@ -1045 +1103 @@
      */
     Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT));
-    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
+    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
 
     uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
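Two details of this file's changes benefit from a little background. The SVM IO permission bitmap is 12 KB (three 4K pages: one bit per I/O port, with the last page there so multi-byte accesses near port 0xFFFF can be tested without running off the map) and the MSR permission bitmap is 8 KB (two pages); that is why the validation above now probes every page with PGMPhysIsGCPhysNormal before the bitmaps are copied into the cache. The intercept test instrumented in HMSvmNstGstHandleIOIntercept builds a size mask and shifts it to the first port's bit position. Below is a self-contained sketch of that kind of lookup, using illustrative names rather than the VirtualBox ones and assuming a little-endian host:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SKETCH_IOPM_CB  (3 * 4096)  /* 12 KB: one bit per I/O port plus a spill page for wide accesses */

    /* Does an access of cbAccess bytes (1, 2 or 4) starting at u16Port hit an intercepted port? */
    static bool sketchIsIoIntercepted(uint8_t const *pbIopm, uint16_t u16Port, unsigned cbAccess)
    {
        uint16_t const fSizeMask = cbAccess == 1 ? 0x1 : cbAccess == 2 ? 0x3 : 0xf; /* one bit per port touched */
        uint32_t const offIopm   = u16Port >> 3;  /* byte holding the first port's bit */
        unsigned const cShift    = u16Port & 7;   /* bit position of that port within the byte */
        uint16_t       u16Bits;
        memcpy(&u16Bits, &pbIopm[offIopm], sizeof(u16Bits)); /* 16 bits cover mask << shift (little-endian read) */
        return (u16Bits & (uint16_t)(fSizeMask << cShift)) != 0;
    }

    int main(void)
    {
        static uint8_t abIopm[SKETCH_IOPM_CB];
        abIopm[0x70 >> 3] |= 1 << (0x70 & 7);  /* intercept port 0x70 (CMOS/RTC index) */
        printf("byte access to 0x70: %d\n", sketchIsIoIntercepted(abIopm, 0x70, 1)); /* 1 */
        printf("word access to 0x6f: %d\n", sketchIsIoIntercepted(abIopm, 0x6f, 2)); /* 1, spans 0x6f..0x70 */
        printf("byte access to 0x71: %d\n", sketchIsIoIntercepted(abIopm, 0x71, 1)); /* 0 */
        return 0;
    }

The word access to port 0x6f touches ports 0x6f and 0x70, so it reports as intercepted even though only 0x70's bit is set; that is exactly the case the size mask exists for.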
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r67080 → r67156)

@@ -1804 +1804 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
 
-#ifdef VBOX_WITH_NESTED_HWVIRT
-    /* Nested Hw. virt through SVM R0 execution is not yet implemented, IEM only, we shouldn't get here. */
-    if (CPUMIsGuestInNestedHwVirtMode(pCtx))
-        return VERR_NOT_IMPLEMENTED;
-#endif
-
     int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcb, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

@@ -2972 +2966 @@
 {
     HMSVM_ASSERT_PREEMPT_SAFE();
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_IN_IEM
+    /* Nested Hw. virt through SVM R0 execution is not yet implemented, IEM only, we shouldn't get here. */
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        return VINF_EM_RESCHEDULE_REM;
+#endif
 
     /* Check force flag actions that might require us to go back to ring-3. */
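The net effect of the HMSVMR0.cpp change is that a guest hypervisor executing VMRUN no longer aborts hardware-assisted execution with VERR_NOT_IMPLEMENTED while guest state is being loaded; the check now sits in the earlier ring-3/force-flag path and returns VINF_EM_RESCHEDULE_REM, so the nested guest is handed to IEM for interpretation instead of stopping the VM. A toy sketch of that behavioural difference, using made-up status codes and function names (not the VirtualBox APIs):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative status codes only; the real ones live in VBox/err.h. */
    enum { SKETCH_OK = 0, SKETCH_RESCHEDULE_REM = 1, SKETCH_ERR_NOT_IMPLEMENTED = -1 };

    /* Before (sketch): fail hard while already loading guest state for the VM-entry. */
    static int sketchLoadGuestStateOld(bool fInSvmNestedGuestMode)
    {
        return fInSvmNestedGuestMode ? SKETCH_ERR_NOT_IMPLEMENTED : SKETCH_OK;
    }

    /* After (sketch): decide up front to bounce back to ring-3 and let the interpreter run it. */
    static int sketchCheckBeforeEntryNew(bool fInSvmNestedGuestMode)
    {
        return fInSvmNestedGuestMode ? SKETCH_RESCHEDULE_REM : SKETCH_OK;
    }

    int main(void)
    {
        printf("old: %d, new: %d\n", sketchLoadGuestStateOld(true), sketchCheckBeforeEntryNew(true));
        return 0;
    }

A reschedule status keeps the VM running (just more slowly, under the interpreter), whereas an error status returned from the state-loading path would propagate up and stop guest execution.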