Changeset 74785 in vbox

- Timestamp: Oct 12, 2018 10:14:19 AM (6 years ago)
- Location: trunk
- Files: 19 edited
- include/VBox/vmm/vm.h (modified) (5 diffs)
- src/VBox/VMM/VMMAll/APICAll.cpp (modified) (1 diff)
- src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (modified) (1 diff)
- src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h (modified) (3 diffs)
- src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (modified) (4 diffs)
- src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h (modified) (10 diffs)
- src/VBox/VMM/VMMAll/SELMAll.cpp (modified) (1 diff)
- src/VBox/VMM/VMMAll/TMAll.cpp (modified) (3 diffs)
- src/VBox/VMM/VMMAll/TMAllVirtual.cpp (modified) (4 diffs)
- src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (16 diffs)
- src/VBox/VMM/VMMR0/HMVMXR0.cpp (modified) (20 diffs)
- src/VBox/VMM/VMMR0/NEMR0Native-win.cpp (modified) (3 diffs)
- src/VBox/VMM/VMMR0/VMMR0.cpp (modified) (1 diff)
- src/VBox/VMM/VMMR3/DBGF.cpp (modified) (4 diffs)
- src/VBox/VMM/VMMR3/EM.cpp (modified) (16 diffs)
- src/VBox/VMM/VMMR3/EMRaw.cpp (modified) (2 diffs)
- src/VBox/VMM/VMMR3/VMM.cpp (modified) (1 diff)
- src/VBox/VMM/VMMRC/SELMRC.cpp (modified) (2 diffs)
- src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp (modified) (3 diffs)
Legend:
- Unmodified
- Added
- Removed
trunk/include/VBox/vmm/vm.h (r74783 → r74785)

@@ 673 @@
 #define VM_FF_IS_SET(pVM, fFlag)                (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))

+
 /** @def VMCPU_FF_IS_SET
  * Checks if a force action flag is set for the given VCPU.
…
  * @param   fFlag   The flag to check.
  */
-#define VMCPU_FF_IS_SET(pVCpu, fFlag)           (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
+#if !defined(VBOX_STRICT) || !defined(RT_COMPILER_SUPPORTS_LAMBDA)
+# define VMCPU_FF_IS_SET(pVCpu, fFlag)          (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
+#else
+# define VMCPU_FF_IS_SET(pVCpu, fFlag) \
+    ([](PVMCPU a_pVCpu) -> bool \
+    { \
+        AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
+        return (a_pVCpu->fLocalForcedActions & (fFlag)) == (fFlag); \
+    }(pVCpu))
+#endif

 /** @def VM_FF_IS_PENDING
…
  */
 #define VM_FF_IS_PENDING(pVM, fFlags)           RT_BOOL((pVM)->fGlobalForcedActions & (fFlags))
+
+/** @def VMCPU_FF_IS_PENDING
+ * Checks if one or more force action in the specified set is pending for the given VCPU.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   fFlags  The flags to check for.
+ */
+#define VMCPU_FF_IS_PENDING(pVCpu, fFlags)      RT_BOOL((pVCpu)->fLocalForcedActions & (fFlags))

 /** @def VM_FF_TEST_AND_CLEAR
…
 #define VMCPU_FF_TEST_AND_CLEAR(pVCpu, iBit)    (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))

-/** @def VMCPU_FF_IS_PENDING
- * Checks if one or more force action in the specified set is pending for the given VCPU.
- *
- * @param   pVCpu   The cross context virtual CPU structure.
- * @param   fFlags  The flags to check for.
- */
-#define VMCPU_FF_IS_PENDING(pVCpu, fFlags)      RT_BOOL((pVCpu)->fLocalForcedActions & (fFlags))
-
 /** @def VM_FF_IS_PENDING_EXCEPT
  * Checks if one or more force action in the specified set is pending while one
…
  * @param   fExcpt  The flags that should not be set.
  */
-#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt) ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )
+#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)    ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )

 /** @def VM_IS_EMT
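The interesting hunk here is the strict-build variant of VMCPU_FF_IS_SET: by expanding to an immediately-invoked lambda, the macro gains a function body in which AssertCompile can verify at compile time that the caller passed exactly one flag (a power of two), while the whole expansion still behaves as an ordinary boolean expression at the call site. A minimal standalone sketch of the same trick, using plain static_assert and hypothetical struct and flag names in place of VBox's PVMCPU, AssertCompile and RT_IS_POWER_OF_TWO:

    #include <cstdint>
    #include <cstdio>

    struct VCpu { uint64_t fLocalForcedActions; };   // stand-in for VMCPU

    // Compile-time power-of-two test, same spirit as RT_IS_POWER_OF_TWO.
    constexpr bool isPowerOfTwo(uint64_t f) { return f != 0 && (f & (f - 1)) == 0; }

    // The immediately-invoked lambda gives the macro a body in which
    // static_assert can inspect fFlag; the expansion is still just a
    // bool-valued expression.
    #define CPU_FF_IS_SET(pVCpu, fFlag) \
        ([](const VCpu *a_pVCpu) -> bool \
        { \
            static_assert(isPowerOfTwo(fFlag), "CPU_FF_IS_SET takes exactly one flag"); \
            return (a_pVCpu->fLocalForcedActions & (fFlag)) == (fFlag); \
        }(pVCpu))

    constexpr uint64_t CPU_FF_TIMER   = UINT64_C(1) << 0;   // illustrative flags
    constexpr uint64_t CPU_FF_REQUEST = UINT64_C(1) << 1;

    int main()
    {
        VCpu cpu{CPU_FF_TIMER};
        std::printf("timer set:   %d\n", CPU_FF_IS_SET(&cpu, CPU_FF_TIMER));    // 1
        std::printf("request set: %d\n", CPU_FF_IS_SET(&cpu, CPU_FF_REQUEST));  // 0
        // CPU_FF_IS_SET(&cpu, CPU_FF_TIMER | CPU_FF_REQUEST);  // rejected at compile time
        return 0;
    }

With this guard in place, a call site that passes a multi-bit mask fails to compile in strict builds instead of silently requiring all bits to be set, which is what motivates the VMCPU_FF_IS_PENDING → VMCPU_FF_IS_SET cleanup in the files below.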
trunk/src/VBox/VMM/VMMAll/APICAll.cpp (r73285 → r74785)

@@ 3341 @@
     if (   fHasPendingIntrs
-        && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC))
+        && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
         apicSignalNextPendingIntr(pVCpu);
 }
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r74709 → r74785)

@@ 7189 @@
 {
     if (   IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INT_WINDOW_EXIT)
-        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
+        || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
     {
         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h (r74603 → r74785)

@@ 202 @@
  */
 PSVMVMCBCTRL pVmcbMemCtrl = &pVmcbMem->ctrl;
-if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))   /* V_IRQ. */
+if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))       /* V_IRQ. */
     pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 0;
 else

@@ 212 @@
 pVmcbMemCtrl->IntCtrl.n.u8VTPR = pVmcbCtrl->IntCtrl.n.u8VTPR;       /* V_TPR. */

-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)      /* Interrupt shadow. */
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)          /* Interrupt shadow. */
     && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
 {

@@ 733 @@
     VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
 else
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));

 /*
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r74784 → r74785)

@@ 1925 @@
 /* MTF should not be set outside VMX non-root mode. */
-Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_MTF));
+Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_MTF));

@@ 2131 @@
 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
 { /** @todo NSTVMX: Virtual-NMI blocking. */ }
-else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;

-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
 {

@@ 5782 @@
 }
 else
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS));

 if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
     EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
 else
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));

 /* SMI blocking is irrelevant. We don't support SMIs yet. */

@@ 5930 @@
 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
  *        use block-by-STI here which is not quite correct. */
-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
 {
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h (r74517 → r74785)

@@ 417 @@
 {
     ADD_REG64(WHvRegisterInterruptState, 0);
-    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
         && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
         aValues[iReg - 1].InterruptState.InterruptShadow = 1;
-    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
         aValues[iReg - 1].InterruptState.NmiMasked = 1;
 }

@@ 426 @@
 {
     if (   pVCpu->nem.s.fLastInterruptShadow
-        || (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+        || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
             && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip))
     {
         ADD_REG64(WHvRegisterInterruptState, 0);
-        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
             && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
             aValues[iReg - 1].InterruptState.InterruptShadow = 1;

@@ 1575 @@
 /* Update interrupt inhibition. */
-if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
 { /* likely */ }
 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))

@@ 1600 @@
 /* Update interrupt inhibition. */
-if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
 { /* likely */ }
 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))

@@ 1916 @@
 if (!pHdr->ExecutionState.InterruptShadow)
 {
-    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     { /* likely */ }
     else

@@ 1947 @@
 if (!pExitCtx->ExecutionState.InterruptShadow)
 {
-    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     { /* likely */ }
     else

@@ 3970 @@
  * We don't currently implement SMIs.
  */
-AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
+AssertReturn(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);

@@ 3977 @@
  * for injection via IEM.
  */
-bool const fPendingNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+bool const fPendingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
 uint64_t fNeedExtrn = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
                     | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);

@@ 3987 @@
     return rcStrict;
 }
-bool const fInhibitInterrupts = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+bool const fInhibitInterrupts = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                              && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip;

@@ 3996 @@
 {
     if (   !fInhibitInterrupts
-        && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+        && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     {
         VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp (r70948 → r74785)

@@ 78 @@
 memcpy(pvBuf, pvPtr, cbBuf);
 VBOXSTRICTRC rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
-if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
+if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
 else
trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r72685 → r74785)

@@ 801 @@
 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
 {
-    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER)));
+    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
     VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #if defined(IN_RING3) && defined(VBOX_WITH_REM)

@@ 847 @@
     && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
 {
-    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER)));
+    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
     VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #if defined(IN_RING3) && defined(VBOX_WITH_REM)

@@ 946 @@
     && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
 {
-    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER)));
+    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
     VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #if defined(IN_RING3) && defined(VBOX_WITH_REM)
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r69111 → r74785)

@@ 239 @@
 {
     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
-    Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER)));
+    Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
     VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3

@@ 396 @@
 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
-Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER)));
+Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);

@@ 484 @@
 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
-Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, !!VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER)));
+Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);

@@ 711 @@
 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
 {
-    Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_PENDING(pVCpuDst, VMCPU_FF_TIMER)));
+    Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
     VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
     VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r73606 → r74785)

@@ 1158 @@
 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);

-bool const fFlushPending = pVCpu->CTX_SUFF(pVM)->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
+bool const fFlushPending = pVCpu->CTX_SUFF(pVM)->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);

 /* Skip it if a TLB flush is already pending. */

@@ 2806 @@
 {
     if (   !pVmcbCtrl->IntCtrl.n.u1VIrqPending
-        && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
+        && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
 }

@@ 2815 @@
 if (pVmcbCtrl->IntShadow.n.u1IntShadow)
     EMSetInhibitInterruptsPC(pVCpu, pVmcbGuest->u64RIP);
-else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
 }

@@ 3022 @@
  */
 if (   VMMRZCallRing3IsEnabled(pVCpu)
-    && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+    && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
 {
     Assert(pCtx->cr3 == pVmcbGuest->u64CR3);

@@ 3669 @@
  *        delivery/window over a physical interrupt (from the outer guest)
  *        might be pending? */
-bool const fEnableIntWindow = !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
+bool const fEnableIntWindow = !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
 if (!fEnableIntWindow)
 {

@@ 3735 @@
 bool const fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
 bool const fIntShadow  = hmR0SvmIsIntrShadowActive(pVCpu);
-bool const fBlockNmi   = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
+bool const fBlockNmi   = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);

 Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntPending=%RTbool fNmiPending=%RTbool\n",
           fVirtualGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
-          VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
+          VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)));

@@ 3748 @@
  * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
  */
-if (    VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)
+if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
     && !fBlockNmi)
 {

@@ 3860 @@
 bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
 bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
-bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
+bool const fBlockNmi  = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);

 Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool fIntPending=%RTbool NMI pending=%RTbool\n",
           fGif, fBlockNmi, fBlockInt, fIntShadow,
           VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
-          VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
+          VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)));

@@ 3874 @@
  * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
  */
-if (    VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)
+if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
     && !fBlockNmi)
 {

@@ 4002 @@
 if (   Event.n.u3Type   == SVM_EVENT_NMI
     && Event.n.u8Vector == X86_XCPT_NMI
-    && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+    && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
 {
     VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);

@@ 4027 @@
  * but we still need to intercept IRET in order to eventually clear NMI inhibition.
  */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET);

@@ 4209 @@
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));

     /* Could happen as a result of longjump. */
-    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
         PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));

@@ 4229 @@
 {
     int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4,
-                        VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+                        VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
     if (rc != VINF_SUCCESS)
     {

@@ 4249 @@
 /* Pending VM request packets, such as hardware interrupts. */
-if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
-    || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
+    || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
 {
     Log4Func(("Pending VM request forcing us back to ring-3\n"));

@@ 6043 @@
 /* Update interrupt shadow. */
-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);

@@ 7456 @@
 /* Clear NMI blocking. */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r74604 → r74785)

@@ 1848 @@
 LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt));

-bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
+bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
 if (!fFlushPending)
 {

@@ 3452 @@
  */
 uint32_t fIntrState = 0;
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
 {
     /* If inhibition is active, RIP & RFLAGS should've been accessed

@@ 3468 @@
     fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
 }
-else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
 {
     /*

@@ 3486 @@
  * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
  */
-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)
     && (pVCpu->hm.s.vmx.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
 {

@@ 6210 @@
  * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
  */
-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)
     && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
     && (   enmRaise   == IEMXCPTRAISE_PREV_EVENT

@@ 6312 @@
  * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
  */
-if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
 {
     Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n",

@@ 6496 @@
 if (!u32Val)
 {
-    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     {
         rc = hmR0VmxImportGuestRip(pVCpu);
…
     }

-    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
 }

@@ 6516 @@
     EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
 }
-else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);

 if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
 {
-    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
         VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
 }
-else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
 }

@@ 6841 @@
 if (VMMRZCallRing3IsEnabled(pVCpu))
 {
-    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
     {
         Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
…
     }

-    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
         PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);

-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
 }

@@ 6912 @@
 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
-                                    VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+                                    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 if (rcStrict2 != VINF_SUCCESS)
 {

@@ 6933 @@
 /* Pending VM request packets, such as hardware interrupts. */
 if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
-    || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+    || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
 {
     Log4Func(("Pending VM request forcing us back to ring-3\n"));

@@ 7519 @@
  */
 /** @todo SMI. SMIs take priority over NMIs. */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))    /* NMI. NMIs take priority over regular interrupts. */
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))        /* NMI. NMIs take priority over regular interrupts. */
 {
     /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */

@@ 8527 @@
  * the below force flags to be set.
  */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
 {
     Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
…
     AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
                     ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
-}
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
+}
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
 {
     PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
 }

@@ 8849 @@
 VMMRZCallRing3Enable(pVCpu);

-Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
-Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
+Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
+Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));

 #if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)

@@ 10465 @@
 /* Update interrupt inhibition. */
-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);

@@ 11290 @@
 }

-Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
+Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS));

 /*

@@ 11302 @@
 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
 if (   fBlockSti
-    && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+    && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
 {
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp (r74517 → r74785)

@@ 1219 @@
 pInput->Elements[iReg].Name                  = HvRegisterInterruptState;
 pInput->Elements[iReg].Value.Reg64           = 0;
-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
     pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
 iReg++;

@@ 1229 @@
 {
     if (   pVCpu->nem.s.fLastInterruptShadow
-        || (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+        || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
             && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
     {

@@ 1235 @@
         pInput->Elements[iReg].Name                  = HvRegisterInterruptState;
         pInput->Elements[iReg].Value.Reg64           = 0;
-        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
             && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
             pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r73203 → r74785)

@@ 904 @@
 case VINF_EM_RAW_TO_R3:
     STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
-    if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
+    if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
         STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
-    else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
+    else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
         STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
-    else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
+    else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
         STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
-    else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
+    else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
         STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
-    else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
+    else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
         STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
-    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
+    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
         STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
-    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
+    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
         STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
-    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
+    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
         STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
-    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
+    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
         STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
-    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
+    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
         STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
     else
trunk/src/VBox/VMM/VMMR3/DBGF.cpp (r73414 → r74785)

@@ 299 @@
  *        unfortunately required by plugin unloading. */
 if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
-    || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+    || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
 {
     LogFlow(("DBGFR3PowerOff: Processes priority requests...\n"));

@@ 392 @@
 /* Process priority stuff. */
 if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
-    || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+    || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
 {
     int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);

@@ 835 @@
 int rc;
 if (   !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_REQUEST)
-    && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+    && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
 {
     rc = RTSemPingWait(&pVM->dbgf.s.PingPong, cPollHack);

@@ 853 @@
 }
 else if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
-         || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+         || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
 {
     LogFlow(("dbgfR3VMMWait: Processes requests...\n"));
trunk/src/VBox/VMM/VMMR3/EM.cpp (r73617 → r74785)

@@ 1650 @@
 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));

-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
     PDMCritSectBothFF(pVCpu);

 /* Update CR3 (Nested Paging case for HM). */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
 {
     CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
…
     if (RT_FAILURE(rc2))
         return rc2;
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
 }

 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
 {
     CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
…
     PGMGstUpdatePaePdpes(pVCpu, pPdpes);
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
 }
 else
…
 /* IEM has pending work (typically memory write after INS instruction). */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
     rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);

 /* IOM has pending work (comitting an I/O or MMIO write). */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
 {
     rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
…
 #ifdef VBOX_WITH_RAW_MODE
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
     CSAMR3DoPendingAction(pVM, pVCpu);
 #endif

@@ 1779 @@
 }

-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
     && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, &pVCpu->cpum.GstCtx))
 {

@@ 1916 @@
 /*
  * Debugger Facility polling.
  */
-if (   VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
-    || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
+if (   VM_FF_IS_SET(pVM, VM_FF_DBGF)
+    || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
 {
     CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);

@@ 1938 @@
 /*
  * CSAM page scanning.
  */
-if (   !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
-    && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
+if (   !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
+    && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
 {
     /** @todo check for 16 or 32 bits code! (D bit in the code selector) */

@@ 2056 @@
  * (Executed in no particular order.)
  */
-if (   !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
+if (   !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
     && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
 {
     /*
      * Requests from other threads.
      */
-    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
     {
         CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);

@@ 2100 @@
 /*
  * Timers before interrupts.
  */
-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
     && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
     TMR3TimerQueuesDo(pVM);

@@ 2122 @@
  * you might think.
  */
-if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
 {

@@ 2142 @@
     && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
 {
-    if (   !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+    if (   !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
         && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
     {

@@ 2210 @@
  */
 if (   (   VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
-        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
+        || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
     && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
 {

@@ 2722 @@
 if (   enmOldState == EMSTATE_HALTED
     && (   (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
-        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
+        || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
     && (   enmNewState == EMSTATE_RAW
         || enmNewState == EMSTATE_HM

@@ 2739 @@
     pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
 }
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
 {
     LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
trunk/src/VBox/VMM/VMMR3/EMRaw.cpp (r72634 → r74785)

@@ 1180 @@
  *        PGMSyncCR3+pgmR3PoolClearAll is pending.
  */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
 {
-    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
+    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
         && EMIsRawRing0Enabled(pVM)
         && CSAMIsEnabled(pVM))

@@ 1199 @@
  * Sync TSS.
  */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
 {
     int rc = SELMR3SyncTSS(pVM, pVCpu);
trunk/src/VBox/VMM/VMMR3/VMM.cpp (r73481 → r74785)

@@ 2845 @@
  * when entering other critsects here.
  */
-if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
+if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
     PDMCritSectBothFF(pVCpu);
trunk/src/VBox/VMM/VMMRC/SELMRC.cpp (r74584 → r74785)

@@ 331 @@
 }

-if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
+if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
 else

@@ 474 @@
 if (   rcStrict == VINF_SUCCESS
-    && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS)
+    && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS)
     && pVM->selm.s.offGuestIoBitmap != 0)
 {
trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp (r72655 → r74785)

@@ 184 @@
     TMTimerPollVoid(pVM, pVCpu);
     Log2(("TMTimerPoll at %08RX32 - VM_FF_TM_VIRTUAL_SYNC=%d VM_FF_TM_VIRTUAL_SYNC=%d\n", pRegFrame->eip,
-          VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)));
+          VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)));
 }
 }

@@ 241 @@
 }
 /* Pending timer action. */
-else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
+else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
     rc = VINF_EM_RAW_TIMER_PENDING;
 /* The Virtual Sync clock has stopped. */

@@ 251 @@
 /* Pending request packets might contain actions that need immediate
    attention, such as pending hardware interrupts. */
-else if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
-         || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+else if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
+         || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
     rc = VINF_EM_PENDING_REQUEST;
 /* Pending GDT/LDT/TSS sync. */
 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS))
     rc = VINF_SELM_SYNC_GDT;
-else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
+else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
     rc = VINF_EM_RAW_TO_R3;
 /* Possibly pending interrupt: dispatch it. */
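Across all of the files above the change is a mechanical rename of single-flag VMCPU_FF_IS_PENDING (and a few VM_FF_IS_PENDING) checks to the IS_SET form. The rename is behavior-preserving only because the two macros agree when the mask contains a single bit: IS_SET tests that every bit of the mask is set, while IS_PENDING tests whether any bit is set. That is also why the multi-flag call sites in the hunks above (for example VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC, or the SELM GDT/LDT/TSS sync mask) keep IS_PENDING. A small self-contained illustration of the difference, with the two macro bodies from vm.h rewritten as plain functions and hypothetical flag values:

    #include <cstdint>
    #include <cstdio>

    // Mirrors VMCPU_FF_IS_SET: true only if ALL bits of fFlag are set.
    static bool ffIsSet(uint64_t fActions, uint64_t fFlag)
    {
        return (fActions & fFlag) == fFlag;
    }

    // Mirrors VMCPU_FF_IS_PENDING: true if ANY bit of fFlags is set.
    static bool ffIsPending(uint64_t fActions, uint64_t fFlags)
    {
        return (fActions & fFlags) != 0;
    }

    int main()
    {
        const uint64_t FF_APIC = 1u << 0, FF_PIC = 1u << 1;   // illustrative flags
        uint64_t fActions = FF_APIC;            // only the APIC flag is pending

        // Single flag: the checks agree, so the rename is safe.
        std::printf("%d %d\n", ffIsSet(fActions, FF_APIC),
                               ffIsPending(fActions, FF_APIC));             // 1 1

        // Multi-flag mask: they diverge, so such call sites stay IS_PENDING.
        std::printf("%d %d\n", ffIsSet(fActions, FF_APIC | FF_PIC),
                               ffIsPending(fActions, FF_APIC | FF_PIC));    // 0 1
        return 0;
    }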