VirtualBox

Changeset 66581 in vbox


Timestamp:
Apr 17, 2017 3:00:00 AM
Author:
vboxsync
Message:

VMM: Nested Hw.virt: Implemented various SVM intercepts in IEM, addressed some todos.

Location:
trunk
Files:
25 edited

  • trunk/include/VBox/vmm/cpum.h

    r66403 r66581  
    10421042    uint32_t        fPadding : 23;
    10431043
    1044     /** Hardware virtualization features.
    1045      *
    1046      *  @todo r=bird: Please drop the unions and flatten this as much as possible.
    1047      *        Prefix the names with 'Svm' 'Vmx' if there is any confusion. Group the
    1048      *        flags into common and specific bunches.
    1049      *
    1050      */
    1051     union
    1052     {
    1053         /** SVM features.  */
    1054         struct
    1055         {
    1056             /** Features as reported by CPUID 0x8000000a.EDX.  */
    1057             union
    1058             {
    1059                 struct
    1060                 {
    1061                     uint32_t fNestedPaging         : 1;
    1062                     uint32_t fLbrVirt              : 1;
    1063                     uint32_t fSvmLock              : 1;
    1064                     uint32_t fNextRipSave          : 1;
    1065                     uint32_t fTscRateMsr           : 1;
    1066                     uint32_t fVmcbClean            : 1;
    1067                     uint32_t fFlusbByAsid          : 1;
    1068                     uint32_t fDecodeAssist         : 1;
    1069                     uint32_t u2Reserved0           : 2;
    1070                     uint32_t fPauseFilter          : 1;
    1071                     uint32_t u1Reserved0           : 1;
    1072                     uint32_t fPauseFilterThreshold : 1;
    1073                     uint32_t fAvic                 : 1;
    1074                     uint32_t u18Reserved0          : 18;
    1075                 } n;
    1076                 uint32_t    u;
    1077             } feat;
    1078             /** Maximum supported ASID. */
    1079             uint32_t        uMaxAsid;
    1080         } svm;
    1081 
    1082         /** VMX features. */
    1083         struct
    1084         {
    1085             uint32_t    uDummy1;
    1086             uint32_t    uDummy2;
    1087         } vmx;
    1088     } CPUM_UNION_NM(hwvirt);
     1044    /** SVM: Supports Nested-paging. */
     1045    uint32_t        fSvmNestedPaging : 1;
     1046    /** SVM: Support LBR (Last Branch Record) virtualization. */
     1047    uint32_t        fSvmLbrVirt : 1;
     1048    /** SVM: Supports SVM lock. */
     1049    uint32_t        fSvmSvmLock : 1;
     1050    /** SVM: Supports Next RIP save. */
     1051    uint32_t        fSvmNextRipSave : 1;
     1052    /** SVM: Supports TSC rate MSR. */
     1053    uint32_t        fSvmTscRateMsr : 1;
     1054    /** SVM: Supports VMCB clean bits. */
     1055    uint32_t        fSvmVmcbClean : 1;
     1056    /** SVM: Supports Flush-by-ASID. */
     1057    uint32_t        fSvmFlusbByAsid : 1;
     1058    /** SVM: Supports decode assist. */
     1059    uint32_t        fSvmDecodeAssist : 1;
     1060    /** SVM: Supports Pause filter. */
     1061    uint32_t        fSvmPauseFilter : 1;
     1062    /** SVM: Supports Pause filter threshold. */
     1063    uint32_t        fSvmPauseFilterThreshold : 1;
     1064    /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
     1065    uint32_t        fSvmAvic : 1;
     1066    /** SVM: Padding / reserved for future features. */
     1067    uint32_t        fSvmPadding0 : 21;
     1068    /** SVM: Maximum supported ASID. */
     1069    uint32_t        uSvmMaxAsid;
     1070
     1071    /** @todo VMX features. */
    10891072    uint32_t        auPadding[1];
    10901073} CPUMFEATURES;
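
Note: with the union flattened into individual fSvm* bits as above, populating CPUMFEATURES from CPUID leaf 0x8000000a becomes a straight bit-for-bit mapping. A minimal sketch using the X86_CPUID_SVM_FEATURE_EDX_* defines this changeset adds to iprt/x86.h (the real population code lives elsewhere in CPUM and may differ in detail):

    uint32_t uEax, uEbx, uEcx, uEdx;
    ASMCpuId(0x8000000a, &uEax, &uEbx, &uEcx, &uEdx);   /* EBX = number of ASIDs, EDX = SVM features. */
    pFeatures->fSvmNestedPaging = RT_BOOL(uEdx & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING);
    pFeatures->fSvmLbrVirt      = RT_BOOL(uEdx & X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT);
    pFeatures->fSvmNextRipSave  = RT_BOOL(uEdx & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
    pFeatures->fSvmDecodeAssist = RT_BOOL(uEdx & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST);
    /* ... remaining feature bits likewise ... */
    pFeatures->uSvmMaxAsid      = uEbx;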
     
    13961379 * @returns true if the intercept is active, false otherwise.
    13971380 * @param   pCtx        Pointer to the context.
    1398  * @param   enmXcpt     The exception.
    1399  */
    1400 DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCCPUMCTX pCtx, X86XCPT enmXcpt)
    1401 {
    1402     return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u32InterceptXcpt & enmXcpt);
     1381 * @param   uVector     The exception / interrupt vector.
     1382 */
     1383DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCCPUMCTX pCtx, uint8_t uVector)
     1384{
     1385    Assert(uVector < 32);
     1386    return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u32InterceptXcpt & (UINT32_C(1) << uVector));
    14031387}
    14041388
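
Note: besides taking a raw vector, the rewritten CPUMIsGuestSvmXcptInterceptSet above fixes a latent bug: the old version used the X86XCPT enum value itself as an AND mask rather than shifting a single bit into position. A minimal illustration (vector 14 is #PF):

    uint32_t const u32InterceptXcpt = UINT32_C(1) << X86_XCPT_PF;       /* Intercept only #PF (vector 14). */
    bool const fOld = RT_BOOL(u32InterceptXcpt & X86_XCPT_PF);          /* Tests bits 1-3: false, misses it! */
    bool const fNew = RT_BOOL(u32InterceptXcpt & (UINT32_C(1) << 14));  /* Tests bit 14: true. */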
     
    15541538VMMDECL(uint32_t)       CPUMGetGuestMxCsrMask(PVM pVM);
    15551539VMMDECL(uint64_t)       CPUMGetGuestScalableBusFrequency(PVM pVM);
    1556 VMMDECL(int)            CPUMGetValidateEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
    1557                                             uint64_t *puValidEfer);
     1540VMMDECL(int)            CPUMQueryValidatedGuestEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
     1541                                                    uint64_t *puValidEfer);
    15581542
    15591543/** @name Typical scalable bus frequency values.
  • trunk/include/VBox/vmm/cpum.mac

    r66276 r66581  
    258258    .abPadding          resb    12
    259259%endif
    260     .hwvirt.svm.uMsrHSavePa         resq    1
    261     .hwvirt.svm.GCPhysVmcb          resq    1
    262     .hwvirt.svm.VmcbCtrl            resb  256
    263     .hwvirt.svm.HostState           resb  184
    264     .hwvirt.svm.fGif                resb    1
    265     .hwvirt.svm.abPadding0          resb    7
    266     .hwvirt.svm.pvMsrBitmapR0       RTR0PTR_RES 1
    267     .hwvirt.svm.pvMsrBitmapR3       RTR3PTR_RES 1
    268     .hwvirt.svm.pvIoBitmapR0        RTR0PTR_RES 1
    269     .hwvirt.svm.pvIoBitmapR3        RTR3PTR_RES 1
     260    .hwvirt.svm.uMsrHSavePa              resq    1
     261    .hwvirt.svm.GCPhysVmcb               resq    1
     262    .hwvirt.svm.VmcbCtrl                 resb  256
     263    .hwvirt.svm.HostState                resb  184
     264    .hwvirt.svm.fGif                     resb    1
     265    .hwvirt.svm.cPauseFilter             resw    1
     266    .hwvirt.svm.cPauseFilterThreshold    resw    1
     267    .hwvirt.svm.abPadding0               resb    3
     268    .hwvirt.svm.pvMsrBitmapR0            RTR0PTR_RES 1
     269    .hwvirt.svm.pvMsrBitmapR3            RTR3PTR_RES 1
     270    .hwvirt.svm.pvIoBitmapR0             RTR0PTR_RES 1
     271    .hwvirt.svm.pvIoBitmapR3             RTR3PTR_RES 1
    270272%if HC_ARCH_BITS == 32
    271     .hwvirt.svm.abPadding1          resb   16
    272 %endif
    273     .hwvirt.fLocalForcedActions     resd    1
     273    .hwvirt.svm.abPadding1               resb   16
     274%endif
     275    .hwvirt.fLocalForcedActions          resd    1
    274276    alignb 64
    275277endstruc
  • trunk/include/VBox/vmm/cpumctx.h

    r66277 r66581  
    485485                /** 1184 - Global interrupt flag. */
    486486                uint8_t             fGif;
    487                 /** 1185 - Padding. */
    488                 uint8_t             abPadding0[7];
     487                /** 1185 - Pause filter count. */
     488                uint16_t            cPauseFilter;
     489                /** 1187 - Pause filter threshold. */
     490                uint16_t            cPauseFilterThreshold;
     491                /** 1189 - Padding. */
     492                uint8_t             abPadding0[3];
    489493                /** 1192 - MSR permission bitmap - R0 ptr. */
    490494                R0PTRTYPE(void *)   pvMsrBitmapR0;
     
    567571AssertCompileMemberOffset(CPUMCTX,                 aoffXState, HC_ARCH_BITS == 64 ? 596 : 588);
    568572AssertCompileMemberOffset(CPUMCTX, hwvirt, 728);
    569 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa,      728);
    570 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.GCPhysVmcb,       736);
    571 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.VmcbCtrl,         744);
    572 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HostState,       1000);
    573 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.fGif,            1184);
    574 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0,   1192);
    575 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR3,   HC_ARCH_BITS == 64 ? 1200 : 1196);
    576 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR0,    HC_ARCH_BITS == 64 ? 1208 : 1200);
    577 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR3,    HC_ARCH_BITS == 64 ? 1216 : 1204);
    578 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) fLocalForcedActions, 1224);
     573AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa,            728);
     574AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.GCPhysVmcb,             736);
     575AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.VmcbCtrl,               744);
     576AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HostState,             1000);
     577AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.fGif,                  1184);
     578AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.cPauseFilter,          1185);
     579AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.cPauseFilterThreshold, 1187);
     580AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0,         1192);
     581AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR3,         HC_ARCH_BITS == 64 ? 1200 : 1196);
     582AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR0,          HC_ARCH_BITS == 64 ? 1208 : 1200);
     583AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR3,          HC_ARCH_BITS == 64 ? 1216 : 1204);
     584AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) fLocalForcedActions,       1224);
    579585
    580586AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
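
Note: the updated asserts encode the repacking above: the two 16-bit pause-filter counters are carved out of the old 7-byte padding after fGif, so nothing downstream moves. In outline:

    /* Before: fGif (1 byte, offset 1184) + abPadding0[7]                       -> pvMsrBitmapR0 at 1192.
       After:  fGif (1184) + cPauseFilter (1185) + cPauseFilterThreshold (1187)
               + abPadding0[3] (1189)                                           -> pvMsrBitmapR0 still at 1192. */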
  • trunk/include/VBox/vmm/em.h

    r65792 r66581  
    199199VMM_INT_DECL(bool)              EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx);
    200200VMM_INT_DECL(int)               EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys);
     201VMM_INT_DECL(bool)              EMMonitorIsArmed(PVMCPU pVCpu);
    201202VMM_INT_DECL(int)               EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx);
    202203VMM_INT_DECL(int)               EMUnhaltAndWakeUp(PVM pVM, PVMCPU pVCpuDst);
  • trunk/include/VBox/vmm/hm_svm.h

    r66373 r66581  
    4545 * @{
    4646 */
    47 
    48 /** @name SVM features for cpuid 0x8000000a
    49  * @{
    50  */
    51 /** Bit 0 - NP - Nested Paging supported. */
    52 #define AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING             RT_BIT(0)
    53 /** Bit 1 - LbrVirt - Support for saving five debug MSRs. */
    54 #define AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT                  RT_BIT(1)
    55 /** Bit 2 - SVML - SVM locking bit supported. */
    56 #define AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK                  RT_BIT(2)
    57 /** Bit 3 - NRIPS - Saving the next instruction pointer is supported. */
    58 #define AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE                 RT_BIT(3)
    59 /** Bit 4 - TscRateMsr - Support for MSR TSC ratio. */
    60 #define AMD_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR              RT_BIT(4)
    61 /** Bit 5 - VmcbClean - Support VMCB clean bits. */
    62 #define AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN                RT_BIT(5)
    63 /** Bit 6 - FlushByAsid - Indicate TLB flushing for current ASID only, and that
    64  *  VMCB.TLB_Control is supported. */
    65 #define AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID             RT_BIT(6)
    66 /** Bit 7 - DecodeAssist - Indicate decode assist is supported. */
    67 #define AMD_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST             RT_BIT(7)
    68 /** Bit 10 - PauseFilter - Indicates support for the PAUSE intercept filter. */
    69 #define AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER              RT_BIT(10)
    70 /** Bit 12 - PauseFilterThreshold - Indicates support for the PAUSE
    71  *  intercept filter cycle count threshold. */
    72 #define AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD    RT_BIT(12)
    73 /** Bit 13 - AVIC - Advanced Virtual Interrupt Controller. */
    74 #define AMD_CPUID_SVM_FEATURE_EDX_AVIC                      RT_BIT(13)
    75 /** @} */
    7647
    7748/** @name SVM generic / convenient defines.
     
    326297/** @} */
    327298
     299/** @name SVMVMCB.u64ExitInfo1 for Mov CRX accesses.
     300 * @{
     301 */
     302/** The access was via Mov CRx instruction bit number. */
     303#define SVM_EXIT1_MOV_CRX_MASK                RT_BIT_64(63)
     304/** @} */
     305
    328306
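
Note: SVM_EXIT1_MOV_CRX_MASK lets a #VMEXIT handler distinguish a MOV CRx access from LMSW/CLTS and recover the GPR operand when decode assist is enabled; compare the IEM_RETURN_SVM_NST_GST_CRX_VMEXIT macro added to IEMAll.cpp further down, which stores the GPR number in the low bits. A hedged decode sketch:

    if (uExitInfo1 & SVM_EXIT1_MOV_CRX_MASK)
    {
        uint8_t const iGReg = (uint8_t)(uExitInfo1 & 0xf);  /* GPR operand of the MOV CRx (low bits). */
        /* ... emulate MOV CRx using iGReg ... */
    }
    else
    {
        /* LMSW/CLTS access, or no decode assist information available. */
    }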
    329307/** @name SVMVMCB.ctrl.u64InterceptCtrl
     
    601579        uint32_t    u1OP16              : 1;   /**< Bit 5: 16-bit operand. */
    602580        uint32_t    u1OP32              : 1;   /**< Bit 6: 32-bit operand. */
    603         uint32_t    u1ADDR16            : 1;   /**< Bit 7: 16-bit operand. */
    604         uint32_t    u1ADDR32            : 1;   /**< Bit 8: 32-bit operand. */
    605         uint32_t    u1ADDR64            : 1;   /**< Bit 9: 64-bit operand. */
     581        uint32_t    u1ADDR16            : 1;   /**< Bit 7: 16-bit address size. */
     582        uint32_t    u1ADDR32            : 1;   /**< Bit 8: 32-bit address size. */
     583        uint32_t    u1ADDR64            : 1;   /**< Bit 9: 64-bit address size. */
    606584        uint32_t    u3SEG               : 3;   /**< BITS 12:10: Effective segment number. Added w/ decode assist in APM v3.17. */
    607585        uint32_t    u3Reserved          : 3;
     
    615593typedef const SVMIOIOEXITINFO *PCSVMIOIOEXITINFO;
    616594
    617 /** @name SVMIOIOEXITINFO.u1Type
    618  *  @{ */
     595/** 8-bit IO transfer. */
     596#define SVM_IOIO_8_BIT_OP               RT_BIT_32(4)
     597/** 16-bit IO transfer. */
     598#define SVM_IOIO_16_BIT_OP              RT_BIT_32(5)
     599/** 32-bit IO transfer. */
     600#define SVM_IOIO_32_BIT_OP              RT_BIT_32(6)
     601/** Mask of all possible IO transfer sizes. */
     602#define SVM_IOIO_OP_SIZE_MASK           (SVM_IOIO_8_BIT_OP | SVM_IOIO_16_BIT_OP | SVM_IOIO_32_BIT_OP)
     603/** 16-bit address for the IO buffer. */
     604#define SVM_IOIO_16_BIT_ADDR            RT_BIT_32(7)
     605/** 32-bit address for the IO buffer. */
     606#define SVM_IOIO_32_BIT_ADDR            RT_BIT_32(8)
     607/** 64-bit address for the IO buffer. */
     608#define SVM_IOIO_64_BIT_ADDR            RT_BIT_32(9)
     609/** Mask of all the IO address sizes. */
     610#define SVM_IOIO_ADDR_SIZE_MASK         (SVM_IOIO_16_BIT_ADDR | SVM_IOIO_32_BIT_ADDR | SVM_IOIO_64_BIT_ADDR)
     611/** Number of bits to left shift to get the IO port number. */
     612#define SVM_IOIO_PORT_SHIFT             16
    619613/** IO write. */
    620614#define SVM_IOIO_WRITE                  0
    621615/** IO read. */
    622616#define SVM_IOIO_READ                   1
     617/**
     618 * SVM IOIO transfer type.
     619 */
     620typedef enum
     621{
     622    SVMIOIOTYPE_OUT = SVM_IOIO_WRITE,
     623    SVMIOIOTYPE_IN  = SVM_IOIO_READ
     624} SVMIOIOTYPE;
    623625/** @}*/
    624626
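
Note: these defines describe the raw EXITINFO1 layout for IOIO intercepts; construction in the other direction is shown by iemSvmHandleIOIntercept in the IEMAll.cpp hunks below. A sketch of pulling the interesting fields back out of a raw value:

    uint16_t const u16Port = (uint16_t)(uExitInfo1 >> SVM_IOIO_PORT_SHIFT);  /* Bits 31:16. */
    bool const     fRead   = RT_BOOL(uExitInfo1 & SVM_IOIO_READ);            /* Bit 0: 1 = IN, 0 = OUT. */
    uint8_t        cbOp    = 0;
    if (uExitInfo1 & SVM_IOIO_8_BIT_OP)   cbOp = 1;
    if (uExitInfo1 & SVM_IOIO_16_BIT_OP)  cbOp = 2;
    if (uExitInfo1 & SVM_IOIO_32_BIT_OP)  cbOp = 4;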
  • trunk/include/VBox/vmm/iem.h

    r66000 r66581  
    4747#define IEMMODE_64BIT 2
    4848/** @} */
     49
     50
     51/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
     52 * @{ */
     53/** CPU exception. */
     54#define IEM_XCPT_FLAGS_T_CPU_XCPT       RT_BIT_32(0)
     55/** External interrupt (from PIC, APIC, whatever). */
     56#define IEM_XCPT_FLAGS_T_EXT_INT        RT_BIT_32(1)
     57/** Software interrupt (int or into, not bound).
     58 * Returns to the following instruction */
     59#define IEM_XCPT_FLAGS_T_SOFT_INT       RT_BIT_32(2)
     60/** Takes an error code. */
     61#define IEM_XCPT_FLAGS_ERR              RT_BIT_32(3)
     62/** Takes a CR2. */
     63#define IEM_XCPT_FLAGS_CR2              RT_BIT_32(4)
     64/** Generated by the breakpoint instruction. */
     65#define IEM_XCPT_FLAGS_BP_INSTR         RT_BIT_32(5)
     66/** Generated by a DRx instruction breakpoint and RF should be cleared. */
     67#define IEM_XCPT_FLAGS_DRx_INSTR_BP     RT_BIT_32(6)
     68/** @}  */
    4969
    5070
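
Note: these flags move here from IEMAll.cpp so that HMSVMAll.cpp can classify the event IEM is delivering (see hmSvmEventTypeFromIemEvent below). Typical combinations as used by IEM's raise helpers (a sketch; the raise helpers themselves are authoritative):

    /* #DE (divide error):  IEM_XCPT_FLAGS_T_CPU_XCPT
       #GP:                 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR
       #PF:                 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
       INT3:                IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR
       INT n:               IEM_XCPT_FLAGS_T_SOFT_INT
       PIC/APIC interrupt:  IEM_XCPT_FLAGS_T_EXT_INT */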
     
    117137VMM_INT_DECL(void)          IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr);
    118138VMM_INT_DECL(void)          IEMTlbInvalidateAllPhysical(PVMCPU pVCpu);
    119 #ifdef VBOX_WITH_NESTED_HWVIRT
    120 VMM_INT_DECL(bool)          IEMIsRaisingIntOrXcpt(PVMCPU pVCpu);
    121 #endif
     139VMM_INT_DECL(bool)          IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr,
     140                                              uint64_t *puCr2);
    122141
    123142/** @name Given Instruction Interpreters
  • trunk/include/iprt/x86.h

    r66392 r66581  
    739739/** Bit 12 - PA - Processor accumulator (MSR c001_007a). */
    740740#define X86_CPUID_AMD_ADVPOWER_EDX_PA        RT_BIT_32(12)
     741/** @} */
     742
     743
     744/** @name CPUID AMD SVM Feature information.
     745 * CPUID query with EAX=0x8000000a.
     746 * @{
     747 */
     748/** Bit 0 - NP - Nested Paging supported. */
     749#define X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING             RT_BIT(0)
     750/** Bit 1 - LbrVirt - Support for saving five debug MSRs. */
     751#define X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT                  RT_BIT(1)
     752/** Bit 2 - SVML - SVM locking bit supported. */
     753#define X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK                  RT_BIT(2)
     754/** Bit 3 - NRIPS - Saving the next instruction pointer is supported. */
     755#define X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE                 RT_BIT(3)
     756/** Bit 4 - TscRateMsr - Support for MSR TSC ratio. */
     757#define X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR              RT_BIT(4)
     758/** Bit 5 - VmcbClean - Support VMCB clean bits. */
     759#define X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN                RT_BIT(5)
     760/** Bit 6 - FlushByAsid - Indicate TLB flushing for current ASID only, and that
     761 *  VMCB.TLB_Control is supported. */
     762#define X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID             RT_BIT(6)
     763/** Bit 7 - DecodeAssist - Indicate decode assist is supported. */
     764#define X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST             RT_BIT(7)
     765/** Bit 10 - PauseFilter - Indicates support for the PAUSE intercept filter. */
     766#define X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER              RT_BIT(10)
     767/** Bit 12 - PauseFilterThreshold - Indicates support for the PAUSE
     768 *  intercept filter cycle count threshold. */
     769#define X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD    RT_BIT(12)
     770/** Bit 13 - AVIC - Advanced Virtual Interrupt Controller. */
     771#define X86_CPUID_SVM_FEATURE_EDX_AVIC                      RT_BIT(13)
    741772/** @} */
    742773
  • trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp

    r66551 r66581  
    6565#include <VBox/log.h>
    6666#include <VBox/err.h>
    67 #include <VBox/vmm/hm_svm.h>
    6867#include <VBox/vmm/hm_vmx.h>
    6968
     
    41454144                    /* Query AMD-V features. */
    41464145                    ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
    4147                     if (fSvmFeatures & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
     4146                    if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
    41484147                        *pfCaps |= SUPVTCAPS_NESTED_PAGING;
    41494148                }
  • trunk/src/VBox/Main/src-server/HostImpl.cpp

    r65854 r66581  
    156156#endif
    157157
    158 /* XXX Solaris: definitions in /usr/include/sys/regset.h clash with hm_svm.h */
    159 #undef DS
    160 #undef ES
    161 #undef CS
    162 #undef SS
    163 #undef FS
    164 #undef GS
    165 
    166158#include <VBox/usb.h>
    167 #include <VBox/vmm/hm_svm.h>
    168159#include <VBox/err.h>
    169160#include <VBox/settings.h>
     
    365356                        uint32_t fSVMFeaturesEdx;
    366357                        ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSVMFeaturesEdx);
    367                         if (fSVMFeaturesEdx & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
     358                        if (fSVMFeaturesEdx & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
    368359                            m->fNestedPagingSupported = true;
    369360                    }
  • trunk/src/VBox/VMM/Makefile.kmk

    r66479 r66581  
    525525 VMMRC_DEFS      = IN_VMM_RC IN_RT_RC IN_DIS DIS_CORE_ONLY VBOX_WITH_RAW_MODE VBOX_WITH_RAW_MODE_NOT_R0 IN_SUP_RC \
    526526        $(VMM_COMMON_DEFS)
     527 VMMRC_DEFS := $(filter-out VBOX_WITH_NESTED_HWVIRT,$(VMMRC_DEFS))
    527528 ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
    528529  VMMRC_DEFS    += VMM_R0_SWITCH_STACK
  • trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp

    r66227 r66581  
    14381438    uint64_t uValidatedEfer;
    14391439    uint64_t const uOldEfer = pVCpu->cpum.s.Guest.msrEFER;
    1440     int rc = CPUMGetValidateEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0, uOldEfer, uValue, &uValidatedEfer);
     1440    int rc = CPUMQueryValidatedGuestEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0, uOldEfer, uValue, &uValidatedEfer);
    14411441    if (RT_FAILURE(rc))
    14421442        return VERR_CPUM_RAISE_GP_0;
     
    61146114 *                          this function returns VINF_SUCCESS).
    61156115 */
    6116 VMMDECL(int) CPUMGetValidateEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer, uint64_t *puValidEfer)
     6116VMMDECL(int) CPUMQueryValidatedGuestEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer, uint64_t *puValidEfer)
    61176117{
    61186118    uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax >= 0x80000001
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

    r65792 r66581  
    193193    /** @todo Complete MONITOR implementation.  */
    194194    return VINF_SUCCESS;
     195}
     196
     197
     198/**
     199 * Checks if the monitor hardware is armed / active.
     200 *
     201 * @returns true if armed, false otherwise.
     202 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
     203 */
     204VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
     205{
     206    return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
    195207}
    196208
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r66386 r66581  
    176176
    177177
     178/**
     179 * Converts an SVM event type to a TRPM event type.
     180 *
     181 * @returns The TRPM event type.
     182 * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
     183 *          of recognized trap types.
     184 *
     185 * @param   pEvent       Pointer to the SVM event.
     186 */
     187VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pEvent)
     188{
     189    uint8_t const uType = pEvent->n.u3Type;
     190    switch (uType)
     191    {
     192        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
     193        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
     194        case SVM_EVENT_EXCEPTION:
     195        case SVM_EVENT_NMI:             return TRPM_TRAP;
     196        default:
     197            break;
     198    }
     199    AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
     200    return TRPM_32BIT_HACK;
     201}
     202
     203
    178204#ifndef IN_RC
     205/**
     206 * Converts an IEM exception event type to an SVM event type.
     207 *
     208 * @returns The SVM event type.
     209 * @retval  UINT8_MAX if the specified type of event isn't among the set
     210 *          of recognized IEM event types.
     211 *
     212 * @param   uVector         The vector of the event.
     213 * @param   fIemXcptFlags   The IEM exception / interrupt flags.
     214 */
     215static uint8_t hmSvmEventTypeFromIemEvent(uint32_t uVector, uint32_t fIemXcptFlags)
     216{
     217    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
     218        return SVM_EVENT_EXCEPTION;
     219    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_EXT_INT)
     220        return uVector != X86_XCPT_NMI ? SVM_EVENT_EXTERNAL_IRQ : SVM_EVENT_NMI;
     221    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
     222        return SVM_EVENT_SOFTWARE_INT;
     223    AssertMsgFailed(("hmSvmEventTypeFromIemEvent: Invalid IEM xcpt/int. type %#x, uVector=%#x\n", fIemXcptFlags, uVector));
     224    return UINT8_MAX;
     225}
     226
     227
    179228/**
    180229 * Performs the operations necessary that are part of the vmrun instruction
     
    247296            /* Nested paging. */
    248297            if (    pVmcbCtrl->NestedPaging.n.u1NestedPaging
    249                 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fNestedPaging)
     298                && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
    250299            {
    251300                Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n"));
     
    255304            /* AVIC. */
    256305            if (    pVmcbCtrl->IntCtrl.n.u1AvicEnable
    257                 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fAvic)
     306                && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
    258307            {
    259308                Log(("HMSvmVmRun: AVIC not supported -> #VMEXIT\n"));
     
    263312            /* Last branch record (LBR) virtualization. */
    264313            if (    (pVmcbCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE)
    265                 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fLbrVirt)
     314                && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
    266315            {
    267316                Log(("HMSvmVmRun: LBR virtualization not supported -> #VMEXIT\n"));
     
    350399            /* EFER, CR0 and CR4. */
    351400            uint64_t uValidEfer;
    352             rc = CPUMGetValidateEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);
     401            rc = CPUMQueryValidatedGuestEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);
    353402            if (RT_FAILURE(rc))
    354403            {
     
    592641        pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo1 = uExitInfo1;
    593642        pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo2 = uExitInfo2;
     643
     644        /*
     645         * Update the exit interrupt information field if this #VMEXIT happened as a result
     646         * of delivering an event.
     647         */
     648        {
     649            uint8_t  uExitIntVector;
     650            uint32_t uExitIntErr;
     651            uint32_t fExitIntFlags;
     652            bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,
     653                                                         NULL /* uExitIntCr2 */);
     654            pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u1Valid = fRaisingEvent;
     655            if (fRaisingEvent)
     656            {
     657                pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u8Vector = uExitIntVector;
     658                pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u3Type   = hmSvmEventTypeFromIemEvent(uExitIntVector, fExitIntFlags);
     659                if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)
     660                {
     661                    pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u1ErrorCodeValid = true;
     662                    pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u32ErrorCode     = uExitIntErr;
     663                }
     664            }
     665        }
    594666
    595667        /*
     
    920992     * Check if any IO accesses are being intercepted.
    921993     */
    922     if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
    923     {
    924         Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
    925 
    926         /*
    927          * The IOPM layout:
    928          * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
    929          * two 4K pages. However, since it's possible to do a 32-bit port IO at port
    930          * 65534 (thus accessing 4 bytes), we need 3 extra bits beyond the two 4K page.
    931          *
    932          * For IO instructions that access more than a single byte, the permission bits
    933          * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
    934          */
    935         uint8_t *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
    936 
    937         uint16_t const u16Port     = pIoExitInfo->n.u16Port;
    938         uint16_t const offIoBitmap = u16Port >> 3;
    939         uint16_t const fSizeMask   = pIoExitInfo->n.u1OP32 ? 0xf : pIoExitInfo->n.u1OP16 ? 3 : 1;
    940         uint8_t  const cShift      = u16Port - (offIoBitmap << 3);
    941         uint16_t const fIopmMask   = (1 << cShift) | (fSizeMask << cShift);
    942 
    943         pbIopm += offIoBitmap;
    944         uint16_t const fIopmBits = *(uint16_t *)pbIopm;
    945         if (fIopmBits & fIopmMask)
    946             return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip);
    947     }
     994    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
     995    Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT));
     996
     997    /*
     998     * The IOPM layout:
     999     * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
     1000     * two 4K pages.
     1001     *
     1002     * For IO instructions that access more than a single byte, the permission bits
     1003     * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
     1004     *
     1005     * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
     1006     * we need 3 extra bits beyond the second 4K page.
     1007     */
     1008    uint8_t const *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
     1009
     1010    uint16_t const u16Port   = pIoExitInfo->n.u16Port;
     1011    uint16_t const offIopm   = u16Port >> 3;
     1012    uint16_t const fSizeMask = pIoExitInfo->n.u1OP32 ? 0xf : pIoExitInfo->n.u1OP16 ? 3 : 1;
     1013    uint8_t  const cShift    = u16Port - (offIopm << 3);
     1014    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
     1015
     1016    pbIopm += offIopm;
     1017    uint16_t const fIopmBits = *(uint16_t *)pbIopm;
     1018    if (fIopmBits & fIopmMask)
     1019        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip);
     1020
    9481021    return VINF_HM_INTERCEPT_NOT_ACTIVE;
    9491022}
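
Note: a worked example of the mask computation above, for a 16-bit OUT to port 0x64:

    uint16_t const u16Port   = 0x64;
    uint16_t const offIopm   = u16Port >> 3;                          /* = 0x0c             */
    uint16_t const fSizeMask = 3;                                     /* u1OP16 -> 2 ports. */
    uint8_t  const cShift    = u16Port - (offIopm << 3);              /* = 4                */
    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift); /* = 0x30             */
    /* Bits 4 and 5 of the 16-bit word read at byte offset 0x0c cover ports 0x64 and 0x65;
       if either permission bit is set, the access causes an SVM_EXIT_IOIO #VMEXIT. */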
     
    9551028 *
    9561029 * @returns Strict VBox status code.
    957  * @retval  VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
    958  *          we're not executing a nested-guest.
     1030 * @retval  VINF_HM_INTERCEPT_NOT_ACTIVE if the MSR permission bitmap does not
     1031 *          specify interception of the accessed MSR @a idMsr.
    9591032 * @retval  VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
    9601033 *          successfully.
     
    9731046     * Check if any MSRs are being intercepted.
    9741047     */
    975     if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
     1048    Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT));
     1049    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
     1050
     1051    uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
     1052
     1053    /*
     1054     * Get the byte and bit offset of the permission bits corresponding to the MSR.
     1055     */
     1056    uint16_t offMsrpm;
     1057    uint32_t uMsrpmBit;
     1058    int rc = hmSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
     1059    if (RT_SUCCESS(rc))
    9761060    {
    977         Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
    978         uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
     1061        Assert(uMsrpmBit < 0x3fff);
     1062        Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
     1063        if (fWrite)
     1064            ++uMsrpmBit;
    9791065
    9801066        /*
    981          * Get the byte and bit offset of the permission bits corresponding to the MSR.
     1067         * Check if the bit is set, if so, trigger a #VMEXIT.
    9821068         */
    983         uint16_t offMsrpm;
    984         uint32_t uMsrpmBit;
    985         int rc = hmSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
    986         if (RT_SUCCESS(rc))
    987         {
    988             Assert(uMsrpmBit < 0x3fff);
    989             Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
    990             if (fWrite)
    991                 ++uMsrpmBit;
    992 
    993             /*
    994              * Check if the bit is set, if so, trigger a #VMEXIT.
    995              */
    996             uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
    997             pbMsrpm += offMsrpm;
    998             if (ASMBitTest(pbMsrpm, uMsrpmBit))
    999                 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
    1000         }
    1001         else
    1002         {
    1003             /*
    1004              * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it.
    1005              */
    1006             Log(("HMSvmNstGstHandleIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool\n", idMsr, fWrite));
     1069        uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
     1070        pbMsrpm += offMsrpm;
     1071        if (ASMBitTest(pbMsrpm, uMsrpmBit))
    10071072            return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
    1008         }
     1073    }
     1074    else
     1075    {
     1076        /*
     1077         * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it.
     1078         */
     1079        Log(("HMSvmNstGstHandleIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool\n", idMsr, fWrite));
     1080        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
    10091081    }
    10101082    return VINF_HM_INTERCEPT_NOT_ACTIVE;
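
Note: hmSvmGetMsrpmOffsetAndBit is defined elsewhere in this file and not shown in the hunks above. Per the AMD APM, the MSRPM dedicates 2 bits per MSR (read bit, then write bit, hence the ++uMsrpmBit for writes) in three 2K regions covering MSRs 0x00000000-0x00001fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff. A sketch of that mapping under the contract implied by the asserts above (offMsrpm = region byte offset, uMsrpmBit = bit within the region):

    static int svmGetMsrpmOffsetAndBitSketch(uint32_t idMsr, uint16_t *poffMsrpm, uint32_t *puMsrpmBit)
    {
        if (idMsr <= UINT32_C(0x00001fff))                    /* MSRPM bytes 0x000-0x7ff. */
        {
            *poffMsrpm  = 0;
            *puMsrpmBit = idMsr << 1;                         /* Even = read, odd = write. */
            return VINF_SUCCESS;
        }
        if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff)) /* MSRPM bytes 0x800-0xfff. */
        {
            *poffMsrpm  = 0x800;
            *puMsrpmBit = (idMsr - UINT32_C(0xc0000000)) << 1;
            return VINF_SUCCESS;
        }
        if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff)) /* MSRPM bytes 0x1000-0x17ff. */
        {
            *poffMsrpm  = 0x1000;
            *puMsrpmBit = (idMsr - UINT32_C(0xc0010000)) << 1;
            return VINF_SUCCESS;
        }
        return VERR_OUT_OF_RANGE;                             /* Caller #VMEXITs on this. */
    }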
     
    10691141#endif /* !IN_RC */
    10701142
    1071 
    1072 /**
    1073  * Converts an SVM event type to a TRPM event type.
    1074  *
    1075  * @returns The TRPM event type.
    1076  * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
    1077  *          of recognized trap types.
    1078  *
    1079  * @param   pEvent       Pointer to the SVM event.
    1080  */
    1081 VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pEvent)
    1082 {
    1083     uint8_t const uType = pEvent->n.u3Type;
    1084     switch (uType)
    1085     {
    1086         case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
    1087         case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
    1088         case SVM_EVENT_EXCEPTION:
    1089         case SVM_EVENT_NMI:             return TRPM_TRAP;
    1090         default:
    1091             break;
    1092     }
    1093     AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
    1094     return TRPM_32BIT_HACK;
    1095 }
    1096 
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r66457 r66581  
    370370 * Check the common SVM instruction preconditions.
    371371 */
    372 #define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
     372# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
    373373    do { \
    374374        if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
     
    392392 * Check if an SVM is enabled.
    393393 */
    394 #define IEM_IS_SVM_ENABLED(a_pVCpu)                         (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
     394# define IEM_IS_SVM_ENABLED(a_pVCpu)                         (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
    395395
    396396/**
    397397 * Check if an SVM control/instruction intercept is set.
    398398 */
    399 #define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
     399# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
    400400
    401401/**
    402402 * Check if an SVM read CRx intercept is set.
    403403 */
    404 #define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)    (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
     404# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)    (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
    405405
    406406/**
    407407 * Check if an SVM write CRx intercept is set.
    408408 */
    409 #define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)   (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
     409# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)   (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
    410410
    411411/**
    412412 * Check if an SVM read DRx intercept is set.
    413413 */
    414 #define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)    (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
     414# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)    (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
    415415
    416416/**
    417417 * Check if an SVM write DRx intercept is set.
    418418 */
    419 #define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)   (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
     419# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)   (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
    420420
    421421/**
    422422 * Check if an SVM exception intercept is set.
    423423 */
    424 #define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt)   (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt)))
    425 #endif /* VBOX_WITH_NESTED_HWVIRT */
     424# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)   (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
     425
     426/**
     427 * Invokes the SVM \#VMEXIT handler for the nested-guest.
     428 */
     429# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     430    do \
     431    { \
     432        VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
     433                                                        (a_uExitInfo2)); \
     434        return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
     435    } while (0)
     436
     437/**
     438 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
     439 * corresponding decode assist information.
     440 */
     441# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
     442    do \
     443    { \
     444        uint64_t uExitInfo1; \
     445        if (   IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
     446            && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
     447            uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
     448        else \
     449            uExitInfo1 = 0; \
     450        IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
     451    } while (0)
     452
     453/**
     454 * Checks and handles an SVM MSR intercept.
     455 */
     456# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
     457    HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
     458
     459#else
     460# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr)                                    do { } while (0)
     461# define IEM_IS_SVM_ENABLED(a_pVCpu)                                                      (false)
     462# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)                              (false)
     463# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                 (false)
     464# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                (false)
     465# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                 (false)
     466# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                (false)
     467# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)                                (false)
     468# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)  do { return VERR_SVM_IPE_1; } while (0)
     469# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
     470# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite)                        (VERR_SVM_IPE_1)
     471
     472#endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */
    426473
    427474
     
    834881IEM_STATIC VBOXSTRICTRC     iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
    835882
     883#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC)
     884/**
     885 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
     886 * accordingly.
     887 *
     888 * @returns VBox strict status code.
     889 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
     890 * @param   u16Port         The IO port being accessed.
     891 * @param   enmIoType       The type of IO access.
     892 * @param   cbReg           The IO operand size in bytes.
     893 * @param   cAddrSizeBits   The address size bits (for 16, 32 or 64).
     894 * @param   iEffSeg         The effective segment number.
     895 * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
     896 * @param   fStrIo          Whether this is a string IO instruction.
     897 * @param   cbInstr         The length of the IO instruction in bytes.
     898 *
     899 * @remarks This must be called only when IO instructions are intercepted by the
     900 *          nested-guest hypervisor.
     901 */
     902IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
     903                                                uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
     904{
     905    Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
     906    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
     907    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
     908
     909    static const uint32_t s_auIoOpSize[]   = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
     910    static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
     911
     912    SVMIOIOEXITINFO IoExitInfo;
     913    IoExitInfo.u         = s_auIoOpSize[cbReg & 7];
     914    IoExitInfo.u        |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
     915    IoExitInfo.n.u1STR   = fStrIo;
     916    IoExitInfo.n.u1REP   = fRep;
     917    IoExitInfo.n.u3SEG   = iEffSeg & 0x7;
     918    IoExitInfo.n.u1Type  = enmIoType;
     919    IoExitInfo.n.u16Port = u16Port;
     920
     921    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     922    return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
     923}
     924
     925#else
     926IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
     927                                                uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
     928{
     929    RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
     930    return VERR_IEM_IPE_9;
     931}
     932#endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */
    836933
    837934
     
    31263223IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
    31273224{
     3225    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
     3226    {
     3227        Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
     3228        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     3229    }
     3230
    31283231    RT_NOREF_PV(pVCpu);
    31293232    /** @todo Probably need a separate error code and handling for this to
     
    32493352 * @{
    32503353 */
    3251 
    3252 /** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
    3253  * @{ */
    3254 /** CPU exception. */
    3255 #define IEM_XCPT_FLAGS_T_CPU_XCPT       RT_BIT_32(0)
    3256 /** External interrupt (from PIC, APIC, whatever). */
    3257 #define IEM_XCPT_FLAGS_T_EXT_INT        RT_BIT_32(1)
    3258 /** Software interrupt (int or into, not bound).
    3259  * Returns to the following instruction */
    3260 #define IEM_XCPT_FLAGS_T_SOFT_INT       RT_BIT_32(2)
    3261 /** Takes an error code. */
    3262 #define IEM_XCPT_FLAGS_ERR              RT_BIT_32(3)
    3263 /** Takes a CR2. */
    3264 #define IEM_XCPT_FLAGS_CR2              RT_BIT_32(4)
    3265 /** Generated by the breakpoint instruction. */
    3266 #define IEM_XCPT_FLAGS_BP_INSTR         RT_BIT_32(5)
    3267 /** Generated by a DRx instruction breakpoint and RF should be cleared. */
    3268 #define IEM_XCPT_FLAGS_DRx_INSTR_BP     RT_BIT_32(6)
    3269 /** @}  */
    32703354
    32713355
     
    51655249#endif
    51665250
     5251#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC)
     5252    if (IEM_IS_SVM_ENABLED(pVCpu))
     5253    {
     5254        /*
     5255         * Handle nested-guest SVM exception and software interrupt intercepts,
     5256         * see AMD spec. 15.12 "Exception Intercepts".
     5257         *
     5258         *   - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
     5259         *   - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
     5260         *     even when they use a vector in the range 0 to 31.
     5261         *   - ICEBP should not trigger #DB intercept, but its own intercept, so we catch it early in iemOp_int1.
     5262         *   - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
     5263         */
     5264        /* Check NMI intercept */
     5265        if (   u8Vector == X86_XCPT_NMI
     5266            && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
     5267        {
     5268            Log(("iemRaiseXcptOrInt: NMI intercept -> #VMEXIT\n"));
     5269            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     5270        }
     5271
     5272        /* Check CPU exception intercepts. */
     5273        if (   IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector)
     5274            && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
     5275        {
     5276            Assert(u8Vector <= 31 /* X86_XCPT_MAX */);
     5277            uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
     5278            uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
     5279            if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
     5280                && u8Vector == X86_XCPT_PF
     5281                && !(uErr & X86_TRAP_PF_ID))
     5282            {
     5283                /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
     5284#ifdef IEM_WITH_CODE_TLB
     5285#else
     5286                uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
     5287                uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
     5288                if (   cbCurrent > 0
     5289                    && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
     5290                {
     5291                    Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
     5292                    memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
     5293                }
     5294#endif
     5295            }
     5296            Log(("iemRaiseXcptOrInt: Xcpt intercept (u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64) -> #VMEXIT\n", u8Vector,
     5297                 uExitInfo1, uExitInfo2));
     5298            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
     5299        }
     5300
     5301        /* Check software interrupt (INTn) intercepts. */
     5302        if (   IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN)
     5303            && (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
     5304        {
     5305            uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
     5306            Log(("iemRaiseXcptOrInt: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
     5307            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
     5308        }
     5309    }
     5310#endif
     5311
    51675312    /*
    51685313     * Do recursion accounting.
     
    51795324
    51805325        /** @todo double and triple faults. */
     5326        /** @todo When implementing #DF, the SVM nested-guest #DF intercepts needs some
     5327         *        care. See AMD spec. 15.12 "Exception Intercepts". */
    51815328        if (pVCpu->iem.s.cXcptRecursions >= 3)
    51825329        {
     
    51945341    }
    51955342    pVCpu->iem.s.cXcptRecursions++;
    5196     pVCpu->iem.s.uCurXcpt = u8Vector;
    5197     pVCpu->iem.s.fCurXcpt = fFlags;
     5343    pVCpu->iem.s.uCurXcpt    = u8Vector;
     5344    pVCpu->iem.s.fCurXcpt    = fFlags;
     5345    pVCpu->iem.s.uCurXcptErr = uErr;
     5346    pVCpu->iem.s.uCurXcptCr2 = uCr2;
    51985347
    51995348    /*
     
    96699818iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
    96709819{
     9820    VBOXSTRICTRC rcStrict;
     9821    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
     9822    {
     9823        Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
     9824        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     9825    }
     9826
    96719827    /*
    96729828     * The SIDT and SGDT instructions actually store the data using two
    96739829     * independent writes.  The instructions do not respond to opsize prefixes.
    96749830     */
    9675     VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
     9831    rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
    96769832    if (rcStrict == VINF_SUCCESS)
    96779833    {
     
    1175611912    } while (0)
    1175711913
    11758 #if 0
    11759 #ifdef VBOX_WITH_NESTED_HWVIRT
    11760 /** The instruction raises an \#UD when SVM is not enabled. */
    11761 #define IEMOP_HLP_NEEDS_SVM_ENABLED() \
    11762     do \
    11763     { \
    11764         if (IEM_IS_SVM_ENABLED(pVCpu)) \
    11765             return IEMOP_RAISE_INVALID_OPCODE(); \
    11766     } while (0)
    11767 #endif
    11768 #endif
    11769 
    1177011914/** The instruction is not available in 64-bit mode, throw \#UD if we're in
    1177111915 * 64-bit mode. */
     
    1191012054            return IEMOP_RAISE_INVALID_OPCODE(); \
    1191112055    } while (0)
     12056
     12057#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC)
     12058/** Check and handles SVM nested-guest control & instruction intercept. */
     12059# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     12060    do \
     12061    { \
     12062        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
     12063            IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
     12064    } while (0)
     12065
     12066/** Check and handle SVM nested-guest CR0 read intercept. */
     12067# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
     12068    do \
     12069    { \
     12070        if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
     12071            IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
     12072    } while (0)
     12073
     12074#else
     12075# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     12076    do { RT_NOREF5(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2); } while (0)
     12077
     12078# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
     12079    do { RT_NOREF4(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2); } while (0)
     12080
     12081#endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */
     12082
    1191212083
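
Note: these helpers compress the check-intercept-then-#VMEXIT pattern used throughout this changeset into a single statement. A hedged usage sketch, e.g. guarding an RDTSC emulation (the surrounding function must return VBOXSTRICTRC, since the macro may return a #VMEXIT status):

    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC,
                                 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    /* Not intercepted: continue with normal RDTSC emulation. */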
    1191312084/**
     
    1504015211
    1504115212
    15042 #ifdef VBOX_WITH_NESTED_HWVIRT
    1504315213/**
    1504415214 * Checks if IEM is in the process of delivering an event (interrupt or
    1504515215 * exception).
    1504615216 *
    15047  * @returns true if it's raising an interrupt or exception, false otherwise.
    15048  * @param   pVCpu       The cross context virtual CPU structure.
    15049  */
    15050 VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu)
    15051 {
    15052     return pVCpu->iem.s.cXcptRecursions > 0;
    15053 }
    15054 
    15055 
     15217 * @returns true if we're in the process of raising an interrupt or exception,
     15218 *          false otherwise.
     15219 * @param   pVCpu           The cross context virtual CPU structure.
     15220 * @param   puVector        Where to store the vector associated with the
     15221 *                          currently delivered event, optional.
     15222 * @param   pfFlags         Where to store the event delivery flags (see
     15223 *                          IEM_XCPT_FLAGS_XXX), optional.
     15224 * @param   puErr           Where to store the error code associated with the
     15225 *                          event, optional.
     15226 * @param   puCr2           Where to store the CR2 associated with the event,
     15227 *                          optional.
     15228 */
     15229VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
     15230{
     15231    bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
     15232    if (fRaisingXcpt)
     15233    {
     15234        if (puVector)
     15235            *puVector = pVCpu->iem.s.uCurXcpt;
     15236        if (pfFlags)
     15237            *pfFlags = pVCpu->iem.s.fCurXcpt;
     15238        /* The caller should check the flags to determine if the error code & CR2 are valid for the event. */
     15239        if (puErr)
     15240            *puErr = pVCpu->iem.s.uCurXcptErr;
     15241        if (puCr2)
     15242            *puCr2 = pVCpu->iem.s.uCurXcptCr2;
     15243    }
     15244    return fRaisingXcpt;
     15245}
     15246
     15247
     15248#ifdef VBOX_WITH_NESTED_HWVIRT
    1505615249/**
    1505715250 * Interface for HM and EM to emulate the STGI instruction.
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r66462 r66581  
    558558    VBOXSTRICTRC rcStrict;
    559559
     560    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
     561    {
     562        Log2(("pushf: Guest intercept -> #VMEXIT\n"));
     563        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     564    }
     565
    560566    /*
    561567     * If we're in V8086 mode some care is required (which is why we're in
     
    618624    VBOXSTRICTRC    rcStrict;
    619625    uint32_t        fEflNew;
     626
     627    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
     628    {
     629        Log2(("popf: Guest intercept -> #VMEXIT\n"));
     630        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     631    }
    620632
    621633    /*
     
    38563868
    38573869    /*
     3870     * The SVM nested-guest intercept for iret takes priority over all exceptions,
     3871     * see AMD spec. "15.9 Instruction Intercepts".
     3872     */
     3873    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
     3874    {
     3875        Log(("iret: Guest intercept -> #VMEXIT\n"));
     3876        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     3877    }
     3878
     3879    /*
    38583880     * Call a mode specific worker.
    38593881     */
     
    46324654    Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
    46334655
     4656    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
     4657    {
     4658        Log(("lgdt: Guest intercept -> #VMEXIT\n"));
     4659        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     4660    }
     4661
    46344662    /*
    46354663     * Fetch the limit and base address.
     
    46984726    Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
    46994727
     4728    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
     4729    {
     4730        Log(("lidt: Guest intercept -> #VMEXIT\n"));
     4731        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     4732    }
     4733
    47004734    /*
    47014735     * Fetch the limit and base address.
     
    47834817    if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
    47844818    {
     4819        /* Nested-guest SVM intercept. */
     4820        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
     4821        {
     4822            Log(("lldt: Guest intercept -> #VMEXIT\n"));
     4823            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     4824        }
     4825
    47854826        Log(("lldt %04x: Loading NULL selector.\n",  uNewLdt));
    47864827        if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
     
    48554896        Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
    48564897        return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
     4898    }
     4899
     4900    /* Nested-guest SVM intercept. */
     4901    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
     4902    {
     4903        Log(("lldt: Guest intercept -> #VMEXIT\n"));
     4904        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    48574905    }
    48584906
     
    49084956        return iemRaiseGeneralProtectionFault0(pVCpu);
    49094957    }
     4958    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
     4959    {
     4960        Log(("ltr: Guest intercept -> #VMEXIT\n"));
     4961        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     4962    }
    49104963
    49114964    /*
     
    50105063    Assert(!pCtx->eflags.Bits.u1VM);
    50115064
     5065    if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
     5066    {
     5067        Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5068        IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
     5069    }
     5070
    50125071    /* read it */
    50135072    uint64_t crX;
     
    50515110 * @param   iCrReg          The CRx register to write (valid).
    50525111 * @param   uNewCrX         The new value.
    5053  */
    5054 IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
      5112 * @param   enmAccessCrX    The instruction that caused the CrX load.
     5113 * @param   iGReg           The general register in case of a 'mov CRx,GReg'
     5114 *                          instruction.
     5115 */
     5116IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
    50555117{
    50565118    PCPUMCTX        pCtx  = IEM_GET_CTX(pVCpu);
    50575119    VBOXSTRICTRC    rcStrict;
    50585120    int             rc;
     5121#ifndef VBOX_WITH_NESTED_HWVIRT
     5122    RT_NOREF2(iGReg, enmAccessCrX);
     5123#endif
    50595124
    50605125    /*
     
    51285193
    51295194            /*
     5195             * SVM nested-guest CR0 write intercepts.
     5196             */
     5197            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
     5198            {
     5199                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5200                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
     5201            }
     5202            if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITES))
     5203            {
     5204                /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
     5205                if (   enmAccessCrX == IEMACCESSCRX_LMSW
     5206                    || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
     5207                {
     5208                    Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
     5209                    Log(("iemCImpl_load_Cr%#x: TS/MP bit changed or lmsw instr: Guest intercept -> #VMEXIT\n", iCrReg));
     5210                    IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     5211                }
     5212            }
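            /* Worked example (illustrative values): with uOldCrX = PE|TS and
               uNewCrX = PE, only TS changes, so 'mov cr0' skips the selective
               write exit while 'lmsw' with the same operand still takes it.
               CLTS can only clear TS and thus never satisfies the condition,
               which is what the Assert above relies on. */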
     5213
     5214            /*
    51305215             * Change CR0.
    51315216             */
     
    51865271         */
    51875272        case 2:
     5273        {
     5274            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
     5275            {
     5276                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5277                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
     5278            }
    51885279            pCtx->cr2 = uNewCrX;
    51895280            rcStrict  = VINF_SUCCESS;
    51905281            break;
     5282        }
    51915283
    51925284        /*
     
    52195311                     uNewCrX, uNewCrX & ~fValid));
    52205312                uNewCrX &= fValid;
     5313            }
     5314
     5315            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
     5316            {
     5317                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5318                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
    52215319            }
    52225320
     
    52845382            }
    52855383
     5384            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
     5385            {
     5386                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5387                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
     5388            }
    52865389
    52875390            /*
     
    53375440            }
    53385441
     5442            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
     5443            {
     5444                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5445                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
     5446            }
     5447
    53395448            if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
    53405449                APICSetTpr(pVCpu, (uint8_t)uNewCrX << 4);
     
    53795488    else
    53805489        uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
    5381     return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
     5490    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
    53825491}
    53835492
     
    54015510    uint64_t uNewCr0 = pCtx->cr0     & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    54025511    uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    5403     return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
     5512    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
    54045513}
    54055514
     
    54165525    uint64_t uNewCr0 = pCtx->cr0;
    54175526    uNewCr0 &= ~X86_CR0_TS;
    5418     return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
     5527    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
    54195528}
    54205529
     
    54785587    }
    54795588
     5589    /** @todo SVM nested-guest intercept for DR8-DR15? */
     5590    /*
     5591     * Check for any SVM nested-guest intercepts for the DRx read.
     5592     */
     5593    if (IEM_IS_SVM_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
     5594    {
     5595        Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
     5596        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
     5597                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
     5598    }
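    /* When the virtual CPU advertises decode assists, EXITINFO1 carries the
       GPR operand number; without them the field conveys nothing, so zero is
       passed.  The DRx write and INVLPG intercepts below gate on
       fSvmDecodeAssist the same way. */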
     5599
    54805600    if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    54815601        *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
     
    55685688    }
    55695689
     5690    /** @todo SVM nested-guest intercept for DR8-DR15? */
     5691    /*
     5692     * Check for any SVM nested-guest intercepts for the DRx write.
     5693     */
     5694    if (IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
     5695    {
     5696        Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
     5697        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
     5698                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
     5699    }
     5700
    55705701    /*
    55715702     * Do the actual setting.
     
    55975728    Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
    55985729
     5730    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
     5731    {
     5732        Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
     5733        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
     5734                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? GCPtrPage : 0, 0 /* uExitInfo2 */);
     5735    }
     5736
    55995737    int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
    56005738    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     
    56295767        Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
    56305768        return iemRaiseGeneralProtectionFault0(pVCpu);
     5769    }
     5770
     5771    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
     5772    {
     5773        Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
     5774        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    56315775    }
    56325776
     
    56475791
    56485792/**
      5793 * Implements RDTSCP.
     5794 */
     5795IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
     5796{
     5797    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     5798
     5799    /*
     5800     * Check preconditions.
     5801     */
     5802    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
     5803        return iemRaiseUndefinedOpcode(pVCpu);
     5804
     5805    if (   (pCtx->cr4 & X86_CR4_TSD)
     5806        && pVCpu->iem.s.uCpl != 0)
     5807    {
     5808        Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     5809        return iemRaiseGeneralProtectionFault0(pVCpu);
     5810    }
     5811
     5812    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
     5813    {
     5814        Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
     5815        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     5816    }
     5817
     5818    /*
     5819     * Do the job.
     5820     * Query the MSR first in case of trips to ring-3.
     5821     */
     5822    VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
     5823    if (rcStrict == VINF_SUCCESS)
     5824    {
     5825        /* Low dword of the TSC_AUX msr only. */
     5826        pCtx->rcx &= UINT32_C(0xffffffff);
     5827
     5828        uint64_t uTicks = TMCpuTickGet(pVCpu);
     5829        pCtx->rax = (uint32_t)uTicks;
     5830        pCtx->rdx = uTicks >> 32;
     5831#ifdef IEM_VERIFICATION_MODE_FULL
     5832        pVCpu->iem.s.fIgnoreRaxRdx = true;
     5833#endif
     5834        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5835    }
     5836    return rcStrict;
     5837}
     5838
     5839
     5840/**
     5841 * Implements RDPMC.
     5842 */
     5843IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
     5844{
     5845    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     5846    if (   pVCpu->iem.s.uCpl != 0
     5847        && !(pCtx->cr4 & X86_CR4_PCE))
     5848        return iemRaiseGeneralProtectionFault0(pVCpu);
     5849
     5850    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
     5851    {
     5852        Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
     5853        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     5854    }
     5855
     5856    /** @todo Implement RDPMC for the regular guest execution case (the above only
     5857     *        handles nested-guest intercepts). */
     5858    RT_NOREF(cbInstr);
     5859    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
     5860}
     5861
     5862
     5863/**
    56495864 * Implements RDMSR.
    56505865 */
     
    56655880     */
    56665881    RTUINT64U uValue;
    5667     VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
     5882    VBOXSTRICTRC rcStrict;
     5883    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
     5884    {
     5885        rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, false /* fWrite */);
     5886        if (rcStrict == VINF_SVM_VMEXIT)
     5887            return VINF_SUCCESS;
     5888        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     5889        {
     5890            Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
     5891            return rcStrict;
     5892        }
     5893    }
     5894
     5895    rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
    56685896    if (rcStrict == VINF_SUCCESS)
    56695897    {
     
    57185946
    57195947    VBOXSTRICTRC rcStrict;
     5948    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
     5949    {
     5950        rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, true /* fWrite */);
     5951        if (rcStrict == VINF_SVM_VMEXIT)
     5952            return VINF_SUCCESS;
     5953        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     5954        {
      5955            Log(("IEM: SVM intercepted wrmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
     5956            return rcStrict;
     5957        }
     5958    }
     5959
    57205960    if (!IEM_VERIFICATION_ENABLED(pVCpu))
    57215961        rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
     
    57766016
    57776017    /*
     6018     * Check SVM nested-guest IO intercept.
     6019     */
     6020    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     6021    {
     6022        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, 0 /* N/A - cAddrSizeBits */,
     6023                                           0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr);
     6024        if (rcStrict == VINF_SVM_VMEXIT)
     6025            return VINF_SUCCESS;
     6026        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     6027        {
     6028            Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
     6029                 VBOXSTRICTRC_VAL(rcStrict)));
     6030            return rcStrict;
     6031        }
     6032    }
     6033
     6034    /*
    57786035     * Perform the I/O.
    57796036     */
     
    58466103
    58476104    /*
     6105     * Check SVM nested-guest IO intercept.
     6106     */
     6107    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     6108    {
     6109        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, 0 /* N/A - cAddrSizeBits */,
     6110                                           0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr);
     6111        if (rcStrict == VINF_SVM_VMEXIT)
     6112            return VINF_SUCCESS;
     6113        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     6114        {
     6115            Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
     6116                 VBOXSTRICTRC_VAL(rcStrict)));
     6117            return rcStrict;
     6118        }
     6119    }
     6120
     6121    /*
    58486122     * Perform the I/O.
    58496123     */
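iemSvmHandleIOIntercept likewise has to test the nested-guest I/O permission bitmap before building the exit qualification. A minimal sketch of the port lookup per the AMD APM (hypothetical helper name; one bit per port in the 12K bitmap):

    static bool svmIsIoIntercepted(uint8_t const *pbIopm, uint16_t u16Port, uint8_t cbAccess)
    {
        /* Test one bit per accessed byte so a multi-byte access straddling
           ports is caught; ports past 0xffff spill into the map's tail. */
        for (uint8_t off = 0; off < cbAccess; off++)
        {
            uint32_t const uPort = (uint32_t)u16Port + off;
            if (pbIopm[uPort / 8] & RT_BIT(uPort % 8))
                return true;
        }
        return false;
    }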
     
    59146188    }
    59156189
    5916 #ifndef IN_RC
    59176190    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
    59186191    {
    59196192        Log(("vmrun: Guest intercept -> #VMEXIT\n"));
    5920         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    5921     }
    5922 #endif
     6193        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6194    }
    59236195
    59246196    VBOXSTRICTRC rcStrict = HMSvmVmrun(pVCpu, pCtx, GCPhysVmcb);
     
    59416213{
    59426214    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    5943 #ifndef IN_RC
    59446215    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
    59456216    {
    5946         Log(("vmrun: Guest intercept -> #VMEXIT\n"));
    5947         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    5948     }
    5949 #endif
     6217        Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
     6218        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6219    }
    59506220
    59516221    bool fUpdatedRipAndRF;
     
    59696239    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    59706240    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
    5971 #ifndef IN_RC
    5972     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
    5973     {
    5974         Log(("vmload: Guest intercept -> #VMEXIT\n"));
    5975         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    5976     }
    5977 #endif
    59786241
    59796242    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
     
    59836246        Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
    59846247        return iemRaiseGeneralProtectionFault0(pVCpu);
     6248    }
     6249
     6250    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
     6251    {
     6252        Log(("vmload: Guest intercept -> #VMEXIT\n"));
     6253        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    59856254    }
    59866255
     
    60206289    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    60216290    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
    6022 #ifndef IN_RC
    6023     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
    6024     {
    6025         Log(("vmsave: Guest intercept -> #VMEXIT\n"));
    6026         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    6027     }
    6028 #endif
    60296291
    60306292    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
     
    60346296        Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
    60356297        return iemRaiseGeneralProtectionFault0(pVCpu);
     6298    }
     6299
     6300    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
     6301    {
     6302        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
     6303        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    60366304    }
    60376305
     
    60716339    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    60726340    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
    6073 #ifndef IN_RC
    60746341    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
    60756342    {
    60766343        Log(("clgi: Guest intercept -> #VMEXIT\n"));
    6077         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    6078     }
    6079 #endif
     6344        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6345    }
    60806346
    60816347    pCtx->hwvirt.svm.fGif = 0;
     
    60926358    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    60936359    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
    6094 #ifndef IN_RC
    60956360    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
    60966361    {
    60976362        Log2(("stgi: Guest intercept -> #VMEXIT\n"));
    6098         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    6099     }
    6100 #endif
     6363        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6364    }
    61016365
    61026366    pCtx->hwvirt.svm.fGif = 1;
     
    61126376{
    61136377    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    6114     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
    6115 #ifndef IN_RC
    6116     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
    6117     {
    6118         Log2(("invlpga: Guest intercept -> #VMEXIT\n"));
    6119         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    6120     }
    6121 #endif
    6122 
    61236378    RTGCPTR  const GCPtrPage = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    61246379    /** @todo PGM needs virtual ASID support. */
     
    61266381    uint32_t const uAsid     = pCtx->ecx;
    61276382#endif
     6383
     6384    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
     6385    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
     6386    {
     6387        Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
     6388        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6389    }
     6390
    61286391    PGMInvalidatePage(pVCpu, GCPtrPage);
    61296392    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    61306393    return VINF_SUCCESS;
     6394}
     6395
     6396
     6397/**
     6398 * Implements 'SKINIT'.
     6399 */
     6400IEM_CIMPL_DEF_0(iemCImpl_skinit)
     6401{
      6402    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, skinit);
     6403
     6404    uint32_t uIgnore;
     6405    uint32_t fFeaturesECX;
     6406    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
     6407    if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
     6408        return iemRaiseUndefinedOpcode(pVCpu);
     6409
     6410    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
     6411    {
     6412        Log2(("skinit: Guest intercept -> #VMEXIT\n"));
     6413        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6414    }
     6415
     6416    RT_NOREF(cbInstr);
     6417    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
    61316418}
    61326419#endif /* VBOX_WITH_NESTED_HWVIRT */
     
    62286515    if (pVCpu->iem.s.uCpl != 0)
    62296516        return iemRaiseGeneralProtectionFault0(pVCpu);
     6517
     6518    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
     6519    {
     6520        Log2(("hlt: Guest intercept -> #VMEXIT\n"));
     6521        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6522    }
     6523
    62306524    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    62316525    return VINF_EM_HALT;
     
    62766570        return rcStrict;
    62776571
     6572    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
     6573    {
     6574        Log2(("monitor: Guest intercept -> #VMEXIT\n"));
     6575        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6576    }
     6577
    62786578    /*
    62796579     * Call EM to prepare the monitor/wait.
     
    63346634
    63356635    /*
     6636     * Check SVM nested-guest mwait intercepts.
     6637     */
     6638    if (   IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
     6639        && EMMonitorIsArmed(pVCpu))
     6640    {
     6641        Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
     6642        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6643    }
     6644    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
     6645    {
     6646        Log2(("mwait: Guest intercept -> #VMEXIT\n"));
     6647        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6648    }
     6649
     6650    /*
    63366651     * Call EM to prepare the monitor/wait.
    63376652     */
     
    63786693{
    63796694    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     6695
     6696    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
     6697    {
     6698        Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
     6699        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6700    }
    63806701
    63816702    CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
     
    67267047    if (pCtx->cr4 & X86_CR4_OSXSAVE)
    67277048    {
     7049        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
     7050        {
     7051            Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
     7052            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     7053        }
     7054
    67287055        if (pVCpu->iem.s.uCpl == 0)
    67297056        {
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h

    r62478 r66581  
    12181218    }
    12191219
     1220    /*
     1221     * Check SVM nested-guest IO intercept.
     1222     */
     1223    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     1224    {
     1225        rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, false /* fRep */,
     1226                                           true /* fStrIo */, cbInstr);
     1227        if (rcStrict == VINF_SVM_VMEXIT)
     1228            return VINF_SUCCESS;
     1229        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1230        {
     1231            Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pCtx->dx, OP_SIZE / 8,
     1232                 VBOXSTRICTRC_VAL(rcStrict)));
     1233            return rcStrict;
     1234        }
     1235    }
     1236
    12201237    OP_TYPE        *puMem;
    12211238    rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
     
    12691286        if (rcStrict != VINF_SUCCESS)
    12701287            return rcStrict;
     1288    }
     1289
     1290    /*
     1291     * Check SVM nested-guest IO intercept.
     1292     */
     1293    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     1294    {
     1295        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, true /* fRep */,
     1296                                           true /* fStrIo */, cbInstr);
     1297        if (rcStrict == VINF_SVM_VMEXIT)
     1298            return VINF_SUCCESS;
     1299        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1300        {
     1301            Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
     1302                 VBOXSTRICTRC_VAL(rcStrict)));
     1303            return rcStrict;
     1304        }
    12711305    }
    12721306
     
    14551489    }
    14561490
     1491    /*
     1492     * Check SVM nested-guest IO intercept.
     1493     */
     1494    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     1495    {
     1496        rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, false /* fRep */,
     1497                                           true /* fStrIo */, cbInstr);
     1498        if (rcStrict == VINF_SVM_VMEXIT)
     1499            return VINF_SUCCESS;
     1500        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1501        {
     1502            Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pCtx->dx, OP_SIZE / 8,
     1503                 VBOXSTRICTRC_VAL(rcStrict)));
     1504            return rcStrict;
     1505        }
     1506    }
     1507
    14571508    OP_TYPE uValue;
    14581509    rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
     
    14961547        if (rcStrict != VINF_SUCCESS)
    14971548            return rcStrict;
     1549    }
     1550
     1551    /*
     1552     * Check SVM nested-guest IO intercept.
     1553     */
     1554    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     1555    {
     1556        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, true /* fRep */,
     1557                                           true /* fStrIo */, cbInstr);
     1558        if (rcStrict == VINF_SVM_VMEXIT)
     1559            return VINF_SUCCESS;
     1560        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1561        {
     1562            Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
     1563                 VBOXSTRICTRC_VAL(rcStrict)));
     1564            return rcStrict;
     1565        }
    14981566    }
    14991567
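For the string variants above, the extra iemSvmHandleIOIntercept parameters map directly onto the SVM_EXIT_IOIO exit qualification; summarizing the EXITINFO1 layout from the AMD APM (not quoted from this changeset):

    bit  0       direction, 1 = IN / 0 = OUT
    bit  2       string instruction (fStrIo)
    bit  3       REP prefix (fRep)
    bits 6:4     operand size, one-hot (cbReg: 1/2/4 bytes)
    bits 9:7     address size, one-hot (cAddrSizeBits: 16/32/64)
    bits 31:16   port number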
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h

    r66479 r66581  
    44544454
    44554455    if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)
     4456    {
    44564457        IEMOP_MNEMONIC(pause, "pause");
     4458#ifdef VBOX_WITH_NESTED_HWVIRT
     4459        /** @todo Pause filter count and threshold with SVM nested hardware virt. */
     4460        Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter);
     4461        Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold);
     4462#endif
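        /* Hedged sketch of what the pause-filter support in the todo above
           would involve (per the AMD APM, not implemented here): each guest
           PAUSE decrements a filter count and only triggers SVM_EXIT_PAUSE
           once it reaches zero, while the threshold re-arms the count when
           successive PAUSEs are more than that many TSC ticks apart. */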
     4463        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0);
     4464    }
    44574465    else
    44584466        IEMOP_MNEMONIC(nop, "nop");
     
    1058210590    IEMOP_HLP_MIN_386(); /** @todo does not generate #UD on 286, or so they say... */
    1058310591    /** @todo testcase! */
     10592    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_ICEBP, SVM_EXIT_ICEBP, 0, 0);
    1058410593    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_DB, false /*fIsBpInstr*/);
    1058510594}
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r66474 r66581  
    3535    {
    3636        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
     37        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
    3738        switch (pVCpu->iem.s.enmEffOpSize)
    3839        {
     
    7475        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    7576        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
     77        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
    7678        IEM_MC_FETCH_LDTR_U16(u16Ldtr);
    7779        IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Ldtr);
     
    9395    {
    9496        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
     97        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
    9598        switch (pVCpu->iem.s.enmEffOpSize)
    9699        {
     
    132135        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    133136        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
     137        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
    134138        IEM_MC_FETCH_TR_U16(u16Tr);
    135139        IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Tr);
     
    482486    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
    483487}
     488
     489
     490/** Opcode 0x0f 0x01 0xde. */
     491FNIEMOP_DEF(iemOp_Grp7_Amd_skinit)
     492{
     493    IEMOP_MNEMONIC(skinit, "skinit");
     494    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_skinit);
     495}
    484496#else
    485497/** Opcode 0x0f 0x01 0xd8. */
     
    503515/** Opcode 0x0f 0x01 0xdf. */
    504516FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
    505 #endif /* VBOX_WITH_NESTED_HWVIRT */
    506517
    507518/** Opcode 0x0f 0x01 0xde. */
    508519FNIEMOP_UD_STUB(iemOp_Grp7_Amd_skinit);
     520#endif /* VBOX_WITH_NESTED_HWVIRT */
    509521
    510522/** Opcode 0x0f 0x01 /4. */
     
    516528    {
    517529        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
     530        IEMOP_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    518531        switch (pVCpu->iem.s.enmEffOpSize)
    519532        {
     
    562575        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    563576        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
     577        IEMOP_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    564578        IEM_MC_FETCH_CR0_U16(u16Tmp);
    565579        if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
     
    636650FNIEMOP_DEF(iemOp_Grp7_rdtscp)
    637651{
    638     NOREF(pVCpu);
    639     IEMOP_BITCH_ABOUT_STUB();
    640     return VERR_IEM_INSTR_NOT_IMPLEMENTED;
     652    IEMOP_MNEMONIC(rdtscp, "rdtscp");
     653    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
      654    /** @todo Remove this SVM intercept check; iemCImpl_rdtscp already performs it. */
     655    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP, SVM_EXIT_RDTSCP, 0, 0);
     656    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtscp);
    641657}
    642658
     
    868884
    869885/** Opcode 0x0f 0x08. */
    870 FNIEMOP_STUB(iemOp_invd);
     886FNIEMOP_DEF(iemOp_invd)
     887{
     888    IEMOP_MNEMONIC(invd, "invd");
     889#ifdef VBOX_WITH_NESTED_HWVIRT
     890    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
     891    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
     892#endif
     893    /** @todo implement invd for the regular case (above only handles nested SVM
     894     *        exits). */
     895    IEMOP_BITCH_ABOUT_STUB();
     896    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
     897}
     898
    871899// IEMOP_HLP_MIN_486();
    872900
     
    880908    IEM_MC_BEGIN(0, 0);
    881909    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
     910    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
    882911    IEM_MC_ADVANCE_RIP();
    883912    IEM_MC_END();
     
    20312060
     20322061/** Opcode 0x0f 0x33. */
    2033 FNIEMOP_STUB(iemOp_rdpmc);
     2062FNIEMOP_DEF(iemOp_rdpmc)
     2063{
     2064    IEMOP_MNEMONIC(rdpmc, "rdpmc");
     2065    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
     2066    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdpmc);
     2067}
     2068
     2069
    20342070/** Opcode 0x0f 0x34. */
    20352071FNIEMOP_STUB(iemOp_sysenter);
     
    57225758
    57235759/** Opcode 0x0f 0xaa. */
    5724 FNIEMOP_STUB(iemOp_rsm);
     5760FNIEMOP_DEF(iemOp_rsm)
     5761{
     5762    IEMOP_MNEMONIC(rsm, "rsm");
     5763    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
     5764    /** @todo rsm - for the regular case (above handles only the SVM nested-guest
     5765     *        intercept). */
     5766    IEMOP_BITCH_ABOUT_STUB();
     5767    return IEMOP_RAISE_INVALID_OPCODE();
     5768}
     5769
    57255770//IEMOP_HLP_MIN_386();
    57265771
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r66371 r66581  
    625625    Assert(pVM->hm.s.svm.fSupported);
    626626
    627     bool const fPauseFilter          = RT_BOOL(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
    628     bool const fPauseFilterThreshold = RT_BOOL(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
     627    bool const fPauseFilter          = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
     628    bool const fPauseFilterThreshold = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
    629629    bool const fUsePauseFilter       = fPauseFilter && pVM->hm.s.svm.cPauseFilter && pVM->hm.s.svm.cPauseFilterThresholdTicks;
    630630
     
    890890                fHitASIDLimit             = true;
    891891
    892                 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     892                if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    893893                {
    894894                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
     
    905905                && pCpu->fFlushAsidBeforeUse)
    906906            {
    907                 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     907                if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    908908                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    909909                else
     
    920920        else
    921921        {
    922             if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     922            if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    923923                pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    924924            else
     
    31493149
    31503150    /* If VMCB Clean bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
    3151     if (!(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN))
     3151    if (!(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN))
    31523152        pVmcb->ctrl.u64VmcbCleanBits = 0;
    31533153}
     
    41484148DECLINLINE(void) hmR0SvmAdvanceRipHwAssist(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
    41494149{
    4150     if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
     4150    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
    41514151    {
    41524152        PCSVMVMCB pVmcb = (PCSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     
    41744174{
    41754175    Assert(cbLikely <= 15);   /* See Intel spec. 2.3.11 "AVX Instruction Length" */
    4176     if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
     4176    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
    41774177    {
    41784178        PCSVMVMCB pVmcb = (PCSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     
    45694569        }
    45704570
    4571         if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
     4571        if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
    45724572        {
    45734573            rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
     
    46164616        Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ);
    46174617
    4618         if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
     4618        if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
    46194619        {
    46204620            rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
     
    48014801                       only enabling it for Bulldozer and later with NRIP.  OS/2 broke on
    48024802                       2384 Opterons when only checking NRIP. */
    4803                     if (   (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
     4803                    if (   (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
    48044804                        && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
    48054805                    {
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r66403 r66581  
    17121712                PCCPUMCPUIDLEAF pSvmLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x8000000a);
    17131713                AssertLogRelReturn(pSvmLeaf, VERR_CPUM_IPE_1);
    1714                 pFeatures->svm.feat.u   = pSvmLeaf->uEdx;
    1715                 pFeatures->svm.uMaxAsid = pSvmLeaf->uEbx;
     1714                pFeatures->fSvmNestedPaging         = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING);
     1715                pFeatures->fSvmLbrVirt              = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT);
     1716                pFeatures->fSvmSvmLock              = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK);
     1717                pFeatures->fSvmNextRipSave          = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
     1718                pFeatures->fSvmTscRateMsr           = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR);
     1719                pFeatures->fSvmVmcbClean            = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN);
     1720                pFeatures->fSvmFlusbByAsid          = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID);
     1721                pFeatures->fSvmDecodeAssist         = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST);
     1722                pFeatures->fSvmPauseFilter          = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
     1723                pFeatures->fSvmPauseFilterThreshold = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
     1724                pFeatures->fSvmAvic                 = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_AVIC);
     1725                pFeatures->uSvmMaxAsid              = pSvmLeaf->uEbx;
    17161726            }
    17171727        }
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r65447 r66581  
    16181618    {
    16191619#define HMSVM_REPORT_FEATURE(a_StrDesc, a_Define) { a_Define, a_StrDesc }
    1620         HMSVM_REPORT_FEATURE("NESTED_PAGING",          AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
    1621         HMSVM_REPORT_FEATURE("LBR_VIRT",               AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
    1622         HMSVM_REPORT_FEATURE("SVM_LOCK",               AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
    1623         HMSVM_REPORT_FEATURE("NRIP_SAVE",              AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
    1624         HMSVM_REPORT_FEATURE("TSC_RATE_MSR",           AMD_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
    1625         HMSVM_REPORT_FEATURE("VMCB_CLEAN",             AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
    1626         HMSVM_REPORT_FEATURE("FLUSH_BY_ASID",          AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
    1627         HMSVM_REPORT_FEATURE("DECODE_ASSIST",          AMD_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST),
    1628         HMSVM_REPORT_FEATURE("PAUSE_FILTER",           AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
    1629         HMSVM_REPORT_FEATURE("PAUSE_FILTER_THRESHOLD", AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
    1630         HMSVM_REPORT_FEATURE("AVIC",                   AMD_CPUID_SVM_FEATURE_EDX_AVIC),
     1620        HMSVM_REPORT_FEATURE("NESTED_PAGING",          X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
     1621        HMSVM_REPORT_FEATURE("LBR_VIRT",               X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
     1622        HMSVM_REPORT_FEATURE("SVM_LOCK",               X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
     1623        HMSVM_REPORT_FEATURE("NRIP_SAVE",              X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
     1624        HMSVM_REPORT_FEATURE("TSC_RATE_MSR",           X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
     1625        HMSVM_REPORT_FEATURE("VMCB_CLEAN",             X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
     1626        HMSVM_REPORT_FEATURE("FLUSH_BY_ASID",          X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
     1627        HMSVM_REPORT_FEATURE("DECODE_ASSIST",          X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST),
     1628        HMSVM_REPORT_FEATURE("PAUSE_FILTER",           X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
     1629        HMSVM_REPORT_FEATURE("PAUSE_FILTER_THRESHOLD", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
     1630        HMSVM_REPORT_FEATURE("AVIC",                   X86_CPUID_SVM_FEATURE_EDX_AVIC),
    16311631#undef HMSVM_REPORT_FEATURE
    16321632    };
     
    16481648     */
    16491649    AssertLogRelReturn(   !pVM->hm.s.fNestedPaging
    1650                        || (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
     1650                       || (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
    16511651                       VERR_HM_IPE_1);
    16521652
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r66403 r66581  
    231231    .Guest.abPadding          resb    12
    232232%endif
    233     .Guest.hwvirt.svm.uMsrHSavePa       resq    1
    234     .Guest.hwvirt.svm.GCPhysVmcb        resq    1
    235     .Guest.hwvirt.svm.VmcbCtrl          resb  256
    236     .Guest.hwvirt.svm.HostState         resb  184
    237     .Guest.hwvirt.svm.fGif              resb    1
    238     .Guest.hwvirt.svm.abPadding0        resb    7
    239     .Guest.hwvirt.svm.pvMsrBitmapR0     RTR0PTR_RES 1
    240     .Guest.hwvirt.svm.pvMsrBitmapR3     RTR3PTR_RES 1
    241     .Guest.hwvirt.svm.pvIoBitmapR0      RTR0PTR_RES 1
    242     .Guest.hwvirt.svm.pvIoBitmapR3      RTR3PTR_RES 1
     233    .Guest.hwvirt.svm.uMsrHSavePa               resq    1
     234    .Guest.hwvirt.svm.GCPhysVmcb                resq    1
     235    .Guest.hwvirt.svm.VmcbCtrl                  resb  256
     236    .Guest.hwvirt.svm.HostState                 resb  184
     237    .Guest.hwvirt.svm.fGif                      resb    1
     238    .Guest.hwvirt.svm.cPauseFilter              resw    1
     239    .Guest.hwvirt.svm.cPauseFilterThreshold     resw    1
     240    .Guest.hwvirt.svm.abPadding0                resb    3
     241    .Guest.hwvirt.svm.pvMsrBitmapR0             RTR0PTR_RES 1
     242    .Guest.hwvirt.svm.pvMsrBitmapR3             RTR3PTR_RES 1
     243    .Guest.hwvirt.svm.pvIoBitmapR0              RTR0PTR_RES 1
     244    .Guest.hwvirt.svm.pvIoBitmapR3              RTR3PTR_RES 1
    243245%if HC_ARCH_BITS == 32
    244     .Guest.hwvirt.svm.abPadding1        resb    16
    245 %endif
    246     .Guest.hwvirt.fLocalForcedActions   resd    1
     246    .Guest.hwvirt.svm.abPadding1                resb    16
     247%endif
     248    .Guest.hwvirt.fLocalForcedActions           resd    1
    247249    alignb 64
    248250
     
    508510    .Hyper.abPadding          resb    12
    509511%endif
    510     .Hyper.hwvirt.svm.uMsrHSavePa       resq    1
    511     .Hyper.hwvirt.svm.GCPhysVmcb        resq    1
    512     .Hyper.hwvirt.svm.VmcbCtrl          resb  256
    513     .Hyper.hwvirt.svm.HostState         resb  184
    514     .Hyper.hwvirt.svm.fGif              resb    1
    515     .Hyper.hwvirt.svm.abPadding0        resb    7
    516     .Hyper.hwvirt.svm.pvMsrBitmapR0     RTR0PTR_RES 1
    517     .Hyper.hwvirt.svm.pvMsrBitmapR3     RTR3PTR_RES 1
    518     .Hyper.hwvirt.svm.pvIoBitmapR0      RTR0PTR_RES 1
    519     .Hyper.hwvirt.svm.pvIoBitmapR3      RTR3PTR_RES 1
     512    .Hyper.hwvirt.svm.uMsrHSavePa               resq    1
     513    .Hyper.hwvirt.svm.GCPhysVmcb                resq    1
     514    .Hyper.hwvirt.svm.VmcbCtrl                  resb  256
     515    .Hyper.hwvirt.svm.HostState                 resb  184
     516    .Hyper.hwvirt.svm.fGif                      resb    1
     517    .Hyper.hwvirt.svm.cPauseFilter              resw    1
     518    .Hyper.hwvirt.svm.cPauseFilterThreshold     resw    1
     519    .Hyper.hwvirt.svm.abPadding0                resb    3
     520    .Hyper.hwvirt.svm.pvMsrBitmapR0             RTR0PTR_RES 1
     521    .Hyper.hwvirt.svm.pvMsrBitmapR3             RTR3PTR_RES 1
     522    .Hyper.hwvirt.svm.pvIoBitmapR0              RTR0PTR_RES 1
     523    .Hyper.hwvirt.svm.pvIoBitmapR3              RTR3PTR_RES 1
    520524%if HC_ARCH_BITS == 32
    521     .Hyper.hwvirt.svm.abPadding1        resb   16
    522 %endif
    523     .Hyper.hwvirt.fLocalForcedActions   resd    1
     525    .Hyper.hwvirt.svm.abPadding1                resb   16
     526%endif
     527    .Hyper.hwvirt.fLocalForcedActions           resd    1
    524528    alignb 64
    525529
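These reservations must stay byte-for-byte in sync with the C declarations in cpum.h; the two new 16-bit pause-filter counters fit into the former 7-byte padding, so the overall layout (and the offset checks in tstVMStruct.h below) is unchanged. A hedged C-side sketch of the affected run of fields, with types inferred from the resb/resw directives rather than quoted from cpum.h:

    uint8_t     fGif;                     /* resb 1         */
    uint16_t    cPauseFilter;             /* resw 1, new    */
    uint16_t    cPauseFilterThreshold;    /* resw 1, new    */
    uint8_t     abPadding0[3];            /* resb 3, was 7  */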
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r66474 r66581  
    502502
    503503#else
    504     /** The size of what has currently been fetched into abOpcodes. */
     504    /** The size of what has currently been fetched into abOpcode. */
    505505    uint8_t                 cbOpcode;                                                                       /*       0x08 */
    506     /** The current offset into abOpcodes. */
     506    /** The current offset into abOpcode. */
    507507    uint8_t                 offOpcode;                                                                      /*       0x09 */
    508508
     
    951951AssertCompileSize(IEMTASKSWITCH, 4);
    952952
     953/**
     954 * Possible CrX load (write) sources.
     955 */
     956typedef enum IEMACCESSCRX
     957{
     958    /** CrX access caused by 'mov crX' instruction. */
     959    IEMACCESSCRX_MOV_CRX,
     960    /** CrX (CR0) write caused by 'lmsw' instruction. */
     961    IEMACCESSCRX_LMSW,
     962    /** CrX (CR0) write caused by 'clts' instruction. */
     963    IEMACCESSCRX_CLTS,
     964    /** CrX (CR0) read caused by 'smsw' instruction. */
     965    IEMACCESSCRX_SMSW
     966} IEMACCESSCRX;
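The lmsw and clts workers have no GPR operand, so their call sites shown earlier pass UINT8_MAX for iGReg, e.g.:

    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);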
    953967
    954968/**
  • trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp

    r66457 r66581  
    124124#define IEMOP_HLP_DONE_DECODING()                           do { } while (0)
    125125#define IEMOP_HLP_DONE_VEX_DECODING()                       do { } while (0)
     126
     127#define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
     128#define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2)                 do { } while (0)
    126129
    127130#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType)               do { } while (0)
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r66277 r66581  
    137137    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HostState);
    138138    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fGif);
     139    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilter);
     140    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilterThreshold);
    139141    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR0);
    140142    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR3);