Index: /trunk/include/VBox/settings.h
===================================================================
--- /trunk/include/VBox/settings.h	(revision 70605)
+++ /trunk/include/VBox/settings.h	(revision 70606)
@@ -905,4 +905,6 @@
                         fAPIC,                  // requires settings version 1.16 (VirtualBox 5.1)
                         fX2APIC;                // requires settings version 1.16 (VirtualBox 5.1)
+    bool                fIBPBOnVMExit;          //< added out of cycle, after 1.16 was out.
+    bool                fIBPBOnVMEntry;         //< added out of cycle, after 1.16 was out.
     typedef enum LongModeType { LongMode_Enabled, LongMode_Disabled, LongMode_Legacy } LongModeType;
     LongModeType        enmLongMode;
Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 70605)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 70606)
@@ -1026,4 +1026,12 @@
     /** Supports CLFLUSHOPT. */
     uint32_t        fClFlushOpt : 1;
+    /** Supports IA32_PRED_CMD.IBPB. */
+    uint32_t        fIbpb : 1;
+    /** Supports IA32_SPEC_CTRL.IBRS. */
+    uint32_t        fIbrs : 1;
+    /** Supports IA32_SPEC_CTRL.STIBP. */
+    uint32_t        fStibp : 1;
+    /** Supports IA32_ARCH_CAP. */
+    uint32_t        fArchCap : 1;
 
     /** Supports AMD 3DNow instructions. */
@@ -1059,5 +1067,5 @@
 
     /** Alignment padding / reserved for future use. */
-    uint32_t        fPadding : 23;
+    uint32_t        fPadding : 19;
 
     /** SVM: Supports Nested-paging. */
Index: /trunk/include/VBox/vmm/cpum.mac
===================================================================
--- /trunk/include/VBox/vmm/cpum.mac	(revision 70605)
+++ /trunk/include/VBox/vmm/cpum.mac	(revision 70606)
@@ -147,4 +147,5 @@
 %define XSTATE_SIZE             8192
 
+;; Note! Updates here must be reflected in CPUMInternal.mac too!
 struc CPUMCTX
     .eax                resq    1
@@ -250,12 +251,11 @@
     .fXStateMask        resq    1
     .pXStateR0      RTR0PTR_RES 1
+    alignb 8
     .pXStateR3      RTR3PTR_RES 1
+    alignb 8
     .pXStateRC      RTRCPTR_RES 1
     .aoffXState         resw    64
-%if HC_ARCH_BITS == 64
-    .abPadding          resb    4
-%else
-    .abPadding          resb    12
-%endif
+    .fWorldSwitcher     resd    1
+    alignb 8
     .hwvirt.svm.uMsrHSavePa            resq          1
     .hwvirt.svm.GCPhysVmcb             resq          1
@@ -284,4 +284,6 @@
 endstruc
 
+%define CPUMCTX_WSF_IBPB_EXIT           RT_BIT_32(0)
+%define CPUMCTX_WSF_IBPB_ENTRY          RT_BIT_32(1)
 
 %define CPUMSELREG_FLAGS_VALID      0x0001
Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 70605)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 70606)
@@ -458,6 +458,12 @@
     /** Pointer to the FPU/SSE/AVX/XXXX state ring-0 mapping. */
     R0PTRTYPE(PX86XSAVEAREA)    pXStateR0;
+#if HC_ARCH_BITS == 32
+    uint32_t                    uXStateR0Padding;
+#endif
     /** Pointer to the FPU/SSE/AVX/XXXX state ring-3 mapping. */
     R3PTRTYPE(PX86XSAVEAREA)    pXStateR3;
+#if HC_ARCH_BITS == 32
+    uint32_t                    uXStateR3Padding;
+#endif
     /** Pointer to the FPU/SSE/AVX/XXXX state raw-mode mapping. */
     RCPTRTYPE(PX86XSAVEAREA)    pXStateRC;
@@ -465,6 +471,6 @@
     uint16_t                    aoffXState[64];
 
-    /** 724 - Size padding. */
-    uint8_t                     abPadding[HC_ARCH_BITS == 64 ? 4 : 12];
+    /** 0x2d4 - World switcher flags, CPUMCTX_WSF_XXX. */
+    uint32_t                    fWorldSwitcher;
 
     /** 728 - Hardware virtualization state.   */
@@ -579,7 +585,7 @@
 AssertCompileMemberOffset(CPUMCTX,                fXStateMask, 568);
 AssertCompileMemberOffset(CPUMCTX,                  pXStateR0, 576);
-AssertCompileMemberOffset(CPUMCTX,                  pXStateR3, HC_ARCH_BITS == 64 ? 584 : 580);
-AssertCompileMemberOffset(CPUMCTX,                  pXStateRC, HC_ARCH_BITS == 64 ? 592 : 584);
-AssertCompileMemberOffset(CPUMCTX,                 aoffXState, HC_ARCH_BITS == 64 ? 596 : 588);
+AssertCompileMemberOffset(CPUMCTX,                  pXStateR3, 584);
+AssertCompileMemberOffset(CPUMCTX,                  pXStateRC, 592);
+AssertCompileMemberOffset(CPUMCTX,                 aoffXState, 596);
 AssertCompileMemberOffset(CPUMCTX, hwvirt, 728);
 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa,            728);
@@ -737,4 +743,14 @@
 #endif /* !VBOX_FOR_DTRACE_LIB */
 
+
+/** @name CPUMCTX_WSF_XXX
+ * @{ */
+/** Touch IA32_PRED_CMD.IBPB on VM exit. */
+#define CPUMCTX_WSF_IBPB_EXIT           RT_BIT_32(0)
+/** Touch IA32_PRED_CMD.IBPB on VM entry. */
+#define CPUMCTX_WSF_IBPB_ENTRY          RT_BIT_32(1)
+/** @} */
+
+
 /**
  * Additional guest MSRs (i.e. not part of the CPU context structure).
Index: /trunk/include/iprt/x86.h
===================================================================
--- /trunk/include/iprt/x86.h	(revision 70605)
+++ /trunk/include/iprt/x86.h	(revision 70606)
@@ -598,4 +598,26 @@
 /** ECX Bit 0 - PREFETCHWT1 - Supports the PREFETCHWT1 instruction. */
 #define X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1       RT_BIT_32(0)
+/** ECX Bit 2 - UMIP - Supports user mode instruction prevention. */
+#define X86_CPUID_STEXT_FEATURE_ECX_UMIP              RT_BIT_32(2)
+/** ECX Bit 3 - PKU - Supports protection keys for user-mode pages. */
+#define X86_CPUID_STEXT_FEATURE_ECX_PKU               RT_BIT_32(3)
+/** ECX Bit 4 - OSPKE - Protection keys for user mode pages enabled. */
+#define X86_CPUID_STEXT_FEATURE_ECX_OSPKE             RT_BIT_32(4)
+/** ECX Bits 17-21 - MAWAU - Value used by BNDLDX and BNDSTX. */
+#define X86_CPUID_STEXT_FEATURE_ECX_MAWAU             UINT32_C(0x003e0000)
+/** ECX Bit 22 - RDPID - Supports the RDPID (read processor ID) instruction. */
+#define X86_CPUID_STEXT_FEATURE_ECX_RDPID             RT_BIT_32(22)
+/** ECX Bit 30 - SGX_LC - Supports SGX launch configuration. */
+#define X86_CPUID_STEXT_FEATURE_ECX_SGX_LC            RT_BIT_32(30)
+
+/** EDX Bit 26 - IBRS & IBPB - Supports the IBRS flag in IA32_SPEC_CTRL and
+ *  IBPB command in IA32_PRED_CMD. */
+#define X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB         RT_BIT_32(26)
+/** EDX Bit 27 - STIBP - Supports the STIBP flag in IA32_SPEC_CTRL. */
+#define X86_CPUID_STEXT_FEATURE_EDX_STIBP             RT_BIT_32(27)
+
+/** EDX Bit 29 - ARCHCAP - Supports the IA32_ARCH_CAP MSR. */
+#define X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP           RT_BIT_32(29)
+
 /** @} */
 
@@ -742,4 +764,20 @@
 
 
+/** @name CPUID AMD extended feature extensions ID (EBX).
+ * CPUID query with EAX=0x80000008.
+ * @{
+ */
+/** Bit 0 - CLZERO - Clear zero instruction. */
+#define X86_CPUID_AMD_EFEID_EBX_CLZERO       RT_BIT_32(0)
+/** Bit 1 - IRPerf - Instructions retired count support. */
+#define X86_CPUID_AMD_EFEID_EBX_IRPERF       RT_BIT_32(1)
+/** Bit 2 - XSaveErPtr - Always XSAVE* and XRSTOR* error pointers. */
+#define X86_CPUID_AMD_EFEID_EBX_XSAVE_ER_PTR RT_BIT_32(2)
+/* AMD pipeline length: 9 feature bits ;-) */
+/** Bit 12 - IBPB - Supports the IBPB command in IA32_PRED_CMD. */
+#define X86_CPUID_AMD_EFEID_EBX_IBPB         RT_BIT_32(12)
+/** @} */
+
+
 /** @name CPUID AMD SVM Feature information.
  * CPUID query with EAX=0x8000000a.
@@ -1114,4 +1152,18 @@
 #define MSR_IA32_TSC_ADJUST                 0x3B
 
+/** Spectre control register.
+ * Logical processor scope. Reset value 0, unaffected by SIPI & INIT. */
+#define MSR_IA32_SPEC_CTRL                  0x48
+/** IBRS - Indirect branch restricted speculation. */
+#define MSR_IA32_SPEC_CTRL_F_IBRS           RT_BIT_32(0)
+/** STIBP - Single thread indirect branch predictors. */
+#define MSR_IA32_SPEC_CTRL_F_STIBP          RT_BIT_32(1)
+
+/** Prediction command register.
+ * Write only, logical processor scope, no state since write only. */
+#define MSR_IA32_PRED_CMD                   0x49
+/** IBPB - Indirect branch prediction barrier when written as 1. */
+#define MSR_IA32_PRED_CMD_F_IBPB            RT_BIT_32(0)
+
 /** BIOS update trigger (microcode update). */
 #define MSR_IA32_BIOS_UPDT_TRIG             0x79
@@ -1148,4 +1200,12 @@
 /** MTRR Capabilities. */
 #define MSR_IA32_MTRR_CAP                   0xFE
+
+/** Architecture capabilities (bugfixes).
+ * @note May move. */
+#define MSR_IA32_ARCH_CAP                   UINT32_C(0x10a)
+/** CPU is no subject to spectre problems. */
+#define MSR_IA32_ARCH_CAP_F_SPECTRE_FIX     RT_BIT_32(0)
+/** CPU has better IBRS and you can leave it on all the time. */
+#define MSR_IA32_ARCH_CAP_F_BETTER_IBRS     RT_BIT_32(1)
 
 /** Cache control/info. */
Index: /trunk/include/iprt/x86.mac
===================================================================
--- /trunk/include/iprt/x86.mac	(revision 70605)
+++ /trunk/include/iprt/x86.mac	(revision 70606)
@@ -177,4 +177,7 @@
 %define X86_CPUID_STEXT_FEATURE_EBX_SHA               RT_BIT_32(29)
 %define X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1       RT_BIT_32(0)
+%define X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB         RT_BIT_32(26)
+%define X86_CPUID_STEXT_FEATURE_EDX_STIBP             RT_BIT_32(27)
+%define X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP           RT_BIT_32(29)
 %define X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF     RT_BIT_32(0)
 %define X86_CPUID_EXT_FEATURE_EDX_SYSCALL       RT_BIT_32(11)
@@ -380,4 +383,9 @@
 %define MSR_IA32_FEATURE_CONTROL_VMXON      RT_BIT_32(2)
 %define MSR_IA32_TSC_ADJUST                 0x3B
+%define MSR_IA32_SPEC_CTRL                  0x48
+%define MSR_IA32_SPEC_CTRL_F_IBRS           RT_BIT_32(0)
+%define MSR_IA32_SPEC_CTRL_F_STIBP          RT_BIT_32(1)
+%define MSR_IA32_PRED_CMD                   0x49
+%define MSR_IA32_PRED_CMD_F_IBPB            RT_BIT_32(0)
 %define MSR_IA32_BIOS_UPDT_TRIG             0x79
 %define MSR_IA32_BIOS_SIGN_ID               0x8B
@@ -393,4 +401,7 @@
 %define MSR_IA32_APERF                      0xE8
 %define MSR_IA32_MTRR_CAP                   0xFE
+%define MSR_IA32_ARCH_CAP                   0x10a
+%define MSR_IA32_ARCH_CAP_F_SPECTRE_FIX     RT_BIT_32(0)
+%define MSR_IA32_ARCH_CAP_F_BETTER_IBRS     RT_BIT_32(1)
 %define MSR_BBL_CR_CTL3                     0x11e
 %ifndef MSR_IA32_SYSENTER_CS
Index: /trunk/src/VBox/Frontends/VBoxManage/VBoxManageHelp.cpp
===================================================================
--- /trunk/src/VBox/Frontends/VBoxManage/VBoxManageHelp.cpp	(revision 70605)
+++ /trunk/src/VBox/Frontends/VBoxManage/VBoxManageHelp.cpp	(revision 70606)
@@ -512,4 +512,6 @@
                      "                            [--pae on|off]\n"
                      "                            [--longmode on|off]\n"
+                     "                            [--ibpb-on-vm-exit on|off]\n"
+                     "                            [--ibpb-on-vm-entry on|off]\n"
                      "                            [--cpu-profile \"host|Intel 80[86|286|386]\"]\n"
                      "                            [--cpuid-portability-level <0..3>\n"
Index: /trunk/src/VBox/Frontends/VBoxManage/VBoxManageModifyVM.cpp
===================================================================
--- /trunk/src/VBox/Frontends/VBoxManage/VBoxManageModifyVM.cpp	(revision 70605)
+++ /trunk/src/VBox/Frontends/VBoxManage/VBoxManageModifyVM.cpp	(revision 70606)
@@ -75,4 +75,6 @@
     MODIFYVM_VTXVPID,
     MODIFYVM_VTXUX,
+    MODIFYVM_IBPB_ON_VM_EXIT,
+    MODIFYVM_IBPB_ON_VM_ENTRY,
     MODIFYVM_CPUS,
     MODIFYVM_CPUHOTPLUG,
@@ -255,4 +257,6 @@
     { "--vtxvpid",                  MODIFYVM_VTXVPID,                   RTGETOPT_REQ_BOOL_ONOFF },
     { "--vtxux",                    MODIFYVM_VTXUX,                     RTGETOPT_REQ_BOOL_ONOFF },
+    { "--ibpb-on-vm-exit",          MODIFYVM_IBPB_ON_VM_EXIT,           RTGETOPT_REQ_BOOL_ONOFF },
+    { "--ibpb-on-vm-entry",         MODIFYVM_IBPB_ON_VM_ENTRY,          RTGETOPT_REQ_BOOL_ONOFF },
     { "--cpuid-set",                MODIFYVM_SETCPUID,                  RTGETOPT_REQ_UINT32_OPTIONAL_PAIR | RTGETOPT_FLAG_HEX },
     { "--cpuid-remove",             MODIFYVM_DELCPUID,                  RTGETOPT_REQ_UINT32_OPTIONAL_PAIR | RTGETOPT_FLAG_HEX },
@@ -794,4 +798,12 @@
             }
 
+            case MODIFYVM_IBPB_ON_VM_EXIT:
+                CHECK_ERROR(sessionMachine, SetCPUProperty(CPUPropertyType_IBPBOnVMExit, ValueUnion.f));
+                break;
+
+            case MODIFYVM_IBPB_ON_VM_ENTRY:
+                CHECK_ERROR(sessionMachine, SetCPUProperty(CPUPropertyType_IBPBOnVMEntry, ValueUnion.f));
+                break;
+
             case MODIFYVM_CPUS:
             {
Index: /trunk/src/VBox/Main/idl/VirtualBox.xidl
===================================================================
--- /trunk/src/VBox/Main/idl/VirtualBox.xidl	(revision 70605)
+++ /trunk/src/VBox/Main/idl/VirtualBox.xidl	(revision 70606)
@@ -1002,4 +1002,20 @@
         Since this feature implies that the APIC feature is present, it
         automatically enables the APIC feature when set.
+      </desc>
+    </const>
+    <const name="IBPBOnVMExit"          value="6">
+      <desc>
+        If set, force an indirect branch prediction barrier on VM exits if the
+        host CPU supports it.  This setting will significantly slow down workloads
        causing many VM exits, so it is only recommended for situations where
        there is a real need to be paranoid.
+      </desc>
+    </const>
+    <const name="IBPBOnVMEntry"         value="7">
+      <desc>
+        If set, force an indirect branch prediction barrier on VM entry if the
+        host CPU supports it.  This setting will significantly slow down workloads
        causing many VM exits, so it is only recommended for situations where
        there is a real need to be paranoid.
       </desc>
     </const>
Index: /trunk/src/VBox/Main/include/MachineImpl.h
===================================================================
--- /trunk/src/VBox/Main/include/MachineImpl.h	(revision 70605)
+++ /trunk/src/VBox/Main/include/MachineImpl.h	(revision 70606)
@@ -288,4 +288,6 @@
         BOOL                mAPIC;
         BOOL                mX2APIC;
+        BOOL                mIBPBOnVMExit;
+        BOOL                mIBPBOnVMEntry;
         ULONG               mCPUCount;
         BOOL                mCPUHotPlugEnabled;
Index: /trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp
===================================================================
--- /trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp	(revision 70605)
+++ /trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp	(revision 70606)
@@ -1158,4 +1158,13 @@
         hrc = pMachine->GetHWVirtExProperty(HWVirtExPropertyType_UnrestrictedExecution, &fEnableUX); H();
         InsertConfigInteger(pHM, "EnableUX", fEnableUX);
+
+        /* Indirect branch prediction barriers. */
+        BOOL fIBPBOnVMExit = false;
+        hrc = pMachine->GetCPUProperty(CPUPropertyType_IBPBOnVMExit, &fIBPBOnVMExit); H();
+        InsertConfigInteger(pHM, "IBPBOnVMExit", fIBPBOnVMExit);
+
+        BOOL fIBPBOnVMEntry = false;
+        hrc = pMachine->GetCPUProperty(CPUPropertyType_IBPBOnVMEntry, &fIBPBOnVMEntry); H();
+        InsertConfigInteger(pHM, "IBPBOnVMEntry", fIBPBOnVMEntry);
 
         /* Reset overwrite. */
Index: /trunk/src/VBox/Main/src-server/MachineImpl.cpp
===================================================================
--- /trunk/src/VBox/Main/src-server/MachineImpl.cpp	(revision 70605)
+++ /trunk/src/VBox/Main/src-server/MachineImpl.cpp	(revision 70606)
@@ -196,4 +196,6 @@
     mAPIC = true;
     mX2APIC = false;
+    mIBPBOnVMExit = false;
+    mIBPBOnVMEntry = false;
     mHPETEnabled = false;
     mCpuExecutionCap = 100; /* Maximum CPU execution cap by default. */
@@ -2256,4 +2258,12 @@
             break;
 
+        case CPUPropertyType_IBPBOnVMExit:
+            *aValue = mHWData->mIBPBOnVMExit;
+            break;
+
+        case CPUPropertyType_IBPBOnVMEntry:
+            *aValue = mHWData->mIBPBOnVMEntry;
+            break;
+
         default:
             return E_INVALIDARG;
@@ -2303,4 +2313,16 @@
             if (aValue)
                 mHWData->mAPIC = !!aValue;
+            break;
+
+        case CPUPropertyType_IBPBOnVMExit:
+            i_setModified(IsModified_MachineData);
+            mHWData.backup();
+            mHWData->mIBPBOnVMExit = !!aValue;
+            break;
+
+        case CPUPropertyType_IBPBOnVMEntry:
+            i_setModified(IsModified_MachineData);
+            mHWData.backup();
+            mHWData->mIBPBOnVMEntry = !!aValue;
             break;
 
@@ -8988,4 +9010,6 @@
         mHWData->mAPIC                        = data.fAPIC;
         mHWData->mX2APIC                      = data.fX2APIC;
+        mHWData->mIBPBOnVMExit                = data.fIBPBOnVMExit;
+        mHWData->mIBPBOnVMEntry               = data.fIBPBOnVMEntry;
         mHWData->mCPUCount                    = data.cCPUs;
         mHWData->mCPUHotPlugEnabled           = data.fCpuHotPlug;
@@ -10311,4 +10335,6 @@
         data.fAPIC                  = !!mHWData->mAPIC;
         data.fX2APIC                = !!mHWData->mX2APIC;
+        data.fIBPBOnVMExit          = !!mHWData->mIBPBOnVMExit;
+        data.fIBPBOnVMEntry         = !!mHWData->mIBPBOnVMEntry;
         data.cCPUs                  = mHWData->mCPUCount;
         data.fCpuHotPlug            = !!mHWData->mCPUHotPlugEnabled;
Index: /trunk/src/VBox/Main/xml/Settings.cpp
===================================================================
--- /trunk/src/VBox/Main/xml/Settings.cpp	(revision 70605)
+++ /trunk/src/VBox/Main/xml/Settings.cpp	(revision 70606)
@@ -2778,4 +2778,6 @@
     fAPIC(true),
     fX2APIC(false),
+    fIBPBOnVMExit(false),
+    fIBPBOnVMEntry(false),
     enmLongMode(HC_ARCH_BITS == 64 ? Hardware::LongMode_Enabled : Hardware::LongMode_Disabled),
     cCPUs(1),
@@ -2931,4 +2933,6 @@
             && fAPIC                     == h.fAPIC
             && fX2APIC                   == h.fX2APIC
+            && fIBPBOnVMExit             == h.fIBPBOnVMExit
+            && fIBPBOnVMEntry            == h.fIBPBOnVMEntry
             && cCPUs                     == h.cCPUs
             && fCpuHotPlug               == h.fCpuHotPlug
@@ -3933,4 +3937,10 @@
             if (hw.fX2APIC)
                 hw.fAPIC = true;
+            pelmCPUChild = pelmHwChild->findChildElement("IBPBOn");
+            if (pelmCPUChild)
+            {
+                pelmCPUChild->getAttributeValue("vmexit", hw.fIBPBOnVMExit);
+                pelmCPUChild->getAttributeValue("vmentry", hw.fIBPBOnVMEntry);
+            }
 
             if ((pelmCPUChild = pelmHwChild->findChildElement("CpuIdTree")))
@@ -5259,4 +5269,12 @@
     if (m->sv >= SettingsVersion_v1_16)
     {
+        if (hw.fIBPBOnVMEntry || hw.fIBPBOnVMExit)
+        {
+            xml::ElementNode *pelmChild = pelmCPU->createChild("IBPBOn");
+            if (hw.fIBPBOnVMExit)
+                pelmChild->setAttribute("vmexit", hw.fIBPBOnVMExit);
+            if (hw.fIBPBOnVMEntry)
+                pelmChild->setAttribute("vmentry", hw.fIBPBOnVMEntry);
+        }
     }
     if (m->sv >= SettingsVersion_v1_14 && hw.enmLongMode != Hardware::LongMode_Legacy)
@@ -6930,5 +6948,7 @@
             || hardwareMachine.biosSettings.apicMode != APICMode_APIC
             || !hardwareMachine.fAPIC
-            || hardwareMachine.fX2APIC)
+            || hardwareMachine.fX2APIC
+            || hardwareMachine.fIBPBOnVMExit
+            || hardwareMachine.fIBPBOnVMEntry)
         {
             m->sv = SettingsVersion_v1_16;
Index: /trunk/src/VBox/VMM/VMMR0/HMR0A.asm
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMR0A.asm	(revision 70605)
+++ /trunk/src/VBox/VMM/VMMR0/HMR0A.asm	(revision 70606)
@@ -49,4 +49,18 @@
 ; Use define because I'm too lazy to convert the struct.
 %define XMM_OFF_IN_X86FXSTATE   160
+
+;; Spectre filler for 32-bit mode.
+; Some user space address that points to a 4MB page boundary in hope that it
+; will somehow make it less useful.
+%define SPECTRE_FILLER32        0x227fffff
+;; Spectre filler for 64-bit mode.
+; Chosen to be an invalid address (also with 5 level paging).
+%define SPECTRE_FILLER64        0x02204204207fffff
+;; Spectre filler for the current CPU mode.
+%ifdef RT_ARCH_AMD64
+ %define SPECTRE_FILLER         SPECTRE_FILLER64
+%else
+ %define SPECTRE_FILLER         SPECTRE_FILLER32
+%endif
 
 ;;
@@ -224,4 +238,19 @@
  %define MYPOPSEGS      MYPOPSEGS32
 %endif
+
+;;
+; Creates an indirect branch prediction barrier on CPUs that need and support it.
+; @clobbers eax, edx, ecx
+; @param    1   How to address CPUMCTX.
+; @param    2   Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
+%macro INDIRECT_BRANCH_PREDICTION_BARRIER 2
+    test    byte [%1 + CPUMCTX.fWorldSwitcher], %2
+    jz      %%no_indirect_branch_barrier
+    mov     ecx, MSR_IA32_PRED_CMD
+    mov     eax, MSR_IA32_PRED_CMD_F_IBPB
+    xor     edx, edx
+    wrmsr
+%%no_indirect_branch_barrier:
+%endmacro
 
 
@@ -1185,9 +1214,15 @@
 
     mov     [ss:xDI + CPUMCTX.eax], eax
+    mov     xAX, SPECTRE_FILLER
     mov     [ss:xDI + CPUMCTX.ebx], ebx
+    mov     xBX, xAX
     mov     [ss:xDI + CPUMCTX.ecx], ecx
+    mov     xCX, xAX
     mov     [ss:xDI + CPUMCTX.edx], edx
+    mov     xDX, xAX
     mov     [ss:xDI + CPUMCTX.esi], esi
+    mov     xSI, xAX
     mov     [ss:xDI + CPUMCTX.ebp], ebp
+    mov     xBP, xAX
     mov     xAX, cr2
     mov     [ss:xDI + CPUMCTX.cr2], xAX
@@ -1199,4 +1234,7 @@
     pop     dword [ss:xDI + CPUMCTX.edi]        ; The guest edi we pushed above.
  %endif
+
+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER ss:xDI, CPUMCTX_WSF_IBPB_EXIT
 
  %ifndef VMX_SKIP_TR
@@ -1416,4 +1454,7 @@
     ; Don't mess with ESP anymore!!!
 
+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
+
     ; Load guest general purpose registers.
     mov     eax, [xSI + CPUMCTX.eax]
@@ -1490,17 +1531,31 @@
 
     mov     qword [xDI + CPUMCTX.eax], rax
+    mov     rax, SPECTRE_FILLER64
     mov     qword [xDI + CPUMCTX.ebx], rbx
+    mov     rbx, rax
     mov     qword [xDI + CPUMCTX.ecx], rcx
+    mov     rcx, rax
     mov     qword [xDI + CPUMCTX.edx], rdx
+    mov     rdx, rax
     mov     qword [xDI + CPUMCTX.esi], rsi
+    mov     rsi, rax
     mov     qword [xDI + CPUMCTX.ebp], rbp
+    mov     rbp, rax
     mov     qword [xDI + CPUMCTX.r8],  r8
+    mov     r8, rax
     mov     qword [xDI + CPUMCTX.r9],  r9
+    mov     r9, rax
     mov     qword [xDI + CPUMCTX.r10], r10
+    mov     r10, rax
     mov     qword [xDI + CPUMCTX.r11], r11
+    mov     r11, rax
     mov     qword [xDI + CPUMCTX.r12], r12
+    mov     r12, rax
     mov     qword [xDI + CPUMCTX.r13], r13
+    mov     r13, rax
     mov     qword [xDI + CPUMCTX.r14], r14
+    mov     r14, rax
     mov     qword [xDI + CPUMCTX.r15], r15
+    mov     r15, rax
     mov     rax, cr2
     mov     qword [xDI + CPUMCTX.cr2], rax
@@ -1508,4 +1563,7 @@
     pop     xAX                                 ; The guest rdi we pushed above
     mov     qword [xDI + CPUMCTX.edi], rax
+
+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER xDI, CPUMCTX_WSF_IBPB_EXIT
 
  %ifndef VMX_SKIP_TR
@@ -1704,4 +1762,7 @@
     ; Note: assumes success!
     ; Don't mess with ESP anymore!!!
+
+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
 
     ; Load guest general purpose registers.
@@ -1833,4 +1894,7 @@
     vmsave
 
+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
+
     ; Setup xAX for VMLOAD.
     mov     xAX, [xBP + xCB * 2 + RTHCPHYS_CB]      ; HCPhysVmcb (64 bits physical address; x86: take low dword only)
@@ -1870,9 +1934,18 @@
 
     mov     [ss:xAX + CPUMCTX.ebx], ebx
+    mov     xBX, SPECTRE_FILLER
     mov     [ss:xAX + CPUMCTX.ecx], ecx
+    mov     xCX, xBX
     mov     [ss:xAX + CPUMCTX.edx], edx
+    mov     xDX, xBX
     mov     [ss:xAX + CPUMCTX.esi], esi
+    mov     xSI, xBX
     mov     [ss:xAX + CPUMCTX.edi], edi
+    mov     xDI, xBX
     mov     [ss:xAX + CPUMCTX.ebp], ebp
+    mov     xBP, xBX
+
+    ; Fight spectre.  Note! Trashes xAX!
+    INDIRECT_BRANCH_PREDICTION_BARRIER ss:xAX, CPUMCTX_WSF_IBPB_EXIT
 
     ; Restore the host xcr0 if necessary.
@@ -1978,4 +2051,7 @@
     vmsave
 
+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
+
     ; Setup rax for VMLOAD.
     mov     rax, [rbp + xCB * 2 + RTHCPHYS_CB]      ; HCPhysVmcb (64 bits physical address; take low dword only)
@@ -2022,17 +2098,34 @@
 
     mov     qword [rax + CPUMCTX.ebx], rbx
+    mov     rbx, SPECTRE_FILLER64
     mov     qword [rax + CPUMCTX.ecx], rcx
+    mov     rcx, rbx
     mov     qword [rax + CPUMCTX.edx], rdx
+    mov     rdx, rbx
     mov     qword [rax + CPUMCTX.esi], rsi
+    mov     rsi, rbx
     mov     qword [rax + CPUMCTX.edi], rdi
+    mov     rdi, rbx
     mov     qword [rax + CPUMCTX.ebp], rbp
+    mov     rbp, rbx
     mov     qword [rax + CPUMCTX.r8],  r8
+    mov     r8, rbx
     mov     qword [rax + CPUMCTX.r9],  r9
+    mov     r9, rbx
     mov     qword [rax + CPUMCTX.r10], r10
+    mov     r10, rbx
     mov     qword [rax + CPUMCTX.r11], r11
+    mov     r11, rbx
     mov     qword [rax + CPUMCTX.r12], r12
+    mov     r12, rbx
     mov     qword [rax + CPUMCTX.r13], r13
+    mov     r13, rbx
     mov     qword [rax + CPUMCTX.r14], r14
+    mov     r14, rbx
     mov     qword [rax + CPUMCTX.r15], r15
+    mov     r15, rbx
+
+    ; Fight spectre.  Note! Trashes rax!
+    INDIRECT_BRANCH_PREDICTION_BARRIER rax, CPUMCTX_WSF_IBPB_EXIT
 
     ; Restore the host xcr0 if necessary.
Index: /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 70605)
+++ /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 70606)
@@ -1702,7 +1702,7 @@
                                                                   pFeatures->uStepping);
 
-        PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000008);
-        if (pLeaf)
-            pFeatures->cMaxPhysAddrWidth = pLeaf->uEax & 0xff;
+        PCCPUMCPUIDLEAF const pExtLeaf8 = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000008);
+        if (pExtLeaf8)
+            pFeatures->cMaxPhysAddrWidth = pExtLeaf8->uEax & 0xff;
         else if (pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_PSE36)
             pFeatures->cMaxPhysAddrWidth = 36;
@@ -1743,4 +1743,9 @@
             pFeatures->fAvx512Foundation    = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX512F);
             pFeatures->fClFlushOpt          = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT);
+
+            pFeatures->fIbpb                = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB);
+            pFeatures->fIbrs                = pFeatures->fIbpb;
+            pFeatures->fStibp               = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_STIBP);
+            pFeatures->fArchCap             = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP);
         }
 
@@ -1782,4 +1787,5 @@
             pFeatures->fMmx            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MMX);
             pFeatures->fTsc            |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_TSC);
+            pFeatures->fIbpb           |= pExtLeaf8 && (pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);
             pFeatures->fAmdMmxExts      = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX);
             pFeatures->fXop             = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP);
@@ -2255,5 +2261,5 @@
 
     /*
-     * Configure XSAVE offsets according to the CPUID info.
+     * Configure XSAVE offsets according to the CPUID info and set the feature flags.
      */
     memset(&pVM->aCpus[0].cpum.s.Guest.aoffXState[0], 0xff, sizeof(pVM->aCpus[0].cpum.s.Guest.aoffXState));
@@ -3125,5 +3131,5 @@
                                //| X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1 - we do not do vector functions yet.
                                ;
-                pCurLeaf->uEdx &= 0;
+                pCurLeaf->uEdx &= 0; /** @todo X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB, X86_CPUID_STEXT_FEATURE_EDX_STIBP and X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP */
 
                 if (pCpum->u8PortableCpuIdLevel > 0)
@@ -3508,5 +3514,5 @@
     {
         pCurLeaf->uEax &= UINT32_C(0x0000ffff); /* Virtual & physical address sizes only. */
-        pCurLeaf->uEbx  = 0;  /* reserved */
+        pCurLeaf->uEbx  = 0;  /* reserved - [12] == IBPB */
         pCurLeaf->uEdx  = 0;  /* reserved */
 
@@ -5983,6 +5989,19 @@
 {
     DBGFREGSUBFIELD_RO("PREFETCHWT1\0" "PREFETCHWT1 instruction",                        0, 1, 0),
+    DBGFREGSUBFIELD_RO("UMIP\0"         "User mode instruction prevention",              2, 1, 0),
     DBGFREGSUBFIELD_RO("PKU\0"          "Protection Key for Usermode pages",             3, 1, 0),
-    DBGFREGSUBFIELD_RO("OSPKU\0"        "CR4.PKU mirror",                                4, 1, 0),
+    DBGFREGSUBFIELD_RO("OSPKE\0"        "CR4.PKU mirror",                                4, 1, 0),
+    DBGFREGSUBFIELD_RO("MAWAU\0"        "Value used by BNDLDX & BNDSTX",                17, 5, 0),
+    DBGFREGSUBFIELD_RO("RDPID\0"        "Read processor ID support",                    22, 1, 0),
+    DBGFREGSUBFIELD_RO("SGX_LC\0"       "Supports SGX Launch Configuration",            30, 1, 0),
+    DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** CPUID(7,0).EDX field descriptions.   */
+static DBGFREGSUBFIELD const g_aLeaf7Sub0EdxSubFields[] =
+{
+    DBGFREGSUBFIELD_RO("IBRS_IBPB\0"    "IA32_SPEC_CTRL.IBRS and IA32_PRED_CMD.IBPB",   26, 1, 0),
+    DBGFREGSUBFIELD_RO("STIBP\0"        "Supports IA32_SPEC_CTRL.STIBP",                27, 1, 0),
+    DBGFREGSUBFIELD_RO("ARCHCAP\0"      "Supports IA32_ARCH_CAP",                       29, 1, 0),
     DBGFREGSUBFIELD_TERMINATOR()
 };
@@ -6073,4 +6092,14 @@
 };
 
+/** CPUID(0x80000008,0).EBX field descriptions.   */
+static DBGFREGSUBFIELD const g_aExtLeaf8EbxSubFields[] =
+{
+    DBGFREGSUBFIELD_RO("CLZERO\0"       "Clear zero instruction (cacheline)",            0, 1, 0),
+    DBGFREGSUBFIELD_RO("IRPerf\0"       "Instructions retired count support",            1, 1, 0),
+    DBGFREGSUBFIELD_RO("XSaveErPtr\0"   "Save/restore error pointers (FXSAVE/FXRSTOR*)", 2, 1, 0),
+    DBGFREGSUBFIELD_RO("IBPB\0"         "Supports the IBPB command in IA32_PRED_CMD",   12, 1, 0),
+    DBGFREGSUBFIELD_TERMINATOR()
+};
+
 
 static void cpumR3CpuIdInfoMnemonicListU32(PCDBGFINFOHLP pHlp, uint32_t uVal, PCDBGFREGSUBFIELD pDesc,
@@ -6275,5 +6304,5 @@
                     cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, 56);
                     if (pCurLeaf->uEdx || Host.uEdx)
-                        pHlp->pfnPrintf(pHlp, "%36s %#x (%#x)\n", "Ext Features EDX:", pCurLeaf->uEdx, Host.uEdx);
+                        cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, 56);
                 }
                 else
@@ -6282,5 +6311,5 @@
                     cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf7Sub0EcxSubFields, "Ext Features ECX:", 36);
                     if (pCurLeaf->uEdx)
-                        pHlp->pfnPrintf(pHlp, "%36s %#x\n", "Ext Features EDX:", pCurLeaf->uEdx);
+                        cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf7Sub0EdxSubFields, "Ext Features EDX:", 36);
                 }
                 break;
@@ -6770,19 +6799,31 @@
         }
 
-        if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000008), 0)) != NULL)
-        {
-            uint32_t uEAX = pCurLeaf->uEax;
-            uint32_t uECX = pCurLeaf->uEcx;
-
-            pHlp->pfnPrintf(pHlp,
-                            "Physical Address Width:          %d bits\n"
-                            "Virtual Address Width:           %d bits\n"
-                            "Guest Physical Address Width:    %d bits\n",
-                            (uEAX >> 0) & 0xff,
-                            (uEAX >> 8) & 0xff,
-                            (uEAX >> 16) & 0xff);
-            pHlp->pfnPrintf(pHlp,
-                            "Physical Core Count:             %d\n",
-                            ((uECX >> 0) & 0xff) + 1);
+        pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000008), 0);
+        if (pCurLeaf != NULL)
+        {
+            if (pCurLeaf->uEbx || (Host.uEbx && iVerbosity))
+            {
+                if (iVerbosity < 1)
+                    cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aExtLeaf8EbxSubFields, "Ext Features ext IDs EBX:", 34);
+                else
+                    cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields, 56);
+            }
+
+            if (iVerbosity)
+            {
+                uint32_t uEAX = pCurLeaf->uEax;
+                uint32_t uECX = pCurLeaf->uEcx;
+
+                pHlp->pfnPrintf(pHlp,
+                                "Physical Address Width:          %d bits\n"
+                                "Virtual Address Width:           %d bits\n"
+                                "Guest Physical Address Width:    %d bits\n",
+                                (uEAX >> 0) & 0xff,
+                                (uEAX >> 8) & 0xff,
+                                (uEAX >> 16) & 0xff);
+                pHlp->pfnPrintf(pHlp,
+                                "Physical Core Count:             %d\n",
+                                ((uECX >> 0) & 0xff) + 1);
+            }
         }
 
Index: /trunk/src/VBox/VMM/VMMR3/HM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 70605)
+++ /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 70606)
@@ -453,4 +453,6 @@
                               "|EnableLargePages"
                               "|EnableVPID"
+                              "|IBPBOnVMExit"
+                              "|IBPBOnVMEntry"
                               "|TPRPatchingEnabled"
                               "|64bitEnabled"
@@ -611,4 +613,14 @@
      * available. */
     rc = CFGMR3QueryBoolDef(pCfgHm, "UseVmxPreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimer, true);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/HM/IBPBOnVMExit, bool}
+     * Costly paranoia setting. */
+    rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMExit", &pVM->hm.s.fIbpbOnVmExit, false);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/HM/IBPBOnVMEntry, bool}
+     * Costly paranoia setting. */
+    rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMEntry", &pVM->hm.s.fIbpbOnVmEntry, false);
     AssertLogRelRCReturn(rc, rc);
 
@@ -1163,4 +1175,26 @@
         Assert(!pVM->hm.s.fTprPatchingAllowed); /* paranoia */
         pVM->hm.s.fTprPatchingAllowed = false;
+    }
+
+    /*
+     * Sync options.
+     */
+    /** @todo Move this out of CPUMCTX and into some ring-0 only HM structure.
+     *        That will require a little bit of work, of course. */
+    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+    {
+        PVMCPU   pVCpu   = &pVM->aCpus[iCpu];
+        PCPUMCTX pCpuCtx = CPUMQueryGuestCtxPtr(pVCpu);
+        pCpuCtx->fWorldSwitcher &= ~(CPUMCTX_WSF_IBPB_EXIT | CPUMCTX_WSF_IBPB_ENTRY);
+        if (pVM->cpum.ro.HostFeatures.fIbpb)
+        {
+            if (pVM->hm.s.fIbpbOnVmExit)
+                pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_IBPB_EXIT;
+            if (pVM->hm.s.fIbpbOnVmEntry)
+                pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_IBPB_ENTRY;
+        }
+        if (iCpu == 0)
+            LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%d fIbpbOnVmEntry=%d)\n",
+                    pCpuCtx->fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry));
     }
 
Index: /trunk/src/VBox/VMM/include/CPUMInternal.mac
===================================================================
--- /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 70605)
+++ /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 70606)
@@ -119,5 +119,5 @@
     ;
     ; Guest context state
-    ; (Identical to the .Hyper chunk below.)
+    ; (Identical to the .Hyper chunk below and to CPUMCTX in cpum.mac.)
     ;
     .Guest                    resq    0
@@ -220,15 +220,15 @@
     .Guest.msrKERNELGSBASE    resb    8
     .Guest.uMsrPadding0       resb    8
+    alignb 8
     .Guest.aXcr               resq    2
     .Guest.fXStateMask        resq    1
     .Guest.pXStateR0      RTR0PTR_RES 1
+    alignb 8
     .Guest.pXStateR3      RTR3PTR_RES 1
+    alignb 8
     .Guest.pXStateRC      RTRCPTR_RES 1
     .Guest.aoffXState         resw    64
-%if HC_ARCH_BITS == 64
-    .Guest.abPadding          resb    4
-%else
-    .Guest.abPadding          resb    12
-%endif
+    .Guest.fWorldSwitcher     resd    1
+    alignb 8
     .Guest.hwvirt.svm.uMsrHSavePa            resq         1
     .Guest.hwvirt.svm.GCPhysVmcb             resq         1
@@ -506,15 +506,15 @@
     .Hyper.msrKERNELGSBASE    resb    8
     .Hyper.uMsrPadding0       resb    8
+    alignb 8
     .Hyper.aXcr               resq    2
     .Hyper.fXStateMask        resq    1
     .Hyper.pXStateR0      RTR0PTR_RES 1
+    alignb 8
     .Hyper.pXStateR3      RTR3PTR_RES 1
+    alignb 8
     .Hyper.pXStateRC      RTRCPTR_RES 1
     .Hyper.aoffXState         resw    64
-%if HC_ARCH_BITS == 64
-    .Hyper.abPadding          resb    4
-%else
-    .Hyper.abPadding          resb    12
-%endif
+    .Hyper.fWorldSwitcher     resd    1
+    alignb 8
     .Hyper.hwvirt.svm.uMsrHSavePa            resq         1
     .Hyper.hwvirt.svm.GCPhysVmcb             resq         1
Index: /trunk/src/VBox/VMM/include/HMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/HMInternal.h	(revision 70605)
+++ /trunk/src/VBox/VMM/include/HMInternal.h	(revision 70606)
@@ -417,9 +417,10 @@
     /** Set if posted interrupt processing is enabled. */
     bool                        fPostedIntrs;
-    /** Alignment. */
-    bool                        fAlignment0;
-
-    /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
-    uint32_t                    fHostKernelFeatures;
+    /** Set if indirect branch prediction barrier on VM exit. */
+    bool                        fIbpbOnVmExit;
+    /** Set if indirect branch prediction barrier on VM entry. */
+    bool                        fIbpbOnVmEntry;
+    /** Explicit padding. */
+    bool                        afPadding[3];
 
     /** Maximum ASID allowed. */
@@ -429,11 +430,13 @@
     uint32_t                    cMaxResumeLoops;
 
+    /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
+    uint32_t                    fHostKernelFeatures;
+
+    /** Size of the guest patch memory block. */
+    uint32_t                    cbGuestPatchMem;
     /** Guest allocated memory for patching purposes. */
     RTGCPTR                     pGuestPatchMem;
     /** Current free pointer inside the patch block. */
     RTGCPTR                     pFreeGuestPatchMem;
-    /** Size of the guest patch memory block. */
-    uint32_t                    cbGuestPatchMem;
-    uint32_t                    u32Alignment0;
 
 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
