VirtualBox

Changeset 19769 in vbox


Timestamp: May 18, 2009 9:55:58 AM
Author: vboxsync
Message:

Deal with stale TLB entries for harmless page entry updates.
Wider locking.

Location: trunk/src/VBox/VMM
Files: 3 edited
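
For orientation before the diffs: the change splits the dirty-bit write-fault path into two cases. If the shadow entry still carries the TRACK_DIRTY flag, this is a genuine first write, so the entry is made writable and the flag cleared; if the flag is already gone, the fault can only have come from a stale TLB entry on the current CPU (for example after another VCPU already fixed the entry), so it is merely counted and the local TLB entry invalidated. The snippet below is a minimal standalone C sketch of that decision; the types, flag values, and function names are invented for illustration, and only the control flow mirrors the new code in PGMAllBth.h.

```c
/* Standalone model of the dirty-bit write-fault path after this change.
 * shadow_pde_t, SHW_PDE_*, handle_dirty_write_fault() etc. are made-up
 * stand-ins, not VirtualBox types or APIs. */
#include <stdint.h>
#include <stdio.h>

#define SHW_PDE_P            0x001u  /* present */
#define SHW_PDE_RW           0x002u  /* writable */
#define SHW_PDE_A            0x020u  /* accessed */
#define SHW_PDE_TRACK_DIRTY  0x200u  /* software flag: write-protected only for dirty tracking */

typedef struct { uint32_t u; } shadow_pde_t;

static unsigned g_cDirtyPageTrap;   /* counterpart of StatRZDirtyPageTrap  */
static unsigned g_cDirtyPageStale;  /* counterpart of StatRZDirtyPageStale */

static void invalidate_local_tlb_entry(uintptr_t va)
{
    /* Stand-in for PGM_INVL_PG / PGM_INVL_BIG_PG: flush only this CPU's TLB entry. */
    printf("invlpg %#lx\n", (unsigned long)va);
}

/* Called for a write fault on a page whose guest entry is present and writable. */
static void handle_dirty_write_fault(shadow_pde_t *pShwPde, uintptr_t va)
{
    if (pShwPde->u & SHW_PDE_TRACK_DIRTY)
    {
        /* Genuine dirty-bit trap: make the shadow entry writable and stop tracking.
         * Other CPUs may still hold a read-only TLB entry; that is harmless, they
         * will simply fault again and land in the 'else' branch below. */
        g_cDirtyPageTrap++;
        pShwPde->u |= SHW_PDE_RW | SHW_PDE_A;
        pShwPde->u &= ~SHW_PDE_TRACK_DIRTY;
    }
    else
    {
        /* The entry was already fixed up; the fault came from a stale TLB
         * entry on this CPU, so only the invalidation below is needed. */
        g_cDirtyPageStale++;
    }
    invalidate_local_tlb_entry(va);
}

int main(void)
{
    shadow_pde_t Pde = { SHW_PDE_P | SHW_PDE_A | SHW_PDE_TRACK_DIRTY };
    handle_dirty_write_fault(&Pde, 0x401000);  /* first write: real dirty trap  */
    handle_dirty_write_fault(&Pde, 0x401000);  /* second fault: stale TLB entry */
    printf("traps=%u stale=%u\n", g_cDirtyPageTrap, g_cDirtyPageStale);
    return 0;
}
```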

  • trunk/src/VBox/VMM/PGM.cpp

    r19735 → r19769

    @@ -1791,4 +1791,5 @@
         PGM_REG_COUNTER(&pPGM->StatRZDirtyPageSkipped,            "/PGM/CPU%d/RZ/DirtyPage/Skipped",          "The number of pages already dirty or readonly.");
         PGM_REG_COUNTER(&pPGM->StatRZDirtyPageTrap,               "/PGM/CPU%d/RZ/DirtyPage/Trap",             "The number of traps generated for dirty bit tracking.");
    +    PGM_REG_COUNTER(&pPGM->StatRZDirtyPageStale,              "/PGM/CPU%d/RZ/DirtyPage/Stale",            "The number of traps generated for dirty bit tracking (stale tlb entries).");
         PGM_REG_COUNTER(&pPGM->StatRZDirtiedPage,                 "/PGM/CPU%d/RZ/DirtyPage/SetDirty",         "The number of pages marked dirty because of write accesses.");
         PGM_REG_COUNTER(&pPGM->StatRZDirtyTrackRealPF,            "/PGM/CPU%d/RZ/DirtyPage/RealPF",           "The number of real pages faults during dirty bit tracking.");

    @@ -1835,4 +1836,5 @@
         PGM_REG_COUNTER(&pPGM->StatR3DirtyPageSkipped,            "/PGM/CPU%d/R3/DirtyPage/Skipped",          "The number of pages already dirty or readonly.");
         PGM_REG_COUNTER(&pPGM->StatR3DirtyPageTrap,               "/PGM/CPU%d/R3/DirtyPage/Trap",             "The number of traps generated for dirty bit tracking.");
    +    PGM_REG_COUNTER(&pPGM->StatR3DirtyPageStale,              "/PGM/CPU%d/R3/DirtyPage/Stale",            "The number of traps generated for dirty bit tracking (stale tlb entries).");
         PGM_REG_COUNTER(&pPGM->StatR3DirtiedPage,                 "/PGM/CPU%d/R3/DirtyPage/SetDirty",         "The number of pages marked dirty because of write accesses.");
         PGM_REG_COUNTER(&pPGM->StatR3DirtyTrackRealPF,            "/PGM/CPU%d/R3/DirtyPage/RealPF",           "The number of real pages faults during dirty bit tracking.");
  • trunk/src/VBox/VMM/PGMInternal.h

    r19731 → r19769

    @@ -2784,4 +2784,5 @@
         STAMCOUNTER StatRZDirtyPageSkipped;             /**< RC/R0: The number of pages already dirty or readonly. */
         STAMCOUNTER StatRZDirtyPageTrap;                /**< RC/R0: The number of traps generated for dirty bit tracking. */
    +    STAMCOUNTER StatRZDirtyPageStale;               /**< RC/R0: The number of traps generated for dirty bit tracking. (stale tlb entries) */
         STAMCOUNTER StatRZDirtyTrackRealPF;             /**< RC/R0: The number of real pages faults during dirty bit tracking. */
         STAMCOUNTER StatRZDirtiedPage;                  /**< RC/R0: The number of pages marked dirty because of write accesses. */

    @@ -2828,4 +2829,5 @@
         STAMCOUNTER StatR3DirtyPageSkipped;             /**< R3: The number of pages already dirty or readonly. */
         STAMCOUNTER StatR3DirtyPageTrap;                /**< R3: The number of traps generated for dirty bit tracking. */
    +    STAMCOUNTER StatR3DirtyPageStale;               /**< R3: The number of traps generated for dirty bit tracking (stale TLB entries). */
         STAMCOUNTER StatR3DirtyTrackRealPF;             /**< R3: The number of real pages faults during dirty bit tracking. */
         STAMCOUNTER StatR3DirtiedPage;                  /**< R3: The number of pages marked dirty because of write accesses. */
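
The two hunks above are the bookkeeping half of the change: a per-CPU counter field is declared in PGMInternal.h and bound to a hierarchical name such as "/PGM/CPU%d/RZ/DirtyPage/Stale" in PGM.cpp, so the stale-TLB case can be measured separately from real dirty-bit traps. The following is a toy, self-contained C model of that plumbing; the STATCOUNTER type, stat_register() and the field layout are simplified stand-ins, not the real STAM API.

```c
/* Toy model of the counter plumbing touched by this changeset: declare a
 * per-CPU counter, register it under a printf-style name, bump it in the
 * fault handler. All names here are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>

typedef struct
{
    uint64_t    c;            /* the actual count */
    char        szName[64];   /* e.g. "/PGM/CPU0/RZ/DirtyPage/Stale" */
    const char *pszDesc;
} STATCOUNTER;

typedef struct
{
    STATCOUNTER StatDirtyPageTrap;   /* dirty-bit traps that really set the dirty bit */
    STATCOUNTER StatDirtyPageStale;  /* dirty-bit traps caused only by a stale TLB entry */
} PERCPUSTATS;

static void stat_register(STATCOUNTER *pCounter, unsigned iCpu, const char *pszNameFmt, const char *pszDesc)
{
    /* Expand the "%d" in the name with the CPU index, like the real registration does. */
    snprintf(pCounter->szName, sizeof(pCounter->szName), pszNameFmt, iCpu);
    pCounter->pszDesc = pszDesc;
    pCounter->c       = 0;
}

int main(void)
{
    PERCPUSTATS Stats;
    stat_register(&Stats.StatDirtyPageStale, 0, "/PGM/CPU%d/RZ/DirtyPage/Stale",
                  "Dirty-bit traps caused by stale TLB entries.");
    Stats.StatDirtyPageStale.c++;   /* what an increment macro boils down to */
    printf("%s = %llu\n", Stats.StatDirtyPageStale.szName,
           (unsigned long long)Stats.StatDirtyPageStale.c);
    return 0;
}
```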
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r19765 → r19769

    @@ -150 +150 @@
     #  endif /* !PGM_WITH_PAGING */

    -
    -#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
    -    const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
    -    PX86PD          pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
    -
    -#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
    -    const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
    -
    -    PX86PDPAE       pPDDst;
    -#    if PGM_GST_TYPE != PGM_TYPE_PAE
    -    X86PDPE         PdpeSrc;
    -
    -    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
    -    PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
    -#    endif
    -    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
    -    if (rc != VINF_SUCCESS)
    -    {
    -        AssertRC(rc);
    -        return rc;
    -    }
    -    Assert(pPDDst);
    -
    -#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    -    const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
    -    PX86PDPAE       pPDDst;
    -#   if PGM_GST_TYPE == PGM_TYPE_PROT
    -    /* AMD-V nested paging */
    -    X86PML4E        Pml4eSrc;
    -    X86PDPE         PdpeSrc;
    -    PX86PML4E       pPml4eSrc = &Pml4eSrc;
    -
    -    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
    -    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
    -    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
    -#   endif
    -
    -    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
    -    if (rc != VINF_SUCCESS)
    -    {
    -        AssertRC(rc);
    -        return rc;
    -    }
    -    Assert(pPDDst);
    -
    -#  elif PGM_SHW_TYPE == PGM_TYPE_EPT
    -    const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
    -    PEPTPD          pPDDst;
    -
    -    rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
    -    if (rc != VINF_SUCCESS)
    -    {
    -        AssertRC(rc);
    -        return rc;
    -    }
    -    Assert(pPDDst);
    -#  endif
    -
    -#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    -    /*
    -     * If we successfully correct the write protection fault due to dirty bit
    -     * tracking, or this page fault is a genuine one, then return immediately.
    -     */
    -    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
    -    rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
    -    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
    -    if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
    -        ||  rc == VINF_EM_RAW_GUEST_TRAP)
    -    {
    -        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
    -                     = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
    -        LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
    -        return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
    -    }
    -
    -    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0ePD[iPDSrc]);
    -#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
    -
    -    /*
    -     * A common case is the not-present error caused by lazy page table syncing.
    -     *
    -     * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
    -     * so we can safely assume that the shadow PT is present when calling SyncPage later.
    -     *
    -     * On failure, we ASSUME that SyncPT is out of memory or detected some kind
    -     * of mapping conflict and defer to SyncCR3 in R3.
    -     * (Again, we do NOT support access handlers for non-present guest pages.)
    -     *
    -     */
    +    /* Fetch the guest PDE */
     #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
         GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
     
    @@ -249 +161 @@
         PdeSrc.n.u1User     = 1;
     #  endif
    -    if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
    -        &&  !pPDDst->a[iPDDst].n.u1Present
    -        &&  PdeSrc.n.u1Present
    -       )
    -
    -    {
    -        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
    -        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
    -        LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
    -        pgmLock(pVM);
    -        rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
    -        pgmUnlock(pVM);
    -        if (RT_SUCCESS(rc))
    -        {
    -            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
    +
    +    pgmLock(pVM);
    +    {   /* Force the shadow pointers to go out of scope after releasing the lock. */
    +#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
    +        const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
    +        PX86PD          pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
    +
    +#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
    +        const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
    +
    +        PX86PDPAE       pPDDst;
    +#    if PGM_GST_TYPE != PGM_TYPE_PAE
    +        X86PDPE         PdpeSrc;
    +
    +        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
    +        PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
    +#    endif
    +        rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
    +        if (rc != VINF_SUCCESS)
    +        {
    +            pgmUnlock(pVM);
    +            AssertRC(rc);
                 return rc;
             }
    -        Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
    -        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
    -        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
    -        return VINF_PGM_SYNC_CR3;
    +        Assert(pPDDst);
    +
    +#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    +        const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
    +        PX86PDPAE       pPDDst;
    +#   if PGM_GST_TYPE == PGM_TYPE_PROT
    +        /* AMD-V nested paging */
    +        X86PML4E        Pml4eSrc;
    +        X86PDPE         PdpeSrc;
    +        PX86PML4E       pPml4eSrc = &Pml4eSrc;
    +
    +        /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
    +        Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
    +        PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
    +#   endif
    +
    +        rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
    +        if (rc != VINF_SUCCESS)
    +        {
    +            pgmUnlock(pVM);
    +            AssertRC(rc);
    +            return rc;
    +        }
    +        Assert(pPDDst);
    +
    +#  elif PGM_SHW_TYPE == PGM_TYPE_EPT
    +        const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
    +        PEPTPD          pPDDst;
    +
    +        rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
    +        if (rc != VINF_SUCCESS)
    +        {
    +            pgmUnlock(pVM);
    +            AssertRC(rc);
    +            return rc;
    +        }
    +        Assert(pPDDst);
    +#  endif
    +
    +#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    +        /*
    +         * If we successfully correct the write protection fault due to dirty bit
    +         * tracking, or this page fault is a genuine one, then return immediately.
    +         */
    +        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
    +        rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
    +        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
    +        if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
    +            ||  rc == VINF_EM_RAW_GUEST_TRAP)
    +        {
    +            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
    +                        = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
    +            LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
    +            pgmUnlock(pVM);
    +            return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
    +        }
    +
    +        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0ePD[iPDSrc]);
    +#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
    +
    +        /*
    +         * A common case is the not-present error caused by lazy page table syncing.
    +         *
    +         * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
    +         * so we can safely assume that the shadow PT is present when calling SyncPage later.
    +         *
    +         * On failure, we ASSUME that SyncPT is out of memory or detected some kind
    +         * of mapping conflict and defer to SyncCR3 in R3.
    +         * (Again, we do NOT support access handlers for non-present guest pages.)
    +         *
    +         */
    +        if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
    +            &&  !pPDDst->a[iPDDst].n.u1Present
    +            &&  PdeSrc.n.u1Present
    +        )
    +        {
    +            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
    +            STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
    +            LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
    +            rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
    +            pgmUnlock(pVM);
    +            if (RT_SUCCESS(rc))
    +            {
    +                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
    +                return rc;
    +            }
    +            Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
    +            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
    +            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
    +            return VINF_PGM_SYNC_CR3;
    +        }
    +        pgmUnlock(pVM);
         }

     
    @@ -2071 +2078 @@
         PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    +    Assert(PGMIsLockOwner(pVM));
    +
         STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
         LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
     
    @@ -2156 +2165 @@
                 pPdeSrc->b.u1Dirty = 1;

    -            if (pPdeDst->n.u1Present && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
    +            if (pPdeDst->n.u1Present)
                 {
    -                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
    -
    -                Assert(pPdeSrc->b.u1Write);
    -
    -                pPdeDst->n.u1Write      = 1;
    -                pPdeDst->n.u1Accessed   = 1;
    -                pPdeDst->au32[0]       &= ~PGM_PDFLAGS_TRACK_DIRTY;
    +                if (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY)
    +                {
    +                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
    +                    Assert(pPdeSrc->b.u1Write);
    +
    +                    /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
    +                     *       fault again and take this path to only invalidate the entry.
    +                     */
    +                    pPdeDst->n.u1Write      = 1;
    +                    pPdeDst->n.u1Accessed   = 1;
    +                    pPdeDst->au32[0]       &= ~PGM_PDFLAGS_TRACK_DIRTY;
    +                }
    +                else
    +                {
    +                    /* Stale TLB entry. */
    +                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
    +                }
                     PGM_INVL_BIG_PG(GCPtrPage);
                     STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
     
    @@ -2256 +2275 @@
                         PSHWPT      pPTDst   = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
                         PSHWPTE     pPteDst  = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
    -                    if (    pPteDst->n.u1Present    /** @todo Optimize accessed bit emulation? */
    -                        &&  (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY))
    +                    if (pPteDst->n.u1Present)    /** @todo Optimize accessed bit emulation? */
                         {
    -                        LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
    +                        if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
    +                        {
    +                            LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
     #  ifdef VBOX_STRICT
    -                        PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
    -                        if (pPage)
    -                            AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
    -                                      ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
    +                            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
    +                            if (pPage)
    +                                AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
    +                                        ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
     #  endif
    -                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
    -
    -                        Assert(pPteSrc->n.u1Write);
    -
    -                        pPteDst->n.u1Write    = 1;
    -                        pPteDst->n.u1Dirty    = 1;
    -                        pPteDst->n.u1Accessed = 1;
    -                        pPteDst->au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
    +                            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
    +
    +                            Assert(pPteSrc->n.u1Write);
    +
    +                            /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
    +                             *       fault again and take this path to only invalidate the entry.
    +                             */
    +                            pPteDst->n.u1Write    = 1;
    +                            pPteDst->n.u1Dirty    = 1;
    +                            pPteDst->n.u1Accessed = 1;
    +                            pPteDst->au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
    +                        }
    +                        else
    +                        {
    +                            /* Stale TLB entry. */
    +                            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
    +                        }
                             PGM_INVL_PG(GCPtrPage);

     
    @@ -3112 +3141 @@
         int             rc = VINF_SUCCESS;

    +    pgmLock(pVM);
    +
         /*
          * First check if the shadow pd is present.
     
    @@ -3169 +3200 @@
         if (!pPdeDst->n.u1Present)
         {
    -        pgmLock(pVM);
             rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
    -        pgmUnlock(pVM);
    -        AssertRC(rc);
             if (rc != VINF_SUCCESS)
             {
     
    @@ -3179 +3207 @@
                 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
     # endif
    +            pgmUnlock(pVM);
    +            AssertRC(rc);
                 return rc;
             }
     
    @@ -3191 +3221 @@
         {
             GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
    -#else
    +# else
         {
             GSTPDE PdeSrc;
     
    @@ -3200 +3230 @@
             PdeSrc.n.u1User     = 1;

    -#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
    +# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
             Assert(rc != VINF_EM_RAW_GUEST_TRAP);
             if (uErr & X86_TRAP_PF_US)
     
    @@ -3224 +3254 @@
         PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
     # endif
    +    pgmUnlock(pVM);
         return rc;

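
The hunks above are the "wider locking" half of the message: in Trap0eHandler the shadow-directory lookup, CheckPageFault and SyncPT now all run under a single pgmLock/pgmUnlock pair, with the shadow pointers confined to a brace scope so they cannot outlive the lock, and every early-return path releases the lock first; the later function in the same file likewise takes the lock for its whole body instead of only around SyncPT. Below is a rough standalone C sketch of that shape; the pthread mutex and the helper names are stand-ins for illustration, not VirtualBox APIs.

```c
/* Minimal illustration of the wider-locking pattern: lock before fetching any
 * shadow-table pointer, keep the pointer inside a scope that ends while the
 * lock is still conceptually "owned", and unlock on every exit path. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t g_PgmLock = PTHREAD_MUTEX_INITIALIZER;  /* stand-in for the PGM lock */

typedef struct { int fPresent; } SHWPDE;

static SHWPDE g_aShadowPd[4];                 /* toy shadow page directory */

static SHWPDE *shadow_pde_ptr(unsigned iPd)   /* only valid while the lock is held */
{
    return iPd < 4 ? &g_aShadowPd[iPd] : NULL;
}

static int sync_page_table(unsigned iPd)      /* stand-in for SyncPT */
{
    g_aShadowPd[iPd].fPresent = 1;
    return 0;
}

int handle_fault(unsigned iPd)
{
    int rc = 0;
    pthread_mutex_lock(&g_PgmLock);
    {   /* the shadow pointer lives only inside this scope, under the lock */
        SHWPDE *pPde = shadow_pde_ptr(iPd);
        if (!pPde)
        {
            pthread_mutex_unlock(&g_PgmLock);  /* every exit path releases the lock */
            return -1;
        }
        if (!pPde->fPresent)
            rc = sync_page_table(iPd);
    }
    pthread_mutex_unlock(&g_PgmLock);
    return rc;
}

int main(void)
{
    printf("rc=%d present=%d\n", handle_fault(2), g_aShadowPd[2].fPresent);
    return 0;
}
```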