VirtualBox

Changeset 99557 in vbox


Timestamp:
Apr 28, 2023 12:41:21 PM
Author:
vboxsync
Message:

VMM/NEMR3Native-darwin: Clean up the memory state tracking; since macOS Ventura 13.3, hv_vm_map() is stricter and rejects a mapping request if something is already mapped at the same guest physical address (see the illustrative sketch below the legend). bugref:9044 ticketref:21596 ticketref:21563

Location:
trunk/src/VBox/VMM
Files:
2 edited

Legend:

    ' '  Unmodified
    '+'  Added
    '-'  Removed
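The behaviour change described in the message can be shown with plain Hypervisor.framework calls, independent of any VirtualBox code. The following is a minimal sketch assuming the x86 flavour of the API and a 4 KiB page size; remap_page() and its arguments are hypothetical names invented for this example. On earlier releases a second hv_vm_map() over an already mapped guest physical range was tolerated; according to the message above, from Ventura 13.3 it is rejected, so the caller has to hv_vm_unmap() first, which in turn requires knowing whether the page is currently mapped, i.e. the cleaned-up state tracking in this changeset.

    #include <Hypervisor/hv.h>   /* x86 Hypervisor.framework API */
    #include <stdbool.h>

    /* Replace the backing memory of one 4 KiB guest-physical page.  On macOS 13.3
       and later hv_vm_map() no longer maps over an existing mapping, so the page
       must be unmapped explicitly first whenever it is known to be mapped. */
    static hv_return_t remap_page(void *pvNew, hv_gpaddr_t GCPhys, bool fCurrentlyMapped)
    {
        if (fCurrentlyMapped)
        {
            hv_return_t hrc = hv_vm_unmap(GCPhys, 0x1000);
            if (hrc != HV_SUCCESS)
                return hrc;
        }
        return hv_vm_map(pvNew, GCPhys, 0x1000, HV_MEMORY_READ | HV_MEMORY_WRITE);
    }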
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp

    (r99292 → r99557)

    @@ -531 +531 @@
     DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
     {
    -    if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    +    if (*pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED)
         {
             Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
    @@ -561 +561 @@

     /**
    + * Resolves a NEM page state from the given protection flags.
    + *
    + * @returns NEM page state.
    + * @param   fPageProt           The page protection flags.
    + */
    +DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
    +{
    +    switch (fPageProt)
    +    {
    +        case NEM_PAGE_PROT_NONE:
    +            return NEM_DARWIN_PAGE_STATE_UNMAPPED;
    +        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
    +            return NEM_DARWIN_PAGE_STATE_RX;
    +        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
    +            return NEM_DARWIN_PAGE_STATE_RW;
    +        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
    +            return NEM_DARWIN_PAGE_STATE_RWX;
    +        default:
    +            break;
    +    }
    +
    +    AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
    +    return NEM_DARWIN_PAGE_STATE_UNMAPPED;
    +}
    +
    +
    +/**
      * Maps a given guest physical address range backed by the given memory with the given
      * protection flags.
    @@ -594 +621 @@
         {
             if (pu2State)
    -            *pu2State =   (fPageProt & NEM_PAGE_PROT_WRITE)
    -                        ? NEM_DARWIN_PAGE_STATE_WRITABLE
    -                        : NEM_DARWIN_PAGE_STATE_READABLE;
    +            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
             return VINF_SUCCESS;
         }
    @@ -603 +628 @@
     }

    -#if 0 /* unused */
    -DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
    +
    +/**
    + * Changes the protection flags for the given guest physical address range.
    + *
    + * @returns VBox status code.
    + * @param   pVM                 The cross context VM structure.
    + * @param   GCPhys              The guest physical address to start mapping.
    + * @param   cb                  The size of the range, page aligned.
    + * @param   fPageProt           The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
    + * @param   pu2State            Where to store the state for the new page, optional.
    + */
    +DECLINLINE(int) nemR3DarwinProtect(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
     {
         hv_memory_flags_t fHvMemProt = 0;
    @@ -619 +654 @@
         else
             hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
    +    if (hrc == HV_SUCCESS)
    +    {
    +        if (pu2State)
    +            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
    +        return VINF_SUCCESS;
    +    }

         return nemR3DarwinHvSts2Rc(hrc);
     }
    -#endif
    +

     DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
    @@ -1214 +1255 @@
         {
             case NEM_DARWIN_PAGE_STATE_UNMAPPED:
    -        case NEM_DARWIN_PAGE_STATE_NOT_SET:
             {
                 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
    @@ -1255 +1295 @@
                 return rc;
             }
    -        case NEM_DARWIN_PAGE_STATE_READABLE:
    +        case NEM_DARWIN_PAGE_STATE_RX:
                 if (   !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
                     && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
    @@ -1265 +1305 @@
                 break;

    -        case NEM_DARWIN_PAGE_STATE_WRITABLE:
    +        case NEM_DARWIN_PAGE_STATE_RW:
    +        case NEM_DARWIN_PAGE_STATE_RWX:
                 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
                 {
                     pState->fCanResume = true;
    -                if (pInfo->u2OldNemState == NEM_DARWIN_PAGE_STATE_WRITABLE)
    +                if (   pInfo->u2OldNemState == NEM_DARWIN_PAGE_STATE_RW
    +                    || pInfo->u2OldNemState == NEM_DARWIN_PAGE_STATE_RWX)
                         Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: Spurious EPT fault\n", GCPhys));
                     return VINF_SUCCESS;
    @@ -4236 +4278 @@
         {
             Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
    -        int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
    +        int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE, pu2State);
             if (RT_FAILURE(rc))
             {
    @@ -4283 +4325 @@
             if (RT_FAILURE(rc))
             {
    -            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
    -                     GCPhys, cb, fFlags, rc));
    -            rc = VERR_NEM_UNMAP_PAGES_FAILED;
    +            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
    +                    GCPhys, cb, fFlags, rc));
    +            return VERR_NEM_UNMAP_PAGES_FAILED;
             }
         }
    @@ -4331 +4373 @@
                                                          uint8_t *pu2State, uint32_t *puNemRange)
     {
    -    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
    -
    -    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    -    *pu2State   = UINT8_MAX;
    +    RT_NOREF(pvPages);
    +
    +    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
    +          GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
    +    if (fFlags & NEM_NOTIFY_PHYS_ROM_F_REPLACE)
    +    {
    +        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
    +        if (RT_FAILURE(rc))
    +        {
    +            LogRel(("NEMR3NotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
    +                    GCPhys, cb, fFlags, rc));
    +            return VERR_NEM_UNMAP_PAGES_FAILED;
    +        }
    +    }
    +
         *puNemRange = 0;
         return VINF_SUCCESS;
    @@ -4346 +4399 @@
               GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
         *pu2State = UINT8_MAX;
    -
    -#if defined(VBOX_WITH_PGM_NEM_MODE)
    -    /*
    -     * (Re-)map readonly.
    -     */
    -    AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
    -    int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
    -    if (RT_FAILURE(rc))
    -    {
    -        LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
    -                GCPhys, cb, pvPages, fFlags, rc));
    -        return VERR_NEM_MAP_PAGES_FAILED;
    -    }
    -    RT_NOREF(fFlags, puNemRange);
    +    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
         return VINF_SUCCESS;
    -#else
    -    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
    -    return VERR_NEM_MAP_PAGES_FAILED;
    -#endif
     }

    @@ -4371 +4407 @@
                                                             RTR3PTR pvMemR3, uint8_t *pu2State)
     {
    -    RT_NOREF(pVM);
    -
         Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
               GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
    -
         *pu2State = UINT8_MAX;
    -#if defined(VBOX_WITH_PGM_NEM_MODE)
    -    if (pvMemR3)
    -    {
    -        int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
    -        AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
    -                          pvMemR3, GCPhys, cb, rc));
    -    }
    -    RT_NOREF(enmKind);
    -#else
         RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
    -    AssertFailed();
    -#endif
     }

    @@ -4429 +4451 @@
                                                       PGMPAGETYPE enmType, uint8_t *pu2State)
     {
    -    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
    -          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    +    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp pvR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
    +          GCPhys, HCPhys, pvR3, fPageProt, enmType, *pu2State));
         RT_NOREF(HCPhys, pvR3, fPageProt, enmType)

    -    nemR3DarwinUnmap(pVM, GCPhys, X86_PAGE_SIZE, pu2State);
    +    uint8_t u2StateOld = *pu2State;
    +    /* Can return early if this is an unmap request and the page is not mapped. */
    +    if (   fPageProt == NEM_PAGE_PROT_NONE
    +        && u2StateOld == NEM_DARWIN_PAGE_STATE_UNMAPPED)
    +    {
    +        Assert(!pvR3);
    +        return;
    +    }
    +
    +    int rc;
    +    if (u2StateOld == NEM_DARWIN_PAGE_STATE_UNMAPPED)
    +    {
    +        AssertPtr(pvR3);
    +        rc = nemR3DarwinMap(pVM, GCPhys, pvR3, X86_PAGE_SIZE, fPageProt, pu2State);
    +    }
    +    else
    +        rc = nemR3DarwinProtect(pVM, GCPhys, X86_PAGE_SIZE, fPageProt, pu2State);
    +    AssertLogRelMsgRC(rc, ("NEMHCNotifyPhysPageProtChanged: nemR3DarwinMap/nemR3DarwinProtect(,%p,%RGp,%RGp,) u2StateOld=%u -> %Rrc\n",
    +                      pvR3, GCPhys, X86_PAGE_SIZE, u2StateOld, rc));
     }

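Taken together, the hunks above replace the old unmap-and-remap approach with a small per-page state machine: a page that is already mapped gets its protection changed in place with hv_vm_protect(), and hv_vm_map() is only used for pages that are currently unmapped. A simplified standalone sketch of that pattern follows, under the same assumptions as the earlier example (x86 Hypervisor.framework API, 4 KiB pages); page_state, prot_to_flags() and update_page_prot() are hypothetical names invented here, not VirtualBox functions.

    #include <Hypervisor/hv.h>

    /* Hypothetical two-bit page state, mirroring the NEM_DARWIN_PAGE_STATE_XXX values. */
    typedef enum { PAGE_UNMAPPED = 0, PAGE_RX = 1, PAGE_RW = 2, PAGE_RWX = 3 } page_state;

    static hv_memory_flags_t prot_to_flags(page_state enmState)
    {
        switch (enmState)
        {
            case PAGE_RX:  return HV_MEMORY_READ | HV_MEMORY_EXEC;
            case PAGE_RW:  return HV_MEMORY_READ | HV_MEMORY_WRITE;
            case PAGE_RWX: return HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
            default:       return 0;
        }
    }

    /* Apply a protection change to one guest page: map it if it is currently
       unmapped, unmap it if the new state is "no access", and otherwise adjust
       the existing mapping in place with hv_vm_protect() so that hv_vm_map()
       is never asked to map over an existing mapping. */
    static hv_return_t update_page_prot(void *pvR3, hv_gpaddr_t GCPhys,
                                        page_state *penmState, page_state enmNew)
    {
        hv_return_t hrc;
        if (enmNew == PAGE_UNMAPPED)
            hrc = *penmState == PAGE_UNMAPPED ? HV_SUCCESS : hv_vm_unmap(GCPhys, 0x1000);
        else if (*penmState == PAGE_UNMAPPED)
            hrc = hv_vm_map(pvR3, GCPhys, 0x1000, prot_to_flags(enmNew));
        else
            hrc = hv_vm_protect(GCPhys, 0x1000, prot_to_flags(enmNew));
        if (hrc == HV_SUCCESS)
            *penmState = enmNew;
        return hrc;
    }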
  • trunk/src/VBox/VMM/include/NEMInternal.h

    (r99370 → r99557)

    @@ -122 +122 @@
     /** @name Darwin: Our two-bit physical page state for PGMPAGE
      * @{ */
    -# define NEM_DARWIN_PAGE_STATE_NOT_SET     0
    -# define NEM_DARWIN_PAGE_STATE_UNMAPPED    1
    -# define NEM_DARWIN_PAGE_STATE_READABLE    2
    -# define NEM_DARWIN_PAGE_STATE_WRITABLE    3
    +# define NEM_DARWIN_PAGE_STATE_UNMAPPED    0
    +# define NEM_DARWIN_PAGE_STATE_RX          1
    +# define NEM_DARWIN_PAGE_STATE_RW          2
    +# define NEM_DARWIN_PAGE_STATE_RWX         3
     /** @} */
