VirtualBox

Changeset 71081 in vbox


Ignore:
Timestamp:
Feb 21, 2018 10:36:30 AM (7 years ago)
Author:
vboxsync
Message:

VMM,SUPDrv: More NEM/win experimentation. bugref:9044

Location:
trunk
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/pgm.h

    r71043 r71081  
    710710 *          Failure status will stop enumeration immediately and return.
    711711 * @param   pVM         The cross context VM structure.
     712 * @param   pVCpu       The cross context per virtual CPU structure.  Optional,
     713 *                      see PGMPhysNemEnumPagesByState.
    712714 * @param   GCPhys      The guest physical address (not A20 masked).
    713715 * @param   pu2NemState Pointer to variable with the NEM state.  This can be
     
    715717 * @param   pvUser      The user argument.
    716718 */
    717 typedef DECLCALLBACK(int) FNPGMPHYSNEMENUMCALLBACK(PVM pVM, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser);
     719typedef DECLCALLBACK(int) FNPGMPHYSNEMENUMCALLBACK(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser);
    718720/** Pointer to a FNPGMPHYSNEMENUMCALLBACK function. */
    719721typedef FNPGMPHYSNEMENUMCALLBACK *PFNPGMPHYSNEMENUMCALLBACK;
    720 VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVM pVM, uint8_t uMinState, PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser);
     722VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVM pVM, PVMCPU pVCpu, uint8_t uMinState,
     723                                             PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser);
    721724
    722725
  • trunk/include/VBox/vmm/vmm.h

    r71075 r71081  
    291291
    292292
     293/** @defgroup grp_vmm_api_r0    The VMM Host Context Ring 0 API
     294 * @{
     295 */
     296
     297/**
     298 * The VMMR0Entry() codes.
     299 */
     300typedef enum VMMR0OPERATION
     301{
     302    /** Run guest context. */
     303    VMMR0_DO_RAW_RUN = SUP_VMMR0_DO_RAW_RUN,
     304    /** Run guest code using the available hardware acceleration technology. */
     305    VMMR0_DO_HM_RUN = SUP_VMMR0_DO_HM_RUN,
     306    /** Official NOP that we use for profiling. */
     307    VMMR0_DO_NOP = SUP_VMMR0_DO_NOP,
     308    /** Official slow ioctl NOP that we use for profiling. */
     309    VMMR0_DO_SLOW_NOP,
     310
     311    /** Ask the GVMM to create a new VM. */
     312    VMMR0_DO_GVMM_CREATE_VM = 32,
     313    /** Ask the GVMM to destroy the VM. */
     314    VMMR0_DO_GVMM_DESTROY_VM,
     315    /** Call GVMMR0RegisterVCpu(). */
     316    VMMR0_DO_GVMM_REGISTER_VMCPU,
     317    /** Call GVMMR0DeregisterVCpu(). */
     318    VMMR0_DO_GVMM_DEREGISTER_VMCPU,
     319    /** Call GVMMR0SchedHalt(). */
     320    VMMR0_DO_GVMM_SCHED_HALT,
     321    /** Call GVMMR0SchedWakeUp(). */
     322    VMMR0_DO_GVMM_SCHED_WAKE_UP,
     323    /** Call GVMMR0SchedPoke(). */
     324    VMMR0_DO_GVMM_SCHED_POKE,
     325    /** Call GVMMR0SchedWakeUpAndPokeCpus(). */
     326    VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS,
     327    /** Call GVMMR0SchedPoll(). */
     328    VMMR0_DO_GVMM_SCHED_POLL,
     329    /** Call GVMMR0QueryStatistics(). */
     330    VMMR0_DO_GVMM_QUERY_STATISTICS,
     331    /** Call GVMMR0ResetStatistics(). */
     332    VMMR0_DO_GVMM_RESET_STATISTICS,
     333
     334    /** Call VMMR0 Per VM Init. */
     335    VMMR0_DO_VMMR0_INIT = 64,
     336    /** Call VMMR0 Per VM Termination. */
     337    VMMR0_DO_VMMR0_TERM,
     338
     339    /** Setup the hardware accelerated raw-mode session. */
     340    VMMR0_DO_HM_SETUP_VM = 128,
     341    /** Attempt to enable or disable hardware accelerated raw-mode. */
     342    VMMR0_DO_HM_ENABLE,
     343
     344    /** Call PGMR0PhysAllocateHandyPages(). */
     345    VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES = 192,
     346    /** Call PGMR0PhysFlushHandyPages(). */
     347    VMMR0_DO_PGM_FLUSH_HANDY_PAGES,
     348    /** Call PGMR0AllocateLargePage(). */
     349    VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE,
     350    /** Call PGMR0PhysSetupIommu(). */
     351    VMMR0_DO_PGM_PHYS_SETUP_IOMMU,
     352
     353    /** Call GMMR0InitialReservation(). */
     354    VMMR0_DO_GMM_INITIAL_RESERVATION = 256,
     355    /** Call GMMR0UpdateReservation(). */
     356    VMMR0_DO_GMM_UPDATE_RESERVATION,
     357    /** Call GMMR0AllocatePages(). */
     358    VMMR0_DO_GMM_ALLOCATE_PAGES,
     359    /** Call GMMR0FreePages(). */
     360    VMMR0_DO_GMM_FREE_PAGES,
     361    /** Call GMMR0FreeLargePage(). */
     362    VMMR0_DO_GMM_FREE_LARGE_PAGE,
     363    /** Call GMMR0QueryHypervisorMemoryStatsReq(). */
     364    VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS,
     365    /** Call GMMR0QueryMemoryStatsReq(). */
     366    VMMR0_DO_GMM_QUERY_MEM_STATS,
     367    /** Call GMMR0BalloonedPages(). */
     368    VMMR0_DO_GMM_BALLOONED_PAGES,
     369    /** Call GMMR0MapUnmapChunk(). */
     370    VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
     371    /** Call GMMR0SeedChunk(). */
     372    VMMR0_DO_GMM_SEED_CHUNK,
     373    /** Call GMMR0RegisterSharedModule. */
     374    VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
     375    /** Call GMMR0UnregisterSharedModule. */
     376    VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE,
     377    /** Call GMMR0ResetSharedModules. */
     378    VMMR0_DO_GMM_RESET_SHARED_MODULES,
     379    /** Call GMMR0CheckSharedModules. */
     380    VMMR0_DO_GMM_CHECK_SHARED_MODULES,
     381    /** Call GMMR0FindDuplicatePage. */
     382    VMMR0_DO_GMM_FIND_DUPLICATE_PAGE,
     383    /** Call GMMR0QueryStatistics(). */
     384    VMMR0_DO_GMM_QUERY_STATISTICS,
     385    /** Call GMMR0ResetStatistics(). */
     386    VMMR0_DO_GMM_RESET_STATISTICS,
     387
     388    /** Call PDMR0DriverCallReqHandler. */
     389    VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER = 320,
     390    /** Call PDMR0DeviceCallReqHandler. */
     391    VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER,
     392
     393    /** Calls function in the hypervisor.
     394     * The caller must setup the hypervisor context so the call will be performed.
     395     * The difference between VMMR0_DO_RUN_GC and this one is the handling of
     396     * the return GC code. The return code will not be interpreted by this operation.
     397     */
     398    VMMR0_DO_CALL_HYPERVISOR = 384,
     399
     400    /** Set a GVMM or GMM configuration value. */
     401    VMMR0_DO_GCFGM_SET_VALUE = 400,
     402    /** Query a GVMM or GMM configuration value. */
     403    VMMR0_DO_GCFGM_QUERY_VALUE,
     404
     405    /** The start of the R0 service operations. */
     406    VMMR0_DO_SRV_START = 448,
     407    /** Call IntNetR0Open(). */
     408    VMMR0_DO_INTNET_OPEN,
     409    /** Call IntNetR0IfClose(). */
     410    VMMR0_DO_INTNET_IF_CLOSE,
     411    /** Call IntNetR0IfGetBufferPtrs(). */
     412    VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS,
     413    /** Call IntNetR0IfSetPromiscuousMode(). */
     414    VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE,
     415    /** Call IntNetR0IfSetMacAddress(). */
     416    VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS,
     417    /** Call IntNetR0IfSetActive(). */
     418    VMMR0_DO_INTNET_IF_SET_ACTIVE,
     419    /** Call IntNetR0IfSend(). */
     420    VMMR0_DO_INTNET_IF_SEND,
     421    /** Call IntNetR0IfWait(). */
     422    VMMR0_DO_INTNET_IF_WAIT,
     423    /** Call IntNetR0IfAbortWait(). */
     424    VMMR0_DO_INTNET_IF_ABORT_WAIT,
     425
     426    /** Forward call to the PCI driver */
     427    VMMR0_DO_PCIRAW_REQ = 512,
     428
     429    /** The end of the R0 service operations. */
     430    VMMR0_DO_SRV_END,
     431
     432    /** Call NEMR0InitVM() (host specific). */
     433    VMMR0_DO_NEM_INIT_VM = 576,
     434    /** Call NEMR0MapPages() (host specific). */
     435    VMMR0_DO_NEM_MAP_PAGES,
     436    /** Call NEMR0UnmapPages() (host specific). */
     437    VMMR0_DO_NEM_UNMAP_PAGES,
     438
     439    /** Official call we use for testing Ring-0 APIs. */
     440    VMMR0_DO_TESTS = 640,
     441    /** Test the 32->64 bits switcher. */
     442    VMMR0_DO_TEST_SWITCHER3264,
     443
     444    /** The usual 32-bit type blow up. */
     445    VMMR0_DO_32BIT_HACK = 0x7fffffff
     446} VMMR0OPERATION;
     447
     448
     449/**
     450 * Request buffer for VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE.
      451 * @todo Move to GCFGM.h when it's implemented.
     452 */
     453typedef struct GCFGMVALUEREQ
     454{
     455    /** The request header.*/
     456    SUPVMMR0REQHDR      Hdr;
     457    /** The support driver session handle. */
     458    PSUPDRVSESSION      pSession;
     459    /** The value.
     460     * This is input for the set request and output for the query. */
     461    uint64_t            u64Value;
     462    /** The variable name.
     463     * This is fixed sized just to make things simple for the mock-up. */
     464    char                szName[48];
     465} GCFGMVALUEREQ;
     466/** Pointer to a VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE request buffer.
      467 * @todo Move to GCFGM.h when it's implemented.
     468 */
     469typedef GCFGMVALUEREQ *PGCFGMVALUEREQ;
     470
     471#if defined(IN_RING0) || defined(DOXYGEN_RUNNING)
     472VMMR0DECL(void)      VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation);
     473VMMR0DECL(int)       VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
     474                                  PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
     475VMMR0_INT_DECL(int)  VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu);
     476VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu);
     477VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu);
     478VMMR0_INT_DECL(int)  VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu);
     479VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu);
     480VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu);
     481VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu);
     482
     483# ifdef LOG_ENABLED
     484VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu);
     485VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu);
     486VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu);
     487# else
     488#  define            VMMR0LogFlushDisable(pVCpu)     do { } while(0)
     489#  define            VMMR0LogFlushEnable(pVCpu)      do { } while(0)
     490#  define            VMMR0IsLogFlushDisabled(pVCpu)  (true)
     491# endif /* LOG_ENABLED */
     492#endif /* IN_RING0 */
     493
     494/** @} */
     495
     496
    293497#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
    294498/** @defgroup grp_vmm_api_r3    The VMM Host Context Ring 3 API
     
    317521# endif
    318522VMMR3DECL(int)          VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
     523VMMR3_INT_DECL(int)     VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
    319524VMMR3DECL(void)         VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr);
    320525VMMR3_INT_DECL(void)    VMMR3YieldSuspend(PVM pVM);
     
    362567
    363568
    364 /** @defgroup grp_vmm_api_r0    The VMM Host Context Ring 0 API
    365  * @{
    366  */
    367 
    368 /**
    369  * The VMMR0Entry() codes.
    370  */
    371 typedef enum VMMR0OPERATION
    372 {
    373     /** Run guest context. */
    374     VMMR0_DO_RAW_RUN = SUP_VMMR0_DO_RAW_RUN,
    375     /** Run guest code using the available hardware acceleration technology. */
    376     VMMR0_DO_HM_RUN = SUP_VMMR0_DO_HM_RUN,
    377     /** Official NOP that we use for profiling. */
    378     VMMR0_DO_NOP = SUP_VMMR0_DO_NOP,
    379     /** Official slow ioctl NOP that we use for profiling. */
    380     VMMR0_DO_SLOW_NOP,
    381 
    382     /** Ask the GVMM to create a new VM. */
    383     VMMR0_DO_GVMM_CREATE_VM = 32,
    384     /** Ask the GVMM to destroy the VM. */
    385     VMMR0_DO_GVMM_DESTROY_VM,
    386     /** Call GVMMR0RegisterVCpu(). */
    387     VMMR0_DO_GVMM_REGISTER_VMCPU,
    388     /** Call GVMMR0DeregisterVCpu(). */
    389     VMMR0_DO_GVMM_DEREGISTER_VMCPU,
    390     /** Call GVMMR0SchedHalt(). */
    391     VMMR0_DO_GVMM_SCHED_HALT,
    392     /** Call GVMMR0SchedWakeUp(). */
    393     VMMR0_DO_GVMM_SCHED_WAKE_UP,
    394     /** Call GVMMR0SchedPoke(). */
    395     VMMR0_DO_GVMM_SCHED_POKE,
    396     /** Call GVMMR0SchedWakeUpAndPokeCpus(). */
    397     VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS,
    398     /** Call GVMMR0SchedPoll(). */
    399     VMMR0_DO_GVMM_SCHED_POLL,
    400     /** Call GVMMR0QueryStatistics(). */
    401     VMMR0_DO_GVMM_QUERY_STATISTICS,
    402     /** Call GVMMR0ResetStatistics(). */
    403     VMMR0_DO_GVMM_RESET_STATISTICS,
    404 
    405     /** Call VMMR0 Per VM Init. */
    406     VMMR0_DO_VMMR0_INIT = 64,
    407     /** Call VMMR0 Per VM Termination. */
    408     VMMR0_DO_VMMR0_TERM,
    409 
    410     /** Setup the hardware accelerated raw-mode session. */
    411     VMMR0_DO_HM_SETUP_VM = 128,
    412     /** Attempt to enable or disable hardware accelerated raw-mode. */
    413     VMMR0_DO_HM_ENABLE,
    414 
    415     /** Call PGMR0PhysAllocateHandyPages(). */
    416     VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES = 192,
    417     /** Call PGMR0PhysFlushHandyPages(). */
    418     VMMR0_DO_PGM_FLUSH_HANDY_PAGES,
    419     /** Call PGMR0AllocateLargePage(). */
    420     VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE,
    421     /** Call PGMR0PhysSetupIommu(). */
    422     VMMR0_DO_PGM_PHYS_SETUP_IOMMU,
    423 
    424     /** Call GMMR0InitialReservation(). */
    425     VMMR0_DO_GMM_INITIAL_RESERVATION = 256,
    426     /** Call GMMR0UpdateReservation(). */
    427     VMMR0_DO_GMM_UPDATE_RESERVATION,
    428     /** Call GMMR0AllocatePages(). */
    429     VMMR0_DO_GMM_ALLOCATE_PAGES,
    430     /** Call GMMR0FreePages(). */
    431     VMMR0_DO_GMM_FREE_PAGES,
    432     /** Call GMMR0FreeLargePage(). */
    433     VMMR0_DO_GMM_FREE_LARGE_PAGE,
    434     /** Call GMMR0QueryHypervisorMemoryStatsReq(). */
    435     VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS,
    436     /** Call GMMR0QueryMemoryStatsReq(). */
    437     VMMR0_DO_GMM_QUERY_MEM_STATS,
    438     /** Call GMMR0BalloonedPages(). */
    439     VMMR0_DO_GMM_BALLOONED_PAGES,
    440     /** Call GMMR0MapUnmapChunk(). */
    441     VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
    442     /** Call GMMR0SeedChunk(). */
    443     VMMR0_DO_GMM_SEED_CHUNK,
    444     /** Call GMMR0RegisterSharedModule. */
    445     VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
    446     /** Call GMMR0UnregisterSharedModule. */
    447     VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE,
    448     /** Call GMMR0ResetSharedModules. */
    449     VMMR0_DO_GMM_RESET_SHARED_MODULES,
    450     /** Call GMMR0CheckSharedModules. */
    451     VMMR0_DO_GMM_CHECK_SHARED_MODULES,
    452     /** Call GMMR0FindDuplicatePage. */
    453     VMMR0_DO_GMM_FIND_DUPLICATE_PAGE,
    454     /** Call GMMR0QueryStatistics(). */
    455     VMMR0_DO_GMM_QUERY_STATISTICS,
    456     /** Call GMMR0ResetStatistics(). */
    457     VMMR0_DO_GMM_RESET_STATISTICS,
    458 
    459     /** Call PDMR0DriverCallReqHandler. */
    460     VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER = 320,
    461     /** Call PDMR0DeviceCallReqHandler. */
    462     VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER,
    463 
    464     /** Calls function in the hypervisor.
    465      * The caller must setup the hypervisor context so the call will be performed.
    466      * The difference between VMMR0_DO_RUN_GC and this one is the handling of
    467      * the return GC code. The return code will not be interpreted by this operation.
    468      */
    469     VMMR0_DO_CALL_HYPERVISOR = 384,
    470 
    471     /** Set a GVMM or GMM configuration value. */
    472     VMMR0_DO_GCFGM_SET_VALUE = 400,
    473     /** Query a GVMM or GMM configuration value. */
    474     VMMR0_DO_GCFGM_QUERY_VALUE,
    475 
    476     /** The start of the R0 service operations. */
    477     VMMR0_DO_SRV_START = 448,
    478     /** Call IntNetR0Open(). */
    479     VMMR0_DO_INTNET_OPEN,
    480     /** Call IntNetR0IfClose(). */
    481     VMMR0_DO_INTNET_IF_CLOSE,
    482     /** Call IntNetR0IfGetBufferPtrs(). */
    483     VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS,
    484     /** Call IntNetR0IfSetPromiscuousMode(). */
    485     VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE,
    486     /** Call IntNetR0IfSetMacAddress(). */
    487     VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS,
    488     /** Call IntNetR0IfSetActive(). */
    489     VMMR0_DO_INTNET_IF_SET_ACTIVE,
    490     /** Call IntNetR0IfSend(). */
    491     VMMR0_DO_INTNET_IF_SEND,
    492     /** Call IntNetR0IfWait(). */
    493     VMMR0_DO_INTNET_IF_WAIT,
    494     /** Call IntNetR0IfAbortWait(). */
    495     VMMR0_DO_INTNET_IF_ABORT_WAIT,
    496 
    497     /** Forward call to the PCI driver */
    498     VMMR0_DO_PCIRAW_REQ = 512,
    499 
    500     /** The end of the R0 service operations. */
    501     VMMR0_DO_SRV_END,
    502 
    503     /** Call NEMR0InitVM() (host specific). */
    504     VMMR0_DO_NEM_INIT_VM = 576,
    505     /** Call NEMR0MapPages() (host specific). */
    506     VMMR0_DO_NEM_MAP_PAGES,
    507     /** Call NEMR0UnmapPages() (host specific). */
    508     VMMR0_DO_NEM_UNMAP_PAGES,
    509 
    510     /** Official call we use for testing Ring-0 APIs. */
    511     VMMR0_DO_TESTS = 640,
    512     /** Test the 32->64 bits switcher. */
    513     VMMR0_DO_TEST_SWITCHER3264,
    514 
    515     /** The usual 32-bit type blow up. */
    516     VMMR0_DO_32BIT_HACK = 0x7fffffff
    517 } VMMR0OPERATION;
    518 
    519 
    520 /**
    521  * Request buffer for VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE.
    522  * @todo Move to GCFGM.h when it's implemented.
    523  */
    524 typedef struct GCFGMVALUEREQ
    525 {
    526     /** The request header.*/
    527     SUPVMMR0REQHDR      Hdr;
    528     /** The support driver session handle. */
    529     PSUPDRVSESSION      pSession;
    530     /** The value.
    531      * This is input for the set request and output for the query. */
    532     uint64_t            u64Value;
    533     /** The variable name.
    534      * This is fixed sized just to make things simple for the mock-up. */
    535     char                szName[48];
    536 } GCFGMVALUEREQ;
    537 /** Pointer to a VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE request buffer.
    538  * @todo Move to GCFGM.h when it's implemented.
    539  */
    540 typedef GCFGMVALUEREQ *PGCFGMVALUEREQ;
    541 
    542 #if defined(IN_RING0) || defined(DOXYGEN_RUNNING)
    543 VMMR0DECL(void)      VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation);
    544 VMMR0DECL(int)       VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
    545                                   PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
    546 VMMR0_INT_DECL(int)  VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu);
    547 VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu);
    548 VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu);
    549 VMMR0_INT_DECL(int)  VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu);
    550 VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu);
    551 VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu);
    552 VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu);
    553 
    554 # ifdef LOG_ENABLED
    555 VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu);
    556 VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu);
    557 VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu);
    558 # else
    559 #  define            VMMR0LogFlushDisable(pVCpu)     do { } while(0)
    560 #  define            VMMR0LogFlushEnable(pVCpu)      do { } while(0)
    561 #  define            VMMR0IsLogFlushDisabled(pVCpu)  (true)
    562 # endif /* LOG_ENABLED */
    563 #endif /* IN_RING0 */
    564 
    565 /** @} */
    566 
    567 
    568569#if defined(IN_RC) || defined(DOXYGEN_RUNNING)
    569570/** @defgroup grp_vmm_api_rc    The VMM Raw-Mode Context API
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r71043 r71081  
    48584858 * @returns VBox status code from callback.
    48594859 * @param   pVM             The cross context VM structure.
     4860 * @param   pVCpu           The cross context per CPU structure.  This is
     4861 *                          optional as its only for passing to callback.
    48604862 * @param   uMinState       The minimum NEM state value to call on.
    48614863 * @param   pfnCallback     The callback function.
    48624864 * @param   pvUser          User argument for the callback.
    48634865 */
    4864 VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVM pVM, uint8_t uMinState, PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
     4866VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVM pVM, PVMCPU pVCpu, uint8_t uMinState,
     4867                                             PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
    48654868{
    48664869    /*
     
    48794882            else
    48804883            {
    4881                 rc = pfnCallback(pVM, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
     4884                rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
    48824885                if (RT_SUCCESS(rc))
    48834886                    PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp

    r71076 r71081  
    223223*   Internal Functions                                                                                                           *
    224224*********************************************************************************************************************************/
    225 static int nemR3NativeSetPhysPage(PVM pVM, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
     225static int nemR3NativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
    226226                                  uint8_t *pu2State, bool fBackingChanged);
    227227
     
    985985
    986986
     987#ifdef NEM_WIN_USE_HYPERCALLS
     988
     989/**
     990 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
     991 *
     992 * @returns VBox status code.
     993 * @param   pVM         The cross context VM structure.
     994 * @param   pVCpu       The cross context virtual CPU structure of the caller.
     995 * @param   GCPhysSrc   The source page.  Does not need to be page aligned.
     996 * @param   GCPhysDst   The destination page.  Same as @a GCPhysSrc except for
     997 *                      when A20 is disabled.
     998 * @param   fFlags      HV_MAP_GPA_XXX.
     999 */
     1000DECLINLINE(int) nemR3WinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
     1001{
     1002    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc   = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
     1003    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst   = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
     1004    pVCpu->nem.s.Hypercall.MapPages.cPages      = 1;
     1005    pVCpu->nem.s.Hypercall.MapPages.fFlags      = fFlags;
     1006    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
     1007}
     1008
     1009
     1010/**
     1011 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
     1012 *
     1013 * @returns VBox status code.
     1014 * @param   pVM         The cross context VM structure.
     1015 * @param   pVCpu       The cross context virtual CPU structure of the caller.
     1016 * @param   GCPhys      The page to unmap.  Does not need to be page aligned.
     1017 */
     1018DECLINLINE(int) nemR3WinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
     1019{
     1020    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys    = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
     1021    pVCpu->nem.s.Hypercall.UnmapPages.cPages    = 1;
     1022    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
     1023}
     1024
     1025#endif /* NEM_WIN_USE_HYPERCALLS */
     1026
    9871027static int nemR3WinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    9881028{
     
    18411881
    18421882
    1843 static DECLCALLBACK(int) nemR3WinUnmapOnePageCallback(PVM pVM, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
     1883static DECLCALLBACK(int) nemR3WinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
    18441884{
    18451885    RT_NOREF_PV(pvUser);
    18461886#ifdef NEM_WIN_USE_HYPERCALLS
    1847     PVMCPU pVCpu = VMMGetCpu(pVM);
    1848     pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys;
    1849     pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    1850     int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
     1887    int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhys);
    18511888    AssertRC(rc);
    18521889    if (RT_SUCCESS(rc))
    18531890#else
     1891    RT_NOREF_PV(pVCpu);
    18541892    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
    18551893    if (SUCCEEDED(hrc))
     
    19491987            /* Map the page. */
    19501988            int rc = nemR3NativeSetPhysPage(pVM,
     1989                                            pVCpu,
    19511990                                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
    19521991                                            GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
     
    19702009                return VINF_SUCCESS;
    19712010            }
     2011
     2012#ifdef NEM_WIN_USE_HYPERCALLS
     2013            /* Upgrade page to writable. */
     2014/** @todo test this*/
     2015            if (   (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
     2016                && pState->fWriteAccess)
     2017            {
     2018                int rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
     2019                                                    HV_MAP_GPA_READABLE   | HV_MAP_GPA_WRITABLE
     2020                                                  | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
     2021                AssertRC(rc);
     2022                if (RT_SUCCESS(rc))
     2023                {
     2024                    pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
     2025                    pState->fDidSomething = true;
     2026                    pState->fCanResume    = true;
     2027                    Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
     2028                          GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
     2029                    return rc;
     2030                }
     2031            }
     2032#endif
    19722033            break;
    19732034
     
    19892050     */
    19902051#ifdef NEM_WIN_USE_HYPERCALLS
    1991     pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys;
    1992     pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    1993     int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
     2052    int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhys);
    19942053    AssertRC(rc);
    19952054    if (RT_SUCCESS(rc))
     
    20152074            pVM->nem.s.cMappedPages));
    20162075
    2017     PGMPhysNemEnumPagesByState(pVM, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
     2076    PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
    20182077    Log(("nemR3WinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
    20192078
     
    23822441        }
    23832442
    2384 #ifdef NEM_WIN_USE_HYPERCALLS
     2443#ifndef NEM_WIN_USE_HYPERCALLS
    23852444        /* Hack alert! */
    23862445        uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
     
    23892448        else
    23902449        {
    2391             PGMPhysNemEnumPagesByState(pVM, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
     2450            PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
    23922451            Log(("nemR3NativeRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
    23932452        }
     
    25652624    {
    25662625#ifdef NEM_WIN_USE_HYPERCALLS
    2567         pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys;
    2568         pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    2569         int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
     2626        int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhys);
    25702627        AssertRC(rc);
    25712628        if (RT_SUCCESS(rc))
     
    26682725 * @returns VBox status code.
    26692726 * @param   pVM             The cross context VM structure.
     2727 * @param   pVCpu           The cross context virtual CPU structure of the
     2728 *                          calling EMT.
    26702729 * @param   GCPhysSrc       The source page address.
    26712730 * @param   GCPhysDst       The hyper-V destination page.  This may differ from
     
    26742733 * @param   pu2State        Our page state (input/output).
    26752734 * @param   fBackingChanged Set if the page backing is being changed.
     2735 * @thread  EMT(pVCpu)
    26762736 */
    2677 static int nemR3NativeSetPhysPage(PVM pVM, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
     2737static int nemR3NativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
    26782738                                  uint8_t *pu2State, bool fBackingChanged)
    26792739{
     
    26922752        {
    26932753#ifdef NEM_WIN_USE_HYPERCALLS
    2694             PVMCPU pVCpu = VMMGetCpu(pVM);
    2695             pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhysDst;
    2696             pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    2697             int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
     2754            int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
    26982755            AssertRC(rc);
    26992756            if (RT_SUCCESS(rc))
     
    27422799    {
    27432800#ifdef NEM_WIN_USE_HYPERCALLS
    2744         RT_NOREF_PV(GCPhysSrc);
    2745         PVMCPU pVCpu = VMMGetCpu(pVM);
    2746         pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc;
    2747         pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst;
    2748         pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
    2749         pVCpu->nem.s.Hypercall.MapPages.fFlags    = HV_MAP_GPA_READABLE   | HV_MAP_GPA_WRITABLE
    2750                                                   | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN;
    2751         int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
     2801        int rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
     2802                                            HV_MAP_GPA_READABLE   | HV_MAP_GPA_WRITABLE
     2803                                          | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
    27522804        AssertRC(rc);
    27532805        if (RT_SUCCESS(rc))
     
    27882840    {
    27892841#ifdef NEM_WIN_USE_HYPERCALLS
    2790         PVMCPU pVCpu = VMMGetCpu(pVM);
    2791         pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc;
    2792         pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst;
    2793         pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
    2794         pVCpu->nem.s.Hypercall.MapPages.fFlags    = HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN;
    2795         int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
     2842        int rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
     2843                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
    27962844        AssertRC(rc);
    27972845        if (RT_SUCCESS(rc))
     
    28462894#ifdef NEM_WIN_USE_HYPERCALLS
    28472895    PVMCPU pVCpu = VMMGetCpu(pVM);
    2848     pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    2849     pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    2850     int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
     2896    int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
    28512897    AssertRC(rc);
    28522898    if (RT_SUCCESS(rc))
     
    28572903        return VINF_SUCCESS;
    28582904    }
    2859     LogRel(("nemR3NativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
     2905    LogRel(("nemR3JustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
    28602906    return rc;
    28612907#else
     
    28832929
    28842930    int rc;
    2885 #if 0
     2931#ifdef NEM_WIN_USE_HYPERCALLS
     2932    PVMCPU pVCpu = VMMGetCpu(pVM);
    28862933    if (   pVM->nem.s.fA20Enabled
    28872934        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
    2888         rc = nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
     2935        rc = nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    28892936    else
    28902937    {
     
    28922939        rc = nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
    28932940        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
    2894             rc = nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
     2941            rc = nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    28952942
    28962943    }
     
    29162963    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
    29172964
    2918 #if 0
     2965#ifdef NEM_WIN_USE_HYPERCALLS
     2966    PVMCPU pVCpu = VMMGetCpu(pVM);
    29192967    if (   pVM->nem.s.fA20Enabled
    29202968        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
    2921         nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
     2969        nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    29222970    else
    29232971    {
     
    29252973        nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
    29262974        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
    2927             nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
     2975            nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    29282976    }
    29292977#else
     
    29462994    RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);
    29472995
    2948 #if 0
     2996#ifdef NEM_WIN_USE_HYPERCALLS
     2997    PVMCPU pVCpu = VMMGetCpu(pVM);
    29492998    if (   pVM->nem.s.fA20Enabled
    29502999        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
    2951         nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
     3000        nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    29523001    else
    29533002    {
     
    29553004        nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
    29563005        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
    2957             nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
     3006            nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    29583007    }
    29593008#else
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    r71040 r71081  
    24872487    PVMCPU pVCpu = VMMGetCpu(pVM);
    24882488    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
    2489 
    2490     /*
    2491      * Call Ring-0 entry with init code.
    2492      */
     2489    return VMMR3CallR0Emt(pVM, pVCpu, (VMMR0OPERATION)uOperation, u64Arg, pReqHdr);
     2490}
     2491
     2492
     2493/**
     2494 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
     2495 *
     2496 * @returns VBox status code.
     2497 * @param   pVM         The cross context VM structure.
     2498 * @param   pVCpu       The cross context VM structure.
     2499 * @param   uOperation  Operation to execute.
     2500 * @param   u64Arg      Constant argument.
     2501 * @param   pReqHdr     Pointer to a request header. See SUPR3CallVMMR0Ex for
     2502 *                      details.
     2503 */
     2504VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
     2505{
    24932506    int rc;
    24942507    for (;;)
     
    24972510        rc = VERR_GENERAL_FAILURE;
    24982511#else
    2499         rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, uOperation, u64Arg, pReqHdr);
     2512        rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
    25002513#endif
    25012514        /*
     
    25162529
    25172530    AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
    2518                           ("uOperation=%u rc=%Rrc\n", uOperation, rc),
     2531                          ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
    25192532                          VERR_IPE_UNEXPECTED_INFO_STATUS);
    25202533    return rc;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette