VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp

Last change on this file was 107033, checked in by vboxsync, 3 weeks ago

VMM/NEM/ARM: Loading a 32-bit value into a register clears the upper half on real hardware, workaround for ldp instruction accessing the TPM MMIO area caused by tpm.sys in a Windows 11/ARM guest, bugref:10777 [windows build fix]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 147.4 KB
Line 
1/* $Id: NEMR3Native-win-armv8.cpp 107033 2024-11-18 15:28:18Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.virtualbox.org.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_NEM
39#define VMCPU_INCL_CPUM_GST_CTX
40#include <iprt/nt/nt-and-windows.h>
41#include <iprt/nt/hyperv.h>
42#include <WinHvPlatform.h>
43
44#ifndef _WIN32_WINNT_WIN10
45# error "Missing _WIN32_WINNT_WIN10"
46#endif
47#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
48# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
49#endif
50#include <sysinfoapi.h>
51#include <debugapi.h>
52#include <errhandlingapi.h>
53#include <fileapi.h>
54#include <winerror.h> /* no api header for this. */
55
56#include <VBox/dis.h>
57#include <VBox/vmm/nem.h>
58#include <VBox/vmm/iem.h>
59#include <VBox/vmm/em.h>
60#include <VBox/vmm/apic.h>
61#include <VBox/vmm/pdm.h>
62#include <VBox/vmm/dbgftrace.h>
63#include "NEMInternal.h"
64#include <VBox/vmm/vmcc.h>
65
66#include <iprt/formats/arm-psci.h>
67
68#include <iprt/ldr.h>
69#include <iprt/path.h>
70#include <iprt/string.h>
71#include <iprt/system.h>
72#include <iprt/utf16.h>
73
74#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
75HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
76# define WHvMapGpaRangeFlagTrackDirtyPages ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
77#endif
78
79
80/*
81 * The following definitions appeared in build 27744 allow configuring the base address of the GICv3 controller,
82 * (there is no official SDK for this yet).
83 */
84/** @todo Better way of defining these which doesn't require casting later on when calling APIs. */
85#define WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS UINT32_C(0x00001012)
86/** No GIC present. */
87#define WHV_ARM64_IC_EMULATION_MODE_NONE 0
88/** Hyper-V emulates a GICv3. */
89#define WHV_ARM64_IC_EMULATION_MODE_GICV3 1
90
/**
 * Configures the interrupt controller emulated by Hyper-V.
 *
 * Local mirror of the (not yet officially published) WHV_ARM64_IC_PARAMETERS
 * structure passed with WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS.
 * Layout must match the hypervisor's expectation exactly (see the
 * AssertCompileSize below) — do not reorder or resize members.
 */
typedef struct MY_WHV_ARM64_IC_PARAMETERS
{
    uint32_t u32EmulationMode;  /**< WHV_ARM64_IC_EMULATION_MODE_XXX. */
    uint32_t u32Rsvd;           /**< Reserved / explicit padding. */
    union
    {
        /** Parameters for WHV_ARM64_IC_EMULATION_MODE_GICV3. */
        struct
        {
            RTGCPHYS GCPhysGicdBase;            /**< Guest physical base address of the GIC distributor (GICD). */
            RTGCPHYS GCPhysGitsTranslaterBase;  /**< Guest physical base address of the ITS translator. */
            uint32_t u32Rsvd;                   /**< Reserved. */
            uint32_t cLpiIntIdBits;             /**< Number of LPI INTID bits. */
            uint32_t u32PpiCntvOverflw;         /**< Absolute PPI INTID used for the virtual timer. */
            uint32_t u32PpiPmu;                 /**< Absolute PPI INTID used for the PMU. */
            uint32_t au32Rsvd[6];               /**< Reserved. */
        } GicV3;
    } u;
} MY_WHV_ARM64_IC_PARAMETERS;
AssertCompileSize(MY_WHV_ARM64_IC_PARAMETERS, 64);
113
114
/**
 * The hypercall exit context.
 *
 * Local definition because the SDK we build against lacks/differs on this;
 * the layout must match what Hyper-V hands back for a hypercall intercept
 * (see the AssertCompileSize below).
 */
typedef struct MY_WHV_HYPERCALL_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint16_t Immediate;                     /**< Immediate of the hypercall instruction (presumably HVC #imm — confirm against Hyper-V TLFS). */
    uint16_t u16Rsvd;                       /**< Reserved. */
    uint32_t u32Rsvd;                       /**< Reserved. */
    uint64_t X[18];                         /**< Guest GPRs at the time of the hypercall; count suggests x0..x17 — NOTE(review): confirm. */
} MY_WHV_HYPERCALL_CONTEXT;
/** Pointer to a hypercall exit context. */
typedef MY_WHV_HYPERCALL_CONTEXT *PMY_WHV_HYPERCALL_CONTEXT;
AssertCompileSize(MY_WHV_HYPERCALL_CONTEXT, 24 + 19 * sizeof(uint64_t));
128
129
/**
 * The ARM64 reset context.
 *
 * Delivered on a PSCI initiated power-off/reset exit; layout must match
 * the hypervisor (see AssertCompileSize below).
 */
typedef struct MY_WHV_ARM64_RESET_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint32_t ResetType;                     /**< WHV_ARM64_RESET_CONTEXT_TYPE_XXX (power off or reset). */
    uint32_t u32Rsvd;                       /**< Reserved. */
} MY_WHV_ARM64_RESET_CONTEXT;
/** Pointer to an ARM64 reset context. */
typedef MY_WHV_ARM64_RESET_CONTEXT *PMY_WHV_ARM64_RESET_CONTEXT;
AssertCompileSize(MY_WHV_ARM64_RESET_CONTEXT, 24 + 2 * sizeof(uint32_t));
141
142
143#define WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF 0
144#define WHV_ARM64_RESET_CONTEXT_TYPE_RESET 1
145
146
/**
 * The exit reason context for arm64, the size is different
 * from the default SDK we build against.
 *
 * Must be exactly 272 bytes (see AssertCompileSize below) as that is what
 * WHvRunVirtualProcessor writes into on this platform.
 */
typedef struct MY_WHV_RUN_VP_EXIT_CONTEXT
{
    WHV_RUN_VP_EXIT_REASON ExitReason;      /**< Why the vCPU stopped running. */
    uint32_t u32Rsvd;                       /**< Reserved / padding. */
    uint64_t u64Rsvd;                       /**< Reserved. */
    union
    {
        WHV_MEMORY_ACCESS_CONTEXT MemoryAccess;                         /**< Valid for memory access exits. */
        WHV_RUN_VP_CANCELED_CONTEXT CancelReason;                       /**< Valid when the run was canceled. */
        MY_WHV_HYPERCALL_CONTEXT Hypercall;                             /**< Valid for hypercall exits. */
        WHV_UNRECOVERABLE_EXCEPTION_CONTEXT UnrecoverableException;     /**< Valid for unrecoverable exception exits. */
        MY_WHV_ARM64_RESET_CONTEXT Arm64Reset;                          /**< Valid for reset/power-off exits. */
        uint64_t au64Rsvd2[32];                                         /**< Pads the union to its full size. */
    };
} MY_WHV_RUN_VP_EXIT_CONTEXT;
/** Pointer to an arm64 exit reason context. */
typedef MY_WHV_RUN_VP_EXIT_CONTEXT *PMY_WHV_RUN_VP_EXIT_CONTEXT;
AssertCompileSize(MY_WHV_RUN_VP_EXIT_CONTEXT, 272);
168
169#define My_WHvArm64RegisterGicrBaseGpa ((WHV_REGISTER_NAME)UINT32_C(0x00063000))
170
171
172/*********************************************************************************************************************************
173* Defined Constants And Macros *
174*********************************************************************************************************************************/
175
176
177/*********************************************************************************************************************************
178* Global Variables *
179*********************************************************************************************************************************/
180/** @name APIs imported from WinHvPlatform.dll
181 * @{ */
182static decltype(WHvGetCapability) * g_pfnWHvGetCapability;
183static decltype(WHvCreatePartition) * g_pfnWHvCreatePartition;
184static decltype(WHvSetupPartition) * g_pfnWHvSetupPartition;
185static decltype(WHvDeletePartition) * g_pfnWHvDeletePartition;
186static decltype(WHvGetPartitionProperty) * g_pfnWHvGetPartitionProperty;
187static decltype(WHvSetPartitionProperty) * g_pfnWHvSetPartitionProperty;
188static decltype(WHvMapGpaRange) * g_pfnWHvMapGpaRange;
189static decltype(WHvUnmapGpaRange) * g_pfnWHvUnmapGpaRange;
190static decltype(WHvTranslateGva) * g_pfnWHvTranslateGva;
191static decltype(WHvQueryGpaRangeDirtyBitmap) * g_pfnWHvQueryGpaRangeDirtyBitmap;
192static decltype(WHvCreateVirtualProcessor) * g_pfnWHvCreateVirtualProcessor;
193static decltype(WHvDeleteVirtualProcessor) * g_pfnWHvDeleteVirtualProcessor;
194static decltype(WHvRunVirtualProcessor) * g_pfnWHvRunVirtualProcessor;
195static decltype(WHvCancelRunVirtualProcessor) * g_pfnWHvCancelRunVirtualProcessor;
196static decltype(WHvGetVirtualProcessorRegisters) * g_pfnWHvGetVirtualProcessorRegisters;
197static decltype(WHvSetVirtualProcessorRegisters) * g_pfnWHvSetVirtualProcessorRegisters;
198//static decltype(WHvGetVirtualProcessorState) * g_pfnWHvGetVirtualProcessorState;
199decltype(WHvRequestInterrupt) * g_pfnWHvRequestInterrupt;
200/** @} */
201
202/** The Windows build number. */
203static uint32_t g_uBuildNo = 17134;
204
205
206
/**
 * Import instructions.
 *
 * Table driving nemR3WinInitProbeAndLoad: each entry names a function to
 * resolve from WinHvPlatform.dll into the corresponding g_pfnXxx pointer.
 * Non-optional entries that fail to resolve make initialization fail.
 */
static const struct
{
    uint8_t     idxDll;     /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
    bool        fOptional;  /**< Set if import is optional. */
    PFNRT      *ppfn;       /**< The function pointer variable. */
    const char *pszName;    /**< The function name. */
} g_aImports[] =
{
/** Helper building one table entry from the bare API name. */
#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
    NEM_WIN_IMPORT(0, false, WHvGetCapability),
    NEM_WIN_IMPORT(0, false, WHvCreatePartition),
    NEM_WIN_IMPORT(0, false, WHvSetupPartition),
    NEM_WIN_IMPORT(0, false, WHvDeletePartition),
    NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvTranslateGva),
    NEM_WIN_IMPORT(0, true,  WHvQueryGpaRangeDirtyBitmap), /* optional: absent on pre-2004 SD/OS builds */
    NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
//  NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvRequestInterrupt),
#undef NEM_WIN_IMPORT
};
239
240
241/*
242 * Let the preprocessor alias the APIs to import variables for better autocompletion.
243 */
244#ifndef IN_SLICKEDIT
245# define WHvGetCapability g_pfnWHvGetCapability
246# define WHvCreatePartition g_pfnWHvCreatePartition
247# define WHvSetupPartition g_pfnWHvSetupPartition
248# define WHvDeletePartition g_pfnWHvDeletePartition
249# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
250# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
251# define WHvMapGpaRange g_pfnWHvMapGpaRange
252# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
253# define WHvTranslateGva g_pfnWHvTranslateGva
254# define WHvQueryGpaRangeDirtyBitmap g_pfnWHvQueryGpaRangeDirtyBitmap
255# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
256# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
257# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
258# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
259# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
260# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
261# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
262//# define WHvGetVirtualProcessorState g_pfnWHvGetVirtualProcessorState
263# define WHvRequestInterrupt g_pfnWHvRequestInterrupt
264
265# define VidMessageSlotHandleAndGetNext g_pfnVidMessageSlotHandleAndGetNext
266# define VidStartVirtualProcessor g_pfnVidStartVirtualProcessor
267# define VidStopVirtualProcessor g_pfnVidStopVirtualProcessor
268
269#endif
270
271#if 0 /* unused */
272/** WHV_MEMORY_ACCESS_TYPE names */
273static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
274#endif
275/** NEM_WIN_PAGE_STATE_XXX names. */
276NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
277#ifdef LOG_ENABLED
278/** HV_INTERCEPT_ACCESS_TYPE names. */
279static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
280#endif
281
282
283/*********************************************************************************************************************************
284* Internal Functions *
285*********************************************************************************************************************************/
286DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
287DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);
288
289NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
290 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
291
292/**
293 * Worker for nemR3NativeInit that probes and load the native API.
294 *
295 * @returns VBox status code.
296 * @param fForced Whether the HMForced flag is set and we should
297 * fail if we cannot initialize.
298 * @param pErrInfo Where to always return error info.
299 */
300static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
301{
302 /*
303 * Check that the DLL files we need are present, but without loading them.
304 * We'd like to avoid loading them unnecessarily.
305 */
306 WCHAR wszPath[MAX_PATH + 64];
307 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
308 if (cwcPath >= MAX_PATH || cwcPath < 2)
309 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
310
311 if (wszPath[cwcPath - 1] != '\\' || wszPath[cwcPath - 1] != '/')
312 wszPath[cwcPath++] = '\\';
313 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
314 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
315 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
316
317 /*
318 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
319 */
320 /** @todo */
321
322 /** @todo would be great if we could recognize a root partition from the
323 * CPUID info, but I currently don't dare do that. */
324
325 /*
326 * Now try load the DLLs and resolve the APIs.
327 */
328 static const char * const s_apszDllNames[1] = { "WinHvPlatform.dll" };
329 RTLDRMOD ahMods[1] = { NIL_RTLDRMOD };
330 int rc = VINF_SUCCESS;
331 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
332 {
333 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
334 if (RT_FAILURE(rc2))
335 {
336 if (!RTErrInfoIsSet(pErrInfo))
337 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
338 else
339 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
340 ahMods[i] = NIL_RTLDRMOD;
341 rc = VERR_NEM_INIT_FAILED;
342 }
343 }
344 if (RT_SUCCESS(rc))
345 {
346 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
347 {
348 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
349 if (RT_SUCCESS(rc2))
350 {
351 if (g_aImports[i].fOptional)
352 LogRel(("NEM: info: Found optional import %s!%s.\n",
353 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
354 }
355 else
356 {
357 *g_aImports[i].ppfn = NULL;
358
359 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
360 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
361 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
362 if (!g_aImports[i].fOptional)
363 {
364 if (RTErrInfoIsSet(pErrInfo))
365 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
366 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
367 else
368 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
369 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
370 Assert(RT_FAILURE(rc));
371 }
372 }
373 }
374 if (RT_SUCCESS(rc))
375 {
376 Assert(!RTErrInfoIsSet(pErrInfo));
377 }
378 }
379
380 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
381 RTLdrClose(ahMods[i]);
382 return rc;
383}
384
385
386/**
387 * Wrapper for different WHvGetCapability signatures.
388 */
389DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
390{
391 return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
392}
393
394
395/**
396 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
397 *
398 * @returns VBox status code.
399 * @param pVM The cross context VM structure.
400 * @param pErrInfo Where to always return error info.
401 */
402static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
403{
404#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
405#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
406#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
407
408 /*
409 * Is the hypervisor present with the desired capability?
410 *
411 * In build 17083 this translates into:
412 * - CPUID[0x00000001].HVP is set
413 * - CPUID[0x40000000] == "Microsoft Hv"
414 * - CPUID[0x40000001].eax == "Hv#1"
415 * - CPUID[0x40000003].ebx[12] is set.
416 * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
417 * a non-zero value.
418 */
419 /**
420 * @todo Someone at Microsoft please explain weird API design:
421 * 1. Pointless CapabilityCode duplication int the output;
422 * 2. No output size.
423 */
424 WHV_CAPABILITY Caps;
425 RT_ZERO(Caps);
426 SetLastError(0);
427 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
428 DWORD rcWin = GetLastError();
429 if (FAILED(hrc))
430 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
431 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
432 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
433 if (!Caps.HypervisorPresent)
434 {
435 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
436 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
437 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
438 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
439 }
440 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
441
442
443 /*
444 * Check what extended VM exits are supported.
445 */
446 RT_ZERO(Caps);
447 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
448 if (FAILED(hrc))
449 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
450 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
451 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
452 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
453 pVM->nem.s.fHypercallExit = RT_BOOL(Caps.ExtendedVmExits.HypercallExit);
454 pVM->nem.s.fGpaAccessFaultExit = RT_BOOL(Caps.ExtendedVmExits.GpaAccessFaultExit);
455 NEM_LOG_REL_CAP_SUB("fHypercallExit", pVM->nem.s.fHypercallExit);
456 NEM_LOG_REL_CAP_SUB("fGpaAccessFaultExit", pVM->nem.s.fGpaAccessFaultExit);
457 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
458 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
459 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
460
461 /*
462 * Check features in case they end up defining any.
463 */
464 RT_ZERO(Caps);
465 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
466 if (FAILED(hrc))
467 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
468 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
469 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
470 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
471 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
472 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
473
474 /*
475 * Check that the CPU vendor is supported.
476 */
477 RT_ZERO(Caps);
478 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
479 if (FAILED(hrc))
480 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
481 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
482 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
483 switch (Caps.ProcessorVendor)
484 {
485 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
486 case WHvProcessorVendorArm:
487 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - ARM", Caps.ProcessorVendor);
488 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_UNKNOWN;
489 break;
490 default:
491 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
492 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
493 }
494
495 /*
496 * CPU features, guessing these are virtual CPU features?
497 */
498 RT_ZERO(Caps);
499 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
500 if (FAILED(hrc))
501 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
502 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
503 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
504 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
505#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
506 NEM_LOG_REL_CPU_FEATURE(Asid16);
507 NEM_LOG_REL_CPU_FEATURE(TGran16);
508 NEM_LOG_REL_CPU_FEATURE(TGran64);
509 NEM_LOG_REL_CPU_FEATURE(Haf);
510 NEM_LOG_REL_CPU_FEATURE(Hdbs);
511 NEM_LOG_REL_CPU_FEATURE(Pan);
512 NEM_LOG_REL_CPU_FEATURE(AtS1E1);
513 NEM_LOG_REL_CPU_FEATURE(Uao);
514 NEM_LOG_REL_CPU_FEATURE(El0Aarch32);
515 NEM_LOG_REL_CPU_FEATURE(Fp);
516 NEM_LOG_REL_CPU_FEATURE(FpHp);
517 NEM_LOG_REL_CPU_FEATURE(AdvSimd);
518 NEM_LOG_REL_CPU_FEATURE(AdvSimdHp);
519 NEM_LOG_REL_CPU_FEATURE(GicV3V4);
520 NEM_LOG_REL_CPU_FEATURE(GicV41);
521 NEM_LOG_REL_CPU_FEATURE(Ras);
522 NEM_LOG_REL_CPU_FEATURE(PmuV3);
523 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV81);
524 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV84);
525 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV85);
526 NEM_LOG_REL_CPU_FEATURE(Aes);
527 NEM_LOG_REL_CPU_FEATURE(PolyMul);
528 NEM_LOG_REL_CPU_FEATURE(Sha1);
529 NEM_LOG_REL_CPU_FEATURE(Sha256);
530 NEM_LOG_REL_CPU_FEATURE(Sha512);
531 NEM_LOG_REL_CPU_FEATURE(Crc32);
532 NEM_LOG_REL_CPU_FEATURE(Atomic);
533 NEM_LOG_REL_CPU_FEATURE(Rdm);
534 NEM_LOG_REL_CPU_FEATURE(Sha3);
535 NEM_LOG_REL_CPU_FEATURE(Sm3);
536 NEM_LOG_REL_CPU_FEATURE(Sm4);
537 NEM_LOG_REL_CPU_FEATURE(Dp);
538 NEM_LOG_REL_CPU_FEATURE(Fhm);
539 NEM_LOG_REL_CPU_FEATURE(DcCvap);
540 NEM_LOG_REL_CPU_FEATURE(DcCvadp);
541 NEM_LOG_REL_CPU_FEATURE(ApaBase);
542 NEM_LOG_REL_CPU_FEATURE(ApaEp);
543 NEM_LOG_REL_CPU_FEATURE(ApaEp2);
544 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fp);
545 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fpc);
546 NEM_LOG_REL_CPU_FEATURE(Jscvt);
547 NEM_LOG_REL_CPU_FEATURE(Fcma);
548 NEM_LOG_REL_CPU_FEATURE(RcpcV83);
549 NEM_LOG_REL_CPU_FEATURE(RcpcV84);
550 NEM_LOG_REL_CPU_FEATURE(Gpa);
551 NEM_LOG_REL_CPU_FEATURE(L1ipPipt);
552 NEM_LOG_REL_CPU_FEATURE(DzPermitted);
553
554#undef NEM_LOG_REL_CPU_FEATURE
555 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(47) - 1)))
556 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
557 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
558 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
559
560 /*
561 * The cache line flush size.
562 */
563 RT_ZERO(Caps);
564 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
565 if (FAILED(hrc))
566 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
567 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
568 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
569 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
570 if (Caps.ProcessorClFlushSize < 8 && Caps.ProcessorClFlushSize > 9)
571 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
572 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
573
574 RT_ZERO(Caps);
575 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodePhysicalAddressWidth, &Caps, sizeof(Caps));
576 if (FAILED(hrc))
577 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
578 "WHvGetCapability/WHvCapabilityCodePhysicalAddressWidth failed: %Rhrc (Last=%#x/%u)",
579 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
580 NEM_LOG_REL_CAP_EX("WHvCapabilityCodePhysicalAddressWidth", "2^%u", Caps.PhysicalAddressWidth);
581 if (Caps.PhysicalAddressWidth < 32 && Caps.PhysicalAddressWidth > 52)
582 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported physical address width: %u", Caps.ProcessorClFlushSize);
583 pVM->nem.s.cPhysicalAddressWidth = Caps.PhysicalAddressWidth;
584
585
586 /*
587 * See if they've added more properties that we're not aware of.
588 */
589 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
590 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
591 {
592 static const struct
593 {
594 uint32_t iMin, iMax; } s_aUnknowns[] =
595 {
596 { 0x0004, 0x000f },
597 { 0x1003, 0x100f },
598 { 0x2000, 0x200f },
599 { 0x3000, 0x300f },
600 { 0x4000, 0x400f },
601 };
602 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
603 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
604 {
605 RT_ZERO(Caps);
606 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
607 if (SUCCEEDED(hrc))
608 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
609 }
610 }
611
612 /*
613 * For proper operation, we require CPUID exits.
614 */
615 /** @todo Any? */
616
617#undef NEM_LOG_REL_CAP_EX
618#undef NEM_LOG_REL_CAP_SUB_EX
619#undef NEM_LOG_REL_CAP_SUB
620 return VINF_SUCCESS;
621}
622
623
624/**
625 * Initializes the GIC controller emulation provided by Hyper-V.
626 *
627 * @returns VBox status code.
628 * @param pVM The cross context VM structure.
629 *
630 * @note Needs to be done early when setting up the partition so this has to live here and not in GICNem-win.cpp
631 */
632static int nemR3WinGicCreate(PVM pVM)
633{
634 PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
635 AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);
636
637 /*
638 * Query the MMIO ranges.
639 */
640 RTGCPHYS GCPhysMmioBaseDist = 0;
641 int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
642 if (RT_FAILURE(rc))
643 return VMSetError(pVM, rc, RT_SRC_POS,
644 "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");
645
646 RTGCPHYS GCPhysMmioBaseReDist = 0;
647 rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
648 if (RT_FAILURE(rc))
649 return VMSetError(pVM, rc, RT_SRC_POS,
650 "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");
651
652 RTGCPHYS GCPhysMmioBaseIts = 0;
653 rc = CFGMR3QueryU64(pGicCfg, "ItsMmioBase", &GCPhysMmioBaseIts);
654 if (RT_FAILURE(rc))
655 return VMSetError(pVM, rc, RT_SRC_POS,
656 "Configuration error: Failed to get the \"ItsMmioBase\" value\n");
657
658 /*
659 * One can only set the GIC distributor base. The re-distributor regions for the individual
660 * vCPUs are configured when the vCPUs are created, so we need to save the base of the MMIO region.
661 */
662 pVM->nem.s.GCPhysMmioBaseReDist = GCPhysMmioBaseReDist;
663
664 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
665
666 MY_WHV_ARM64_IC_PARAMETERS Property; RT_ZERO(Property);
667 Property.u32EmulationMode = WHV_ARM64_IC_EMULATION_MODE_GICV3;
668 Property.u.GicV3.GCPhysGicdBase = GCPhysMmioBaseDist;
669 Property.u.GicV3.GCPhysGitsTranslaterBase = GCPhysMmioBaseIts;
670 Property.u.GicV3.cLpiIntIdBits = 1; /** @todo LPIs are currently not supported with our device emulations. */
671 Property.u.GicV3.u32PpiCntvOverflw = pVM->nem.s.u32GicPpiVTimer + 16; /* Calculate the absolute timer INTID. */
672 Property.u.GicV3.u32PpiPmu = 23; /** @todo Configure dynamically (from SBSA, needs a PMU/NEM emulation just like with the GIC probably). */
673 HRESULT hrc = WHvSetPartitionProperty(hPartition, (WHV_PARTITION_PROPERTY_CODE)WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS, &Property, sizeof(Property));
674 if (FAILED(hrc))
675 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
676 "Failed to set WHvPartitionPropertyCodeArm64IcParameters: %Rhrc (Last=%#x/%u)",
677 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
678
679 return rc;
680}
681
682
683/**
684 * Creates and sets up a Hyper-V (exo) partition.
685 *
686 * @returns VBox status code.
687 * @param pVM The cross context VM structure.
688 * @param pErrInfo Where to always return error info.
689 */
690static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
691{
692 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
693 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
694
695 /*
696 * Create the partition.
697 */
698 WHV_PARTITION_HANDLE hPartition;
699 HRESULT hrc = WHvCreatePartition(&hPartition);
700 if (FAILED(hrc))
701 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
702 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
703
704 int rc;
705
706 /*
707 * Set partition properties, most importantly the CPU count.
708 */
709 /**
710 * @todo Someone at Microsoft please explain another weird API:
711 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
712 * argument rather than as part of the struct. That is so weird if you've
713 * used any other NT or windows API, including WHvGetCapability().
714 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
715 * technically only need 9 bytes for setting/getting
716 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
717 WHV_PARTITION_PROPERTY Property;
718 RT_ZERO(Property);
719 Property.ProcessorCount = pVM->cCpus;
720 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
721 if (SUCCEEDED(hrc))
722 {
723 RT_ZERO(Property);
724 Property.ExtendedVmExits.HypercallExit = pVM->nem.s.fHypercallExit;
725 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
726 if (SUCCEEDED(hrc))
727 {
728 /*
729 * We'll continue setup in nemR3NativeInitAfterCPUM.
730 */
731 pVM->nem.s.fCreatedEmts = false;
732 pVM->nem.s.hPartition = hPartition;
733 LogRel(("NEM: Created partition %p.\n", hPartition));
734 return VINF_SUCCESS;
735 }
736
737 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
738 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
739 Property.ExtendedVmExits.AsUINT64, hrc);
740 }
741 else
742 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
743 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
744 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
745 WHvDeletePartition(hPartition);
746
747 Assert(!pVM->nem.s.hPartitionDevice);
748 Assert(!pVM->nem.s.hPartition);
749 return rc;
750}
751
752
753static int nemR3NativeInitSetupVm(PVM pVM)
754{
755 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
756 AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
757 AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
758 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
759
760 /*
761 * Continue setting up the partition now that we've got most of the CPUID feature stuff.
762 */
763 WHV_PARTITION_PROPERTY Property;
764 HRESULT hrc;
765
766 /* Not sure if we really need to set the cache line flush size. */
767 RT_ZERO(Property);
768 Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
769 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
770 if (FAILED(hrc))
771 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
772 "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
773 pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
774
775 /*
776 * Sync CPU features with CPUM.
777 */
778 /** @todo sync CPU features with CPUM. */
779
780 /* Set the partition property. */
781 RT_ZERO(Property);
782 Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
783 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
784 if (FAILED(hrc))
785 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
786 "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
787 pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
788
789 /* Configure the GIC. */
790 int rc = nemR3WinGicCreate(pVM);
791 if (RT_FAILURE(rc))
792 return rc;
793
794 /*
795 * Set up the partition.
796 *
797 * Seems like this is where the partition is actually instantiated and we get
798 * a handle to it.
799 */
800 hrc = WHvSetupPartition(hPartition);
801 if (FAILED(hrc))
802 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
803 "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
804 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
805
806 /*
807 * Setup the EMTs.
808 */
809 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
810 {
811 hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
812 if (FAILED(hrc))
813 {
814 NTSTATUS const rcNtLast = RTNtLastStatusValue();
815 DWORD const dwErrLast = RTNtLastErrorValue();
816 while (idCpu-- > 0)
817 {
818 HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
819 AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
820 hPartition, idCpu, hrc2, RTNtLastStatusValue(),
821 RTNtLastErrorValue()));
822 }
823 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
824 "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
825 }
826
827 if (idCpu == 0)
828 {
829 /*
830 * Need to query the ID registers and populate CPUM,
831 * these are partition wide registers and need to be queried/set with WHV_ANY_VP.
832 */
833 CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
834
835 WHV_REGISTER_NAME aenmNames[10];
836 WHV_REGISTER_VALUE aValues[10];
837 RT_ZERO(aValues);
838
839 aenmNames[0] = WHvArm64RegisterIdAa64Dfr0El1;
840 aenmNames[1] = WHvArm64RegisterIdAa64Dfr1El1;
841 aenmNames[2] = WHvArm64RegisterIdAa64Isar0El1;
842 aenmNames[3] = WHvArm64RegisterIdAa64Isar1El1;
843 aenmNames[4] = WHvArm64RegisterIdAa64Isar2El1;
844 aenmNames[5] = WHvArm64RegisterIdAa64Mmfr0El1;
845 aenmNames[6] = WHvArm64RegisterIdAa64Mmfr1El1;
846 aenmNames[7] = WHvArm64RegisterIdAa64Mmfr2El1;
847 aenmNames[8] = WHvArm64RegisterIdAa64Pfr0El1;
848 aenmNames[9] = WHvArm64RegisterIdAa64Pfr1El1;
849
850 hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
851 AssertLogRelMsgReturn(SUCCEEDED(hrc),
852 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
853 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
854 , VERR_NEM_GET_REGISTERS_FAILED);
855
856 IdRegs.u64RegIdAa64Pfr0El1 = aValues[8].Reg64;
857 IdRegs.u64RegIdAa64Pfr1El1 = aValues[9].Reg64;
858 IdRegs.u64RegIdAa64Dfr0El1 = aValues[0].Reg64;
859 IdRegs.u64RegIdAa64Dfr1El1 = aValues[1].Reg64;
860 IdRegs.u64RegIdAa64Isar0El1 = aValues[2].Reg64;
861 IdRegs.u64RegIdAa64Isar1El1 = aValues[3].Reg64;
862 IdRegs.u64RegIdAa64Isar2El1 = aValues[4].Reg64;
863 IdRegs.u64RegIdAa64Mmfr0El1 = aValues[5].Reg64;
864 IdRegs.u64RegIdAa64Mmfr1El1 = aValues[6].Reg64;
865 IdRegs.u64RegIdAa64Mmfr2El1 = aValues[7].Reg64;
866
867 rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
868 if (RT_FAILURE(rc))
869 return rc;
870
871 /* Apply any overrides to the partition. */
872 PCCPUMIDREGS pIdRegsGst = NULL;
873 rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
874 AssertRCReturn(rc, rc);
875
876 aValues[0].Reg64 = pIdRegsGst->u64RegIdAa64Dfr0El1;
877 aValues[1].Reg64 = pIdRegsGst->u64RegIdAa64Dfr1El1;
878 aValues[2].Reg64 = pIdRegsGst->u64RegIdAa64Isar0El1;
879 aValues[3].Reg64 = pIdRegsGst->u64RegIdAa64Isar1El1;
880 aValues[4].Reg64 = pIdRegsGst->u64RegIdAa64Isar2El1;
881 aValues[5].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr0El1;
882 aValues[6].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr1El1;
883 aValues[7].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr2El1;
884 aValues[8].Reg64 = pIdRegsGst->u64RegIdAa64Pfr0El1;
885 aValues[9].Reg64 = pIdRegsGst->u64RegIdAa64Pfr1El1;
886
887 hrc = WHvSetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
888 AssertLogRelMsgReturn(SUCCEEDED(hrc),
889 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
890 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
891 , VERR_NEM_SET_REGISTERS_FAILED);
892
893 /* Save the amount of break-/watchpoints supported for syncing the guest register state later. */
894 pVM->nem.s.cBreakpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_BRPS) + 1;
895 pVM->nem.s.cWatchpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_WRPS) + 1;
896 }
897
898 /* Configure the GIC re-distributor region for the GIC. */
899 WHV_REGISTER_NAME enmName = My_WHvArm64RegisterGicrBaseGpa;
900 WHV_REGISTER_VALUE Value;
901 Value.Reg64 = pVM->nem.s.GCPhysMmioBaseReDist + idCpu * _128K;
902
903 hrc = WHvSetVirtualProcessorRegisters(hPartition, idCpu, &enmName, 1, &Value);
904 AssertLogRelMsgReturn(SUCCEEDED(hrc),
905 ("WHvSetVirtualProcessorRegisters(%p, %u, WHvArm64RegisterGicrBaseGpa,) -> %Rhrc (Last=%#x/%u)\n",
906 hPartition, idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
907 , VERR_NEM_SET_REGISTERS_FAILED);
908 }
909
910 pVM->nem.s.fCreatedEmts = true;
911
912 LogRel(("NEM: Successfully set up partition\n"));
913 return VINF_SUCCESS;
914}
915
916
/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   fFallback       Whether we're in fallback mode or use-NEM mode. In
 *                          the latter we'll fail if we cannot initialize.
 * @param   fForced         Whether the HMForced flag is set and we should
 *                          fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    g_uBuildNo = RTSystemGetNtBuildNo();

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
    int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
    if (RT_SUCCESS(rc))
    {
        /*
         * Check the capabilties of the hypervisor, starting with whether it's present.
         */
        rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
        if (RT_SUCCESS(rc))
        {
            /*
             * Create and initialize a partition.
             */
            rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
            if (RT_SUCCESS(rc))
            {
                rc = nemR3NativeInitSetupVm(pVM);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Set ourselves as the execution engine and make config adjustments.
                     * PGM is switched into its NEM memory-management mode here.
                     */
                    VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
                    Log(("NEM: Marked active!\n"));
                    PGMR3EnableNemMode(pVM);

                    /*
                     * Register release statistics
                     */
                    STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number guest pages currently mapped by the VM");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");

                    /* Per-vCPU exit/state-transfer counters. */
                    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
                    {
                        PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
                    }

                    /* Ring-0 backed hypervisor page stats are only reachable when not running driverless. */
                    if (!SUPR3IsDriverless())
                    {
                        PUVM pUVM = pVM->pUVM;
                        STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
                                              STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
                                              "/NEM/R0Stats/cPagesAvailable");
                        STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
                                              STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
                                              "/NEM/R0Stats/cPagesInUse");
                    }
                }

            }

        }
    }

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     * Note! Any earlier failure falls through to here with pErrInfo set by the failing helper.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}
1051
1052
1053/**
1054 * This is called after CPUMR3Init is done.
1055 *
1056 * @returns VBox status code.
1057 * @param pVM The VM handle..
1058 */
1059int nemR3NativeInitAfterCPUM(PVM pVM)
1060{
1061 /*
1062 * Validate sanity.
1063 */
1064 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1065
1066 /** @todo */
1067
1068 /*
1069 * Any hyper-v statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
1070 */
1071 /** @todo stats */
1072
1073 /*
1074 * Adjust features.
1075 *
1076 * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
1077 * the first init call.
1078 */
1079
1080 return VINF_SUCCESS;
1081}
1082
1083
1084int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1085{
1086 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
1087 //AssertLogRel(fRet);
1088
1089 NOREF(pVM); NOREF(enmWhat);
1090 return VINF_SUCCESS;
1091}
1092
1093
1094int nemR3NativeTerm(PVM pVM)
1095{
1096 /*
1097 * Delete the partition.
1098 */
1099 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
1100 pVM->nem.s.hPartition = NULL;
1101 pVM->nem.s.hPartitionDevice = NULL;
1102 if (hPartition != NULL)
1103 {
1104 VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
1105 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
1106 while (idCpu-- > 0)
1107 {
1108 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
1109 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
1110 hPartition, idCpu, hrc, RTNtLastStatusValue(),
1111 RTNtLastErrorValue()));
1112 }
1113 WHvDeletePartition(hPartition);
1114 }
1115 pVM->nem.s.fCreatedEmts = false;
1116 return VINF_SUCCESS;
1117}
1118
1119
1120/**
1121 * VM reset notification.
1122 *
1123 * @param pVM The cross context VM structure.
1124 */
1125void nemR3NativeReset(PVM pVM)
1126{
1127 RT_NOREF(pVM);
1128}
1129
1130
1131/**
1132 * Reset CPU due to INIT IPI or hot (un)plugging.
1133 *
1134 * @param pVCpu The cross context virtual CPU structure of the CPU being
1135 * reset.
1136 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1137 */
1138void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1139{
1140 RT_NOREF(pVCpu, fInitIpi);
1141}
1142
1143
/**
 * Copies the guest register state owned by CPUM (i.e. not marked external in
 * fExtrn) over to Hyper-V in a single WHvSetVirtualProcessorRegisters call.
 *
 * On success everything is marked external again (Hyper-V holds the state)
 * with NEM noted as the state keeper.
 *
 * NOTE(review): aenmNames/aValues hold 128 entries. Whether that covers the
 * worst case (all register groups dirty together with the maximum number of
 * break-/watchpoint pairs from cBreakpoints/cWatchpoints) is not provable from
 * this function alone — the trailing Assert only fires after the arrays are
 * filled. TODO: verify the bound.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR if the WHv call fails).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
{
    WHV_REGISTER_NAME aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    /* Only push what CPUM currently owns; bits set in fExtrn already live in Hyper-V. */
    uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
    if (!fWhat)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

/* Helper macros appending one name/value pair; High64 is zeroed for 64-bit values. */
#define ADD_REG64(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue).x; \
        iReg++; \
    } while (0)
#define ADD_REG64_RAW(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue); \
        iReg++; \
    } while (0)
#define ADD_SYSREG64(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue).u64; \
        iReg++; \
    } while (0)
#define ADD_REG128(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.Low64  = (a_uValue).au64[0]; \
        aValues[iReg].Reg128.High64 = (a_uValue).au64[1]; \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_X0)
            ADD_REG64(WHvArm64RegisterX0, pVCpu->cpum.GstCtx.aGRegs[0]);
        if (fWhat & CPUMCTX_EXTRN_X1)
            ADD_REG64(WHvArm64RegisterX1, pVCpu->cpum.GstCtx.aGRegs[1]);
        if (fWhat & CPUMCTX_EXTRN_X2)
            ADD_REG64(WHvArm64RegisterX2, pVCpu->cpum.GstCtx.aGRegs[2]);
        if (fWhat & CPUMCTX_EXTRN_X3)
            ADD_REG64(WHvArm64RegisterX3, pVCpu->cpum.GstCtx.aGRegs[3]);
        if (fWhat & CPUMCTX_EXTRN_X4_X28)
        {
            ADD_REG64(WHvArm64RegisterX4, pVCpu->cpum.GstCtx.aGRegs[4]);
            ADD_REG64(WHvArm64RegisterX5, pVCpu->cpum.GstCtx.aGRegs[5]);
            ADD_REG64(WHvArm64RegisterX6, pVCpu->cpum.GstCtx.aGRegs[6]);
            ADD_REG64(WHvArm64RegisterX7, pVCpu->cpum.GstCtx.aGRegs[7]);
            ADD_REG64(WHvArm64RegisterX8, pVCpu->cpum.GstCtx.aGRegs[8]);
            ADD_REG64(WHvArm64RegisterX9, pVCpu->cpum.GstCtx.aGRegs[9]);
            ADD_REG64(WHvArm64RegisterX10, pVCpu->cpum.GstCtx.aGRegs[10]);
            ADD_REG64(WHvArm64RegisterX11, pVCpu->cpum.GstCtx.aGRegs[11]);
            ADD_REG64(WHvArm64RegisterX12, pVCpu->cpum.GstCtx.aGRegs[12]);
            ADD_REG64(WHvArm64RegisterX13, pVCpu->cpum.GstCtx.aGRegs[13]);
            ADD_REG64(WHvArm64RegisterX14, pVCpu->cpum.GstCtx.aGRegs[14]);
            ADD_REG64(WHvArm64RegisterX15, pVCpu->cpum.GstCtx.aGRegs[15]);
            ADD_REG64(WHvArm64RegisterX16, pVCpu->cpum.GstCtx.aGRegs[16]);
            ADD_REG64(WHvArm64RegisterX17, pVCpu->cpum.GstCtx.aGRegs[17]);
            ADD_REG64(WHvArm64RegisterX18, pVCpu->cpum.GstCtx.aGRegs[18]);
            ADD_REG64(WHvArm64RegisterX19, pVCpu->cpum.GstCtx.aGRegs[19]);
            ADD_REG64(WHvArm64RegisterX20, pVCpu->cpum.GstCtx.aGRegs[20]);
            ADD_REG64(WHvArm64RegisterX21, pVCpu->cpum.GstCtx.aGRegs[21]);
            ADD_REG64(WHvArm64RegisterX22, pVCpu->cpum.GstCtx.aGRegs[22]);
            ADD_REG64(WHvArm64RegisterX23, pVCpu->cpum.GstCtx.aGRegs[23]);
            ADD_REG64(WHvArm64RegisterX24, pVCpu->cpum.GstCtx.aGRegs[24]);
            ADD_REG64(WHvArm64RegisterX25, pVCpu->cpum.GstCtx.aGRegs[25]);
            ADD_REG64(WHvArm64RegisterX26, pVCpu->cpum.GstCtx.aGRegs[26]);
            ADD_REG64(WHvArm64RegisterX27, pVCpu->cpum.GstCtx.aGRegs[27]);
            ADD_REG64(WHvArm64RegisterX28, pVCpu->cpum.GstCtx.aGRegs[28]);
        }
        /* x29/x30 are exposed as FP/LR by the WinHv API. */
        if (fWhat & CPUMCTX_EXTRN_LR)
            ADD_REG64(WHvArm64RegisterLr, pVCpu->cpum.GstCtx.aGRegs[30]);
        if (fWhat & CPUMCTX_EXTRN_FP)
            ADD_REG64(WHvArm64RegisterFp, pVCpu->cpum.GstCtx.aGRegs[29]);
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_PC)
        ADD_SYSREG64(WHvArm64RegisterPc, pVCpu->cpum.GstCtx.Pc);
    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        ADD_REG64_RAW(WHvArm64RegisterPstate, pVCpu->cpum.GstCtx.fPState);
    if (fWhat & CPUMCTX_EXTRN_SPSR)
        ADD_SYSREG64(WHvArm64RegisterSpsrEl1, pVCpu->cpum.GstCtx.Spsr);
    if (fWhat & CPUMCTX_EXTRN_ELR)
        ADD_SYSREG64(WHvArm64RegisterElrEl1, pVCpu->cpum.GstCtx.Elr);
    if (fWhat & CPUMCTX_EXTRN_SP)
    {
        ADD_SYSREG64(WHvArm64RegisterSpEl0, pVCpu->cpum.GstCtx.aSpReg[0]);
        ADD_SYSREG64(WHvArm64RegisterSpEl1, pVCpu->cpum.GstCtx.aSpReg[1]);
    }
    if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
    {
        ADD_SYSREG64(WHvArm64RegisterSctlrEl1, pVCpu->cpum.GstCtx.Sctlr);
        ADD_SYSREG64(WHvArm64RegisterTcrEl1, pVCpu->cpum.GstCtx.Tcr);
        ADD_SYSREG64(WHvArm64RegisterTtbr0El1, pVCpu->cpum.GstCtx.Ttbr0);
        ADD_SYSREG64(WHvArm64RegisterTtbr1El1, pVCpu->cpum.GstCtx.Ttbr1);
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        ADD_REG128(WHvArm64RegisterQ0, pVCpu->cpum.GstCtx.aVRegs[0]);
        ADD_REG128(WHvArm64RegisterQ1, pVCpu->cpum.GstCtx.aVRegs[1]);
        ADD_REG128(WHvArm64RegisterQ2, pVCpu->cpum.GstCtx.aVRegs[2]);
        ADD_REG128(WHvArm64RegisterQ3, pVCpu->cpum.GstCtx.aVRegs[3]);
        ADD_REG128(WHvArm64RegisterQ4, pVCpu->cpum.GstCtx.aVRegs[4]);
        ADD_REG128(WHvArm64RegisterQ5, pVCpu->cpum.GstCtx.aVRegs[5]);
        ADD_REG128(WHvArm64RegisterQ6, pVCpu->cpum.GstCtx.aVRegs[6]);
        ADD_REG128(WHvArm64RegisterQ7, pVCpu->cpum.GstCtx.aVRegs[7]);
        ADD_REG128(WHvArm64RegisterQ8, pVCpu->cpum.GstCtx.aVRegs[8]);
        ADD_REG128(WHvArm64RegisterQ9, pVCpu->cpum.GstCtx.aVRegs[9]);
        ADD_REG128(WHvArm64RegisterQ10, pVCpu->cpum.GstCtx.aVRegs[10]);
        ADD_REG128(WHvArm64RegisterQ11, pVCpu->cpum.GstCtx.aVRegs[11]);
        ADD_REG128(WHvArm64RegisterQ12, pVCpu->cpum.GstCtx.aVRegs[12]);
        ADD_REG128(WHvArm64RegisterQ13, pVCpu->cpum.GstCtx.aVRegs[13]);
        ADD_REG128(WHvArm64RegisterQ14, pVCpu->cpum.GstCtx.aVRegs[14]);
        ADD_REG128(WHvArm64RegisterQ15, pVCpu->cpum.GstCtx.aVRegs[15]);
        ADD_REG128(WHvArm64RegisterQ16, pVCpu->cpum.GstCtx.aVRegs[16]);
        ADD_REG128(WHvArm64RegisterQ17, pVCpu->cpum.GstCtx.aVRegs[17]);
        ADD_REG128(WHvArm64RegisterQ18, pVCpu->cpum.GstCtx.aVRegs[18]);
        ADD_REG128(WHvArm64RegisterQ19, pVCpu->cpum.GstCtx.aVRegs[19]);
        ADD_REG128(WHvArm64RegisterQ20, pVCpu->cpum.GstCtx.aVRegs[20]);
        ADD_REG128(WHvArm64RegisterQ21, pVCpu->cpum.GstCtx.aVRegs[21]);
        ADD_REG128(WHvArm64RegisterQ22, pVCpu->cpum.GstCtx.aVRegs[22]);
        ADD_REG128(WHvArm64RegisterQ23, pVCpu->cpum.GstCtx.aVRegs[23]);
        ADD_REG128(WHvArm64RegisterQ24, pVCpu->cpum.GstCtx.aVRegs[24]);
        ADD_REG128(WHvArm64RegisterQ25, pVCpu->cpum.GstCtx.aVRegs[25]);
        ADD_REG128(WHvArm64RegisterQ26, pVCpu->cpum.GstCtx.aVRegs[26]);
        ADD_REG128(WHvArm64RegisterQ27, pVCpu->cpum.GstCtx.aVRegs[27]);
        ADD_REG128(WHvArm64RegisterQ28, pVCpu->cpum.GstCtx.aVRegs[28]);
        ADD_REG128(WHvArm64RegisterQ29, pVCpu->cpum.GstCtx.aVRegs[29]);
        ADD_REG128(WHvArm64RegisterQ30, pVCpu->cpum.GstCtx.aVRegs[30]);
        ADD_REG128(WHvArm64RegisterQ31, pVCpu->cpum.GstCtx.aVRegs[31]);
    }

    if (fWhat & CPUMCTX_EXTRN_FPCR)
        ADD_REG64_RAW(WHvArm64RegisterFpcr, pVCpu->cpum.GstCtx.fpcr);
    if (fWhat & CPUMCTX_EXTRN_FPSR)
        ADD_REG64_RAW(WHvArm64RegisterFpsr, pVCpu->cpum.GstCtx.fpsr);

    /* System registers. */
    if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
    {
        ADD_SYSREG64(WHvArm64RegisterVbarEl1, pVCpu->cpum.GstCtx.VBar);
        ADD_SYSREG64(WHvArm64RegisterEsrEl1, pVCpu->cpum.GstCtx.Esr);
        ADD_SYSREG64(WHvArm64RegisterFarEl1, pVCpu->cpum.GstCtx.Far);
        ADD_SYSREG64(WHvArm64RegisterCntkctlEl1, pVCpu->cpum.GstCtx.CntKCtl);
        ADD_SYSREG64(WHvArm64RegisterContextidrEl1, pVCpu->cpum.GstCtx.ContextIdr);
        ADD_SYSREG64(WHvArm64RegisterCpacrEl1, pVCpu->cpum.GstCtx.Cpacr);
        ADD_SYSREG64(WHvArm64RegisterCsselrEl1, pVCpu->cpum.GstCtx.Csselr);
        ADD_SYSREG64(WHvArm64RegisterMairEl1, pVCpu->cpum.GstCtx.Mair);
        ADD_SYSREG64(WHvArm64RegisterParEl1, pVCpu->cpum.GstCtx.Par);
        ADD_SYSREG64(WHvArm64RegisterTpidrroEl0, pVCpu->cpum.GstCtx.TpIdrRoEl0);
        ADD_SYSREG64(WHvArm64RegisterTpidrEl0, pVCpu->cpum.GstCtx.aTpIdr[0]);
        ADD_SYSREG64(WHvArm64RegisterTpidrEl1, pVCpu->cpum.GstCtx.aTpIdr[1]);
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
    {
        /* The DBGB*/DBGW* register names are consecutive, so index arithmetic
           selects breakpoint/watchpoint pair i. Counts come from ID_AA64DFR0_EL1
           as saved in nemR3NativeInitSetupVm. */
        for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
        {
            ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Ctrl);
            ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Value);
        }

        for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
        {
            ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Ctrl);
            ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Value);
        }

        ADD_SYSREG64(WHvArm64RegisterMdscrEl1, pVCpu->cpum.GstCtx.Mdscr);
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
    {
        ADD_SYSREG64(WHvArm64RegisterApdAKeyHiEl1, pVCpu->cpum.GstCtx.Apda.High);
        ADD_SYSREG64(WHvArm64RegisterApdAKeyLoEl1, pVCpu->cpum.GstCtx.Apda.Low);
        ADD_SYSREG64(WHvArm64RegisterApdBKeyHiEl1, pVCpu->cpum.GstCtx.Apdb.High);
        ADD_SYSREG64(WHvArm64RegisterApdBKeyLoEl1, pVCpu->cpum.GstCtx.Apdb.Low);
        ADD_SYSREG64(WHvArm64RegisterApgAKeyHiEl1, pVCpu->cpum.GstCtx.Apga.High);
        ADD_SYSREG64(WHvArm64RegisterApgAKeyLoEl1, pVCpu->cpum.GstCtx.Apga.Low);
        ADD_SYSREG64(WHvArm64RegisterApiAKeyHiEl1, pVCpu->cpum.GstCtx.Apia.High);
        ADD_SYSREG64(WHvArm64RegisterApiAKeyLoEl1, pVCpu->cpum.GstCtx.Apia.Low);
        ADD_SYSREG64(WHvArm64RegisterApiBKeyHiEl1, pVCpu->cpum.GstCtx.Apib.High);
        ADD_SYSREG64(WHvArm64RegisterApiBKeyLoEl1, pVCpu->cpum.GstCtx.Apib.Low);
    }

#undef ADD_REG64
#undef ADD_REG64_RAW
#undef ADD_REG128

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));  /* NOTE(review): '<' rejects a legal, exactly-full batch; '<=' looks intended — TODO confirm. */
    Assert(iReg < RT_ELEMENTS(aenmNames));
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        /* Hyper-V now owns the full state again; flag NEM as the keeper. */
        pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;
}
1356
1357
1358NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
1359{
1360 WHV_REGISTER_NAME aenmNames[256];
1361
1362 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
1363 if (!fWhat)
1364 return VINF_SUCCESS;
1365
1366 uintptr_t iReg = 0;
1367
1368 /* GPRs */
1369 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1370 {
1371 if (fWhat & CPUMCTX_EXTRN_X0)
1372 aenmNames[iReg++] = WHvArm64RegisterX0;
1373 if (fWhat & CPUMCTX_EXTRN_X1)
1374 aenmNames[iReg++] = WHvArm64RegisterX1;
1375 if (fWhat & CPUMCTX_EXTRN_X2)
1376 aenmNames[iReg++] = WHvArm64RegisterX2;
1377 if (fWhat & CPUMCTX_EXTRN_X3)
1378 aenmNames[iReg++] = WHvArm64RegisterX3;
1379 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1380 {
1381 aenmNames[iReg++] = WHvArm64RegisterX4;
1382 aenmNames[iReg++] = WHvArm64RegisterX5;
1383 aenmNames[iReg++] = WHvArm64RegisterX6;
1384 aenmNames[iReg++] = WHvArm64RegisterX7;
1385 aenmNames[iReg++] = WHvArm64RegisterX8;
1386 aenmNames[iReg++] = WHvArm64RegisterX9;
1387 aenmNames[iReg++] = WHvArm64RegisterX10;
1388 aenmNames[iReg++] = WHvArm64RegisterX11;
1389 aenmNames[iReg++] = WHvArm64RegisterX12;
1390 aenmNames[iReg++] = WHvArm64RegisterX13;
1391 aenmNames[iReg++] = WHvArm64RegisterX14;
1392 aenmNames[iReg++] = WHvArm64RegisterX15;
1393 aenmNames[iReg++] = WHvArm64RegisterX16;
1394 aenmNames[iReg++] = WHvArm64RegisterX17;
1395 aenmNames[iReg++] = WHvArm64RegisterX18;
1396 aenmNames[iReg++] = WHvArm64RegisterX19;
1397 aenmNames[iReg++] = WHvArm64RegisterX20;
1398 aenmNames[iReg++] = WHvArm64RegisterX21;
1399 aenmNames[iReg++] = WHvArm64RegisterX22;
1400 aenmNames[iReg++] = WHvArm64RegisterX23;
1401 aenmNames[iReg++] = WHvArm64RegisterX24;
1402 aenmNames[iReg++] = WHvArm64RegisterX25;
1403 aenmNames[iReg++] = WHvArm64RegisterX26;
1404 aenmNames[iReg++] = WHvArm64RegisterX27;
1405 aenmNames[iReg++] = WHvArm64RegisterX28;
1406 }
1407 if (fWhat & CPUMCTX_EXTRN_LR)
1408 aenmNames[iReg++] = WHvArm64RegisterLr;
1409 if (fWhat & CPUMCTX_EXTRN_FP)
1410 aenmNames[iReg++] = WHvArm64RegisterFp;
1411 }
1412
1413 /* PC & Flags */
1414 if (fWhat & CPUMCTX_EXTRN_PC)
1415 aenmNames[iReg++] = WHvArm64RegisterPc;
1416 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1417 aenmNames[iReg++] = WHvArm64RegisterPstate;
1418 if (fWhat & CPUMCTX_EXTRN_SPSR)
1419 aenmNames[iReg++] = WHvArm64RegisterSpsrEl1;
1420 if (fWhat & CPUMCTX_EXTRN_ELR)
1421 aenmNames[iReg++] = WHvArm64RegisterElrEl1;
1422 if (fWhat & CPUMCTX_EXTRN_SP)
1423 {
1424 aenmNames[iReg++] = WHvArm64RegisterSpEl0;
1425 aenmNames[iReg++] = WHvArm64RegisterSpEl1;
1426 }
1427 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1428 {
1429 aenmNames[iReg++] = WHvArm64RegisterSctlrEl1;
1430 aenmNames[iReg++] = WHvArm64RegisterTcrEl1;
1431 aenmNames[iReg++] = WHvArm64RegisterTtbr0El1;
1432 aenmNames[iReg++] = WHvArm64RegisterTtbr1El1;
1433 }
1434
1435 /* Vector state. */
1436 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1437 {
1438 aenmNames[iReg++] = WHvArm64RegisterQ0;
1439 aenmNames[iReg++] = WHvArm64RegisterQ1;
1440 aenmNames[iReg++] = WHvArm64RegisterQ2;
1441 aenmNames[iReg++] = WHvArm64RegisterQ3;
1442 aenmNames[iReg++] = WHvArm64RegisterQ4;
1443 aenmNames[iReg++] = WHvArm64RegisterQ5;
1444 aenmNames[iReg++] = WHvArm64RegisterQ6;
1445 aenmNames[iReg++] = WHvArm64RegisterQ7;
1446 aenmNames[iReg++] = WHvArm64RegisterQ8;
1447 aenmNames[iReg++] = WHvArm64RegisterQ9;
1448 aenmNames[iReg++] = WHvArm64RegisterQ10;
1449 aenmNames[iReg++] = WHvArm64RegisterQ11;
1450 aenmNames[iReg++] = WHvArm64RegisterQ12;
1451 aenmNames[iReg++] = WHvArm64RegisterQ13;
1452 aenmNames[iReg++] = WHvArm64RegisterQ14;
1453 aenmNames[iReg++] = WHvArm64RegisterQ15;
1454
1455 aenmNames[iReg++] = WHvArm64RegisterQ16;
1456 aenmNames[iReg++] = WHvArm64RegisterQ17;
1457 aenmNames[iReg++] = WHvArm64RegisterQ18;
1458 aenmNames[iReg++] = WHvArm64RegisterQ19;
1459 aenmNames[iReg++] = WHvArm64RegisterQ20;
1460 aenmNames[iReg++] = WHvArm64RegisterQ21;
1461 aenmNames[iReg++] = WHvArm64RegisterQ22;
1462 aenmNames[iReg++] = WHvArm64RegisterQ23;
1463 aenmNames[iReg++] = WHvArm64RegisterQ24;
1464 aenmNames[iReg++] = WHvArm64RegisterQ25;
1465 aenmNames[iReg++] = WHvArm64RegisterQ26;
1466 aenmNames[iReg++] = WHvArm64RegisterQ27;
1467 aenmNames[iReg++] = WHvArm64RegisterQ28;
1468 aenmNames[iReg++] = WHvArm64RegisterQ29;
1469 aenmNames[iReg++] = WHvArm64RegisterQ30;
1470 aenmNames[iReg++] = WHvArm64RegisterQ31;
1471 }
1472 if (fWhat & CPUMCTX_EXTRN_FPCR)
1473 aenmNames[iReg++] = WHvArm64RegisterFpcr;
1474 if (fWhat & CPUMCTX_EXTRN_FPSR)
1475 aenmNames[iReg++] = WHvArm64RegisterFpsr;
1476
1477 /* System registers. */
1478 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1479 {
1480 aenmNames[iReg++] = WHvArm64RegisterVbarEl1;
1481 aenmNames[iReg++] = WHvArm64RegisterEsrEl1;
1482 aenmNames[iReg++] = WHvArm64RegisterFarEl1;
1483 aenmNames[iReg++] = WHvArm64RegisterCntkctlEl1;
1484 aenmNames[iReg++] = WHvArm64RegisterContextidrEl1;
1485 aenmNames[iReg++] = WHvArm64RegisterCpacrEl1;
1486 aenmNames[iReg++] = WHvArm64RegisterCsselrEl1;
1487 aenmNames[iReg++] = WHvArm64RegisterMairEl1;
1488 aenmNames[iReg++] = WHvArm64RegisterParEl1;
1489 aenmNames[iReg++] = WHvArm64RegisterTpidrroEl0;
1490 aenmNames[iReg++] = WHvArm64RegisterTpidrEl0;
1491 aenmNames[iReg++] = WHvArm64RegisterTpidrEl1;
1492 }
1493
1494 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1495 {
1496 /* Hyper-V doesn't allow syncing debug break-/watchpoint registers which aren't there. */
1497 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1498 {
1499 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i);
1500 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i);
1501 }
1502
1503 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1504 {
1505 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i);
1506 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i);
1507 }
1508
1509 aenmNames[iReg++] = WHvArm64RegisterMdscrEl1;
1510 }
1511
1512 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1513 {
1514 aenmNames[iReg++] = WHvArm64RegisterApdAKeyHiEl1;
1515 aenmNames[iReg++] = WHvArm64RegisterApdAKeyLoEl1;
1516 aenmNames[iReg++] = WHvArm64RegisterApdBKeyHiEl1;
1517 aenmNames[iReg++] = WHvArm64RegisterApdBKeyLoEl1;
1518 aenmNames[iReg++] = WHvArm64RegisterApgAKeyHiEl1;
1519 aenmNames[iReg++] = WHvArm64RegisterApgAKeyLoEl1;
1520 aenmNames[iReg++] = WHvArm64RegisterApiAKeyHiEl1;
1521 aenmNames[iReg++] = WHvArm64RegisterApiAKeyLoEl1;
1522 aenmNames[iReg++] = WHvArm64RegisterApiBKeyHiEl1;
1523 aenmNames[iReg++] = WHvArm64RegisterApiBKeyLoEl1;
1524 }
1525
1526 size_t const cRegs = iReg;
1527 Assert(cRegs < RT_ELEMENTS(aenmNames));
1528
1529 /*
1530 * Get the registers.
1531 */
1532 WHV_REGISTER_VALUE aValues[256];
1533 RT_ZERO(aValues);
1534 Assert(RT_ELEMENTS(aValues) >= cRegs);
1535 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
1536 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
1537 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1538 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1539 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1540 , VERR_NEM_GET_REGISTERS_FAILED);
1541
1542 iReg = 0;
1543#define GET_REG64(a_DstVar, a_enmName) do { \
1544 Assert(aenmNames[iReg] == (a_enmName)); \
1545 (a_DstVar).x = aValues[iReg].Reg64; \
1546 iReg++; \
1547 } while (0)
1548#define GET_REG64_RAW(a_DstVar, a_enmName) do { \
1549 Assert(aenmNames[iReg] == (a_enmName)); \
1550 (a_DstVar) = aValues[iReg].Reg64; \
1551 iReg++; \
1552 } while (0)
1553#define GET_SYSREG64(a_DstVar, a_enmName) do { \
1554 Assert(aenmNames[iReg] == (a_enmName)); \
1555 (a_DstVar).u64 = aValues[iReg].Reg64; \
1556 iReg++; \
1557 } while (0)
1558#define GET_REG128(a_DstVar, a_enmName) do { \
1559 Assert(aenmNames[iReg] == a_enmName); \
1560 (a_DstVar).au64[0] = aValues[iReg].Reg128.Low64; \
1561 (a_DstVar).au64[1] = aValues[iReg].Reg128.High64; \
1562 iReg++; \
1563 } while (0)
1564
1565 /* GPRs */
1566 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1567 {
1568 if (fWhat & CPUMCTX_EXTRN_X0)
1569 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[0], WHvArm64RegisterX0);
1570 if (fWhat & CPUMCTX_EXTRN_X1)
1571 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[1], WHvArm64RegisterX1);
1572 if (fWhat & CPUMCTX_EXTRN_X2)
1573 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[2], WHvArm64RegisterX2);
1574 if (fWhat & CPUMCTX_EXTRN_X3)
1575 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[3], WHvArm64RegisterX3);
1576 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1577 {
1578 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[4], WHvArm64RegisterX4);
1579 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[5], WHvArm64RegisterX5);
1580 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[6], WHvArm64RegisterX6);
1581 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[7], WHvArm64RegisterX7);
1582 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[8], WHvArm64RegisterX8);
1583 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[9], WHvArm64RegisterX9);
1584 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[10], WHvArm64RegisterX10);
1585 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[11], WHvArm64RegisterX11);
1586 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[12], WHvArm64RegisterX12);
1587 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[13], WHvArm64RegisterX13);
1588 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[14], WHvArm64RegisterX14);
1589 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[15], WHvArm64RegisterX15);
1590 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[16], WHvArm64RegisterX16);
1591 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[17], WHvArm64RegisterX17);
1592 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[18], WHvArm64RegisterX18);
1593 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[19], WHvArm64RegisterX19);
1594 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[20], WHvArm64RegisterX20);
1595 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[21], WHvArm64RegisterX21);
1596 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[22], WHvArm64RegisterX22);
1597 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[23], WHvArm64RegisterX23);
1598 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[24], WHvArm64RegisterX24);
1599 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[25], WHvArm64RegisterX25);
1600 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[26], WHvArm64RegisterX26);
1601 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[27], WHvArm64RegisterX27);
1602 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[28], WHvArm64RegisterX28);
1603 }
1604 if (fWhat & CPUMCTX_EXTRN_LR)
1605 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[30], WHvArm64RegisterLr);
1606 if (fWhat & CPUMCTX_EXTRN_FP)
1607 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[29], WHvArm64RegisterFp);
1608 }
1609
1610 /* RIP & Flags */
1611 if (fWhat & CPUMCTX_EXTRN_PC)
1612 GET_REG64_RAW(pVCpu->cpum.GstCtx.Pc.u64, WHvArm64RegisterPc);
1613 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1614 GET_REG64_RAW(pVCpu->cpum.GstCtx.fPState, WHvArm64RegisterPstate);
1615 if (fWhat & CPUMCTX_EXTRN_SPSR)
1616 GET_SYSREG64(pVCpu->cpum.GstCtx.Spsr, WHvArm64RegisterSpsrEl1);
1617 if (fWhat & CPUMCTX_EXTRN_ELR)
1618 GET_SYSREG64(pVCpu->cpum.GstCtx.Elr, WHvArm64RegisterElrEl1);
1619 if (fWhat & CPUMCTX_EXTRN_SP)
1620 {
1621 GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[0], WHvArm64RegisterSpEl0);
1622 GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[1], WHvArm64RegisterSpEl1);
1623 }
1624 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1625 {
1626 GET_SYSREG64(pVCpu->cpum.GstCtx.Sctlr, WHvArm64RegisterSctlrEl1);
1627 GET_SYSREG64(pVCpu->cpum.GstCtx.Tcr, WHvArm64RegisterTcrEl1);
1628 GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr0, WHvArm64RegisterTtbr0El1);
1629 GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr1, WHvArm64RegisterTtbr1El1);
1630 }
1631
1632 /* Vector state. */
1633 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1634 {
1635 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[0], WHvArm64RegisterQ0);
1636 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[1], WHvArm64RegisterQ1);
1637 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[2], WHvArm64RegisterQ2);
1638 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[3], WHvArm64RegisterQ3);
1639 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[4], WHvArm64RegisterQ4);
1640 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[5], WHvArm64RegisterQ5);
1641 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[6], WHvArm64RegisterQ6);
1642 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[7], WHvArm64RegisterQ7);
1643 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[8], WHvArm64RegisterQ8);
1644 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[9], WHvArm64RegisterQ9);
1645 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[10], WHvArm64RegisterQ10);
1646 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[11], WHvArm64RegisterQ11);
1647 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[12], WHvArm64RegisterQ12);
1648 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[13], WHvArm64RegisterQ13);
1649 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[14], WHvArm64RegisterQ14);
1650 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[15], WHvArm64RegisterQ15);
1651
1652 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[16], WHvArm64RegisterQ16);
1653 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[17], WHvArm64RegisterQ17);
1654 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[18], WHvArm64RegisterQ18);
1655 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[19], WHvArm64RegisterQ19);
1656 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[20], WHvArm64RegisterQ20);
1657 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[21], WHvArm64RegisterQ21);
1658 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[22], WHvArm64RegisterQ22);
1659 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[23], WHvArm64RegisterQ23);
1660 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[24], WHvArm64RegisterQ24);
1661 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[25], WHvArm64RegisterQ25);
1662 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[26], WHvArm64RegisterQ26);
1663 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[27], WHvArm64RegisterQ27);
1664 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[28], WHvArm64RegisterQ28);
1665 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[29], WHvArm64RegisterQ29);
1666 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[30], WHvArm64RegisterQ30);
1667 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[31], WHvArm64RegisterQ31);
1668 }
1669 if (fWhat & CPUMCTX_EXTRN_FPCR)
1670 GET_REG64_RAW(pVCpu->cpum.GstCtx.fpcr, WHvArm64RegisterFpcr);
1671 if (fWhat & CPUMCTX_EXTRN_FPSR)
1672 GET_REG64_RAW(pVCpu->cpum.GstCtx.fpsr, WHvArm64RegisterFpsr);
1673
1674 /* System registers. */
1675 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1676 {
1677 GET_SYSREG64(pVCpu->cpum.GstCtx.VBar, WHvArm64RegisterVbarEl1);
1678 GET_SYSREG64(pVCpu->cpum.GstCtx.Esr, WHvArm64RegisterEsrEl1);
1679 GET_SYSREG64(pVCpu->cpum.GstCtx.Far, WHvArm64RegisterFarEl1);
1680 GET_SYSREG64(pVCpu->cpum.GstCtx.CntKCtl, WHvArm64RegisterCntkctlEl1);
1681 GET_SYSREG64(pVCpu->cpum.GstCtx.ContextIdr, WHvArm64RegisterContextidrEl1);
1682 GET_SYSREG64(pVCpu->cpum.GstCtx.Cpacr, WHvArm64RegisterCpacrEl1);
1683 GET_SYSREG64(pVCpu->cpum.GstCtx.Csselr, WHvArm64RegisterCsselrEl1);
1684 GET_SYSREG64(pVCpu->cpum.GstCtx.Mair, WHvArm64RegisterMairEl1);
1685 GET_SYSREG64(pVCpu->cpum.GstCtx.Par, WHvArm64RegisterParEl1);
1686 GET_SYSREG64(pVCpu->cpum.GstCtx.TpIdrRoEl0, WHvArm64RegisterTpidrroEl0);
1687 GET_SYSREG64(pVCpu->cpum.GstCtx.aTpIdr[0], WHvArm64RegisterTpidrEl0);
1688 GET_SYSREG64(pVCpu->cpum.GstCtx.aTpIdr[1], WHvArm64RegisterTpidrEl1);
1689 }
1690
1691 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1692 {
1693 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1694 {
1695 GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i));
1696 GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i));
1697 }
1698
1699 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1700 {
1701 GET_SYSREG64(pVCpu->cpum.GstCtx.aWp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i));
1702 GET_SYSREG64(pVCpu->cpum.GstCtx.aWp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i));
1703 }
1704
1705 GET_SYSREG64(pVCpu->cpum.GstCtx.Mdscr, WHvArm64RegisterMdscrEl1);
1706 }
1707
1708 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1709 {
1710 GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.High, WHvArm64RegisterApdAKeyHiEl1);
1711 GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.Low, WHvArm64RegisterApdAKeyLoEl1);
1712 GET_SYSREG64(pVCpu->cpum.GstCtx.Apdb.High, WHvArm64RegisterApdBKeyHiEl1);
1713 GET_SYSREG64(pVCpu->cpum.GstCtx.Apdb.Low, WHvArm64RegisterApdBKeyLoEl1);
1714 GET_SYSREG64(pVCpu->cpum.GstCtx.Apga.High, WHvArm64RegisterApgAKeyHiEl1);
1715 GET_SYSREG64(pVCpu->cpum.GstCtx.Apga.Low, WHvArm64RegisterApgAKeyLoEl1);
1716 GET_SYSREG64(pVCpu->cpum.GstCtx.Apia.High, WHvArm64RegisterApiAKeyHiEl1);
1717 GET_SYSREG64(pVCpu->cpum.GstCtx.Apia.Low, WHvArm64RegisterApiAKeyLoEl1);
1718 GET_SYSREG64(pVCpu->cpum.GstCtx.Apib.High, WHvArm64RegisterApiBKeyHiEl1);
1719 GET_SYSREG64(pVCpu->cpum.GstCtx.Apib.Low, WHvArm64RegisterApiBKeyLoEl1);
1720 }
1721
1722 /* Almost done, just update extrn flags. */
1723 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1724 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1725 pVCpu->cpum.GstCtx.fExtrn = 0;
1726
1727 return VINF_SUCCESS;
1728}
1729
1730
1731/**
1732 * Interface for importing state on demand (used by IEM).
1733 *
1734 * @returns VBox status code.
1735 * @param pVCpu The cross context CPU structure.
1736 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1737 */
1738VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1739{
1740 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1741 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1742}
1743
1744
1745/**
1746 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1747 *
1748 * @returns VBox status code.
1749 * @param pVCpu The cross context CPU structure.
1750 * @param pcTicks Where to return the CPU tick count.
1751 * @param puAux Where to return the TSC_AUX register value.
1752 */
1753VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1754{
1755 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1756
1757 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1758 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1759 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1760
1761#pragma message("NEMHCQueryCpuTick: Implement it!")
1762#if 0 /** @todo */
1763 /* Call the offical API. */
1764 WHV_REGISTER_NAME aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
1765 WHV_REGISTER_VALUE aValues[2] = { { {0, 0} }, { {0, 0} } };
1766 Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
1767 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
1768 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1769 ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
1770 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1771 , VERR_NEM_GET_REGISTERS_FAILED);
1772 *pcTicks = aValues[0].Reg64;
1773 if (puAux)
1774 *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
1775#else
1776 RT_NOREF(pVCpu, pcTicks, puAux);
1777#endif
1778 return VINF_SUCCESS;
1779}
1780
1781
1782/**
1783 * Resumes CPU clock (TSC) on all virtual CPUs.
1784 *
1785 * This is called by TM when the VM is started, restored, resumed or similar.
1786 *
1787 * @returns VBox status code.
1788 * @param pVM The cross context VM structure.
1789 * @param pVCpu The cross context CPU structure of the calling EMT.
1790 * @param uPausedTscValue The TSC value at the time of pausing.
1791 */
1792VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1793{
1794 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1795 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1796
1797 /*
1798 * Call the offical API to do the job.
1799 */
1800 if (pVM->cCpus > 1)
1801 RTThreadYield(); /* Try decrease the chance that we get rescheduled in the middle. */
1802
1803#pragma message("NEMHCResumeCpuTickOnAll: Implement it!")
1804#if 0 /** @todo */
1805 /* Start with the first CPU. */
1806 WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
1807 WHV_REGISTER_VALUE Value = { {0, 0} };
1808 Value.Reg64 = uPausedTscValue;
1809 uint64_t const uFirstTsc = ASMReadTSC();
1810 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
1811 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1812 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1813 pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1814 , VERR_NEM_SET_TSC);
1815
1816 /* Do the other CPUs, adjusting for elapsed TSC and keeping finger crossed
1817 that we don't introduce too much drift here. */
1818 for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
1819 {
1820 Assert(enmName == WHvX64RegisterTsc);
1821 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
1822 Value.Reg64 = uPausedTscValue + offDelta;
1823 hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
1824 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1825 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
1826 pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1827 , VERR_NEM_SET_TSC);
1828 }
1829#else
1830 RT_NOREF(uPausedTscValue);
1831#endif
1832
1833 return VINF_SUCCESS;
1834}
1835
1836
1837#ifdef LOG_ENABLED
1838/**
1839 * Logs the current CPU state.
1840 */
1841static void nemR3WinLogState(PVMCC pVM, PVMCPUCC pVCpu)
1842{
1843 if (LogIs3Enabled())
1844 {
1845 char szRegs[4096];
1846 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1847 "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
1848 "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
1849 "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
1850 "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
1851 "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
1852 "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
1853 "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
1854 "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
1855 "pc=%016VR{pc} pstate=%016VR{pstate}\n"
1856 "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
1857 "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
1858 "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
1859 "vbar_el1=%016VR{vbar_el1}\n"
1860 );
1861 char szInstr[256]; RT_ZERO(szInstr);
1862 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1863 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1864 szInstr, sizeof(szInstr), NULL);
1865 Log3(("%s%s\n", szRegs, szInstr));
1866 }
1867}
1868#endif /* LOG_ENABLED */
1869
1870
1871/**
1872 * Copies register state from the (common) exit context.
1873 *
1874 * ASSUMES no state copied yet.
1875 *
1876 * @param pVCpu The cross context per CPU structure.
1877 * @param pMsgHdr The common message header.
1878 */
1879DECLINLINE(void) nemR3WinCopyStateFromArmHeader(PVMCPUCC pVCpu, WHV_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1880{
1881 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
1882 == (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE));
1883
1884 pVCpu->cpum.GstCtx.Pc.u64 = pMsgHdr->Pc;
1885 pVCpu->cpum.GstCtx.fPState = pMsgHdr->Cpsr;
1886
1887 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
1888}
1889
1890
1891/**
1892 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1893 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1894 */
1895typedef struct NEMHCWINHMACPCCSTATE
1896{
1897 /** Input: Write access. */
1898 bool fWriteAccess;
1899 /** Output: Set if we did something. */
1900 bool fDidSomething;
1901 /** Output: Set it we should resume. */
1902 bool fCanResume;
1903} NEMHCWINHMACPCCSTATE;
1904
1905/**
1906 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1907 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1908 * NEMHCWINHMACPCCSTATE structure. }
1909 */
1910NEM_TMPL_STATIC DECLCALLBACK(int)
1911nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1912{
1913 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1914 pState->fDidSomething = false;
1915 pState->fCanResume = false;
1916
1917 /* If A20 is disabled, we may need to make another query on the masked
1918 page to get the correct protection information. */
1919 uint8_t u2State = pInfo->u2NemState;
1920 RTGCPHYS GCPhysSrc = GCPhys;
1921
1922 /*
1923 * Consolidate current page state with actual page protection and access type.
1924 * We don't really consider downgrades here, as they shouldn't happen.
1925 */
1926 int rc;
1927 switch (u2State)
1928 {
1929 case NEM_WIN_PAGE_STATE_UNMAPPED:
1930 case NEM_WIN_PAGE_STATE_NOT_SET:
1931 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1932 {
1933 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1934 return VINF_SUCCESS;
1935 }
1936
1937 /* Don't bother remapping it if it's a write request to a non-writable page. */
1938 if ( pState->fWriteAccess
1939 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1940 {
1941 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1942 return VINF_SUCCESS;
1943 }
1944
1945 /* Map the page. */
1946 rc = nemHCNativeSetPhysPage(pVM,
1947 pVCpu,
1948 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1949 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1950 pInfo->fNemProt,
1951 &u2State,
1952 true /*fBackingState*/);
1953 pInfo->u2NemState = u2State;
1954 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1955 GCPhys, g_apszPageStates[u2State], rc));
1956 pState->fDidSomething = true;
1957 pState->fCanResume = true;
1958 return rc;
1959
1960 case NEM_WIN_PAGE_STATE_READABLE:
1961 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1962 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1963 {
1964 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1965 return VINF_SUCCESS;
1966 }
1967
1968 break;
1969
1970 case NEM_WIN_PAGE_STATE_WRITABLE:
1971 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1972 {
1973 if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
1974 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1975 else
1976 {
1977 pState->fCanResume = true;
1978 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1979 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1980 }
1981 return VINF_SUCCESS;
1982 }
1983 break;
1984
1985 default:
1986 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1987 }
1988
1989 /*
1990 * Unmap and restart the instruction.
1991 * If this fails, which it does every so often, just unmap everything for now.
1992 */
1993 /** @todo figure out whether we mess up the state or if it's WHv. */
1994 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1995 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1996 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1997 if (SUCCEEDED(hrc))
1998 {
1999 pState->fDidSomething = true;
2000 pState->fCanResume = true;
2001 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
2002 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
2003 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2004 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
2005 return VINF_SUCCESS;
2006 }
2007 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2008 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
2009 GCPhys, g_apszPageStates[u2State], hrc, hrc));
2010 return VERR_NEM_UNMAP_PAGES_FAILED;
2011}
2012
2013
2014/**
2015 * Returns the byte size from the given access SAS value.
2016 *
2017 * @returns Number of bytes to transfer.
2018 * @param uSas The SAS value to convert.
2019 */
2020DECLINLINE(size_t) nemR3WinGetByteCountFromSas(uint8_t uSas)
2021{
2022 switch (uSas)
2023 {
2024 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
2025 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
2026 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
2027 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
2028 default:
2029 AssertReleaseFailed();
2030 }
2031
2032 return 0;
2033}
2034
2035
2036/**
2037 * Sets the given general purpose register to the given value.
2038 *
2039 * @param pVCpu The cross context virtual CPU structure of the
2040 * calling EMT.
2041 * @param uReg The register index.
2042 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
2043 * @param fSignExtend Flag whether to sign extend the value.
2044 * @param u64Val The value.
2045 */
2046DECLINLINE(void) nemR3WinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
2047{
2048 AssertReturnVoid(uReg < 31);
2049
2050 if (f64BitReg)
2051 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
2052 else
2053 pVCpu->cpum.GstCtx.aGRegs[uReg].x = (uint64_t)(fSignExtend ? (int32_t)u64Val : (uint32_t)u64Val);
2054
2055 /* Mark the register as not extern anymore. */
2056 switch (uReg)
2057 {
2058 case 0:
2059 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
2060 break;
2061 case 1:
2062 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
2063 break;
2064 case 2:
2065 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
2066 break;
2067 case 3:
2068 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
2069 break;
2070 default:
2071 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
2072 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
2073 }
2074}
2075
2076
2077/**
2078 * Gets the given general purpose register and returns the value.
2079 *
2080 * @returns Value from the given register.
2081 * @param pVCpu The cross context virtual CPU structure of the
2082 * calling EMT.
2083 * @param uReg The register index.
2084 */
2085DECLINLINE(uint64_t) nemR3WinGetGReg(PVMCPU pVCpu, uint8_t uReg)
2086{
2087 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
2088
2089 if (uReg == ARMV8_AARCH64_REG_ZR)
2090 return 0;
2091
2092 /** @todo Import the register if extern. */
2093 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
2094
2095 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
2096}
2097
2098
2099/**
2100 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2101 *
2102 * @returns Strict VBox status code.
2103 * @param pVM The cross context VM structure.
2104 * @param pVCpu The cross context per CPU structure.
2105 * @param pExit The VM exit information to handle.
2106 * @sa nemHCWinHandleMessageMemory
2107 */
2108NEM_TMPL_STATIC VBOXSTRICTRC
2109nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2110{
2111 uint64_t const uHostTsc = ASMReadTSC();
2112 Assert(pExit->MemoryAccess.Header.InterceptAccessType != 3);
2113
2114 /*
2115 * Ask PGM for information about the given GCPhys. We need to check if we're
2116 * out of sync first.
2117 */
2118 WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
2119 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite, false, false };
2120 PGMPHYSNEMPAGEINFO Info;
2121 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2122 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2123 if (RT_SUCCESS(rc))
2124 {
2125 if (Info.fNemProt & ( pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
2126 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2127 {
2128 if (State.fCanResume)
2129 {
2130 Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2131 pVCpu->idCpu, pHdr->Pc,
2132 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2133 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2134 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2135 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2136 pHdr->Pc, uHostTsc);
2137 return VINF_SUCCESS;
2138 }
2139 }
2140 Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2141 pVCpu->idCpu, pHdr->Pc,
2142 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2143 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2144 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2145 }
2146 else
2147 Log4(("MemExit/%u: %08RX64: %RGp rc=%Rrc%s; emulating (%s)\n",
2148 pVCpu->idCpu, pHdr->Pc,
2149 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2150 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2151
2152 /*
2153 * Emulate the memory access, either access handler or special memory.
2154 */
2155 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2156 pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
2157 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2158 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2159 pHdr->Pc, uHostTsc);
2160#pragma message("nemR3WinHandleExitMemory: Why not calling nemR3WinCopyStateFromArmHeader?")
2161/** @todo r=bird: Why is nemR3WinCopyStateFromArmHeader commented out? */
2162 //nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
2163 RT_NOREF_PV(pExitRec);
2164 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
2165 AssertRCReturn(rc, rc);
2166
2167#ifdef LOG_ENABLED
2168 uint8_t const cbInstr = pExit->MemoryAccess.InstructionByteCount;
2169 RTGCPTR const GCPtrVa = pExit->MemoryAccess.Gva;
2170#endif
2171 RTGCPHYS const GCPhys = pExit->MemoryAccess.Gpa;
2172 uint64_t const uIss = pExit->MemoryAccess.Syndrome;
2173 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
2174 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
2175 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
2176 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
2177 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
2178 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
2179 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
2180 size_t cbAcc = nemR3WinGetByteCountFromSas(uAcc);
2181 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhys=%RGp cbInstr=%u\n",
2182 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrVa, GCPhys, cbInstr));
2183
2184 RT_NOREF(fL2Fault);
2185
2186 VBOXSTRICTRC rcStrict;
2187 if (fIsv)
2188 {
2189 EMHistoryAddExit(pVCpu,
2190 fWrite
2191 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2192 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2193 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
2194
2195 uint64_t u64Val = 0;
2196 if (fWrite)
2197 {
2198 u64Val = nemR3WinGetGReg(pVCpu, uReg);
2199 rcStrict = PGMPhysWrite(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2200 Log4(("MmioExit/%u: %08RX64: WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
2201 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2202 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2203 }
2204 else
2205 {
2206 rcStrict = PGMPhysRead(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2207 Log4(("MmioExit/%u: %08RX64: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2208 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2209 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2210 if (rcStrict == VINF_SUCCESS)
2211 nemR3WinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
2212 }
2213 }
2214 else
2215 {
2216 /** @todo Our UEFI firmware accesses the flash region with the following instruction
2217 * when the NVRAM actually contains data:
2218 * ldrb w9, [x6, #-0x0001]!
2219 * This is too complicated for the hardware so the ISV bit is not set. Until there
2220 * is a proper IEM implementation we just handle this here for now to avoid annoying
2221 * users too much.
2222 */
2223 /* The following ASSUMES that the vCPU state is completely synced. */
2224
2225 /* Read instruction. */
2226 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2227 const void *pvPageR3 = NULL;
2228 PGMPAGEMAPLOCK PageMapLock;
2229
2230 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
2231 if (rcStrict == VINF_SUCCESS)
2232 {
2233 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
2234 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
2235
2236 DISSTATE Dis;
2237 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
2238 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
2239 if (rcStrict == VINF_SUCCESS)
2240 {
2241 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
2242 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2243 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2244 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
2245 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
2246 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
2247 {
2248 /* The fault address is already the final address. */
2249 uint8_t bVal = 0;
2250 rcStrict = PGMPhysRead(pVM, GCPhys, &bVal, 1, PGMACCESSORIGIN_HM);
2251 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2252 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, sizeof(bVal), sizeof(bVal),
2253 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
2254 if (rcStrict == VINF_SUCCESS)
2255 {
2256 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
2257 /* Update the indexed register. */
2258 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
2259 }
2260 }
2261 /*
2262 * Seeing the following with the Windows 11/ARM TPM driver:
2263 * %fffff800e5342888 48 25 45 29 ldp w8, w9, [x10, #+0x0028]
2264 */
2265 else if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDP
2266 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2267 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2268 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmReg
2269 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2270 && Dis.aParams[2].armv8.enmType == kDisArmv8OpParmAddrInGpr
2271 && Dis.aParams[2].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit)
2272 {
2273 /** @todo This is tricky to handle if the first register read returns something else than VINF_SUCCESS... */
2274 /* The fault address is already the final address. */
2275 uint32_t u32Val1 = 0;
2276 uint32_t u32Val2 = 0;
2277 rcStrict = PGMPhysRead(pVM, GCPhys, &u32Val1, sizeof(u32Val1), PGMACCESSORIGIN_HM);
2278 if (rcStrict == VINF_SUCCESS)
2279 rcStrict = PGMPhysRead(pVM, GCPhys + sizeof(uint32_t), &u32Val2, sizeof(u32Val2), PGMACCESSORIGIN_HM);
2280 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs %.*Rhxs rcStrict=%Rrc\n",
2281 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, 2 * sizeof(uint32_t), sizeof(u32Val1),
2282 &u32Val1, sizeof(u32Val2), &u32Val2, VBOXSTRICTRC_VAL(rcStrict) ));
2283 if (rcStrict == VINF_SUCCESS)
2284 {
2285 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val1);
2286 nemR3WinSetGReg(pVCpu, Dis.aParams[1].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val2);
2287 }
2288 }
2289 else
2290 AssertFailedReturn(VERR_NOT_SUPPORTED);
2291 }
2292 }
2293 }
2294
2295 if (rcStrict == VINF_SUCCESS)
2296 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t); /** @todo Why is InstructionByteCount always 0? */
2297
2298 return rcStrict;
2299}
2300
2301
2302/**
 * Deals with hypercall exits (WHvRunVpExitReasonHypercall).
2304 *
2305 * @returns Strict VBox status code.
2306 * @param pVM The cross context VM structure.
2307 * @param pVCpu The cross context per CPU structure.
2308 * @param pExit The VM exit information to handle.
2309 * @sa nemHCWinHandleMessageMemory
2310 */
2311NEM_TMPL_STATIC VBOXSTRICTRC
2312nemR3WinHandleExitHypercall(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2313{
2314 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2315
2316 /** @todo Raise exception to EL1 if PSCI not configured. */
2317 /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
2318 uint32_t uFunId = pExit->Hypercall.Immediate;
2319 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
2320 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
2321 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
2322 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
2323 {
2324 switch (uFunNum)
2325 {
2326 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2327 nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
2328 break;
2329 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2330 rcStrict = VMR3PowerOff(pVM->pUVM);
2331 break;
2332 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2333 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2334 {
2335 bool fHaltOnReset;
2336 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
2337 if (RT_SUCCESS(rc) && fHaltOnReset)
2338 {
2339 Log(("nemHCLnxHandleExitHypercall: Halt On Reset!\n"));
2340 rcStrict = VINF_EM_HALT;
2341 }
2342 else
2343 {
2344 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
2345 VM_FF_SET(pVM, VM_FF_RESET);
2346 rcStrict = VINF_EM_RESET;
2347 }
2348 break;
2349 }
2350 case ARM_PSCI_FUNC_ID_CPU_ON:
2351 {
2352 uint64_t u64TgtCpu = pExit->Hypercall.X[1];
2353 RTGCPHYS GCPhysExecAddr = pExit->Hypercall.X[2];
2354 uint64_t u64CtxId = pExit->Hypercall.X[3];
2355 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
2356 nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
2357 break;
2358 }
2359 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
2360 {
2361 uint32_t u32FunNum = (uint32_t)pExit->Hypercall.X[1];
2362 switch (u32FunNum)
2363 {
2364 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2365 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2366 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2367 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2368 case ARM_PSCI_FUNC_ID_CPU_ON:
2369 nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
2370 false /*f64BitReg*/, false /*fSignExtend*/,
2371 (uint64_t)ARM_PSCI_STS_SUCCESS);
2372 break;
2373 default:
2374 nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
2375 false /*f64BitReg*/, false /*fSignExtend*/,
2376 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2377 }
2378 break;
2379 }
2380 default:
2381 nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2382 }
2383 }
2384 else
2385 nemR3WinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2386
2387 /** @todo What to do if immediate is != 0? */
2388
2389 if (rcStrict == VINF_SUCCESS)
2390 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
2391
2392 return rcStrict;
2393}
2394
2395
2396/**
 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
2398 *
2399 * @returns Strict VBox status code.
2400 * @param pVM The cross context VM structure.
2401 * @param pVCpu The cross context per CPU structure.
2402 * @param pExit The VM exit information to handle.
2403 * @sa nemHCWinHandleMessageUnrecoverableException
2404 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
#if 0
    /*
     * Just copy the state we've got and handle it in the loop for now.
     *
     * NOTE(review): this disabled branch is x86 leftover (Cs/Rip/Rflags) and
     * does not compile for the ARMv8 exit context - kept for reference only.
     */
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
         pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
    RT_NOREF_PV(pVM);
    return VINF_EM_TRIPLE_FAULT;
#else
    /*
     * Let IEM decide whether this is really it.
     */
    /* Record the exit for the exit history before pulling in the PC/PSTATE from the header. */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
                     pExit->UnrecoverableException.Header.Pc, ASMReadTSC());
    nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->UnrecoverableException.Header);
    /* Not expected to be reached in practice; halts debug builds so the state can be inspected. */
    AssertReleaseFailed();
    RT_NOREF_PV(pVM);
    return VINF_SUCCESS;
#endif
}
2428
2429
2430/**
2431 * Handles VM exits.
2432 *
2433 * @returns Strict VBox status code.
2434 * @param pVM The cross context VM structure.
2435 * @param pVCpu The cross context per CPU structure.
2436 * @param pExit The VM exit information to handle.
2437 * @sa nemHCWinHandleMessage
2438 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    /* Sledgehammer approach: pull the complete vCPU state from Hyper-V for every exit. */
    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
    AssertRCReturn(rc, rc);

#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3WinLogState(pVM, pVCpu);
#endif

    /* Dispatch on the exit reason reported by the hypervisor. */
    switch (pExit->ExitReason)
    {
        case WHvRunVpExitReasonUnmappedGpa:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
            return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonCanceled:
            Log4(("CanceledExit/%u\n", pVCpu->idCpu));
            return VINF_SUCCESS;

        case WHvRunVpExitReasonHypercall:
            return nemR3WinHandleExitHypercall(pVM, pVCpu, pExit);

        case 0x8001000c: /* WHvRunVpExitReasonArm64Reset - numeric value used as older SDKs lack the enum member. */
        {
            if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF)
                return VMR3PowerOff(pVM->pUVM);
            else if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_RESET)
            {
                VM_FF_SET(pVM, VM_FF_RESET);
                return VINF_EM_RESET;
            }
            else
                AssertLogRelFailedReturn(VERR_NEM_IPE_3);
        }

        case WHvRunVpExitReasonUnrecoverableException:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
            return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
            LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
                                         pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);

        /* Undesired exits: */
        case WHvRunVpExitReasonNone:
        default:
            LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
    }
}
2492
2493
2494VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2495{
2496 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState));
2497#ifdef LOG_ENABLED
2498 if (LogIs3Enabled())
2499 nemR3WinLogState(pVM, pVCpu);
2500#endif
2501
2502 /*
2503 * Try switch to NEM runloop state.
2504 */
2505 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2506 { /* likely */ }
2507 else
2508 {
2509 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2510 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2511 return VINF_SUCCESS;
2512 }
2513
2514 /*
2515 * The run loop.
2516 *
2517 * Current approach to state updating to use the sledgehammer and sync
2518 * everything every time. This will be optimized later.
2519 */
2520 const bool fSingleStepping = DBGFIsStepping(pVCpu);
2521 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2522 for (unsigned iLoop = 0;; iLoop++)
2523 {
2524 /*
2525 * Poll timers and run for a bit.
2526 *
2527 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
2528 * so we take the time of the next timer event and uses that as a deadline.
2529 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
2530 */
2531 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2532 * the whole polling job when timers have changed... */
2533 uint64_t offDeltaIgnored;
2534 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2535 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2536 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2537 {
2538 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
2539 {
2540 /* Ensure that Hyper-V has the whole state. */
2541 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
2542 AssertRCReturn(rc2, rc2);
2543
2544#ifdef LOG_ENABLED
2545 if (LogIsFlowEnabled())
2546 {
2547 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2548 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2549 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2550 LogFlow(("NEM/%u: Entry @ %08RX64 pstate=%#RX64\n", pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64));
2551 }
2552#endif
2553
2554 MY_WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
2555 TMNotifyStartOfExecution(pVM, pVCpu);
2556
2557 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
2558
2559 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
2560 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2561#ifdef LOG_ENABLED
2562 if (LogIsFlowEnabled())
2563 {
2564 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2565 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2566 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2567 LogFlow(("NEM/%u: Exit @ %08RX64 pstate=%#RX64 Reason=%#x\n",
2568 pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64, ExitReason.ExitReason));
2569 }
2570#endif
2571 if (SUCCEEDED(hrc))
2572 {
2573 /*
2574 * Deal with the message.
2575 */
2576 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
2577 if (rcStrict == VINF_SUCCESS)
2578 { /* hopefully likely */ }
2579 else
2580 {
2581 LogFlow(("NEM/%u: breaking: nemR3WinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2582 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2583 break;
2584 }
2585 }
2586 else
2587 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
2588 pVCpu->idCpu, hrc, GetLastError()),
2589 VERR_NEM_IPE_0);
2590
2591 /*
2592 * If no relevant FFs are pending, loop.
2593 */
2594 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2595 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2596 continue;
2597
2598 /** @todo Try handle pending flags, not just return to EM loops. Take care
2599 * not to set important RCs here unless we've handled a message. */
2600 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
2601 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
2602 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
2603 }
2604 else
2605 {
2606 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
2607 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
2608 }
2609 }
2610 else
2611 {
2612 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
2613 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
2614 }
2615 break;
2616 } /* the run loop */
2617
2618
2619 /*
2620 * If the CPU is running, make sure to stop it before we try sync back the
2621 * state and return to EM. We don't sync back the whole state if we can help it.
2622 */
2623 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2624 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2625
2626 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
2627 {
2628 /* Try anticipate what we might need. */
2629 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
2630 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2631 || RT_FAILURE(rcStrict))
2632 fImport = CPUMCTX_EXTRN_ALL;
2633 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
2634 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2635
2636 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2637 {
2638 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport);
2639 if (RT_SUCCESS(rc2))
2640 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2641 else if (RT_SUCCESS(rcStrict))
2642 rcStrict = rc2;
2643 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2644 pVCpu->cpum.GstCtx.fExtrn = 0;
2645 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2646 }
2647 else
2648 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2649 }
2650 else
2651 {
2652 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2653 pVCpu->cpum.GstCtx.fExtrn = 0;
2654 }
2655
2656#if 0
2657 UINT32 cbWritten;
2658 WHV_ARM64_LOCAL_INTERRUPT_CONTROLLER_STATE IntrState;
2659 HRESULT hrc = WHvGetVirtualProcessorState(pVM->nem.s.hPartition, pVCpu->idCpu, WHvVirtualProcessorStateTypeInterruptControllerState2,
2660 &IntrState, sizeof(IntrState), &cbWritten);
2661 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2662 ("WHvGetVirtualProcessorState(%p, %u,WHvVirtualProcessorStateTypeInterruptControllerState2,) -> %Rhrc (Last=%#x/%u)\n",
2663 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2664 , VERR_NEM_GET_REGISTERS_FAILED);
2665 LogFlowFunc(("IntrState: cbWritten=%u\n"));
2666 for (uint32_t i = 0; i < RT_ELEMENTS(IntrState.BankedInterruptState); i++)
2667 {
2668 WHV_ARM64_INTERRUPT_STATE *pState = &IntrState.BankedInterruptState[i];
2669 LogFlowFunc(("IntrState: Intr %u:\n"
2670 " Enabled=%RTbool\n"
2671 " EdgeTriggered=%RTbool\n"
2672 " Asserted=%RTbool\n"
2673 " SetPending=%RTbool\n"
2674 " Active=%RTbool\n"
2675 " Direct=%RTbool\n"
2676 " GicrIpriorityrConfigured=%u\n"
2677 " GicrIpriorityrActive=%u\n",
2678 i, pState->Enabled, pState->EdgeTriggered, pState->Asserted, pState->SetPending, pState->Active, pState->Direct,
2679 pState->GicrIpriorityrConfigured, pState->GicrIpriorityrActive));
2680 }
2681#endif
2682
2683 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64,
2684 pVCpu->cpum.GstCtx.fPState, VBOXSTRICTRC_VAL(rcStrict) ));
2685 return rcStrict;
2686}
2687
2688
2689VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2690{
2691 Assert(VM_IS_NEM_ENABLED(pVM));
2692 RT_NOREF(pVM, pVCpu);
2693 return true;
2694}
2695
2696
2697bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2698{
2699 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2700 return false;
2701}
2702
2703
2704void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2705{
2706 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
2707 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2708 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2709 RT_NOREF_PV(hrc);
2710 RT_NOREF_PV(fFlags);
2711}
2712
2713
2714DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2715{
2716 RT_NOREF(pVM, fUseDebugLoop);
2717 return false;
2718}
2719
2720
2721DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2722{
2723 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2724 return false;
2725}
2726
2727
2728DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2729{
2730 PGMPAGEMAPLOCK Lock;
2731 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2732 if (RT_SUCCESS(rc))
2733 PGMPhysReleasePageMappingLock(pVM, &Lock);
2734 return rc;
2735}
2736
2737
2738DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2739{
2740 PGMPAGEMAPLOCK Lock;
2741 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2742 if (RT_SUCCESS(rc))
2743 PGMPhysReleasePageMappingLock(pVM, &Lock);
2744 return rc;
2745}
2746
2747
2748VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2749 uint8_t *pu2State, uint32_t *puNemRange)
2750{
2751 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
2752 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange));
2753
2754 *pu2State = UINT8_MAX;
2755 RT_NOREF(puNemRange);
2756
2757 if (pvR3)
2758 {
2759 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2760 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
2761 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2762 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2763 if (SUCCEEDED(hrc))
2764 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2765 else
2766 {
2767 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2768 GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2769 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2770 return VERR_NEM_MAP_PAGES_FAILED;
2771 }
2772 }
2773 return VINF_SUCCESS;
2774}
2775
2776
2777VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2778{
2779 RT_NOREF(pVM);
2780 return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
2781}
2782
2783
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(puNemRange);

    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else if (pvMmio2)
            /* Non-fatal when MMIO2 follows: the map below replaces the range anyway. */
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        /* Enable Hyper-V dirty page tracking only when requested and the API is available. */
        WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
        if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
            fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
    {
        /* Pure MMIO: leave the range unmapped so accesses exit to the handlers. */
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    RT_NOREF(pvRam);
    return VINF_SUCCESS;
}
2843
2844
2845VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2846 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2847{
2848 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2849 return VINF_SUCCESS;
2850}
2851
2852
2853VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2854 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2855{
2856 int rc = VINF_SUCCESS;
2857 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2858 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2859
2860 /*
2861 * Unmap the MMIO2 pages.
2862 */
2863 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2864 * we may have more stuff to unmap even in case of pure MMIO... */
2865 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2866 {
2867 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
2868 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
2869 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
2870 if (FAILED(hrc))
2871 {
2872 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
2873 GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2874 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2875 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2876 }
2877 }
2878
2879 /*
2880 * Restore the RAM we replaced.
2881 */
2882 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2883 {
2884 AssertPtr(pvRam);
2885 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2886 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
2887 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2888 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2889 if (SUCCEEDED(hrc))
2890 { /* likely */ }
2891 else
2892 {
2893 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2894 GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2895 rc = VERR_NEM_MAP_PAGES_FAILED;
2896 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2897 }
2898 if (pu2State)
2899 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2900 }
2901 /* Mark the pages as unmapped if relevant. */
2902 else if (pu2State)
2903 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2904
2905 RT_NOREF(pvMmio2, puNemRange);
2906 return rc;
2907}
2908
2909
2910VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2911 void *pvBitmap, size_t cbBitmap)
2912{
2913 Assert(VM_IS_NEM_ENABLED(pVM));
2914 AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
2915 Assert(cbBitmap == (uint32_t)cbBitmap);
2916 RT_NOREF(uNemRange);
2917
2918 /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
2919 HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
2920 if (SUCCEEDED(hrc))
2921 return VINF_SUCCESS;
2922
2923 AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
2924 GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2925 return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
2926}
2927
2928
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    /* Early ROM registration: nothing is mapped here; the late notification
       (NEMR3NotifyPhysRomRegisterLate) does the actual read-only mapping. */
    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;

#if 0 /* Let's not do this after all.  We'll protection change notifications for each page and if not we'll map them lazily. */
    RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
    for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
    {
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            { /* likely */ }
            else
            {
                LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
        }
        else
        {
            LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
            return rc;
        }
    }
    RT_NOREF_PV(fFlags);
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
#endif
    return VINF_SUCCESS;
}
2967
2968
2969VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2970 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2971{
2972 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2973 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2974 *pu2State = UINT8_MAX;
2975
2976 /*
2977 * (Re-)map readonly.
2978 */
2979 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2980 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2981 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2982 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2983 if (SUCCEEDED(hrc))
2984 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2985 else
2986 {
2987 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
2988 GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2989 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2990 return VERR_NEM_MAP_PAGES_FAILED;
2991 }
2992 RT_NOREF(fFlags, puNemRange);
2993 return VINF_SUCCESS;
2994}
2995
2996VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2997{
2998 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
2999 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
3000 RT_NOREF(pVCpu, fEnabled);
3001}
3002
3003
3004void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
3005{
3006 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
3007 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
3008}
3009
3010
3011VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3012 RTR3PTR pvMemR3, uint8_t *pu2State)
3013{
3014 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
3015 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
3016
3017 *pu2State = UINT8_MAX;
3018 if (pvMemR3)
3019 {
3020 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
3021 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
3022 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
3023 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
3024 if (SUCCEEDED(hrc))
3025 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3026 else
3027 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
3028 pvMemR3, GCPhys, cb, hrc));
3029 }
3030 RT_NOREF(enmKind);
3031}
3032
3033
3034void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
3035 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
3036{
3037 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
3038 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
3039 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
3040}
3041
3042
3043/**
3044 * Worker that maps pages into Hyper-V.
3045 *
3046 * This is used by the PGM physical page notifications as well as the memory
3047 * access VMEXIT handlers.
3048 *
3049 * @returns VBox status code.
3050 * @param pVM The cross context VM structure.
3051 * @param pVCpu The cross context virtual CPU structure of the
3052 * calling EMT.
3053 * @param GCPhysSrc The source page address.
3054 * @param GCPhysDst The hyper-V destination page. This may differ from
3055 * GCPhysSrc when A20 is disabled.
3056 * @param fPageProt NEM_PAGE_PROT_XXX.
3057 * @param pu2State Our page state (input/output).
3058 * @param fBackingChanged Set if the page backing is being changed.
3059 * @thread EMT(pVCpu)
3060 */
3061NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
3062 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
3063{
3064 /*
3065 * Looks like we need to unmap a page before we can change the backing
3066 * or even modify the protection. This is going to be *REALLY* efficient.
3067 * PGM lends us two bits to keep track of the state here.
3068 */
3069 RT_NOREF(pVCpu);
3070 uint8_t const u2OldState = *pu2State;
3071 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
3072 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
3073 if ( fBackingChanged
3074 || u2NewState != u2OldState)
3075 {
3076 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
3077 {
3078 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3079 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
3080 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3081 if (SUCCEEDED(hrc))
3082 {
3083 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3084 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
3085 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3086 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
3087 {
3088 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
3089 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3090 return VINF_SUCCESS;
3091 }
3092 }
3093 else
3094 {
3095 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
3096 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3097 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3098 return VERR_NEM_INIT_FAILED;
3099 }
3100 }
3101 }
3102
3103 /*
3104 * Writeable mapping?
3105 */
3106 if (fPageProt & NEM_PAGE_PROT_WRITE)
3107 {
3108 void *pvPage;
3109 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
3110 if (RT_SUCCESS(rc))
3111 {
3112 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
3113 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
3114 if (SUCCEEDED(hrc))
3115 {
3116 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3117 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
3118 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3119 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
3120 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
3121 return VINF_SUCCESS;
3122 }
3123 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
3124 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3125 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3126 return VERR_NEM_INIT_FAILED;
3127 }
3128 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
3129 return rc;
3130 }
3131
3132 if (fPageProt & NEM_PAGE_PROT_READ)
3133 {
3134 const void *pvPage;
3135 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
3136 if (RT_SUCCESS(rc))
3137 {
3138 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
3139 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
3140 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
3141 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
3142 if (SUCCEEDED(hrc))
3143 {
3144 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
3145 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
3146 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3147 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
3148 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
3149 return VINF_SUCCESS;
3150 }
3151 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
3152 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3153 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3154 return VERR_NEM_INIT_FAILED;
3155 }
3156 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
3157 return rc;
3158 }
3159
3160 /* We already unmapped it above. */
3161 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3162 return VINF_SUCCESS;
3163}
3164
3165
3166NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
3167{
3168 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
3169 {
3170 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
3171 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3172 return VINF_SUCCESS;
3173 }
3174
3175 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3176 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
3177 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3178 if (SUCCEEDED(hrc))
3179 {
3180 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
3181 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3182 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3183 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
3184 return VINF_SUCCESS;
3185 }
3186 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
3187 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
3188 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3189 return VERR_NEM_IPE_6;
3190}
3191
3192
3193int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3194 PGMPAGETYPE enmType, uint8_t *pu2State)
3195{
3196 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3197 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3198 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
3199
3200 int rc;
3201 RT_NOREF_PV(fPageProt);
3202 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3203 return rc;
3204}
3205
3206
3207VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
3208 PGMPAGETYPE enmType, uint8_t *pu2State)
3209{
3210 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3211 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3212 Assert(VM_IS_NEM_ENABLED(pVM));
3213 RT_NOREF(HCPhys, enmType, pvR3);
3214
3215 RT_NOREF_PV(fPageProt);
3216 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3217}
3218
3219
3220VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
3221 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
3222{
3223 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
3224 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
3225 Assert(VM_IS_NEM_ENABLED(pVM));
3226 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
3227
3228 RT_NOREF_PV(fPageProt);
3229 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3230}
3231
3232
3233/**
3234 * Returns features supported by the NEM backend.
3235 *
3236 * @returns Flags of features supported by the native NEM backend.
3237 * @param pVM The cross context VM structure.
3238 */
3239VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
3240{
3241 RT_NOREF(pVM);
3242 /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
3243 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
3244}
3245
3246
3247/** @page pg_nem_win_aarmv8 NEM/win - Native Execution Manager, Windows.
3248 *
3249 * Open questions:
3250 * - Why can't one read and write WHvArm64RegisterId*
3251 * - WHvArm64RegisterDbgbcr0El1 is not readable?
3252 * - Getting notified about system register reads/writes (GIC)?
3253 * - InstructionByteCount and InstructionBytes for unmapped GPA exit are zero...
3254 * - Handling of (vTimer) interrupts, how is WHvRequestInterrupt() supposed to be used?
3255 */
3256
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette