VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp@ 108761

Last change on this file since 108761 was 108761, checked in by vboxsync, 5 weeks ago

VMM/NEMR3Native-win-armv8.cpp: Some cleanup, get rid of unused memory mapping related code, bugref:10392

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 138.0 KB
Line 
1/* $Id: NEMR3Native-win-armv8.cpp 108761 2025-03-27 09:15:09Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.virtualbox.org.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_NEM
39#define VMCPU_INCL_CPUM_GST_CTX
40#include <iprt/nt/nt-and-windows.h>
41#include <iprt/nt/hyperv.h>
42#include <WinHvPlatform.h>
43
44#ifndef _WIN32_WINNT_WIN10
45# error "Missing _WIN32_WINNT_WIN10"
46#endif
47#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
48# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
49#endif
50#include <sysinfoapi.h>
51#include <debugapi.h>
52#include <errhandlingapi.h>
53#include <fileapi.h>
54#include <winerror.h> /* no api header for this. */
55
56#include <VBox/dis.h>
57#include <VBox/vmm/nem.h>
58#include <VBox/vmm/iem.h>
59#include <VBox/vmm/em.h>
60#include <VBox/vmm/pdmapic.h>
61#include <VBox/vmm/pdm.h>
62#include <VBox/vmm/dbgftrace.h>
63#include "NEMInternal.h"
64#include <VBox/vmm/vmcc.h>
65
66#include <iprt/formats/arm-psci.h>
67
68#include <iprt/ldr.h>
69#include <iprt/path.h>
70#include <iprt/string.h>
71#include <iprt/system.h>
72#include <iprt/utf16.h>
73
74#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
75HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
76# define WHvMapGpaRangeFlagTrackDirtyPages ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
77#endif
78
79/** Our saved state version for Hyper-V specific things. */
80#define NEM_HV_SAVED_STATE_VERSION 1
81
82
83/*
84 * The following definitions appeared in build 27744 allow configuring the base address of the GICv3 controller,
85 * (there is no official SDK for this yet).
86 */
87/** @todo Better way of defining these which doesn't require casting later on when calling APIs. */
88#define WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS UINT32_C(0x00001012)
89/** No GIC present. */
90#define WHV_ARM64_IC_EMULATION_MODE_NONE 0
91/** Hyper-V emulates a GICv3. */
92#define WHV_ARM64_IC_EMULATION_MODE_GICV3 1
93
/**
 * Configures the interrupt controller emulated by Hyper-V.
 *
 * Passed to WHvSetPartitionProperty() with property code
 * WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS (no official SDK definition
 * yet, hence the MY_ prefix).  Layout must match the hypervisor's 64-byte
 * expectation exactly (see AssertCompileSize below).
 */
typedef struct MY_WHV_ARM64_IC_PARAMETERS
{
    uint32_t u32EmulationMode;      /**< WHV_ARM64_IC_EMULATION_MODE_XXX value. */
    uint32_t u32Rsvd;               /**< Reserved / explicit padding. */
    union
    {
        /** Parameters used with WHV_ARM64_IC_EMULATION_MODE_GICV3. */
        struct
        {
            RTGCPHYS GCPhysGicdBase;            /**< Guest physical base of the GIC distributor (GICD). */
            RTGCPHYS GCPhysGitsTranslaterBase;  /**< Guest physical base of the GIC ITS translator. */
            uint32_t u32Rsvd;                   /**< Reserved. */
            uint32_t cLpiIntIdBits;             /**< Number of LPI INTID bits. */
            uint32_t u32PpiCntvOverflw;         /**< Absolute PPI INTID of the virtual timer (see nemR3WinGicCreate). */
            uint32_t u32PpiPmu;                 /**< Absolute PPI INTID of the PMU. */
            uint32_t au32Rsvd[6];               /**< Reserved. */
        } GicV3;
    } u;
} MY_WHV_ARM64_IC_PARAMETERS;
AssertCompileSize(MY_WHV_ARM64_IC_PARAMETERS, 64);
116
117
/**
 * The hypercall exit context.
 *
 * Substitute for the SDK structure; the size asserted below is what the
 * hypervisor actually delivers for hypercall exits on ARM64.
 */
typedef struct MY_WHV_HYPERCALL_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;        /**< Common intercept message header. */
    uint16_t Immediate;                         /**< Hypercall immediate value. */
    uint16_t u16Rsvd;                           /**< Reserved. */
    uint32_t u32Rsvd;                           /**< Reserved. */
    uint64_t X[18];                             /**< General purpose registers; presumably X0..X17 at exit time — verify against the Hyper-V TLFS. */
} MY_WHV_HYPERCALL_CONTEXT;
typedef MY_WHV_HYPERCALL_CONTEXT *PMY_WHV_HYPERCALL_CONTEXT;
AssertCompileSize(MY_WHV_HYPERCALL_CONTEXT, 24 + 19 * sizeof(uint64_t));
131
132
/**
 * The ARM64 reset context.
 *
 * Delivered on PSCI initiated power-off/reset exits (see the
 * WHV_ARM64_RESET_CONTEXT_TYPE_XXX defines below for ResetType values).
 */
typedef struct MY_WHV_ARM64_RESET_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;        /**< Common intercept message header. */
    uint32_t ResetType;                         /**< WHV_ARM64_RESET_CONTEXT_TYPE_XXX. */
    uint32_t u32Rsvd;                           /**< Reserved. */
} MY_WHV_ARM64_RESET_CONTEXT;
typedef MY_WHV_ARM64_RESET_CONTEXT *PMY_WHV_ARM64_RESET_CONTEXT;
AssertCompileSize(MY_WHV_ARM64_RESET_CONTEXT, 24 + 2 * sizeof(uint32_t));
144
145
146#define WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF 0
147#define WHV_ARM64_RESET_CONTEXT_TYPE_RESET 1
148
149
/**
 * The exit reason context for arm64, the size is different
 * from the default SDK we build against.
 *
 * Filled in by WHvRunVirtualProcessor(); the union member to consult is
 * selected by ExitReason.  Uses the MY_ substitutes above where the SDK
 * structures are missing or have the wrong size.
 */
typedef struct MY_WHV_RUN_VP_EXIT_CONTEXT
{
    WHV_RUN_VP_EXIT_REASON ExitReason;          /**< Discriminator for the union below. */
    uint32_t u32Rsvd;                           /**< Reserved. */
    uint64_t u64Rsvd;                           /**< Reserved. */
    union
    {
        WHV_MEMORY_ACCESS_CONTEXT MemoryAccess;             /**< Guest memory access exits. */
        WHV_RUN_VP_CANCELED_CONTEXT CancelReason;           /**< WHvCancelRunVirtualProcessor exits. */
        MY_WHV_HYPERCALL_CONTEXT Hypercall;                 /**< Hypercall exits. */
        WHV_UNRECOVERABLE_EXCEPTION_CONTEXT UnrecoverableException; /**< Unrecoverable exception exits. */
        MY_WHV_ARM64_RESET_CONTEXT Arm64Reset;              /**< Power off / reset exits. */
        uint64_t au64Rsvd2[32];                             /**< Pads the union to 256 bytes. */
    };
} MY_WHV_RUN_VP_EXIT_CONTEXT;
typedef MY_WHV_RUN_VP_EXIT_CONTEXT *PMY_WHV_RUN_VP_EXIT_CONTEXT;
AssertCompileSize(MY_WHV_RUN_VP_EXIT_CONTEXT, 272);
171
172#define My_WHvArm64RegisterGicrBaseGpa ((WHV_REGISTER_NAME)UINT32_C(0x00063000))
173#define My_WHvArm64RegisterActlrEl1 ((WHV_REGISTER_NAME)UINT32_C(0x00040003))
174
175
176/*********************************************************************************************************************************
177* Defined Constants And Macros *
178*********************************************************************************************************************************/
179
180
181/*********************************************************************************************************************************
182* Global Variables *
183*********************************************************************************************************************************/
184/** @name APIs imported from WinHvPlatform.dll
185 * @{ */
186static decltype(WHvGetCapability) * g_pfnWHvGetCapability;
187static decltype(WHvCreatePartition) * g_pfnWHvCreatePartition;
188static decltype(WHvSetupPartition) * g_pfnWHvSetupPartition;
189static decltype(WHvDeletePartition) * g_pfnWHvDeletePartition;
190static decltype(WHvGetPartitionProperty) * g_pfnWHvGetPartitionProperty;
191static decltype(WHvSetPartitionProperty) * g_pfnWHvSetPartitionProperty;
192static decltype(WHvMapGpaRange) * g_pfnWHvMapGpaRange;
193static decltype(WHvUnmapGpaRange) * g_pfnWHvUnmapGpaRange;
194static decltype(WHvTranslateGva) * g_pfnWHvTranslateGva;
195static decltype(WHvQueryGpaRangeDirtyBitmap) * g_pfnWHvQueryGpaRangeDirtyBitmap;
196static decltype(WHvCreateVirtualProcessor) * g_pfnWHvCreateVirtualProcessor;
197static decltype(WHvDeleteVirtualProcessor) * g_pfnWHvDeleteVirtualProcessor;
198static decltype(WHvRunVirtualProcessor) * g_pfnWHvRunVirtualProcessor;
199static decltype(WHvCancelRunVirtualProcessor) * g_pfnWHvCancelRunVirtualProcessor;
200static decltype(WHvGetVirtualProcessorRegisters) * g_pfnWHvGetVirtualProcessorRegisters;
201static decltype(WHvSetVirtualProcessorRegisters) * g_pfnWHvSetVirtualProcessorRegisters;
202static decltype(WHvSuspendPartitionTime) * g_pfnWHvSuspendPartitionTime;
203static decltype(WHvResumePartitionTime) * g_pfnWHvResumePartitionTime;
204decltype(WHvGetVirtualProcessorState) * g_pfnWHvGetVirtualProcessorState;
205decltype(WHvSetVirtualProcessorState) * g_pfnWHvSetVirtualProcessorState;
206decltype(WHvRequestInterrupt) * g_pfnWHvRequestInterrupt;
207/** @} */
208
/** The Windows build number.
 * @note Statically initialized to 17134; presumably updated with the real
 *       build number during init — the detection code is not in this chunk,
 *       verify against nemR3NativeInit. */
static uint32_t g_uBuildNo = 17134;
211
212
213
/**
 * Import instructions.
 *
 * Walked by nemR3WinInitProbeAndLoad() to resolve the g_pfnWHv* pointers
 * above.  The idxDll field indexes the s_apszDllNames array in that function,
 * so the two must be kept in sync.  Non-optional imports that fail to resolve
 * cause initialization to fail.
 */
static const struct
{
    uint8_t     idxDll;     /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
    bool        fOptional;  /**< Set if import is optional. */
    PFNRT      *ppfn;       /**< The function pointer variable. */
    const char *pszName;    /**< The function name. */
} g_aImports[] =
{
#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
    NEM_WIN_IMPORT(0, false, WHvGetCapability),
    NEM_WIN_IMPORT(0, false, WHvCreatePartition),
    NEM_WIN_IMPORT(0, false, WHvSetupPartition),
    NEM_WIN_IMPORT(0, false, WHvDeletePartition),
    NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvTranslateGva),
    NEM_WIN_IMPORT(0, true,  WHvQueryGpaRangeDirtyBitmap),
    NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSuspendPartitionTime),
    NEM_WIN_IMPORT(0, false, WHvResumePartitionTime),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvRequestInterrupt),
#undef NEM_WIN_IMPORT
};
249
250
/*
 * Let the preprocessor alias the APIs to import variables for better autocompletion.
 */
#ifndef IN_SLICKEDIT
# define WHvGetCapability g_pfnWHvGetCapability
# define WHvCreatePartition g_pfnWHvCreatePartition
# define WHvSetupPartition g_pfnWHvSetupPartition
# define WHvDeletePartition g_pfnWHvDeletePartition
# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
# define WHvMapGpaRange g_pfnWHvMapGpaRange
# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
# define WHvTranslateGva g_pfnWHvTranslateGva
# define WHvQueryGpaRangeDirtyBitmap g_pfnWHvQueryGpaRangeDirtyBitmap
# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
/* NOTE(review): stale alias - there is no g_pfnWHvGetRunExitContextSize
   variable and no matching g_aImports entry in this file; harmless unless
   the name is actually used somewhere. */
# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
# define WHvSuspendPartitionTime g_pfnWHvSuspendPartitionTime
# define WHvResumePartitionTime g_pfnWHvResumePartitionTime
# define WHvGetVirtualProcessorState g_pfnWHvGetVirtualProcessorState
# define WHvSetVirtualProcessorState g_pfnWHvSetVirtualProcessorState
# define WHvRequestInterrupt g_pfnWHvRequestInterrupt
#endif
278
279
280/*********************************************************************************************************************************
281* Internal Functions *
282*********************************************************************************************************************************/
283DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
284DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);
285
286
287/**
288 * Worker for nemR3NativeInit that probes and load the native API.
289 *
290 * @returns VBox status code.
291 * @param fForced Whether the HMForced flag is set and we should
292 * fail if we cannot initialize.
293 * @param pErrInfo Where to always return error info.
294 */
295static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
296{
297 /*
298 * Check that the DLL files we need are present, but without loading them.
299 * We'd like to avoid loading them unnecessarily.
300 */
301 WCHAR wszPath[MAX_PATH + 64];
302 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
303 if (cwcPath >= MAX_PATH || cwcPath < 2)
304 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
305
306 if (wszPath[cwcPath - 1] != '\\' || wszPath[cwcPath - 1] != '/')
307 wszPath[cwcPath++] = '\\';
308 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
309 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
310 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
311
312 /*
313 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
314 */
315 /** @todo */
316
317 /** @todo would be great if we could recognize a root partition from the
318 * CPUID info, but I currently don't dare do that. */
319
320 /*
321 * Now try load the DLLs and resolve the APIs.
322 */
323 static const char * const s_apszDllNames[1] = { "WinHvPlatform.dll" };
324 RTLDRMOD ahMods[1] = { NIL_RTLDRMOD };
325 int rc = VINF_SUCCESS;
326 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
327 {
328 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
329 if (RT_FAILURE(rc2))
330 {
331 if (!RTErrInfoIsSet(pErrInfo))
332 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
333 else
334 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
335 ahMods[i] = NIL_RTLDRMOD;
336 rc = VERR_NEM_INIT_FAILED;
337 }
338 }
339 if (RT_SUCCESS(rc))
340 {
341 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
342 {
343 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
344 if (RT_SUCCESS(rc2))
345 {
346 if (g_aImports[i].fOptional)
347 LogRel(("NEM: info: Found optional import %s!%s.\n",
348 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
349 }
350 else
351 {
352 *g_aImports[i].ppfn = NULL;
353
354 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
355 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
356 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
357 if (!g_aImports[i].fOptional)
358 {
359 if (RTErrInfoIsSet(pErrInfo))
360 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
361 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
362 else
363 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
364 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
365 Assert(RT_FAILURE(rc));
366 }
367 }
368 }
369 if (RT_SUCCESS(rc))
370 {
371 Assert(!RTErrInfoIsSet(pErrInfo));
372 }
373 }
374
375 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
376 RTLdrClose(ahMods[i]);
377 return rc;
378}
379
380
381/**
382 * Wrapper for different WHvGetCapability signatures.
383 */
384DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
385{
386 return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
387}
388
389
390/**
391 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
392 *
393 * @returns VBox status code.
394 * @param pVM The cross context VM structure.
395 * @param pErrInfo Where to always return error info.
396 */
397static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
398{
399#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
400#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
401#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
402
403 /*
404 * Is the hypervisor present with the desired capability?
405 *
406 * In build 17083 this translates into:
407 * - CPUID[0x00000001].HVP is set
408 * - CPUID[0x40000000] == "Microsoft Hv"
409 * - CPUID[0x40000001].eax == "Hv#1"
410 * - CPUID[0x40000003].ebx[12] is set.
411 * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
412 * a non-zero value.
413 */
414 /**
415 * @todo Someone at Microsoft please explain weird API design:
416 * 1. Pointless CapabilityCode duplication int the output;
417 * 2. No output size.
418 */
419 WHV_CAPABILITY Caps;
420 RT_ZERO(Caps);
421 SetLastError(0);
422 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
423 DWORD rcWin = GetLastError();
424 if (FAILED(hrc))
425 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
426 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
427 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
428 if (!Caps.HypervisorPresent)
429 {
430 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
431 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
432 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
433 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
434 }
435 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
436
437
438 /*
439 * Check what extended VM exits are supported.
440 */
441 RT_ZERO(Caps);
442 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
443 if (FAILED(hrc))
444 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
445 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
446 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
447 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
448 pVM->nem.s.fHypercallExit = RT_BOOL(Caps.ExtendedVmExits.HypercallExit);
449 pVM->nem.s.fGpaAccessFaultExit = RT_BOOL(Caps.ExtendedVmExits.GpaAccessFaultExit);
450 NEM_LOG_REL_CAP_SUB("fHypercallExit", pVM->nem.s.fHypercallExit);
451 NEM_LOG_REL_CAP_SUB("fGpaAccessFaultExit", pVM->nem.s.fGpaAccessFaultExit);
452 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
453 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
454 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
455
456 /*
457 * Check features in case they end up defining any.
458 */
459 RT_ZERO(Caps);
460 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
461 if (FAILED(hrc))
462 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
463 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
464 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
465 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
466 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
467 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
468
469 /*
470 * Check that the CPU vendor is supported.
471 */
472 RT_ZERO(Caps);
473 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
474 if (FAILED(hrc))
475 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
476 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
477 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
478 switch (Caps.ProcessorVendor)
479 {
480 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
481 case WHvProcessorVendorArm:
482 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - ARM", Caps.ProcessorVendor);
483 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_UNKNOWN;
484 break;
485 default:
486 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
487 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
488 }
489
490 /*
491 * CPU features, guessing these are virtual CPU features?
492 */
493 RT_ZERO(Caps);
494 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
495 if (FAILED(hrc))
496 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
497 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
498 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
499 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
500#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
501 NEM_LOG_REL_CPU_FEATURE(Asid16);
502 NEM_LOG_REL_CPU_FEATURE(TGran16);
503 NEM_LOG_REL_CPU_FEATURE(TGran64);
504 NEM_LOG_REL_CPU_FEATURE(Haf);
505 NEM_LOG_REL_CPU_FEATURE(Hdbs);
506 NEM_LOG_REL_CPU_FEATURE(Pan);
507 NEM_LOG_REL_CPU_FEATURE(AtS1E1);
508 NEM_LOG_REL_CPU_FEATURE(Uao);
509 NEM_LOG_REL_CPU_FEATURE(El0Aarch32);
510 NEM_LOG_REL_CPU_FEATURE(Fp);
511 NEM_LOG_REL_CPU_FEATURE(FpHp);
512 NEM_LOG_REL_CPU_FEATURE(AdvSimd);
513 NEM_LOG_REL_CPU_FEATURE(AdvSimdHp);
514 NEM_LOG_REL_CPU_FEATURE(GicV3V4);
515 NEM_LOG_REL_CPU_FEATURE(GicV41);
516 NEM_LOG_REL_CPU_FEATURE(Ras);
517 NEM_LOG_REL_CPU_FEATURE(PmuV3);
518 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV81);
519 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV84);
520 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV85);
521 NEM_LOG_REL_CPU_FEATURE(Aes);
522 NEM_LOG_REL_CPU_FEATURE(PolyMul);
523 NEM_LOG_REL_CPU_FEATURE(Sha1);
524 NEM_LOG_REL_CPU_FEATURE(Sha256);
525 NEM_LOG_REL_CPU_FEATURE(Sha512);
526 NEM_LOG_REL_CPU_FEATURE(Crc32);
527 NEM_LOG_REL_CPU_FEATURE(Atomic);
528 NEM_LOG_REL_CPU_FEATURE(Rdm);
529 NEM_LOG_REL_CPU_FEATURE(Sha3);
530 NEM_LOG_REL_CPU_FEATURE(Sm3);
531 NEM_LOG_REL_CPU_FEATURE(Sm4);
532 NEM_LOG_REL_CPU_FEATURE(Dp);
533 NEM_LOG_REL_CPU_FEATURE(Fhm);
534 NEM_LOG_REL_CPU_FEATURE(DcCvap);
535 NEM_LOG_REL_CPU_FEATURE(DcCvadp);
536 NEM_LOG_REL_CPU_FEATURE(ApaBase);
537 NEM_LOG_REL_CPU_FEATURE(ApaEp);
538 NEM_LOG_REL_CPU_FEATURE(ApaEp2);
539 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fp);
540 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fpc);
541 NEM_LOG_REL_CPU_FEATURE(Jscvt);
542 NEM_LOG_REL_CPU_FEATURE(Fcma);
543 NEM_LOG_REL_CPU_FEATURE(RcpcV83);
544 NEM_LOG_REL_CPU_FEATURE(RcpcV84);
545 NEM_LOG_REL_CPU_FEATURE(Gpa);
546 NEM_LOG_REL_CPU_FEATURE(L1ipPipt);
547 NEM_LOG_REL_CPU_FEATURE(DzPermitted);
548
549#undef NEM_LOG_REL_CPU_FEATURE
550 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(47) - 1)))
551 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
552 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
553 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
554
555 /*
556 * The cache line flush size.
557 */
558 RT_ZERO(Caps);
559 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
560 if (FAILED(hrc))
561 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
562 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
563 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
564 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
565 if (Caps.ProcessorClFlushSize < 8 && Caps.ProcessorClFlushSize > 9)
566 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
567 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
568
569 RT_ZERO(Caps);
570 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodePhysicalAddressWidth, &Caps, sizeof(Caps));
571 if (FAILED(hrc))
572 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
573 "WHvGetCapability/WHvCapabilityCodePhysicalAddressWidth failed: %Rhrc (Last=%#x/%u)",
574 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
575 NEM_LOG_REL_CAP_EX("WHvCapabilityCodePhysicalAddressWidth", "2^%u", Caps.PhysicalAddressWidth);
576 if (Caps.PhysicalAddressWidth < 32 && Caps.PhysicalAddressWidth > 52)
577 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported physical address width: %u", Caps.ProcessorClFlushSize);
578 pVM->nem.s.cPhysicalAddressWidth = Caps.PhysicalAddressWidth;
579
580
581 /*
582 * See if they've added more properties that we're not aware of.
583 */
584 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
585 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
586 {
587 static const struct
588 {
589 uint32_t iMin, iMax; } s_aUnknowns[] =
590 {
591 { 0x0004, 0x000f },
592 { 0x1003, 0x100f },
593 { 0x2000, 0x200f },
594 { 0x3000, 0x300f },
595 { 0x4000, 0x400f },
596 };
597 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
598 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
599 {
600 RT_ZERO(Caps);
601 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
602 if (SUCCEEDED(hrc))
603 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
604 }
605 }
606
607 /*
608 * For proper operation, we require CPUID exits.
609 */
610 /** @todo Any? */
611
612#undef NEM_LOG_REL_CAP_EX
613#undef NEM_LOG_REL_CAP_SUB_EX
614#undef NEM_LOG_REL_CAP_SUB
615 return VINF_SUCCESS;
616}
617
618
/**
 * Initializes the GIC controller emulation provided by Hyper-V.
 *
 * Reads the GIC MMIO base addresses from the device configuration tree and
 * hands them to Hyper-V via the (undocumented) ARM64 IC parameters partition
 * property.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 *
 * @note Needs to be done early when setting up the partition so this has to live here and not in GICNem-win.cpp
 */
static int nemR3WinGicCreate(PVM pVM)
{
    PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
    AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);

    /*
     * Query the MMIO ranges.
     */
    /* Distributor base is mandatory. */
    RTGCPHYS GCPhysMmioBaseDist = 0;
    int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");

    /* Redistributor base is mandatory. */
    RTGCPHYS GCPhysMmioBaseReDist = 0;
    rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");

    /* The ITS base is optional: VERR_CFGM_VALUE_NOT_FOUND is tolerated and
       leaves the base at 0. */
    RTGCPHYS GCPhysMmioBaseIts = 0;
    rc = CFGMR3QueryU64(pGicCfg, "ItsMmioBase", &GCPhysMmioBaseIts);
    if (RT_FAILURE(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND)
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"ItsMmioBase\" value\n");
    rc = VINF_SUCCESS; /* Clear a possible VERR_CFGM_VALUE_NOT_FOUND before returning rc below. */

    /*
     * One can only set the GIC distributor base. The re-distributor regions for the individual
     * vCPUs are configured when the vCPUs are created, so we need to save the base of the MMIO region.
     */
    pVM->nem.s.GCPhysMmioBaseReDist = GCPhysMmioBaseReDist;

    WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;

    /* Fill in the undocumented IC parameters property (see MY_WHV_ARM64_IC_PARAMETERS). */
    MY_WHV_ARM64_IC_PARAMETERS Property; RT_ZERO(Property);
    Property.u32EmulationMode = WHV_ARM64_IC_EMULATION_MODE_GICV3;
    Property.u.GicV3.GCPhysGicdBase           = GCPhysMmioBaseDist;
    Property.u.GicV3.GCPhysGitsTranslaterBase = GCPhysMmioBaseIts;
    Property.u.GicV3.cLpiIntIdBits            = 1; /** @todo LPIs are currently not supported with our device emulations. */
    Property.u.GicV3.u32PpiCntvOverflw        = pVM->nem.s.u32GicPpiVTimer + 16; /* Calculate the absolute timer INTID. */
    Property.u.GicV3.u32PpiPmu                = 23; /** @todo Configure dynamically (from SBSA, needs a PMU/NEM emulation just like with the GIC probably). */
    /* The property code cast is needed because the code is not in the SDK headers yet. */
    HRESULT hrc = WHvSetPartitionProperty(hPartition, (WHV_PARTITION_PROPERTY_CODE)WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeArm64IcParameters: %Rhrc (Last=%#x/%u)",
                          hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    return rc;
}
677
678
679/**
680 * Creates and sets up a Hyper-V (exo) partition.
681 *
682 * @returns VBox status code.
683 * @param pVM The cross context VM structure.
684 * @param pErrInfo Where to always return error info.
685 */
686static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
687{
688 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
689 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
690
691 /*
692 * Create the partition.
693 */
694 WHV_PARTITION_HANDLE hPartition;
695 HRESULT hrc = WHvCreatePartition(&hPartition);
696 if (FAILED(hrc))
697 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
698 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
699
700 int rc;
701
702 /*
703 * Set partition properties, most importantly the CPU count.
704 */
705 /**
706 * @todo Someone at Microsoft please explain another weird API:
707 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
708 * argument rather than as part of the struct. That is so weird if you've
709 * used any other NT or windows API, including WHvGetCapability().
710 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
711 * technically only need 9 bytes for setting/getting
712 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
713 WHV_PARTITION_PROPERTY Property;
714 RT_ZERO(Property);
715 Property.ProcessorCount = pVM->cCpus;
716 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
717 if (SUCCEEDED(hrc))
718 {
719 RT_ZERO(Property);
720 Property.ExtendedVmExits.HypercallExit = pVM->nem.s.fHypercallExit;
721 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
722 if (SUCCEEDED(hrc))
723 {
724 /*
725 * We'll continue setup in nemR3NativeInitAfterCPUM.
726 */
727 pVM->nem.s.fCreatedEmts = false;
728 pVM->nem.s.hPartition = hPartition;
729 LogRel(("NEM: Created partition %p.\n", hPartition));
730 return VINF_SUCCESS;
731 }
732
733 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
734 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
735 Property.ExtendedVmExits.AsUINT64, hrc);
736 }
737 else
738 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
739 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
740 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
741 WHvDeletePartition(hPartition);
742
743 Assert(!pVM->nem.s.hPartitionDevice);
744 Assert(!pVM->nem.s.hPartition);
745 return rc;
746}
747
748
749static int nemR3NativeInitSetupVm(PVM pVM)
750{
751 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
752 AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
753 AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
754 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
755
756 /*
757 * Continue setting up the partition now that we've got most of the CPUID feature stuff.
758 */
759 WHV_PARTITION_PROPERTY Property;
760 HRESULT hrc;
761
762 /* Not sure if we really need to set the cache line flush size. */
763 RT_ZERO(Property);
764 Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
765 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
766 if (FAILED(hrc))
767 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
768 "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
769 pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
770
771 /*
772 * Sync CPU features with CPUM.
773 */
774 /** @todo sync CPU features with CPUM. */
775
776 /* Set the partition property. */
777 RT_ZERO(Property);
778 Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
779 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
780 if (FAILED(hrc))
781 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
782 "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
783 pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
784
785 /* Configure the GIC. */
786 int rc = nemR3WinGicCreate(pVM);
787 if (RT_FAILURE(rc))
788 return rc;
789
790 /*
791 * Set up the partition.
792 *
793 * Seems like this is where the partition is actually instantiated and we get
794 * a handle to it.
795 */
796 hrc = WHvSetupPartition(hPartition);
797 if (FAILED(hrc))
798 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
799 "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
800 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
801
802 /*
803 * Setup the EMTs.
804 */
805 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
806 {
807 hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
808 if (FAILED(hrc))
809 {
810 NTSTATUS const rcNtLast = RTNtLastStatusValue();
811 DWORD const dwErrLast = RTNtLastErrorValue();
812 while (idCpu-- > 0)
813 {
814 HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
815 AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
816 hPartition, idCpu, hrc2, RTNtLastStatusValue(),
817 RTNtLastErrorValue()));
818 }
819 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
820 "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
821 }
822
823 if (idCpu == 0)
824 {
825 /*
826 * Need to query the ID registers and populate CPUM,
827 * these are partition wide registers and need to be queried/set with WHV_ANY_VP.
828 */
829 CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
830
831 WHV_REGISTER_NAME aenmNames[10];
832 WHV_REGISTER_VALUE aValues[10];
833 RT_ZERO(aValues);
834
835 aenmNames[0] = WHvArm64RegisterIdAa64Dfr0El1;
836 aenmNames[1] = WHvArm64RegisterIdAa64Dfr1El1;
837 aenmNames[2] = WHvArm64RegisterIdAa64Isar0El1;
838 aenmNames[3] = WHvArm64RegisterIdAa64Isar1El1;
839 aenmNames[4] = WHvArm64RegisterIdAa64Isar2El1;
840 aenmNames[5] = WHvArm64RegisterIdAa64Mmfr0El1;
841 aenmNames[6] = WHvArm64RegisterIdAa64Mmfr1El1;
842 aenmNames[7] = WHvArm64RegisterIdAa64Mmfr2El1;
843 aenmNames[8] = WHvArm64RegisterIdAa64Pfr0El1;
844 aenmNames[9] = WHvArm64RegisterIdAa64Pfr1El1;
845
846 hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
847 AssertLogRelMsgReturn(SUCCEEDED(hrc),
848 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
849 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
850 , VERR_NEM_GET_REGISTERS_FAILED);
851
852 IdRegs.u64RegIdAa64Pfr0El1 = aValues[8].Reg64;
853 IdRegs.u64RegIdAa64Pfr1El1 = aValues[9].Reg64;
854 IdRegs.u64RegIdAa64Dfr0El1 = aValues[0].Reg64;
855 IdRegs.u64RegIdAa64Dfr1El1 = aValues[1].Reg64;
856 IdRegs.u64RegIdAa64Isar0El1 = aValues[2].Reg64;
857 IdRegs.u64RegIdAa64Isar1El1 = aValues[3].Reg64;
858 IdRegs.u64RegIdAa64Isar2El1 = aValues[4].Reg64;
859 IdRegs.u64RegIdAa64Mmfr0El1 = aValues[5].Reg64;
860 IdRegs.u64RegIdAa64Mmfr1El1 = aValues[6].Reg64;
861 IdRegs.u64RegIdAa64Mmfr2El1 = aValues[7].Reg64;
862
863 rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
864 if (RT_FAILURE(rc))
865 return rc;
866
867 /* Apply any overrides to the partition. */
868 PCCPUMARMV8IDREGS pIdRegsGst = NULL;
869 rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
870 AssertRCReturn(rc, rc);
871
872 aValues[0].Reg64 = pIdRegsGst->u64RegIdAa64Dfr0El1;
873 aValues[1].Reg64 = pIdRegsGst->u64RegIdAa64Dfr1El1;
874 aValues[2].Reg64 = pIdRegsGst->u64RegIdAa64Isar0El1;
875 aValues[3].Reg64 = pIdRegsGst->u64RegIdAa64Isar1El1;
876 aValues[4].Reg64 = pIdRegsGst->u64RegIdAa64Isar2El1;
877 aValues[5].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr0El1;
878 aValues[6].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr1El1;
879 aValues[7].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr2El1;
880 aValues[8].Reg64 = pIdRegsGst->u64RegIdAa64Pfr0El1;
881 aValues[9].Reg64 = pIdRegsGst->u64RegIdAa64Pfr1El1;
882
883 hrc = WHvSetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
884 AssertLogRelMsgReturn(SUCCEEDED(hrc),
885 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
886 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
887 , VERR_NEM_SET_REGISTERS_FAILED);
888
889 /* Save the amount of break-/watchpoints supported for syncing the guest register state later. */
890 pVM->nem.s.cBreakpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_BRPS) + 1;
891 pVM->nem.s.cWatchpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_WRPS) + 1;
892 }
893
894 /* Configure the GIC re-distributor region for the GIC. */
895 WHV_REGISTER_NAME enmName = My_WHvArm64RegisterGicrBaseGpa;
896 WHV_REGISTER_VALUE Value;
897 Value.Reg64 = pVM->nem.s.GCPhysMmioBaseReDist + idCpu * _128K;
898
899 hrc = WHvSetVirtualProcessorRegisters(hPartition, idCpu, &enmName, 1, &Value);
900 AssertLogRelMsgReturn(SUCCEEDED(hrc),
901 ("WHvSetVirtualProcessorRegisters(%p, %u, WHvArm64RegisterGicrBaseGpa,) -> %Rhrc (Last=%#x/%u)\n",
902 hPartition, idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
903 , VERR_NEM_SET_REGISTERS_FAILED);
904 }
905
906 pVM->nem.s.fCreatedEmts = true;
907
908 LogRel(("NEM: Successfully set up partition\n"));
909 return VINF_SUCCESS;
910}
911
912
913/**
914 * Try initialize the native API.
915 *
916 * This may only do part of the job, more can be done in
917 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
918 *
919 * @returns VBox status code.
920 * @param pVM The cross context VM structure.
921 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
922 * the latter we'll fail if we cannot initialize.
923 * @param fForced Whether the HMForced flag is set and we should
924 * fail if we cannot initialize.
925 */
926int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
927{
928 g_uBuildNo = RTSystemGetNtBuildNo();
929
930 /*
931 * Error state.
932 * The error message will be non-empty on failure and 'rc' will be set too.
933 */
934 RTERRINFOSTATIC ErrInfo;
935 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
936 int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
937 if (RT_SUCCESS(rc))
938 {
939 /*
940 * Check the capabilties of the hypervisor, starting with whether it's present.
941 */
942 rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
943 if (RT_SUCCESS(rc))
944 {
945 /*
946 * Create and initialize a partition.
947 */
948 rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
949 if (RT_SUCCESS(rc))
950 {
951 rc = nemR3NativeInitSetupVm(pVM);
952 if (RT_SUCCESS(rc))
953 {
954 /*
955 * Set ourselves as the execution engine and make config adjustments.
956 */
957 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
958 Log(("NEM: Marked active!\n"));
959 PGMR3EnableNemMode(pVM);
960
961 /*
962 * Register release statistics
963 */
964 STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
965 "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number guest pages currently mapped by the VM");
966 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
967 "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
968 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
969 "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
970 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
971 "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
972 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
973 "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
974 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
975 "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
976 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
977 "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
978 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
979 "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
980 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
981 "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");
982
983 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
984 {
985 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
986 STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
987 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
988 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
989 STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
990 STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
991 STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
992 STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
993 STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
994 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
995 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
996 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
997 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
998 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
999 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
1000 STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
1001 STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
1002 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
1003 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
1004 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
1005 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
1006 STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
1007 STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
1008 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
1009 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
1010 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
1011 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
1012 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
1013 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
1014 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
1015 STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
1016 }
1017
1018#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
1019 if (!SUPR3IsDriverless())
1020 {
1021 PUVM pUVM = pVM->pUVM;
1022 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1023 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
1024 "/NEM/R0Stats/cPagesAvailable");
1025 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1026 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
1027 "/NEM/R0Stats/cPagesInUse");
1028 }
1029#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
1030 }
1031 }
1032 }
1033 }
1034
1035 /*
1036 * We only fail if in forced mode, otherwise just log the complaint and return.
1037 */
1038 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
1039 if ( (fForced || !fFallback)
1040 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
1041 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1042
1043 if (RTErrInfoIsSet(pErrInfo))
1044 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
1045 return VINF_SUCCESS;
1046}
1047
1048
1049/**
1050 * This is called after CPUMR3Init is done.
1051 *
1052 * @returns VBox status code.
1053 * @param pVM The VM handle..
1054 */
1055int nemR3NativeInitAfterCPUM(PVM pVM)
1056{
1057 /*
1058 * Validate sanity.
1059 */
1060 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1061
1062 /** @todo */
1063
1064 /*
1065 * Any hyper-v statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
1066 */
1067 /** @todo stats */
1068
1069 /*
1070 * Adjust features.
1071 *
1072 * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
1073 * the first init call.
1074 */
1075
1076 return VINF_SUCCESS;
1077}
1078
1079
1080/**
1081 * Execute state save operation.
1082 *
1083 * @returns VBox status code.
1084 * @param pVM The cross context VM structure.
1085 * @param pSSM SSM operation handle.
1086 */
1087static DECLCALLBACK(int) nemR3Save(PVM pVM, PSSMHANDLE pSSM)
1088{
1089 /*
1090 * Save the Hyper-V activity state for all CPUs.
1091 */
1092 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1093 {
1094 PVMCPUCC pVCpu = pVM->apCpusR3[i];
1095
1096 static const WHV_REGISTER_NAME s_Name = WHvRegisterInternalActivityState;
1097 WHV_REGISTER_VALUE Reg;
1098
1099 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &s_Name, 1, &Reg);
1100 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1101 ("WHvSetVirtualProcessorRegisters(%p, 0,{WHvRegisterInternalActivityState}, 1,) -> %Rhrc (Last=%#x/%u)\n",
1102 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1103 , VERR_NEM_IPE_9);
1104
1105 SSMR3PutU64(pSSM, Reg.Reg64);
1106 }
1107
1108 return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
1109}
1110
1111
1112/**
1113 * Execute state load operation.
1114 *
1115 * @returns VBox status code.
1116 * @param pVM The cross context VM structure.
1117 * @param pSSM SSM operation handle.
1118 * @param uVersion Data layout version.
1119 * @param uPass The data pass.
1120 */
1121static DECLCALLBACK(int) nemR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1122{
1123 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1124
1125 /*
1126 * Validate version.
1127 */
1128 if (uVersion != 1)
1129 {
1130 AssertMsgFailed(("nemR3Load: Invalid version uVersion=%u!\n", uVersion));
1131 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1132 }
1133
1134 /*
1135 * Restore the Hyper-V activity states for all vCPUs.
1136 */
1137 VMCPU_SET_STATE(pVM->apCpusR3[0], VMCPUSTATE_STARTED);
1138 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1139 {
1140 PVMCPUCC pVCpu = pVM->apCpusR3[i];
1141
1142 static const WHV_REGISTER_NAME s_Name = WHvRegisterInternalActivityState;
1143 WHV_REGISTER_VALUE Reg;
1144 int rc = SSMR3GetU64(pSSM, &Reg.Reg64);
1145 if (RT_FAILURE(rc))
1146 return rc;
1147
1148 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &s_Name, 1, &Reg);
1149 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1150 ("WHvSetVirtualProcessorRegisters(%p, 0,{WHvRegisterInternalActivityState}, 1,) -> %Rhrc (Last=%#x/%u)\n",
1151 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1152 , VERR_NEM_IPE_9);
1153 }
1154
1155 /* terminator */
1156 uint32_t u32;
1157 int rc = SSMR3GetU32(pSSM, &u32);
1158 if (RT_FAILURE(rc))
1159 return rc;
1160 if (u32 != UINT32_MAX)
1161 {
1162 AssertMsgFailed(("u32=%#x\n", u32));
1163 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1164 }
1165 return VINF_SUCCESS;
1166}
1167
1168
1169int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1170{
1171 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
1172 //AssertLogRel(fRet);
1173
1174 if (enmWhat == VMINITCOMPLETED_RING3)
1175 {
1176 /*
1177 * Register the saved state data unit.
1178 */
1179 int rc = SSMR3RegisterInternal(pVM, "nem-win", 1, NEM_HV_SAVED_STATE_VERSION,
1180 sizeof(uint64_t),
1181 NULL, NULL, NULL,
1182 NULL, nemR3Save, NULL,
1183 NULL, nemR3Load, NULL);
1184 if (RT_FAILURE(rc))
1185 return rc;
1186 }
1187
1188 NOREF(pVM); NOREF(enmWhat);
1189 return VINF_SUCCESS;
1190}
1191
1192
1193int nemR3NativeTerm(PVM pVM)
1194{
1195 /*
1196 * Delete the partition.
1197 */
1198 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
1199 pVM->nem.s.hPartition = NULL;
1200 pVM->nem.s.hPartitionDevice = NULL;
1201 if (hPartition != NULL)
1202 {
1203 VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
1204 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
1205 while (idCpu-- > 0)
1206 {
1207 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
1208 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
1209 hPartition, idCpu, hrc, RTNtLastStatusValue(),
1210 RTNtLastErrorValue()));
1211 }
1212 WHvDeletePartition(hPartition);
1213 }
1214 pVM->nem.s.fCreatedEmts = false;
1215 return VINF_SUCCESS;
1216}
1217
1218
1219/**
1220 * VM reset notification.
1221 *
1222 * @param pVM The cross context VM structure.
1223 */
1224void nemR3NativeReset(PVM pVM)
1225{
1226 RT_NOREF(pVM);
1227}
1228
1229
1230/**
1231 * Reset CPU due to INIT IPI or hot (un)plugging.
1232 *
1233 * @param pVCpu The cross context virtual CPU structure of the CPU being
1234 * reset.
1235 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1236 */
1237void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1238{
1239 RT_NOREF(pVCpu, fInitIpi);
1240}
1241
1242
1243NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
1244{
1245 WHV_REGISTER_NAME aenmNames[128];
1246 WHV_REGISTER_VALUE aValues[128];
1247
1248 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
1249 if (!fWhat)
1250 return VINF_SUCCESS;
1251 uintptr_t iReg = 0;
1252
1253#define ADD_REG64(a_enmName, a_uValue) do { \
1254 aenmNames[iReg] = (a_enmName); \
1255 aValues[iReg].Reg128.High64 = 0; \
1256 aValues[iReg].Reg64 = (a_uValue).x; \
1257 iReg++; \
1258 } while (0)
1259#define ADD_REG64_RAW(a_enmName, a_uValue) do { \
1260 aenmNames[iReg] = (a_enmName); \
1261 aValues[iReg].Reg128.High64 = 0; \
1262 aValues[iReg].Reg64 = (a_uValue); \
1263 iReg++; \
1264 } while (0)
1265#define ADD_SYSREG64(a_enmName, a_uValue) do { \
1266 aenmNames[iReg] = (a_enmName); \
1267 aValues[iReg].Reg128.High64 = 0; \
1268 aValues[iReg].Reg64 = (a_uValue).u64; \
1269 iReg++; \
1270 } while (0)
1271#define ADD_REG128(a_enmName, a_uValue) do { \
1272 aenmNames[iReg] = (a_enmName); \
1273 aValues[iReg].Reg128.Low64 = (a_uValue).au64[0]; \
1274 aValues[iReg].Reg128.High64 = (a_uValue).au64[1]; \
1275 iReg++; \
1276 } while (0)
1277
1278 /* GPRs */
1279 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1280 {
1281 if (fWhat & CPUMCTX_EXTRN_X0)
1282 ADD_REG64(WHvArm64RegisterX0, pVCpu->cpum.GstCtx.aGRegs[0]);
1283 if (fWhat & CPUMCTX_EXTRN_X1)
1284 ADD_REG64(WHvArm64RegisterX1, pVCpu->cpum.GstCtx.aGRegs[1]);
1285 if (fWhat & CPUMCTX_EXTRN_X2)
1286 ADD_REG64(WHvArm64RegisterX2, pVCpu->cpum.GstCtx.aGRegs[2]);
1287 if (fWhat & CPUMCTX_EXTRN_X3)
1288 ADD_REG64(WHvArm64RegisterX3, pVCpu->cpum.GstCtx.aGRegs[3]);
1289 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1290 {
1291 ADD_REG64(WHvArm64RegisterX4, pVCpu->cpum.GstCtx.aGRegs[4]);
1292 ADD_REG64(WHvArm64RegisterX5, pVCpu->cpum.GstCtx.aGRegs[5]);
1293 ADD_REG64(WHvArm64RegisterX6, pVCpu->cpum.GstCtx.aGRegs[6]);
1294 ADD_REG64(WHvArm64RegisterX7, pVCpu->cpum.GstCtx.aGRegs[7]);
1295 ADD_REG64(WHvArm64RegisterX8, pVCpu->cpum.GstCtx.aGRegs[8]);
1296 ADD_REG64(WHvArm64RegisterX9, pVCpu->cpum.GstCtx.aGRegs[9]);
1297 ADD_REG64(WHvArm64RegisterX10, pVCpu->cpum.GstCtx.aGRegs[10]);
1298 ADD_REG64(WHvArm64RegisterX11, pVCpu->cpum.GstCtx.aGRegs[11]);
1299 ADD_REG64(WHvArm64RegisterX12, pVCpu->cpum.GstCtx.aGRegs[12]);
1300 ADD_REG64(WHvArm64RegisterX13, pVCpu->cpum.GstCtx.aGRegs[13]);
1301 ADD_REG64(WHvArm64RegisterX14, pVCpu->cpum.GstCtx.aGRegs[14]);
1302 ADD_REG64(WHvArm64RegisterX15, pVCpu->cpum.GstCtx.aGRegs[15]);
1303 ADD_REG64(WHvArm64RegisterX16, pVCpu->cpum.GstCtx.aGRegs[16]);
1304 ADD_REG64(WHvArm64RegisterX17, pVCpu->cpum.GstCtx.aGRegs[17]);
1305 ADD_REG64(WHvArm64RegisterX18, pVCpu->cpum.GstCtx.aGRegs[18]);
1306 ADD_REG64(WHvArm64RegisterX19, pVCpu->cpum.GstCtx.aGRegs[19]);
1307 ADD_REG64(WHvArm64RegisterX20, pVCpu->cpum.GstCtx.aGRegs[20]);
1308 ADD_REG64(WHvArm64RegisterX21, pVCpu->cpum.GstCtx.aGRegs[21]);
1309 ADD_REG64(WHvArm64RegisterX22, pVCpu->cpum.GstCtx.aGRegs[22]);
1310 ADD_REG64(WHvArm64RegisterX23, pVCpu->cpum.GstCtx.aGRegs[23]);
1311 ADD_REG64(WHvArm64RegisterX24, pVCpu->cpum.GstCtx.aGRegs[24]);
1312 ADD_REG64(WHvArm64RegisterX25, pVCpu->cpum.GstCtx.aGRegs[25]);
1313 ADD_REG64(WHvArm64RegisterX26, pVCpu->cpum.GstCtx.aGRegs[26]);
1314 ADD_REG64(WHvArm64RegisterX27, pVCpu->cpum.GstCtx.aGRegs[27]);
1315 ADD_REG64(WHvArm64RegisterX28, pVCpu->cpum.GstCtx.aGRegs[28]);
1316 }
1317 if (fWhat & CPUMCTX_EXTRN_LR)
1318 ADD_REG64(WHvArm64RegisterLr, pVCpu->cpum.GstCtx.aGRegs[30]);
1319 if (fWhat & CPUMCTX_EXTRN_FP)
1320 ADD_REG64(WHvArm64RegisterFp, pVCpu->cpum.GstCtx.aGRegs[29]);
1321 }
1322
1323 /* RIP & Flags */
1324 if (fWhat & CPUMCTX_EXTRN_PC)
1325 ADD_SYSREG64(WHvArm64RegisterPc, pVCpu->cpum.GstCtx.Pc);
1326 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1327 ADD_REG64_RAW(WHvArm64RegisterPstate, pVCpu->cpum.GstCtx.fPState);
1328 if (fWhat & CPUMCTX_EXTRN_SPSR)
1329 ADD_SYSREG64(WHvArm64RegisterSpsrEl1, pVCpu->cpum.GstCtx.Spsr);
1330 if (fWhat & CPUMCTX_EXTRN_ELR)
1331 ADD_SYSREG64(WHvArm64RegisterElrEl1, pVCpu->cpum.GstCtx.Elr);
1332 if (fWhat & CPUMCTX_EXTRN_SP)
1333 {
1334 ADD_SYSREG64(WHvArm64RegisterSpEl0, pVCpu->cpum.GstCtx.aSpReg[0]);
1335 ADD_SYSREG64(WHvArm64RegisterSpEl1, pVCpu->cpum.GstCtx.aSpReg[1]);
1336 }
1337 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1338 {
1339 ADD_SYSREG64(WHvArm64RegisterSctlrEl1, pVCpu->cpum.GstCtx.Sctlr);
1340 ADD_SYSREG64(WHvArm64RegisterTcrEl1, pVCpu->cpum.GstCtx.Tcr);
1341 ADD_SYSREG64(WHvArm64RegisterTtbr0El1, pVCpu->cpum.GstCtx.Ttbr0);
1342 ADD_SYSREG64(WHvArm64RegisterTtbr1El1, pVCpu->cpum.GstCtx.Ttbr1);
1343 }
1344
1345 /* Vector state. */
1346 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1347 {
1348 ADD_REG128(WHvArm64RegisterQ0, pVCpu->cpum.GstCtx.aVRegs[0]);
1349 ADD_REG128(WHvArm64RegisterQ1, pVCpu->cpum.GstCtx.aVRegs[1]);
1350 ADD_REG128(WHvArm64RegisterQ2, pVCpu->cpum.GstCtx.aVRegs[2]);
1351 ADD_REG128(WHvArm64RegisterQ3, pVCpu->cpum.GstCtx.aVRegs[3]);
1352 ADD_REG128(WHvArm64RegisterQ4, pVCpu->cpum.GstCtx.aVRegs[4]);
1353 ADD_REG128(WHvArm64RegisterQ5, pVCpu->cpum.GstCtx.aVRegs[5]);
1354 ADD_REG128(WHvArm64RegisterQ6, pVCpu->cpum.GstCtx.aVRegs[6]);
1355 ADD_REG128(WHvArm64RegisterQ7, pVCpu->cpum.GstCtx.aVRegs[7]);
1356 ADD_REG128(WHvArm64RegisterQ8, pVCpu->cpum.GstCtx.aVRegs[8]);
1357 ADD_REG128(WHvArm64RegisterQ9, pVCpu->cpum.GstCtx.aVRegs[9]);
1358 ADD_REG128(WHvArm64RegisterQ10, pVCpu->cpum.GstCtx.aVRegs[10]);
1359 ADD_REG128(WHvArm64RegisterQ11, pVCpu->cpum.GstCtx.aVRegs[11]);
1360 ADD_REG128(WHvArm64RegisterQ12, pVCpu->cpum.GstCtx.aVRegs[12]);
1361 ADD_REG128(WHvArm64RegisterQ13, pVCpu->cpum.GstCtx.aVRegs[13]);
1362 ADD_REG128(WHvArm64RegisterQ14, pVCpu->cpum.GstCtx.aVRegs[14]);
1363 ADD_REG128(WHvArm64RegisterQ15, pVCpu->cpum.GstCtx.aVRegs[15]);
1364 ADD_REG128(WHvArm64RegisterQ16, pVCpu->cpum.GstCtx.aVRegs[16]);
1365 ADD_REG128(WHvArm64RegisterQ17, pVCpu->cpum.GstCtx.aVRegs[17]);
1366 ADD_REG128(WHvArm64RegisterQ18, pVCpu->cpum.GstCtx.aVRegs[18]);
1367 ADD_REG128(WHvArm64RegisterQ19, pVCpu->cpum.GstCtx.aVRegs[19]);
1368 ADD_REG128(WHvArm64RegisterQ20, pVCpu->cpum.GstCtx.aVRegs[20]);
1369 ADD_REG128(WHvArm64RegisterQ21, pVCpu->cpum.GstCtx.aVRegs[21]);
1370 ADD_REG128(WHvArm64RegisterQ22, pVCpu->cpum.GstCtx.aVRegs[22]);
1371 ADD_REG128(WHvArm64RegisterQ23, pVCpu->cpum.GstCtx.aVRegs[23]);
1372 ADD_REG128(WHvArm64RegisterQ24, pVCpu->cpum.GstCtx.aVRegs[24]);
1373 ADD_REG128(WHvArm64RegisterQ25, pVCpu->cpum.GstCtx.aVRegs[25]);
1374 ADD_REG128(WHvArm64RegisterQ26, pVCpu->cpum.GstCtx.aVRegs[26]);
1375 ADD_REG128(WHvArm64RegisterQ27, pVCpu->cpum.GstCtx.aVRegs[27]);
1376 ADD_REG128(WHvArm64RegisterQ28, pVCpu->cpum.GstCtx.aVRegs[28]);
1377 ADD_REG128(WHvArm64RegisterQ29, pVCpu->cpum.GstCtx.aVRegs[29]);
1378 ADD_REG128(WHvArm64RegisterQ30, pVCpu->cpum.GstCtx.aVRegs[30]);
1379 ADD_REG128(WHvArm64RegisterQ31, pVCpu->cpum.GstCtx.aVRegs[31]);
1380 }
1381
1382 if (fWhat & CPUMCTX_EXTRN_FPCR)
1383 ADD_REG64_RAW(WHvArm64RegisterFpcr, pVCpu->cpum.GstCtx.fpcr);
1384 if (fWhat & CPUMCTX_EXTRN_FPSR)
1385 ADD_REG64_RAW(WHvArm64RegisterFpsr, pVCpu->cpum.GstCtx.fpsr);
1386
1387 /* System registers. */
1388 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1389 {
1390 ADD_SYSREG64(WHvArm64RegisterVbarEl1, pVCpu->cpum.GstCtx.VBar);
1391 ADD_SYSREG64(WHvArm64RegisterEsrEl1, pVCpu->cpum.GstCtx.Esr);
1392 ADD_SYSREG64(WHvArm64RegisterFarEl1, pVCpu->cpum.GstCtx.Far);
1393 ADD_SYSREG64(WHvArm64RegisterCntkctlEl1, pVCpu->cpum.GstCtx.CntKCtl);
1394 ADD_SYSREG64(WHvArm64RegisterContextidrEl1, pVCpu->cpum.GstCtx.ContextIdr);
1395 ADD_SYSREG64(WHvArm64RegisterCpacrEl1, pVCpu->cpum.GstCtx.Cpacr);
1396 ADD_SYSREG64(WHvArm64RegisterCsselrEl1, pVCpu->cpum.GstCtx.Csselr);
1397 ADD_SYSREG64(WHvArm64RegisterMairEl1, pVCpu->cpum.GstCtx.Mair);
1398 ADD_SYSREG64(WHvArm64RegisterParEl1, pVCpu->cpum.GstCtx.Par);
1399 ADD_SYSREG64(WHvArm64RegisterTpidrroEl0, pVCpu->cpum.GstCtx.TpIdrRoEl0);
1400 ADD_SYSREG64(WHvArm64RegisterTpidrEl0, pVCpu->cpum.GstCtx.aTpIdr[0]);
1401 ADD_SYSREG64(WHvArm64RegisterTpidrEl1, pVCpu->cpum.GstCtx.aTpIdr[1]);
1402 ADD_SYSREG64(My_WHvArm64RegisterActlrEl1, pVCpu->cpum.GstCtx.Actlr);
1403 }
1404
1405 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1406 {
1407 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1408 {
1409 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Ctrl);
1410 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Value);
1411 }
1412
1413 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1414 {
1415 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Ctrl);
1416 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Value);
1417 }
1418
1419 ADD_SYSREG64(WHvArm64RegisterMdscrEl1, pVCpu->cpum.GstCtx.Mdscr);
1420 }
1421
1422 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1423 {
1424 ADD_SYSREG64(WHvArm64RegisterApdAKeyHiEl1, pVCpu->cpum.GstCtx.Apda.High);
1425 ADD_SYSREG64(WHvArm64RegisterApdAKeyLoEl1, pVCpu->cpum.GstCtx.Apda.Low);
1426 ADD_SYSREG64(WHvArm64RegisterApdBKeyHiEl1, pVCpu->cpum.GstCtx.Apdb.High);
1427 ADD_SYSREG64(WHvArm64RegisterApdBKeyLoEl1, pVCpu->cpum.GstCtx.Apdb.Low);
1428 ADD_SYSREG64(WHvArm64RegisterApgAKeyHiEl1, pVCpu->cpum.GstCtx.Apga.High);
1429 ADD_SYSREG64(WHvArm64RegisterApgAKeyLoEl1, pVCpu->cpum.GstCtx.Apga.Low);
1430 ADD_SYSREG64(WHvArm64RegisterApiAKeyHiEl1, pVCpu->cpum.GstCtx.Apia.High);
1431 ADD_SYSREG64(WHvArm64RegisterApiAKeyLoEl1, pVCpu->cpum.GstCtx.Apia.Low);
1432 ADD_SYSREG64(WHvArm64RegisterApiBKeyHiEl1, pVCpu->cpum.GstCtx.Apib.High);
1433 ADD_SYSREG64(WHvArm64RegisterApiBKeyLoEl1, pVCpu->cpum.GstCtx.Apib.Low);
1434 }
1435
1436#undef ADD_REG64
1437#undef ADD_REG64_RAW
1438#undef ADD_REG128
1439
1440 /*
1441 * Set the registers.
1442 */
1443 Assert(iReg < RT_ELEMENTS(aValues));
1444 Assert(iReg < RT_ELEMENTS(aenmNames));
1445 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
1446 if (SUCCEEDED(hrc))
1447 {
1448 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
1449 return VINF_SUCCESS;
1450 }
1451 AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1452 pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
1453 hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
1454 return VERR_INTERNAL_ERROR;
1455}
1456
1457
/**
 * Imports guest CPU state from Hyper-V into the guest context (CPUMCTX).
 *
 * This is the counterpart of nemHCWinCopyStateToHyperV: it first builds a
 * register-name list matching the still-external parts of the requested
 * CPUMCTX_EXTRN_XXX mask, fetches all values in a single
 * WHvGetVirtualProcessorRegisters call, and then copies the values into
 * pVCpu->cpum.GstCtx.
 *
 * NOTE: The value-copy section below must consume the registers in exactly
 *       the same order as the name list is built here; the GET_xxx macros
 *       assert this via aenmNames[iReg].
 *
 * @returns VBox status code (VERR_NEM_GET_REGISTERS_FAILED on API failure).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    WHV_REGISTER_NAME aenmNames[256];

    /* Only fetch state that is actually still owned by Hyper-V. */
    fWhat &= pVCpu->cpum.GstCtx.fExtrn;
    if (!fWhat)
        return VINF_SUCCESS;

    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_X0)
            aenmNames[iReg++] = WHvArm64RegisterX0;
        if (fWhat & CPUMCTX_EXTRN_X1)
            aenmNames[iReg++] = WHvArm64RegisterX1;
        if (fWhat & CPUMCTX_EXTRN_X2)
            aenmNames[iReg++] = WHvArm64RegisterX2;
        if (fWhat & CPUMCTX_EXTRN_X3)
            aenmNames[iReg++] = WHvArm64RegisterX3;
        if (fWhat & CPUMCTX_EXTRN_X4_X28)
        {
            aenmNames[iReg++] = WHvArm64RegisterX4;
            aenmNames[iReg++] = WHvArm64RegisterX5;
            aenmNames[iReg++] = WHvArm64RegisterX6;
            aenmNames[iReg++] = WHvArm64RegisterX7;
            aenmNames[iReg++] = WHvArm64RegisterX8;
            aenmNames[iReg++] = WHvArm64RegisterX9;
            aenmNames[iReg++] = WHvArm64RegisterX10;
            aenmNames[iReg++] = WHvArm64RegisterX11;
            aenmNames[iReg++] = WHvArm64RegisterX12;
            aenmNames[iReg++] = WHvArm64RegisterX13;
            aenmNames[iReg++] = WHvArm64RegisterX14;
            aenmNames[iReg++] = WHvArm64RegisterX15;
            aenmNames[iReg++] = WHvArm64RegisterX16;
            aenmNames[iReg++] = WHvArm64RegisterX17;
            aenmNames[iReg++] = WHvArm64RegisterX18;
            aenmNames[iReg++] = WHvArm64RegisterX19;
            aenmNames[iReg++] = WHvArm64RegisterX20;
            aenmNames[iReg++] = WHvArm64RegisterX21;
            aenmNames[iReg++] = WHvArm64RegisterX22;
            aenmNames[iReg++] = WHvArm64RegisterX23;
            aenmNames[iReg++] = WHvArm64RegisterX24;
            aenmNames[iReg++] = WHvArm64RegisterX25;
            aenmNames[iReg++] = WHvArm64RegisterX26;
            aenmNames[iReg++] = WHvArm64RegisterX27;
            aenmNames[iReg++] = WHvArm64RegisterX28;
        }
        if (fWhat & CPUMCTX_EXTRN_LR)
            aenmNames[iReg++] = WHvArm64RegisterLr;
        if (fWhat & CPUMCTX_EXTRN_FP)
            aenmNames[iReg++] = WHvArm64RegisterFp;
    }

    /* PC & Flags */
    if (fWhat & CPUMCTX_EXTRN_PC)
        aenmNames[iReg++] = WHvArm64RegisterPc;
    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        aenmNames[iReg++] = WHvArm64RegisterPstate;
    if (fWhat & CPUMCTX_EXTRN_SPSR)
        aenmNames[iReg++] = WHvArm64RegisterSpsrEl1;
    if (fWhat & CPUMCTX_EXTRN_ELR)
        aenmNames[iReg++] = WHvArm64RegisterElrEl1;
    if (fWhat & CPUMCTX_EXTRN_SP)
    {
        aenmNames[iReg++] = WHvArm64RegisterSpEl0;
        aenmNames[iReg++] = WHvArm64RegisterSpEl1;
    }
    if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
    {
        aenmNames[iReg++] = WHvArm64RegisterSctlrEl1;
        aenmNames[iReg++] = WHvArm64RegisterTcrEl1;
        aenmNames[iReg++] = WHvArm64RegisterTtbr0El1;
        aenmNames[iReg++] = WHvArm64RegisterTtbr1El1;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        aenmNames[iReg++] = WHvArm64RegisterQ0;
        aenmNames[iReg++] = WHvArm64RegisterQ1;
        aenmNames[iReg++] = WHvArm64RegisterQ2;
        aenmNames[iReg++] = WHvArm64RegisterQ3;
        aenmNames[iReg++] = WHvArm64RegisterQ4;
        aenmNames[iReg++] = WHvArm64RegisterQ5;
        aenmNames[iReg++] = WHvArm64RegisterQ6;
        aenmNames[iReg++] = WHvArm64RegisterQ7;
        aenmNames[iReg++] = WHvArm64RegisterQ8;
        aenmNames[iReg++] = WHvArm64RegisterQ9;
        aenmNames[iReg++] = WHvArm64RegisterQ10;
        aenmNames[iReg++] = WHvArm64RegisterQ11;
        aenmNames[iReg++] = WHvArm64RegisterQ12;
        aenmNames[iReg++] = WHvArm64RegisterQ13;
        aenmNames[iReg++] = WHvArm64RegisterQ14;
        aenmNames[iReg++] = WHvArm64RegisterQ15;

        aenmNames[iReg++] = WHvArm64RegisterQ16;
        aenmNames[iReg++] = WHvArm64RegisterQ17;
        aenmNames[iReg++] = WHvArm64RegisterQ18;
        aenmNames[iReg++] = WHvArm64RegisterQ19;
        aenmNames[iReg++] = WHvArm64RegisterQ20;
        aenmNames[iReg++] = WHvArm64RegisterQ21;
        aenmNames[iReg++] = WHvArm64RegisterQ22;
        aenmNames[iReg++] = WHvArm64RegisterQ23;
        aenmNames[iReg++] = WHvArm64RegisterQ24;
        aenmNames[iReg++] = WHvArm64RegisterQ25;
        aenmNames[iReg++] = WHvArm64RegisterQ26;
        aenmNames[iReg++] = WHvArm64RegisterQ27;
        aenmNames[iReg++] = WHvArm64RegisterQ28;
        aenmNames[iReg++] = WHvArm64RegisterQ29;
        aenmNames[iReg++] = WHvArm64RegisterQ30;
        aenmNames[iReg++] = WHvArm64RegisterQ31;
    }
    if (fWhat & CPUMCTX_EXTRN_FPCR)
        aenmNames[iReg++] = WHvArm64RegisterFpcr;
    if (fWhat & CPUMCTX_EXTRN_FPSR)
        aenmNames[iReg++] = WHvArm64RegisterFpsr;

    /* System registers. */
    if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
    {
        aenmNames[iReg++] = WHvArm64RegisterVbarEl1;
        aenmNames[iReg++] = WHvArm64RegisterEsrEl1;
        aenmNames[iReg++] = WHvArm64RegisterFarEl1;
        aenmNames[iReg++] = WHvArm64RegisterCntkctlEl1;
        aenmNames[iReg++] = WHvArm64RegisterContextidrEl1;
        aenmNames[iReg++] = WHvArm64RegisterCpacrEl1;
        aenmNames[iReg++] = WHvArm64RegisterCsselrEl1;
        aenmNames[iReg++] = WHvArm64RegisterMairEl1;
        aenmNames[iReg++] = WHvArm64RegisterParEl1;
        aenmNames[iReg++] = WHvArm64RegisterTpidrroEl0;
        aenmNames[iReg++] = WHvArm64RegisterTpidrEl0;
        aenmNames[iReg++] = WHvArm64RegisterTpidrEl1;
        aenmNames[iReg++] = My_WHvArm64RegisterActlrEl1;
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
    {
        /* Hyper-V doesn't allow syncing debug break-/watchpoint registers which aren't there. */
        for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
        {
            aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i);
            aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i);
        }

        for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
        {
            aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i);
            aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i);
        }

        aenmNames[iReg++] = WHvArm64RegisterMdscrEl1;
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
    {
        aenmNames[iReg++] = WHvArm64RegisterApdAKeyHiEl1;
        aenmNames[iReg++] = WHvArm64RegisterApdAKeyLoEl1;
        aenmNames[iReg++] = WHvArm64RegisterApdBKeyHiEl1;
        aenmNames[iReg++] = WHvArm64RegisterApdBKeyLoEl1;
        aenmNames[iReg++] = WHvArm64RegisterApgAKeyHiEl1;
        aenmNames[iReg++] = WHvArm64RegisterApgAKeyLoEl1;
        aenmNames[iReg++] = WHvArm64RegisterApiAKeyHiEl1;
        aenmNames[iReg++] = WHvArm64RegisterApiAKeyLoEl1;
        aenmNames[iReg++] = WHvArm64RegisterApiBKeyHiEl1;
        aenmNames[iReg++] = WHvArm64RegisterApiBKeyLoEl1;
    }

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[256];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    /* Copy the values out in the exact order the names were added above; each
       macro asserts that the current slot matches the expected register. */
    iReg = 0;
#define GET_REG64(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            (a_DstVar).x = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
#define GET_REG64_RAW(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            (a_DstVar) = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
#define GET_SYSREG64(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            (a_DstVar).u64 = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
#define GET_REG128(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == a_enmName); \
            (a_DstVar).au64[0] = aValues[iReg].Reg128.Low64; \
            (a_DstVar).au64[1] = aValues[iReg].Reg128.High64; \
            iReg++; \
        } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_X0)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[0], WHvArm64RegisterX0);
        if (fWhat & CPUMCTX_EXTRN_X1)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[1], WHvArm64RegisterX1);
        if (fWhat & CPUMCTX_EXTRN_X2)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[2], WHvArm64RegisterX2);
        if (fWhat & CPUMCTX_EXTRN_X3)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[3], WHvArm64RegisterX3);
        if (fWhat & CPUMCTX_EXTRN_X4_X28)
        {
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[4], WHvArm64RegisterX4);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[5], WHvArm64RegisterX5);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[6], WHvArm64RegisterX6);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[7], WHvArm64RegisterX7);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[8], WHvArm64RegisterX8);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[9], WHvArm64RegisterX9);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[10], WHvArm64RegisterX10);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[11], WHvArm64RegisterX11);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[12], WHvArm64RegisterX12);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[13], WHvArm64RegisterX13);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[14], WHvArm64RegisterX14);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[15], WHvArm64RegisterX15);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[16], WHvArm64RegisterX16);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[17], WHvArm64RegisterX17);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[18], WHvArm64RegisterX18);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[19], WHvArm64RegisterX19);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[20], WHvArm64RegisterX20);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[21], WHvArm64RegisterX21);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[22], WHvArm64RegisterX22);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[23], WHvArm64RegisterX23);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[24], WHvArm64RegisterX24);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[25], WHvArm64RegisterX25);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[26], WHvArm64RegisterX26);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[27], WHvArm64RegisterX27);
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[28], WHvArm64RegisterX28);
        }
        if (fWhat & CPUMCTX_EXTRN_LR)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[30], WHvArm64RegisterLr);
        if (fWhat & CPUMCTX_EXTRN_FP)
            GET_REG64(pVCpu->cpum.GstCtx.aGRegs[29], WHvArm64RegisterFp);
    }

    /* PC & Flags */
    if (fWhat & CPUMCTX_EXTRN_PC)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.Pc.u64, WHvArm64RegisterPc);
    if (fWhat & CPUMCTX_EXTRN_PSTATE)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.fPState, WHvArm64RegisterPstate);
    if (fWhat & CPUMCTX_EXTRN_SPSR)
        GET_SYSREG64(pVCpu->cpum.GstCtx.Spsr, WHvArm64RegisterSpsrEl1);
    if (fWhat & CPUMCTX_EXTRN_ELR)
        GET_SYSREG64(pVCpu->cpum.GstCtx.Elr, WHvArm64RegisterElrEl1);
    if (fWhat & CPUMCTX_EXTRN_SP)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[0], WHvArm64RegisterSpEl0);
        GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[1], WHvArm64RegisterSpEl1);
    }
    if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.Sctlr, WHvArm64RegisterSctlrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Tcr, WHvArm64RegisterTcrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr0, WHvArm64RegisterTtbr0El1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr1, WHvArm64RegisterTtbr1El1);
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_V0_V31)
    {
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[0], WHvArm64RegisterQ0);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[1], WHvArm64RegisterQ1);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[2], WHvArm64RegisterQ2);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[3], WHvArm64RegisterQ3);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[4], WHvArm64RegisterQ4);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[5], WHvArm64RegisterQ5);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[6], WHvArm64RegisterQ6);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[7], WHvArm64RegisterQ7);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[8], WHvArm64RegisterQ8);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[9], WHvArm64RegisterQ9);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[10], WHvArm64RegisterQ10);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[11], WHvArm64RegisterQ11);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[12], WHvArm64RegisterQ12);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[13], WHvArm64RegisterQ13);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[14], WHvArm64RegisterQ14);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[15], WHvArm64RegisterQ15);

        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[16], WHvArm64RegisterQ16);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[17], WHvArm64RegisterQ17);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[18], WHvArm64RegisterQ18);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[19], WHvArm64RegisterQ19);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[20], WHvArm64RegisterQ20);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[21], WHvArm64RegisterQ21);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[22], WHvArm64RegisterQ22);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[23], WHvArm64RegisterQ23);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[24], WHvArm64RegisterQ24);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[25], WHvArm64RegisterQ25);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[26], WHvArm64RegisterQ26);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[27], WHvArm64RegisterQ27);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[28], WHvArm64RegisterQ28);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[29], WHvArm64RegisterQ29);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[30], WHvArm64RegisterQ30);
        GET_REG128(pVCpu->cpum.GstCtx.aVRegs[31], WHvArm64RegisterQ31);
    }
    if (fWhat & CPUMCTX_EXTRN_FPCR)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.fpcr, WHvArm64RegisterFpcr);
    if (fWhat & CPUMCTX_EXTRN_FPSR)
        GET_REG64_RAW(pVCpu->cpum.GstCtx.fpsr, WHvArm64RegisterFpsr);

    /* System registers. */
    if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.VBar, WHvArm64RegisterVbarEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Esr, WHvArm64RegisterEsrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Far, WHvArm64RegisterFarEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.CntKCtl, WHvArm64RegisterCntkctlEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.ContextIdr, WHvArm64RegisterContextidrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Cpacr, WHvArm64RegisterCpacrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Csselr, WHvArm64RegisterCsselrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Mair, WHvArm64RegisterMairEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Par, WHvArm64RegisterParEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.TpIdrRoEl0, WHvArm64RegisterTpidrroEl0);
        GET_SYSREG64(pVCpu->cpum.GstCtx.aTpIdr[0], WHvArm64RegisterTpidrEl0);
        GET_SYSREG64(pVCpu->cpum.GstCtx.aTpIdr[1], WHvArm64RegisterTpidrEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Actlr, My_WHvArm64RegisterActlrEl1);
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
    {
        for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
        {
            GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i));
            GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i));
        }

        for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
        {
            GET_SYSREG64(pVCpu->cpum.GstCtx.aWp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i));
            GET_SYSREG64(pVCpu->cpum.GstCtx.aWp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i));
        }

        GET_SYSREG64(pVCpu->cpum.GstCtx.Mdscr, WHvArm64RegisterMdscrEl1);
    }

    if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
    {
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.High, WHvArm64RegisterApdAKeyHiEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.Low, WHvArm64RegisterApdAKeyLoEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apdb.High, WHvArm64RegisterApdBKeyHiEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apdb.Low, WHvArm64RegisterApdBKeyLoEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apga.High, WHvArm64RegisterApgAKeyHiEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apga.Low, WHvArm64RegisterApgAKeyLoEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apia.High, WHvArm64RegisterApiAKeyHiEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apia.Low, WHvArm64RegisterApiAKeyLoEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apib.High, WHvArm64RegisterApiBKeyHiEl1);
        GET_SYSREG64(pVCpu->cpum.GstCtx.Apib.Low, WHvArm64RegisterApiBKeyLoEl1);
    }

    /* Almost done, just update extrn flags.  If nothing interesting is left
       external, clear the whole field (including the keeper bits). */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return VINF_SUCCESS;
}
1831
1832
1833/**
1834 * Interface for importing state on demand (used by IEM).
1835 *
1836 * @returns VBox status code.
1837 * @param pVCpu The cross context CPU structure.
1838 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1839 */
1840VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1841{
1842 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1843 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1844}
1845
1846
1847/**
1848 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1849 *
1850 * @returns VBox status code.
1851 * @param pVCpu The cross context CPU structure.
1852 * @param pcTicks Where to return the CPU tick count.
1853 * @param puAux Where to return the TSC_AUX register value.
1854 */
1855VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1856{
1857 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1858
1859 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1860 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1861 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1862
1863 /* Ensure time for the partition is suspended - it will be resumed as soon as a vCPU starts executing. */
1864 HRESULT hrc = WHvSuspendPartitionTime(pVM->nem.s.hPartition);
1865 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1866 ("WHvSuspendPartitionTime(%p) -> %Rhrc (Last=%#x/%u)\n",
1867 pVM->nem.s.hPartition, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1868 , VERR_NEM_GET_REGISTERS_FAILED);
1869
1870 /* Call the offical API. */
1871 WHV_REGISTER_NAME enmName = WHvArm64RegisterCntvctEl0;
1872 WHV_REGISTER_VALUE Value = { { {0, 0} } };
1873 hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &enmName, 1, &Value);
1874 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1875 ("WHvGetVirtualProcessorRegisters(%p, %u,{CNTVCT_EL0},1,) -> %Rhrc (Last=%#x/%u)\n",
1876 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1877 , VERR_NEM_GET_REGISTERS_FAILED);
1878 *pcTicks = Value.Reg64;
1879 LogFlow(("NEMHCQueryCpuTick: %#RX64 (host: %#RX64)\n", *pcTicks, ASMReadTSC()));
1880 if (puAux)
1881 *puAux =0;
1882
1883 return VINF_SUCCESS;
1884}
1885
1886
1887/**
1888 * Resumes CPU clock (TSC) on all virtual CPUs.
1889 *
1890 * This is called by TM when the VM is started, restored, resumed or similar.
1891 *
1892 * @returns VBox status code.
1893 * @param pVM The cross context VM structure.
1894 * @param pVCpu The cross context CPU structure of the calling EMT.
1895 * @param uPausedTscValue The TSC value at the time of pausing.
1896 */
1897VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1898{
1899 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1900 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1901
1902 /*
1903 * Call the offical API to do the job.
1904 */
1905 LogFlow(("NEMHCResumeCpuTickOnAll: %#RX64 (host: %#RX64)\n", uPausedTscValue, ASMReadTSC()));
1906
1907 /*
1908 * Now set the CNTVCT_EL0 register for each vCPU, Hyper-V will program the timer offset in
1909 * CNTVOFF_EL2 accordingly. ARM guarantees that CNTVCT_EL0 is synchronised across all CPUs,
1910 * as long as CNTVOFF_EL2 is the same everywhere. Lets just hope scheduling will not affect it
1911 * if the partition time is suspended.
1912 */
1913 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1914 {
1915 WHV_REGISTER_NAME enmName = WHvArm64RegisterCntvctEl0;
1916 WHV_REGISTER_VALUE Value;
1917 Value.Reg64 = uPausedTscValue;
1918 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, idCpu, &enmName, 1, &Value);
1919 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1920 ("WHvSetVirtualProcessorRegisters(%p, 0,{CNTVCT_EL0},1,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1921 pVM->nem.s.hPartition, idCpu, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1922 , VERR_NEM_SET_TSC);
1923
1924 /* Make sure the CNTV_CTL_EL0 and CNTV_CVAL_EL0 registers are up to date after resuming (saved state load). */
1925 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
1926 pVCpuDst->nem.s.fSyncCntvRegs = true;
1927 }
1928
1929 HRESULT hrc = WHvResumePartitionTime(pVM->nem.s.hPartition);
1930 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1931 ("WHvResumePartitionTime(%p) -> %Rhrc (Last=%#x/%u)\n",
1932 pVM->nem.s.hPartition, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1933 , VERR_NEM_SET_TSC);
1934
1935 return VINF_SUCCESS;
1936}
1937
1938
#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 *
 * Only does work when log level 3 is enabled for this group: dumps the full
 * AArch64 register set via DBGF plus a disassembly of the current guest
 * instruction.
 */
static void nemR3WinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        /* Disassemble the instruction at the current guest PC. */
        char szInstr[256]; RT_ZERO(szInstr);
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */
1971
1972
1973/**
1974 * Copies register state from the (common) exit context.
1975 *
1976 * ASSUMES no state copied yet.
1977 *
1978 * @param pVCpu The cross context per CPU structure.
1979 * @param pMsgHdr The common message header.
1980 */
1981DECLINLINE(void) nemR3WinCopyStateFromArmHeader(PVMCPUCC pVCpu, WHV_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1982{
1983#ifdef LOG_ENABLED /* When state logging is enabled the state is synced completely upon VM exit. */
1984 if (!LogIs3Enabled())
1985#endif
1986 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
1987 == (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE));
1988
1989 pVCpu->cpum.GstCtx.Pc.u64 = pMsgHdr->Pc;
1990 pVCpu->cpum.GstCtx.fPState = pMsgHdr->Cpsr;
1991
1992 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
1993}
1994
1995
1996/**
1997 * Returns the byte size from the given access SAS value.
1998 *
1999 * @returns Number of bytes to transfer.
2000 * @param uSas The SAS value to convert.
2001 */
2002DECLINLINE(size_t) nemR3WinGetByteCountFromSas(uint8_t uSas)
2003{
2004 switch (uSas)
2005 {
2006 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
2007 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
2008 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
2009 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
2010 default:
2011 AssertReleaseFailed();
2012 }
2013
2014 return 0;
2015}
2016
2017
2018/**
2019 * Sets the given general purpose register to the given value.
2020 *
2021 * @param pVCpu The cross context virtual CPU structure of the
2022 * calling EMT.
2023 * @param uReg The register index.
2024 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
2025 * @param fSignExtend Flag whether to sign extend the value.
2026 * @param u64Val The value.
2027 */
2028DECLINLINE(void) nemR3WinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
2029{
2030 AssertReturnVoid(uReg < 31);
2031
2032 if (f64BitReg)
2033 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
2034 else
2035 pVCpu->cpum.GstCtx.aGRegs[uReg].x = (uint64_t)(fSignExtend ? (int32_t)u64Val : (uint32_t)u64Val);
2036
2037 /* Mark the register as not extern anymore. */
2038 switch (uReg)
2039 {
2040 case 0:
2041 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
2042 break;
2043 case 1:
2044 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
2045 break;
2046 case 2:
2047 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
2048 break;
2049 case 3:
2050 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
2051 break;
2052 default:
2053 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
2054 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
2055 }
2056}
2057
2058
2059/**
2060 * Gets the given general purpose register and returns the value.
2061 *
2062 * @returns Value from the given register.
2063 * @param pVCpu The cross context virtual CPU structure of the
2064 * calling EMT.
2065 * @param uReg The register index.
2066 */
2067DECLINLINE(uint64_t) nemR3WinGetGReg(PVMCPU pVCpu, uint8_t uReg)
2068{
2069 AssertReturn(uReg <= ARMV8_A64_REG_XZR, 0);
2070
2071 if (uReg == ARMV8_A64_REG_XZR)
2072 return 0;
2073
2074 /** @todo Import the register if extern. */
2075 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
2076
2077 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
2078}
2079
2080
2081/**
2082 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2083 *
2084 * @returns Strict VBox status code.
2085 * @param pVM The cross context VM structure.
2086 * @param pVCpu The cross context per CPU structure.
2087 * @param pExit The VM exit information to handle.
2088 * @sa nemHCWinHandleMessageMemory
2089 */
2090NEM_TMPL_STATIC VBOXSTRICTRC
2091nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2092{
2093 uint64_t const uHostTsc = ASMReadTSC();
2094 Assert(pExit->MemoryAccess.Header.InterceptAccessType != 3);
2095
2096 /*
2097 * Emulate the memory access, either access handler or special memory.
2098 */
2099 WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
2100 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2101 pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
2102 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2103 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2104 pHdr->Pc, uHostTsc);
2105 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
2106 RT_NOREF_PV(pExitRec);
2107 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
2108 AssertRCReturn(rc, rc);
2109
2110#ifdef LOG_ENABLED
2111 uint8_t const cbInstr = pExit->MemoryAccess.InstructionByteCount;
2112 RTGCPTR const GCPtrVa = pExit->MemoryAccess.Gva;
2113#endif
2114 RTGCPHYS const GCPhys = pExit->MemoryAccess.Gpa;
2115 uint64_t const uIss = pExit->MemoryAccess.Syndrome;
2116 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
2117 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
2118 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
2119 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
2120 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
2121 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
2122 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
2123 size_t cbAcc = nemR3WinGetByteCountFromSas(uAcc);
2124 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhys=%RGp cbInstr=%u\n",
2125 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrVa, GCPhys, cbInstr));
2126
2127 RT_NOREF(fL2Fault);
2128
2129 VBOXSTRICTRC rcStrict;
2130 if (fIsv)
2131 {
2132 EMHistoryAddExit(pVCpu,
2133 fWrite
2134 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2135 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2136 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
2137
2138 uint64_t u64Val = 0;
2139 if (fWrite)
2140 {
2141 u64Val = nemR3WinGetGReg(pVCpu, uReg);
2142 rcStrict = PGMPhysWrite(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2143 Log4(("MmioExit/%u: %08RX64: WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
2144 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2145 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2146 }
2147 else
2148 {
2149 rcStrict = PGMPhysRead(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2150 Log4(("MmioExit/%u: %08RX64: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2151 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2152 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2153 if (rcStrict == VINF_SUCCESS)
2154 nemR3WinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
2155 }
2156 }
2157 else
2158 {
2159 /** @todo Our UEFI firmware accesses the flash region with the following instruction
2160 * when the NVRAM actually contains data:
2161 * ldrb w9, [x6, #-0x0001]!
2162 * This is too complicated for the hardware so the ISV bit is not set. Until there
2163 * is a proper IEM implementation we just handle this here for now to avoid annoying
2164 * users too much.
2165 */
2166 /* The following ASSUMES that the vCPU state is completely synced. */
2167
2168 /* Read instruction. */
2169 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2170 const void *pvPageR3 = NULL;
2171 PGMPAGEMAPLOCK PageMapLock;
2172
2173 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
2174 if (rcStrict == VINF_SUCCESS)
2175 {
2176 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
2177 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
2178
2179 DISSTATE Dis;
2180 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
2181 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
2182 if (rcStrict == VINF_SUCCESS)
2183 {
2184 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
2185 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2186 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2187 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
2188 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
2189 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
2190 {
2191 /* The fault address is already the final address. */
2192 uint8_t bVal = 0;
2193 rcStrict = PGMPhysRead(pVM, GCPhys, &bVal, 1, PGMACCESSORIGIN_HM);
2194 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2195 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, sizeof(bVal), sizeof(bVal),
2196 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
2197 if (rcStrict == VINF_SUCCESS)
2198 {
2199 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
2200 /* Update the indexed register. */
2201 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
2202 }
2203 }
2204 /*
2205 * Seeing the following with the Windows 11/ARM TPM driver:
2206 * %fffff800e5342888 48 25 45 29 ldp w8, w9, [x10, #+0x0028]
2207 */
2208 else if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDP
2209 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2210 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2211 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmReg
2212 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2213 && Dis.aParams[2].armv8.enmType == kDisArmv8OpParmAddrInGpr
2214 && Dis.aParams[2].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit)
2215 {
2216 /** @todo This is tricky to handle if the first register read returns something else than VINF_SUCCESS... */
2217 /* The fault address is already the final address. */
2218 uint32_t u32Val1 = 0;
2219 uint32_t u32Val2 = 0;
2220 rcStrict = PGMPhysRead(pVM, GCPhys, &u32Val1, sizeof(u32Val1), PGMACCESSORIGIN_HM);
2221 if (rcStrict == VINF_SUCCESS)
2222 rcStrict = PGMPhysRead(pVM, GCPhys + sizeof(uint32_t), &u32Val2, sizeof(u32Val2), PGMACCESSORIGIN_HM);
2223 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs %.*Rhxs rcStrict=%Rrc\n",
2224 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, 2 * sizeof(uint32_t), sizeof(u32Val1),
2225 &u32Val1, sizeof(u32Val2), &u32Val2, VBOXSTRICTRC_VAL(rcStrict) ));
2226 if (rcStrict == VINF_SUCCESS)
2227 {
2228 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val1);
2229 nemR3WinSetGReg(pVCpu, Dis.aParams[1].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val2);
2230 }
2231 }
2232 else
2233 AssertFailedReturn(VERR_NOT_SUPPORTED);
2234 }
2235 }
2236 }
2237
2238 if (rcStrict == VINF_SUCCESS)
2239 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t); /** @todo Why is InstructionByteCount always 0? */
2240
2241 return rcStrict;
2242}
2243
2244
2245/**
2246 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2247 *
2248 * @returns Strict VBox status code.
2249 * @param pVM The cross context VM structure.
2250 * @param pVCpu The cross context per CPU structure.
2251 * @param pExit The VM exit information to handle.
2252 * @sa nemHCWinHandleMessageMemory
2253 */
2254NEM_TMPL_STATIC VBOXSTRICTRC
2255nemR3WinHandleExitHypercall(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2256{
2257 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2258
2259 /** @todo Raise exception to EL1 if PSCI not configured. */
2260 /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
2261 uint32_t uFunId = pExit->Hypercall.Immediate;
2262 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
2263 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
2264 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
2265 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
2266 {
2267 switch (uFunNum)
2268 {
2269 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2270 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
2271 break;
2272 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2273 rcStrict = VMR3PowerOff(pVM->pUVM);
2274 break;
2275 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2276 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2277 {
2278 bool fHaltOnReset;
2279 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
2280 if (RT_SUCCESS(rc) && fHaltOnReset)
2281 {
2282 Log(("nemHCLnxHandleExitHypercall: Halt On Reset!\n"));
2283 rcStrict = VINF_EM_HALT;
2284 }
2285 else
2286 {
2287 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
2288 VM_FF_SET(pVM, VM_FF_RESET);
2289 rcStrict = VINF_EM_RESET;
2290 }
2291 break;
2292 }
2293 case ARM_PSCI_FUNC_ID_CPU_ON:
2294 {
2295 uint64_t u64TgtCpu = pExit->Hypercall.X[1];
2296 RTGCPHYS GCPhysExecAddr = pExit->Hypercall.X[2];
2297 uint64_t u64CtxId = pExit->Hypercall.X[3];
2298 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
2299 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
2300 break;
2301 }
2302 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
2303 {
2304 uint32_t u32FunNum = (uint32_t)pExit->Hypercall.X[1];
2305 switch (u32FunNum)
2306 {
2307 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2308 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2309 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2310 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2311 case ARM_PSCI_FUNC_ID_CPU_ON:
2312 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2313 false /*f64BitReg*/, false /*fSignExtend*/,
2314 (uint64_t)ARM_PSCI_STS_SUCCESS);
2315 break;
2316 default:
2317 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2318 false /*f64BitReg*/, false /*fSignExtend*/,
2319 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2320 }
2321 break;
2322 }
2323 default:
2324 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2325 }
2326 }
2327 else
2328 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2329
2330 /** @todo What to do if immediate is != 0? */
2331
2332 if (rcStrict == VINF_SUCCESS)
2333 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
2334
2335 return rcStrict;
2336}
2337
2338
2339/**
2340 * Deals with MSR access exits (WHvRunVpExitReasonUnrecoverableException).
2341 *
2342 * @returns Strict VBox status code.
2343 * @param pVM The cross context VM structure.
2344 * @param pVCpu The cross context per CPU structure.
2345 * @param pExit The VM exit information to handle.
2346 * @sa nemHCWinHandleMessageUnrecoverableException
2347 */
2348NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2349{
2350#if 0
2351 /*
2352 * Just copy the state we've got and handle it in the loop for now.
2353 */
2354 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2355 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2356 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
2357 RT_NOREF_PV(pVM);
2358 return VINF_EM_TRIPLE_FAULT;
2359#else
2360 /*
2361 * Let IEM decide whether this is really it.
2362 */
2363 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
2364 pExit->UnrecoverableException.Header.Pc, ASMReadTSC());
2365 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->UnrecoverableException.Header);
2366 AssertReleaseFailed();
2367 RT_NOREF_PV(pVM);
2368 return VINF_SUCCESS;
2369#endif
2370}
2371
2372
2373/**
2374 * Handles VM exits.
2375 *
2376 * @returns Strict VBox status code.
2377 * @param pVM The cross context VM structure.
2378 * @param pVCpu The cross context per CPU structure.
2379 * @param pExit The VM exit information to handle.
2380 * @sa nemHCWinHandleMessage
2381 */
2382NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2383{
2384#ifdef LOG_ENABLED
2385 if (LogIs3Enabled())
2386 {
2387 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
2388 AssertRCReturn(rc, rc);
2389
2390 nemR3WinLogState(pVM, pVCpu);
2391 }
2392#endif
2393
2394 switch (pExit->ExitReason)
2395 {
2396 case WHvRunVpExitReasonUnmappedGpa:
2397 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
2398 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);
2399
2400 case WHvRunVpExitReasonCanceled:
2401 Log4(("CanceledExit/%u\n", pVCpu->idCpu));
2402 return VINF_SUCCESS;
2403
2404 case WHvRunVpExitReasonHypercall:
2405 return nemR3WinHandleExitHypercall(pVM, pVCpu, pExit);
2406
2407 case 0x8001000c: /* WHvRunVpExitReasonArm64Reset */
2408 {
2409 if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF)
2410 return VMR3PowerOff(pVM->pUVM);
2411 else if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_RESET)
2412 {
2413 VM_FF_SET(pVM, VM_FF_RESET);
2414 return VINF_EM_RESET;
2415 }
2416 else
2417 AssertLogRelFailedReturn(VERR_NEM_IPE_3);
2418 }
2419
2420 case WHvRunVpExitReasonUnrecoverableException:
2421 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
2422 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);
2423
2424 case WHvRunVpExitReasonUnsupportedFeature:
2425 case WHvRunVpExitReasonInvalidVpRegisterValue:
2426 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
2427 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
2428 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
2429
2430 /* Undesired exits: */
2431 case WHvRunVpExitReasonNone:
2432 default:
2433 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
2434 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
2435 }
2436}
2437
2438
2439VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2440{
2441 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState));
2442#ifdef LOG_ENABLED
2443 if (LogIs3Enabled())
2444 nemR3WinLogState(pVM, pVCpu);
2445#endif
2446
2447 /*
2448 * Try switch to NEM runloop state.
2449 */
2450 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2451 { /* likely */ }
2452 else
2453 {
2454 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2455 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2456 return VINF_SUCCESS;
2457 }
2458
2459 if (pVCpu->nem.s.fSyncCntvRegs)
2460 {
2461 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterCntvCtlEl0, WHvArm64RegisterCntvCvalEl0 };
2462 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)];
2463 aRegs[0].Reg64 = pVCpu->cpum.GstCtx.CntvCtlEl0;
2464 aRegs[1].Reg64 = pVCpu->cpum.GstCtx.CntvCValEl0;
2465
2466 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2467 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2468 ("WHvSetVirtualProcessorRegisters(%p, 0,{CNTV_CTL_EL0, CNTV_CVAL_EL0}, 2,) -> %Rhrc (Last=%#x/%u)\n",
2469 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2470 , VERR_NEM_IPE_9);
2471 pVCpu->nem.s.fSyncCntvRegs = false;
2472 }
2473
2474
2475 /*
2476 * The run loop.
2477 *
2478 * Current approach to state updating to use the sledgehammer and sync
2479 * everything every time. This will be optimized later.
2480 */
2481 const bool fSingleStepping = DBGFIsStepping(pVCpu);
2482 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2483 for (unsigned iLoop = 0;; iLoop++)
2484 {
2485 /*
2486 * Poll timers and run for a bit.
2487 *
2488 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
2489 * so we take the time of the next timer event and uses that as a deadline.
2490 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
2491 */
2492 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2493 * the whole polling job when timers have changed... */
2494 uint64_t offDeltaIgnored;
2495 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2496 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2497 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2498 {
2499 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
2500 {
2501 /* Ensure that Hyper-V has the whole state. */
2502 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
2503 AssertRCReturn(rc2, rc2);
2504
2505#ifdef LOG_ENABLED
2506 if (LogIsFlowEnabled())
2507 {
2508 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2509 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2510 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2511 LogFlow(("NEM/%u: Entry @ %08RX64 pstate=%#RX64\n", pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64));
2512 }
2513#endif
2514
2515 MY_WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
2516 TMNotifyStartOfExecution(pVM, pVCpu);
2517
2518 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
2519
2520 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
2521 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2522#ifdef LOG_ENABLED
2523 if (LogIsFlowEnabled())
2524 {
2525 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2526 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2527 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2528 LogFlow(("NEM/%u: Exit @ %08RX64 pstate=%#RX64 Reason=%#x\n",
2529 pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64, ExitReason.ExitReason));
2530 }
2531#endif
2532 if (SUCCEEDED(hrc))
2533 {
2534 /* Always sync the CNTV_CTL_EL0/CNTV_CVAL_EL0 registers, just like we do on macOS. */
2535 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterCntvCtlEl0, WHvArm64RegisterCntvCvalEl0 };
2536 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2537 hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2538 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2539 ("WHvGetVirtualProcessorRegisters(%p, 0,{CNTV_CTL_EL0, CNTV_CVAL_EL0}, 2,) -> %Rhrc (Last=%#x/%u)\n",
2540 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2541 , VERR_NEM_IPE_9);
2542
2543 pVCpu->cpum.GstCtx.CntvCtlEl0 = aRegs[0].Reg64;
2544 pVCpu->cpum.GstCtx.CntvCValEl0 = aRegs[1].Reg64;
2545
2546 /*
2547 * Deal with the message.
2548 */
2549 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
2550 if (rcStrict == VINF_SUCCESS)
2551 { /* hopefully likely */ }
2552 else
2553 {
2554 LogFlow(("NEM/%u: breaking: nemR3WinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2555 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2556 break;
2557 }
2558 }
2559 else
2560 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
2561 pVCpu->idCpu, hrc, GetLastError()),
2562 VERR_NEM_IPE_0);
2563
2564 /*
2565 * If no relevant FFs are pending, loop.
2566 */
2567 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2568 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2569 continue;
2570
2571 /** @todo Try handle pending flags, not just return to EM loops. Take care
2572 * not to set important RCs here unless we've handled a message. */
2573 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
2574 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
2575 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
2576 }
2577 else
2578 {
2579 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
2580 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
2581 }
2582 }
2583 else
2584 {
2585 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
2586 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
2587 }
2588 break;
2589 } /* the run loop */
2590
2591
2592 /*
2593 * If the CPU is running, make sure to stop it before we try sync back the
2594 * state and return to EM. We don't sync back the whole state if we can help it.
2595 */
2596 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2597 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2598
2599 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
2600 {
2601 /* Try anticipate what we might need. */
2602 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
2603 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2604 || RT_FAILURE(rcStrict))
2605 fImport = CPUMCTX_EXTRN_ALL;
2606 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
2607 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2608
2609 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2610 {
2611 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport);
2612 if (RT_SUCCESS(rc2))
2613 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2614 else if (RT_SUCCESS(rcStrict))
2615 rcStrict = rc2;
2616 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2617 pVCpu->cpum.GstCtx.fExtrn = 0;
2618 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2619 }
2620 else
2621 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2622 }
2623 else
2624 {
2625 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2626 pVCpu->cpum.GstCtx.fExtrn = 0;
2627 }
2628
2629#if 0
2630 UINT32 cbWritten;
2631 WHV_ARM64_LOCAL_INTERRUPT_CONTROLLER_STATE IntrState;
2632 HRESULT hrc = WHvGetVirtualProcessorState(pVM->nem.s.hPartition, pVCpu->idCpu, WHvVirtualProcessorStateTypeInterruptControllerState2,
2633 &IntrState, sizeof(IntrState), &cbWritten);
2634 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2635 ("WHvGetVirtualProcessorState(%p, %u,WHvVirtualProcessorStateTypeInterruptControllerState2,) -> %Rhrc (Last=%#x/%u)\n",
2636 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2637 , VERR_NEM_GET_REGISTERS_FAILED);
2638 LogFlowFunc(("IntrState: cbWritten=%u\n"));
2639 for (uint32_t i = 0; i < RT_ELEMENTS(IntrState.BankedInterruptState); i++)
2640 {
2641 WHV_ARM64_INTERRUPT_STATE *pState = &IntrState.BankedInterruptState[i];
2642 LogFlowFunc(("IntrState: Intr %u:\n"
2643 " Enabled=%RTbool\n"
2644 " EdgeTriggered=%RTbool\n"
2645 " Asserted=%RTbool\n"
2646 " SetPending=%RTbool\n"
2647 " Active=%RTbool\n"
2648 " Direct=%RTbool\n"
2649 " GicrIpriorityrConfigured=%u\n"
2650 " GicrIpriorityrActive=%u\n",
2651 i, pState->Enabled, pState->EdgeTriggered, pState->Asserted, pState->SetPending, pState->Active, pState->Direct,
2652 pState->GicrIpriorityrConfigured, pState->GicrIpriorityrActive));
2653 }
2654#endif
2655
2656 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64,
2657 pVCpu->cpum.GstCtx.fPState, VBOXSTRICTRC_VAL(rcStrict) ));
2658 return rcStrict;
2659}
2660
2661
2662VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2663{
2664 Assert(VM_IS_NEM_ENABLED(pVM));
2665 RT_NOREF(pVM, pVCpu);
2666 return true;
2667}
2668
2669
/**
 * Kicks a vCPU out of the SIPI wait state into the normal run loop.
 *
 * @returns VINF_EM_RESCHEDULE.
 * @param   pVM             The cross context VM structure (unused).
 * @param   pVCpu           The cross context per CPU structure.
 */
VMMR3_INT_DECL(int) NEMR3Halt(PVM pVM, PVMCPU pVCpu)
{
    Assert(EMGetState(pVCpu) == EMSTATE_WAIT_SIPI);

    /*
     * Force the vCPU to get out of the SIPI state and into the normal runloop
     * as Hyper-V doesn't cause VM exits for PSCI calls so we wouldn't notice
     * when the guest brings APs online.
     * Instead we force the EMT to run the vCPU through Hyper-V which manages the state.
     */
    RT_NOREF(pVM);
    EMSetState(pVCpu, EMSTATE_HALTED);
    return VINF_EM_RESCHEDULE;
}
2684
2685
2686bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2687{
2688 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2689 return false;
2690}
2691
2692
2693void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2694{
2695 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
2696 if (pVM->nem.s.fCreatedEmts)
2697 {
2698 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2699 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2700 RT_NOREF_PV(hrc);
2701 }
2702 RT_NOREF_PV(fFlags);
2703}
2704
2705
2706DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2707{
2708 RT_NOREF(pVM, fUseDebugLoop);
2709 return false;
2710}
2711
2712
2713DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2714{
2715 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2716 return false;
2717}
2718
2719
2720DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2721{
2722 PGMPAGEMAPLOCK Lock;
2723 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2724 if (RT_SUCCESS(rc))
2725 PGMPhysReleasePageMappingLock(pVM, &Lock);
2726 return rc;
2727}
2728
2729
2730DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2731{
2732 PGMPAGEMAPLOCK Lock;
2733 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2734 if (RT_SUCCESS(rc))
2735 PGMPhysReleasePageMappingLock(pVM, &Lock);
2736 return rc;
2737}
2738
2739
2740VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2741 uint8_t *pu2State, uint32_t *puNemRange)
2742{
2743 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
2744 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange));
2745
2746 *pu2State = UINT8_MAX;
2747 RT_NOREF(puNemRange);
2748
2749 if (pvR3)
2750 {
2751 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2752 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
2753 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2754 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2755 if (SUCCEEDED(hrc))
2756 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2757 else
2758 {
2759 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2760 GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2761 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2762 return VERR_NEM_MAP_PAGES_FAILED;
2763 }
2764 }
2765 return VINF_SUCCESS;
2766}
2767
2768
2769VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2770{
2771 RT_NOREF(pVM);
2772 return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
2773}
2774
2775
/**
 * Early notification of an MMIO/MMIO2 range being mapped into the guest.
 *
 * Unmaps any RAM being replaced by the range and, when MMIO2 backing memory is
 * supplied, maps it into the Hyper-V partition (with dirty page tracking when
 * requested and supported).
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Start of the range.
 * @param   cb              Size of the range.
 * @param   fFlags          NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam           RAM being replaced, if any (only flags are checked here).
 * @param   pvMmio2         Ring-3 pointer to the MMIO2 backing, NULL for pure MMIO.
 * @param   pu2State        Where to store the resulting NEM page state.
 * @param   puNemRange      NEM range cookie (unused by this backend).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(puNemRange);

    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else if (pvMmio2)
            /* Non-fatal when MMIO2 follows: the map below will replace the range anyway. */
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
        if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
            fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
    {
        /* Pure MMIO range: leave it unmapped so accesses exit to ring-3. */
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    RT_NOREF(pvRam);
    return VINF_SUCCESS;
}
2835
2836
2837VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2838 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2839{
2840 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2841 return VINF_SUCCESS;
2842}
2843
2844
/**
 * Notification that an MMIO/MMIO2 range is being unmapped from the guest.
 *
 * Unmaps any MMIO2 backing from the partition and re-maps the RAM the range
 * had replaced, if any.
 *
 * @returns VBox status code (first failure wins, but both steps are attempted).
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Start of the range.
 * @param   cb              Size of the range.
 * @param   fFlags          NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam           The replaced RAM to restore (when F_REPLACE is set).
 * @param   pvMmio2         Ring-3 pointer to the MMIO2 backing (logging only here).
 * @param   pu2State        Where to store the resulting NEM page state, optional.
 * @param   puNemRange      NEM range cookie (unused by this backend).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    int rc = VINF_SUCCESS;
    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (FAILED(hrc))
        {
            /* Record the failure but still try restoring the RAM below. */
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                     GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
        }
    }

    /*
     * Restore the RAM we replaced.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        AssertPtr(pvRam);
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
                                     WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            rc = VERR_NEM_MAP_PAGES_FAILED;
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
        }
        if (pu2State)
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
    }
    /* Mark the pages as unmapped if relevant. */
    else if (pu2State)
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;

    RT_NOREF(pvMmio2, puNemRange);
    return rc;
}
2900
2901
/**
 * Queries and resets the dirty page bitmap of an MMIO2 range via the
 * WHvQueryGpaRangeDirtyBitmap API.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Start of the range.
 * @param   cb              Size of the range.
 * @param   uNemRange       NEM range cookie (unused by this backend).
 * @param   pvBitmap        Where to store the dirty bitmap (one bit per page).
 * @param   cbBitmap        Size of the bitmap in bytes.
 */
VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    Assert(VM_IS_NEM_ENABLED(pVM));
    /* Callers must have checked NEMR3IsMmio2DirtyPageTrackingSupported first. */
    AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
    Assert(cbBitmap == (uint32_t)cbBitmap);
    RT_NOREF(uNemRange);

    /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
    HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
    if (SUCCEEDED(hrc))
        return VINF_SUCCESS;

    AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
                           GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
}
2919
2920
2921VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
2922 uint8_t *pu2State, uint32_t *puNemRange)
2923{
2924 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2925
2926 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
2927 *pu2State = UINT8_MAX;
2928 *puNemRange = 0;
2929 return VINF_SUCCESS;
2930}
2931
2932
2933VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2934 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2935{
2936 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2937 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2938 *pu2State = UINT8_MAX;
2939
2940 /*
2941 * (Re-)map readonly.
2942 */
2943 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2944 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2945 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2946 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2947 if (SUCCEEDED(hrc))
2948 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2949 else
2950 {
2951 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
2952 GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2953 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2954 return VERR_NEM_MAP_PAGES_FAILED;
2955 }
2956 RT_NOREF(fFlags, puNemRange);
2957 return VINF_SUCCESS;
2958}
2959
2960VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2961{
2962 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
2963 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
2964 RT_NOREF(pVCpu, fEnabled);
2965}
2966
2967
2968void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2969{
2970 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2971 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2972}
2973
2974
/**
 * Notification that a physical access handler has been deregistered, restoring
 * the full read/write/execute mapping of the range when backing memory is given.
 *
 * @param   pVM             The cross context VM structure.
 * @param   enmKind         The handler kind (unused).
 * @param   GCPhys          Start of the range.
 * @param   cb              Size of the range.
 * @param   pvMemR3         Ring-3 pointer to the backing memory, NULL if none.
 * @param   pu2State        Where to store the resulting NEM page state.
 */
VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
    if (pvMemR3)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
                                     WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
            AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
                                   pvMemR3, GCPhys, cb, hrc));
    }
    RT_NOREF(enmKind);
}
2996
2997
2998void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2999 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
3000{
3001 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
3002 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
3003 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
3004}
3005
3006
3007int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3008 PGMPAGETYPE enmType, uint8_t *pu2State)
3009{
3010 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3011 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3012 RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
3013
3014 AssertFailed();
3015 return VINF_SUCCESS;
3016}
3017
3018
3019VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
3020 PGMPAGETYPE enmType, uint8_t *pu2State)
3021{
3022 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3023 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3024 RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
3025}
3026
3027
3028VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
3029 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
3030{
3031 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3032 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
3033 RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
3034
3035 AssertFailed();
3036}
3037
3038
3039/**
3040 * Returns features supported by the NEM backend.
3041 *
3042 * @returns Flags of features supported by the native NEM backend.
3043 * @param pVM The cross context VM structure.
3044 */
3045VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
3046{
3047 RT_NOREF(pVM);
3048 /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
3049 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
3050}
3051
3052
3053/** @page pg_nem_win_aarmv8 NEM/win - Native Execution Manager, Windows.
3054 *
3055 * Open questions:
3056 * - InstructionByteCount and InstructionBytes for unmapped GPA exit are zero...
3057 */
3058
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette