VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp

Last change on this file was 108843, checked in by vboxsync, 4 weeks ago

VMM/PGM,NEM: Some early page table management infrastructure for ARMv8, bugref:10388

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 129.2 KB
1/* $Id: NEMR3Native-win-armv8.cpp 108843 2025-04-04 08:36:32Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.virtualbox.org.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_NEM
39#define VMCPU_INCL_CPUM_GST_CTX
40#include <iprt/nt/nt-and-windows.h>
41#include <iprt/nt/hyperv.h>
42#include <WinHvPlatform.h>
43
44#ifndef _WIN32_WINNT_WIN10
45# error "Missing _WIN32_WINNT_WIN10"
46#endif
47#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
48# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
49#endif
50#include <sysinfoapi.h>
51#include <debugapi.h>
52#include <errhandlingapi.h>
53#include <fileapi.h>
54#include <winerror.h> /* no api header for this. */
55
56#include <VBox/dis.h>
57#include <VBox/vmm/nem.h>
58#include <VBox/vmm/iem.h>
59#include <VBox/vmm/em.h>
60#include <VBox/vmm/pdmapic.h>
61#include <VBox/vmm/pdm.h>
62#include <VBox/vmm/dbgftrace.h>
63#include "NEMInternal.h"
64#include <VBox/vmm/vmcc.h>
65
66#include <iprt/formats/arm-psci.h>
67
68#include <iprt/ldr.h>
69#include <iprt/path.h>
70#include <iprt/string.h>
71#include <iprt/system.h>
72#include <iprt/utf16.h>
73
74#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
75HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
76# define WHvMapGpaRangeFlagTrackDirtyPages ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
77#endif
78
79/** Our saved state version for Hyper-V specific things. */
80#define NEM_HV_SAVED_STATE_VERSION 1
81
82
83/*
84 * The following definitions, which appeared in build 27744, allow configuring the base address of the GICv3 controller
85 * (there is no official SDK for this yet).
86 */
87/** @todo Better way of defining these which doesn't require casting later on when calling APIs. */
88#define WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS UINT32_C(0x00001012)
89/** No GIC present. */
90#define WHV_ARM64_IC_EMULATION_MODE_NONE 0
91/** Hyper-V emulates a GICv3. */
92#define WHV_ARM64_IC_EMULATION_MODE_GICV3 1
93
94/**
95 * Configures the interrupt controller emulated by Hyper-V.
96 */
97typedef struct MY_WHV_ARM64_IC_PARAMETERS
98{
99 uint32_t u32EmulationMode;
100 uint32_t u32Rsvd;
101 union
102 {
103 struct
104 {
105 RTGCPHYS GCPhysGicdBase;
106 RTGCPHYS GCPhysGitsTranslaterBase;
107 uint32_t u32Rsvd;
108 uint32_t cLpiIntIdBits;
109 uint32_t u32PpiCntvOverflw;
110 uint32_t u32PpiPmu;
111 uint32_t au32Rsvd[6];
112 } GicV3;
113 } u;
114} MY_WHV_ARM64_IC_PARAMETERS;
115AssertCompileSize(MY_WHV_ARM64_IC_PARAMETERS, 64);
116
117
118/**
119 * The hypercall exit context.
120 */
121typedef struct MY_WHV_HYPERCALL_CONTEXT
122{
123 WHV_INTERCEPT_MESSAGE_HEADER Header;
124 uint16_t Immediate;
125 uint16_t u16Rsvd;
126 uint32_t u32Rsvd;
127 uint64_t X[18];
128} MY_WHV_HYPERCALL_CONTEXT;
129typedef MY_WHV_HYPERCALL_CONTEXT *PMY_WHV_HYPERCALL_CONTEXT;
130AssertCompileSize(MY_WHV_HYPERCALL_CONTEXT, 24 + 19 * sizeof(uint64_t));
131
132
133/**
134 * The ARM64 reset context.
135 */
136typedef struct MY_WHV_ARM64_RESET_CONTEXT
137{
138 WHV_INTERCEPT_MESSAGE_HEADER Header;
139 uint32_t ResetType;
140 uint32_t u32Rsvd;
141} MY_WHV_ARM64_RESET_CONTEXT;
142typedef MY_WHV_ARM64_RESET_CONTEXT *PMY_WHV_ARM64_RESET_CONTEXT;
143AssertCompileSize(MY_WHV_ARM64_RESET_CONTEXT, 24 + 2 * sizeof(uint32_t));
144
145
146#define WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF 0
147#define WHV_ARM64_RESET_CONTEXT_TYPE_RESET 1
148
149
150/**
151 * The exit reason context for arm64; its size differs
152 * from the one in the default SDK we build against.
153 */
154typedef struct MY_WHV_RUN_VP_EXIT_CONTEXT
155{
156 WHV_RUN_VP_EXIT_REASON ExitReason;
157 uint32_t u32Rsvd;
158 uint64_t u64Rsvd;
159 union
160 {
161 WHV_MEMORY_ACCESS_CONTEXT MemoryAccess;
162 WHV_RUN_VP_CANCELED_CONTEXT CancelReason;
163 MY_WHV_HYPERCALL_CONTEXT Hypercall;
164 WHV_UNRECOVERABLE_EXCEPTION_CONTEXT UnrecoverableException;
165 MY_WHV_ARM64_RESET_CONTEXT Arm64Reset;
166 uint64_t au64Rsvd2[32];
167 };
168} MY_WHV_RUN_VP_EXIT_CONTEXT;
169typedef MY_WHV_RUN_VP_EXIT_CONTEXT *PMY_WHV_RUN_VP_EXIT_CONTEXT;
170AssertCompileSize(MY_WHV_RUN_VP_EXIT_CONTEXT, 272);
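/* Illustrative note (not in the original source): because this local context is larger than the SDK's
 * WHV_RUN_VP_EXIT_CONTEXT, run calls are expected to pass the local size, along the lines of:
 *     MY_WHV_RUN_VP_EXIT_CONTEXT ExitCtx;
 *     HRESULT hrc = WHvRunVirtualProcessor(hPartition, idCpu, &ExitCtx, sizeof(ExitCtx));
 */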
171
172#define My_WHvArm64RegisterGicrBaseGpa ((WHV_REGISTER_NAME)UINT32_C(0x00063000))
173#define My_WHvArm64RegisterActlrEl1 ((WHV_REGISTER_NAME)UINT32_C(0x00040003))
174
175
176/*********************************************************************************************************************************
177* Defined Constants And Macros *
178*********************************************************************************************************************************/
179
180
181/*********************************************************************************************************************************
182* Global Variables *
183*********************************************************************************************************************************/
184/** @name APIs imported from WinHvPlatform.dll
185 * @{ */
186static decltype(WHvGetCapability) * g_pfnWHvGetCapability;
187static decltype(WHvCreatePartition) * g_pfnWHvCreatePartition;
188static decltype(WHvSetupPartition) * g_pfnWHvSetupPartition;
189static decltype(WHvDeletePartition) * g_pfnWHvDeletePartition;
190static decltype(WHvGetPartitionProperty) * g_pfnWHvGetPartitionProperty;
191static decltype(WHvSetPartitionProperty) * g_pfnWHvSetPartitionProperty;
192static decltype(WHvMapGpaRange) * g_pfnWHvMapGpaRange;
193static decltype(WHvUnmapGpaRange) * g_pfnWHvUnmapGpaRange;
194static decltype(WHvTranslateGva) * g_pfnWHvTranslateGva;
195static decltype(WHvQueryGpaRangeDirtyBitmap) * g_pfnWHvQueryGpaRangeDirtyBitmap;
196static decltype(WHvCreateVirtualProcessor) * g_pfnWHvCreateVirtualProcessor;
197static decltype(WHvDeleteVirtualProcessor) * g_pfnWHvDeleteVirtualProcessor;
198static decltype(WHvRunVirtualProcessor) * g_pfnWHvRunVirtualProcessor;
199static decltype(WHvCancelRunVirtualProcessor) * g_pfnWHvCancelRunVirtualProcessor;
200static decltype(WHvGetVirtualProcessorRegisters) * g_pfnWHvGetVirtualProcessorRegisters;
201static decltype(WHvSetVirtualProcessorRegisters) * g_pfnWHvSetVirtualProcessorRegisters;
202static decltype(WHvSuspendPartitionTime) * g_pfnWHvSuspendPartitionTime;
203static decltype(WHvResumePartitionTime) * g_pfnWHvResumePartitionTime;
204decltype(WHvGetVirtualProcessorState) * g_pfnWHvGetVirtualProcessorState;
205decltype(WHvSetVirtualProcessorState) * g_pfnWHvSetVirtualProcessorState;
206decltype(WHvRequestInterrupt) * g_pfnWHvRequestInterrupt;
207/** @} */
208
209/** The Windows build number (overwritten with the real value via RTSystemGetNtBuildNo() in nemR3NativeInit()). */
210static uint32_t g_uBuildNo = 17134;
211
212
213
214/**
215 * Import instructions.
216 */
217static const struct
218{
219 uint8_t idxDll; /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
220 bool fOptional; /**< Set if import is optional. */
221 PFNRT *ppfn; /**< The function pointer variable. */
222 const char *pszName; /**< The function name. */
223} g_aImports[] =
224{
225#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
226 NEM_WIN_IMPORT(0, false, WHvGetCapability),
227 NEM_WIN_IMPORT(0, false, WHvCreatePartition),
228 NEM_WIN_IMPORT(0, false, WHvSetupPartition),
229 NEM_WIN_IMPORT(0, false, WHvDeletePartition),
230 NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
231 NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
232 NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
233 NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
234 NEM_WIN_IMPORT(0, false, WHvTranslateGva),
235 NEM_WIN_IMPORT(0, true, WHvQueryGpaRangeDirtyBitmap),
236 NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
237 NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
238 NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
239 NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
240 NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
241 NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
242 NEM_WIN_IMPORT(0, false, WHvSuspendPartitionTime),
243 NEM_WIN_IMPORT(0, false, WHvResumePartitionTime),
244 NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorState),
245 NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorState),
246 NEM_WIN_IMPORT(0, false, WHvRequestInterrupt),
247#undef NEM_WIN_IMPORT
248};
249
250
251/*
252 * Let the preprocessor alias the APIs to import variables for better autocompletion.
253 */
254#ifndef IN_SLICKEDIT
255# define WHvGetCapability g_pfnWHvGetCapability
256# define WHvCreatePartition g_pfnWHvCreatePartition
257# define WHvSetupPartition g_pfnWHvSetupPartition
258# define WHvDeletePartition g_pfnWHvDeletePartition
259# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
260# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
261# define WHvMapGpaRange g_pfnWHvMapGpaRange
262# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
263# define WHvTranslateGva g_pfnWHvTranslateGva
264# define WHvQueryGpaRangeDirtyBitmap g_pfnWHvQueryGpaRangeDirtyBitmap
265# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
266# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
267# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
268# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
269# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
270# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
271# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
272# define WHvSuspendPartitionTime g_pfnWHvSuspendPartitionTime
273# define WHvResumePartitionTime g_pfnWHvResumePartitionTime
274# define WHvGetVirtualProcessorState g_pfnWHvGetVirtualProcessorState
275# define WHvSetVirtualProcessorState g_pfnWHvSetVirtualProcessorState
276# define WHvRequestInterrupt g_pfnWHvRequestInterrupt
277#endif
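/* Illustrative note (not in the original source): with the aliases above, a call such as
 *     HRESULT hrc = WHvGetCapability(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps), NULL);
 * goes through the dynamically resolved g_pfnWHvGetCapability pointer instead of the import library,
 * so WinHvPlatform.dll only has to be present at runtime when NEM is actually used.
 */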
278
279
280#define WHV_REGNM(a_Suffix) WHvArm64Register ## a_Suffix
281/** The general registers. */
282static const struct
283{
284 WHV_REGISTER_NAME enmWHvReg;
285 uint32_t fCpumExtrn;
286 uintptr_t offCpumCtx;
287} s_aCpumRegs[] =
288{
289#define CPUM_GREG_EMIT_X0_X3(a_Idx) { WHV_REGNM(X ## a_Idx), CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
290#define CPUM_GREG_EMIT_X4_X28(a_Idx) { WHV_REGNM(X ## a_Idx), CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
291 CPUM_GREG_EMIT_X0_X3(0),
292 CPUM_GREG_EMIT_X0_X3(1),
293 CPUM_GREG_EMIT_X0_X3(2),
294 CPUM_GREG_EMIT_X0_X3(3),
295 CPUM_GREG_EMIT_X4_X28(4),
296 CPUM_GREG_EMIT_X4_X28(5),
297 CPUM_GREG_EMIT_X4_X28(6),
298 CPUM_GREG_EMIT_X4_X28(7),
299 CPUM_GREG_EMIT_X4_X28(8),
300 CPUM_GREG_EMIT_X4_X28(9),
301 CPUM_GREG_EMIT_X4_X28(10),
302 CPUM_GREG_EMIT_X4_X28(11),
303 CPUM_GREG_EMIT_X4_X28(12),
304 CPUM_GREG_EMIT_X4_X28(13),
305 CPUM_GREG_EMIT_X4_X28(14),
306 CPUM_GREG_EMIT_X4_X28(15),
307 CPUM_GREG_EMIT_X4_X28(16),
308 CPUM_GREG_EMIT_X4_X28(17),
309 CPUM_GREG_EMIT_X4_X28(18),
310 CPUM_GREG_EMIT_X4_X28(19),
311 CPUM_GREG_EMIT_X4_X28(20),
312 CPUM_GREG_EMIT_X4_X28(21),
313 CPUM_GREG_EMIT_X4_X28(22),
314 CPUM_GREG_EMIT_X4_X28(23),
315 CPUM_GREG_EMIT_X4_X28(24),
316 CPUM_GREG_EMIT_X4_X28(25),
317 CPUM_GREG_EMIT_X4_X28(26),
318 CPUM_GREG_EMIT_X4_X28(27),
319 CPUM_GREG_EMIT_X4_X28(28),
320 { WHV_REGNM(Fp), CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
321 { WHV_REGNM(Lr), CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
322 { WHV_REGNM(Pc), CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
323 { WHV_REGNM(Fpcr), CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
324 { WHV_REGNM(Fpsr), CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
325#undef CPUM_GREG_EMIT_X0_X3
326#undef CPUM_GREG_EMIT_X4_X28
327};
328/** SIMD/FP registers. */
329static const struct
330{
331 WHV_REGISTER_NAME enmWHvReg;
332 uintptr_t offCpumCtx;
333} s_aCpumFpRegs[] =
334{
335#define CPUM_VREG_EMIT(a_Idx) { WHV_REGNM(Q ## a_Idx), RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
336 CPUM_VREG_EMIT(0),
337 CPUM_VREG_EMIT(1),
338 CPUM_VREG_EMIT(2),
339 CPUM_VREG_EMIT(3),
340 CPUM_VREG_EMIT(4),
341 CPUM_VREG_EMIT(5),
342 CPUM_VREG_EMIT(6),
343 CPUM_VREG_EMIT(7),
344 CPUM_VREG_EMIT(8),
345 CPUM_VREG_EMIT(9),
346 CPUM_VREG_EMIT(10),
347 CPUM_VREG_EMIT(11),
348 CPUM_VREG_EMIT(12),
349 CPUM_VREG_EMIT(13),
350 CPUM_VREG_EMIT(14),
351 CPUM_VREG_EMIT(15),
352 CPUM_VREG_EMIT(16),
353 CPUM_VREG_EMIT(17),
354 CPUM_VREG_EMIT(18),
355 CPUM_VREG_EMIT(19),
356 CPUM_VREG_EMIT(20),
357 CPUM_VREG_EMIT(21),
358 CPUM_VREG_EMIT(22),
359 CPUM_VREG_EMIT(23),
360 CPUM_VREG_EMIT(24),
361 CPUM_VREG_EMIT(25),
362 CPUM_VREG_EMIT(26),
363 CPUM_VREG_EMIT(27),
364 CPUM_VREG_EMIT(28),
365 CPUM_VREG_EMIT(29),
366 CPUM_VREG_EMIT(30),
367 CPUM_VREG_EMIT(31)
368#undef CPUM_VREG_EMIT
369};
370/** PAuth key system registers. */
371static const struct
372{
373 WHV_REGISTER_NAME enmWHvReg;
374 uintptr_t offCpumCtx;
375} s_aCpumPAuthKeyRegs[] =
376{
377 { WHV_REGNM(ApdAKeyLoEl1), RT_UOFFSETOF(CPUMCTX, Apda.Low.u64) },
378 { WHV_REGNM(ApdAKeyHiEl1), RT_UOFFSETOF(CPUMCTX, Apda.High.u64) },
379 { WHV_REGNM(ApdBKeyLoEl1), RT_UOFFSETOF(CPUMCTX, Apdb.Low.u64) },
380 { WHV_REGNM(ApdBKeyHiEl1), RT_UOFFSETOF(CPUMCTX, Apdb.High.u64) },
381 { WHV_REGNM(ApgAKeyLoEl1), RT_UOFFSETOF(CPUMCTX, Apga.Low.u64) },
382 { WHV_REGNM(ApgAKeyHiEl1), RT_UOFFSETOF(CPUMCTX, Apga.High.u64) },
383 { WHV_REGNM(ApiAKeyLoEl1), RT_UOFFSETOF(CPUMCTX, Apia.Low.u64) },
384 { WHV_REGNM(ApiAKeyHiEl1), RT_UOFFSETOF(CPUMCTX, Apia.High.u64) },
385 { WHV_REGNM(ApiBKeyLoEl1), RT_UOFFSETOF(CPUMCTX, Apib.Low.u64) },
386 { WHV_REGNM(ApiBKeyHiEl1), RT_UOFFSETOF(CPUMCTX, Apib.High.u64) }
387};
388/** System registers. */
389static const struct
390{
391 WHV_REGISTER_NAME enmWHvReg;
392 uint32_t fCpumExtrn;
393 uintptr_t offCpumCtx;
394} s_aCpumSysRegs[] =
395{
396 { WHV_REGNM(SpEl0), CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
397 { WHV_REGNM(SpEl1), CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
398 { WHV_REGNM(SpsrEl1), CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
399 { WHV_REGNM(ElrEl1), CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
400 { WHV_REGNM(VbarEl1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, VBar.u64) },
401 { WHV_REGNM(CntkctlEl1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, CntKCtl.u64) },
402 { WHV_REGNM(ContextidrEl1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, ContextIdr.u64) },
403 { WHV_REGNM(CpacrEl1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Cpacr.u64) },
404 { WHV_REGNM(CsselrEl1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Csselr.u64) },
405 { WHV_REGNM(EsrEl1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Esr.u64) },
406 { WHV_REGNM(FarEl1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Far.u64) },
407 { WHV_REGNM(MairEl1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Mair.u64) },
408 { WHV_REGNM(ParEl1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Par.u64) },
409 { WHV_REGNM(TpidrroEl0), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64) },
410 { WHV_REGNM(TpidrEl0), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64) },
411 { WHV_REGNM(TpidrEl1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64) },
412 { My_WHvArm64RegisterActlrEl1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Actlr.u64) }
413#if 0 /* Not available in Hyper-V */
414 { WHV_REGNM(), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr0.u64) },
415 { WHV_REGNM(), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr1.u64) },
416 { WHV_REGNM(), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Amair.u64) },
417 { WHV_REGNM(), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, MDccInt.u64) }
418#endif
419};
420/** Paging registers (CPUMCTX_EXTRN_SCTLR_TCR_TTBR). */
421static const struct
422{
423 WHV_REGISTER_NAME enmWHvReg;
424 uint32_t offCpumCtx;
425} s_aCpumSysRegsPg[] =
426{
427 { WHV_REGNM(SctlrEl1), RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
428 { WHV_REGNM(TcrEl1), RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
429 { WHV_REGNM(Ttbr0El1), RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
430 { WHV_REGNM(Ttbr1El1), RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
431};
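
/*
 * Illustrative sketch (not part of the original source): how the descriptor tables above are meant to
 * be consumed.  Each entry pairs a Hyper-V register name with a byte offset into CPUMCTX, so exporting
 * a group of registers amounts to gathering values by offset and issuing one batched
 * WHvSetVirtualProcessorRegisters call; importing works the same way in reverse with
 * WHvGetVirtualProcessorRegisters.  Error handling is reduced to a bare minimum here; the real
 * export/import code lives further down in this file.
 */
#if 0 /* illustration only */
static int nemR3WinExampleExportPagingRegs(PVM pVM, PVMCPUCC pVCpu)
{
    WHV_REGISTER_NAME  aenmNames[RT_ELEMENTS(s_aCpumSysRegsPg)];
    WHV_REGISTER_VALUE aValues[RT_ELEMENTS(s_aCpumSysRegsPg)];
    for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegsPg); i++)
    {
        aenmNames[i]             = s_aCpumSysRegsPg[i].enmWHvReg;
        aValues[i].Reg128.High64 = 0;
        aValues[i].Reg64         = *(uint64_t const *)(  (uint8_t const *)&pVCpu->cpum.GstCtx
                                                       + s_aCpumSysRegsPg[i].offCpumCtx);
    }
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu,
                                                  aenmNames, RT_ELEMENTS(aenmNames), aValues);
    return SUCCEEDED(hrc) ? VINF_SUCCESS : VERR_NEM_SET_REGISTERS_FAILED;
}
#endif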
432
433
434/*********************************************************************************************************************************
435* Internal Functions *
436*********************************************************************************************************************************/
437DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
438DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);
439
440
441/**
442 * Worker for nemR3NativeInit that probes and loads the native API.
443 *
444 * @returns VBox status code.
445 * @param fForced Whether the HMForced flag is set and we should
446 * fail if we cannot initialize.
447 * @param pErrInfo Where to always return error info.
448 */
449static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
450{
451 /*
452 * Check that the DLL files we need are present, but without loading them.
453 * We'd like to avoid loading them unnecessarily.
454 */
455 WCHAR wszPath[MAX_PATH + 64];
456 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
457 if (cwcPath >= MAX_PATH || cwcPath < 2)
458 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
459
460 if (wszPath[cwcPath - 1] != '\\' && wszPath[cwcPath - 1] != '/') /* append a trailing separator only if one is missing */
461 wszPath[cwcPath++] = '\\';
462 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
463 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
464 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
465
466 /*
467 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
468 */
469 /** @todo */
470
471 /** @todo would be great if we could recognize a root partition from the
472 * CPUID info, but I currently don't dare do that. */
473
474 /*
475 * Now try load the DLLs and resolve the APIs.
476 */
477 static const char * const s_apszDllNames[1] = { "WinHvPlatform.dll" };
478 RTLDRMOD ahMods[1] = { NIL_RTLDRMOD };
479 int rc = VINF_SUCCESS;
480 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
481 {
482 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
483 if (RT_FAILURE(rc2))
484 {
485 if (!RTErrInfoIsSet(pErrInfo))
486 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
487 else
488 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
489 ahMods[i] = NIL_RTLDRMOD;
490 rc = VERR_NEM_INIT_FAILED;
491 }
492 }
493 if (RT_SUCCESS(rc))
494 {
495 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
496 {
497 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
498 if (RT_SUCCESS(rc2))
499 {
500 if (g_aImports[i].fOptional)
501 LogRel(("NEM: info: Found optional import %s!%s.\n",
502 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
503 }
504 else
505 {
506 *g_aImports[i].ppfn = NULL;
507
508 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
509 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
510 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
511 if (!g_aImports[i].fOptional)
512 {
513 if (RTErrInfoIsSet(pErrInfo))
514 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
515 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
516 else
517 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
518 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
519 Assert(RT_FAILURE(rc));
520 }
521 }
522 }
523 if (RT_SUCCESS(rc))
524 {
525 Assert(!RTErrInfoIsSet(pErrInfo));
526 }
527 }
528
529 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
530 RTLdrClose(ahMods[i]);
531 return rc;
532}
533
534
535/**
536 * Wrapper for different WHvGetCapability signatures.
537 */
538DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
539{
540 return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
541}
542
543
544/**
545 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
546 *
547 * @returns VBox status code.
548 * @param pVM The cross context VM structure.
549 * @param pErrInfo Where to always return error info.
550 */
551static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
552{
553#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
554#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
555#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
556
557 WHV_CAPABILITY Caps;
558 RT_ZERO(Caps);
559 SetLastError(0);
560 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
561 DWORD rcWin = GetLastError();
562 if (FAILED(hrc))
563 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
564 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
565 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
566 if (!Caps.HypervisorPresent)
567 {
568 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
569 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
570 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
571 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
572 }
573 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
574
575
576 /*
577 * Check what extended VM exits are supported.
578 */
579 RT_ZERO(Caps);
580 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
581 if (FAILED(hrc))
582 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
583 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
584 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
585 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
586 pVM->nem.s.fHypercallExit = RT_BOOL(Caps.ExtendedVmExits.HypercallExit);
587 pVM->nem.s.fGpaAccessFaultExit = RT_BOOL(Caps.ExtendedVmExits.GpaAccessFaultExit);
588 NEM_LOG_REL_CAP_SUB("fHypercallExit", pVM->nem.s.fHypercallExit);
589 NEM_LOG_REL_CAP_SUB("fGpaAccessFaultExit", pVM->nem.s.fGpaAccessFaultExit);
590 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
591 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
592 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
593
594 /*
595 * Check features in case they end up defining any.
596 */
597 RT_ZERO(Caps);
598 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
599 if (FAILED(hrc))
600 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
601 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
602 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
603 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
604 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
605 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
606
607 /*
608 * Check that the CPU vendor is supported.
609 */
610 RT_ZERO(Caps);
611 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
612 if (FAILED(hrc))
613 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
614 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
615 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
616 switch (Caps.ProcessorVendor)
617 {
618 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
619 case WHvProcessorVendorArm:
620 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - ARM", Caps.ProcessorVendor);
621 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_UNKNOWN;
622 break;
623 default:
624 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
625 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
626 }
627
628 /*
629 * CPU features, guessing these are virtual CPU features?
630 */
631 RT_ZERO(Caps);
632 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
633 if (FAILED(hrc))
634 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
635 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
636 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
637 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
638#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
639 NEM_LOG_REL_CPU_FEATURE(Asid16);
640 NEM_LOG_REL_CPU_FEATURE(TGran16);
641 NEM_LOG_REL_CPU_FEATURE(TGran64);
642 NEM_LOG_REL_CPU_FEATURE(Haf);
643 NEM_LOG_REL_CPU_FEATURE(Hdbs);
644 NEM_LOG_REL_CPU_FEATURE(Pan);
645 NEM_LOG_REL_CPU_FEATURE(AtS1E1);
646 NEM_LOG_REL_CPU_FEATURE(Uao);
647 NEM_LOG_REL_CPU_FEATURE(El0Aarch32);
648 NEM_LOG_REL_CPU_FEATURE(Fp);
649 NEM_LOG_REL_CPU_FEATURE(FpHp);
650 NEM_LOG_REL_CPU_FEATURE(AdvSimd);
651 NEM_LOG_REL_CPU_FEATURE(AdvSimdHp);
652 NEM_LOG_REL_CPU_FEATURE(GicV3V4);
653 NEM_LOG_REL_CPU_FEATURE(GicV41);
654 NEM_LOG_REL_CPU_FEATURE(Ras);
655 NEM_LOG_REL_CPU_FEATURE(PmuV3);
656 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV81);
657 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV84);
658 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV85);
659 NEM_LOG_REL_CPU_FEATURE(Aes);
660 NEM_LOG_REL_CPU_FEATURE(PolyMul);
661 NEM_LOG_REL_CPU_FEATURE(Sha1);
662 NEM_LOG_REL_CPU_FEATURE(Sha256);
663 NEM_LOG_REL_CPU_FEATURE(Sha512);
664 NEM_LOG_REL_CPU_FEATURE(Crc32);
665 NEM_LOG_REL_CPU_FEATURE(Atomic);
666 NEM_LOG_REL_CPU_FEATURE(Rdm);
667 NEM_LOG_REL_CPU_FEATURE(Sha3);
668 NEM_LOG_REL_CPU_FEATURE(Sm3);
669 NEM_LOG_REL_CPU_FEATURE(Sm4);
670 NEM_LOG_REL_CPU_FEATURE(Dp);
671 NEM_LOG_REL_CPU_FEATURE(Fhm);
672 NEM_LOG_REL_CPU_FEATURE(DcCvap);
673 NEM_LOG_REL_CPU_FEATURE(DcCvadp);
674 NEM_LOG_REL_CPU_FEATURE(ApaBase);
675 NEM_LOG_REL_CPU_FEATURE(ApaEp);
676 NEM_LOG_REL_CPU_FEATURE(ApaEp2);
677 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fp);
678 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fpc);
679 NEM_LOG_REL_CPU_FEATURE(Jscvt);
680 NEM_LOG_REL_CPU_FEATURE(Fcma);
681 NEM_LOG_REL_CPU_FEATURE(RcpcV83);
682 NEM_LOG_REL_CPU_FEATURE(RcpcV84);
683 NEM_LOG_REL_CPU_FEATURE(Gpa);
684 NEM_LOG_REL_CPU_FEATURE(L1ipPipt);
685 NEM_LOG_REL_CPU_FEATURE(DzPermitted);
686
687#undef NEM_LOG_REL_CPU_FEATURE
688 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(47) - 1)))
689 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
690 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
691 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
692
693 /*
694 * The cache line flush size.
695 */
696 RT_ZERO(Caps);
697 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
698 if (FAILED(hrc))
699 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
700 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
701 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
702 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
703 if (Caps.ProcessorClFlushSize < 8 || Caps.ProcessorClFlushSize > 9)
704 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
705 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
706
707 RT_ZERO(Caps);
708 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodePhysicalAddressWidth, &Caps, sizeof(Caps));
709 if (FAILED(hrc))
710 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
711 "WHvGetCapability/WHvCapabilityCodePhysicalAddressWidth failed: %Rhrc (Last=%#x/%u)",
712 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
713 NEM_LOG_REL_CAP_EX("WHvCapabilityCodePhysicalAddressWidth", "2^%u", Caps.PhysicalAddressWidth);
714 if (Caps.PhysicalAddressWidth < 32 || Caps.PhysicalAddressWidth > 52)
715 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported physical address width: %u", Caps.PhysicalAddressWidth);
716 pVM->nem.s.cPhysicalAddressWidth = Caps.PhysicalAddressWidth;
717
718
719 /*
720 * See if they've added more properties that we're not aware of.
721 */
722 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
723 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
724 {
725 static const struct
726 {
727 uint32_t iMin, iMax; } s_aUnknowns[] =
728 {
729 { 0x0004, 0x000f },
730 { 0x1003, 0x100f },
731 { 0x2000, 0x200f },
732 { 0x3000, 0x300f },
733 { 0x4000, 0x400f },
734 };
735 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
736 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
737 {
738 RT_ZERO(Caps);
739 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
740 if (SUCCEEDED(hrc))
741 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
742 }
743 }
744
745 /*
746 * For proper operation, we require CPUID exits.
747 */
748 /** @todo Any? */
749
750#undef NEM_LOG_REL_CAP_EX
751#undef NEM_LOG_REL_CAP_SUB_EX
752#undef NEM_LOG_REL_CAP_SUB
753 return VINF_SUCCESS;
754}
755
756
757/**
758 * Initializes the GIC controller emulation provided by Hyper-V.
759 *
760 * @returns VBox status code.
761 * @param pVM The cross context VM structure.
762 *
763 * @note Needs to be done early when setting up the partition so this has to live here and not in GICNem-win.cpp
764 */
765static int nemR3WinGicCreate(PVM pVM)
766{
767 PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
768 AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);
769
770 /*
771 * Query the MMIO ranges.
772 */
773 RTGCPHYS GCPhysMmioBaseDist = 0;
774 int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
775 if (RT_FAILURE(rc))
776 return VMSetError(pVM, rc, RT_SRC_POS,
777 "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");
778
779 RTGCPHYS GCPhysMmioBaseReDist = 0;
780 rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
781 if (RT_FAILURE(rc))
782 return VMSetError(pVM, rc, RT_SRC_POS,
783 "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");
784
785 RTGCPHYS GCPhysMmioBaseIts = 0;
786 rc = CFGMR3QueryU64(pGicCfg, "ItsMmioBase", &GCPhysMmioBaseIts);
787 if (RT_FAILURE(rc) && rc != VERR_CFGM_VALUE_NOT_FOUND)
788 return VMSetError(pVM, rc, RT_SRC_POS,
789 "Configuration error: Failed to get the \"ItsMmioBase\" value\n");
790 rc = VINF_SUCCESS;
791
792 /*
793 * One can only set the GIC distributor base. The re-distributor regions for the individual
794 * vCPUs are configured when the vCPUs are created, so we need to save the base of the MMIO region.
795 */
796 pVM->nem.s.GCPhysMmioBaseReDist = GCPhysMmioBaseReDist;
797
798 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
799
800 MY_WHV_ARM64_IC_PARAMETERS Property; RT_ZERO(Property);
801 Property.u32EmulationMode = WHV_ARM64_IC_EMULATION_MODE_GICV3;
802 Property.u.GicV3.GCPhysGicdBase = GCPhysMmioBaseDist;
803 Property.u.GicV3.GCPhysGitsTranslaterBase = GCPhysMmioBaseIts;
804 Property.u.GicV3.cLpiIntIdBits = 1; /** @todo LPIs are currently not supported with our device emulations. */
805 Property.u.GicV3.u32PpiCntvOverflw = pVM->nem.s.u32GicPpiVTimer + 16; /* Calculate the absolute timer INTID. */
806 Property.u.GicV3.u32PpiPmu = 23; /** @todo Configure dynamically (from SBSA, needs a PMU/NEM emulation just like with the GIC probably). */
807 HRESULT hrc = WHvSetPartitionProperty(hPartition, (WHV_PARTITION_PROPERTY_CODE)WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS, &Property, sizeof(Property));
808 if (FAILED(hrc))
809 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
810 "Failed to set WHvPartitionPropertyCodeArm64IcParameters: %Rhrc (Last=%#x/%u)",
811 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
812
813 return rc;
814}
815
816
817/**
818 * Creates and sets up a Hyper-V (exo) partition.
819 *
820 * @returns VBox status code.
821 * @param pVM The cross context VM structure.
822 * @param pErrInfo Where to always return error info.
823 */
824static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
825{
826 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
827 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
828
829 /*
830 * Create the partition.
831 */
832 WHV_PARTITION_HANDLE hPartition;
833 HRESULT hrc = WHvCreatePartition(&hPartition);
834 if (FAILED(hrc))
835 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
836 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
837
838 int rc;
839
840 /*
841 * Set partition properties, most importantly the CPU count.
842 */
843 /**
844 * @todo Someone at Microsoft please explain another weird API:
845 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
846 * argument rather than as part of the struct. That is so weird if you've
847 * used any other NT or windows API, including WHvGetCapability().
848 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
849 * technically only need 9 bytes for setting/getting
850 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
851 WHV_PARTITION_PROPERTY Property;
852 RT_ZERO(Property);
853 Property.ProcessorCount = pVM->cCpus;
854 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
855 if (SUCCEEDED(hrc))
856 {
857 RT_ZERO(Property);
858 Property.ExtendedVmExits.HypercallExit = pVM->nem.s.fHypercallExit;
859 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
860 if (SUCCEEDED(hrc))
861 {
862 /*
863 * We'll continue setup in nemR3NativeInitAfterCPUM.
864 */
865 pVM->nem.s.fCreatedEmts = false;
866 pVM->nem.s.hPartition = hPartition;
867 LogRel(("NEM: Created partition %p.\n", hPartition));
868 return VINF_SUCCESS;
869 }
870
871 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
872 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
873 Property.ExtendedVmExits.AsUINT64, hrc);
874 }
875 else
876 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
877 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
878 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
879 WHvDeletePartition(hPartition);
880
881 Assert(!pVM->nem.s.hPartitionDevice);
882 Assert(!pVM->nem.s.hPartition);
883 return rc;
884}
885
886
887static int nemR3NativeInitSetupVm(PVM pVM)
888{
889 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
890 AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
891 AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
892 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
893
894 /*
895 * Continue setting up the partition now that we've got most of the CPUID feature stuff.
896 */
897 WHV_PARTITION_PROPERTY Property;
898 HRESULT hrc;
899
900 /* Not sure if we really need to set the cache line flush size. */
901 RT_ZERO(Property);
902 Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
903 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
904 if (FAILED(hrc))
905 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
906 "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
907 pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
908
909 /*
910 * Sync CPU features with CPUM.
911 */
912 /** @todo sync CPU features with CPUM. */
913
914 /* Set the partition property. */
915 RT_ZERO(Property);
916 Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
917 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
918 if (FAILED(hrc))
919 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
920 "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
921 pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
922
923 /* Configure the GIC. */
924 int rc = nemR3WinGicCreate(pVM);
925 if (RT_FAILURE(rc))
926 return rc;
927
928 /*
929 * Set up the partition.
930 *
931 * Seems like this is where the partition is actually instantiated and we get
932 * a handle to it.
933 */
934 hrc = WHvSetupPartition(hPartition);
935 if (FAILED(hrc))
936 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
937 "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
938 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
939
940 /*
941 * Setup the EMTs.
942 */
943 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
944 {
945 hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
946 if (FAILED(hrc))
947 {
948 NTSTATUS const rcNtLast = RTNtLastStatusValue();
949 DWORD const dwErrLast = RTNtLastErrorValue();
950 while (idCpu-- > 0)
951 {
952 HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
953 AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
954 hPartition, idCpu, hrc2, RTNtLastStatusValue(),
955 RTNtLastErrorValue()));
956 }
957 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
958 "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
959 }
960
961 if (idCpu == 0)
962 {
963 /*
964 * Need to query the ID registers and populate CPUM,
965 * these are partition wide registers and need to be queried/set with WHV_ANY_VP.
966 */
967 CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
968
969 WHV_REGISTER_NAME aenmNames[10];
970 WHV_REGISTER_VALUE aValues[10];
971 RT_ZERO(aValues);
972
973 aenmNames[0] = WHvArm64RegisterIdAa64Dfr0El1;
974 aenmNames[1] = WHvArm64RegisterIdAa64Dfr1El1;
975 aenmNames[2] = WHvArm64RegisterIdAa64Isar0El1;
976 aenmNames[3] = WHvArm64RegisterIdAa64Isar1El1;
977 aenmNames[4] = WHvArm64RegisterIdAa64Isar2El1;
978 aenmNames[5] = WHvArm64RegisterIdAa64Mmfr0El1;
979 aenmNames[6] = WHvArm64RegisterIdAa64Mmfr1El1;
980 aenmNames[7] = WHvArm64RegisterIdAa64Mmfr2El1;
981 aenmNames[8] = WHvArm64RegisterIdAa64Pfr0El1;
982 aenmNames[9] = WHvArm64RegisterIdAa64Pfr1El1;
983
984 hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
985 AssertLogRelMsgReturn(SUCCEEDED(hrc),
986 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
987 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
988 , VERR_NEM_GET_REGISTERS_FAILED);
989
990 IdRegs.u64RegIdAa64Pfr0El1 = aValues[8].Reg64;
991 IdRegs.u64RegIdAa64Pfr1El1 = aValues[9].Reg64;
992 IdRegs.u64RegIdAa64Dfr0El1 = aValues[0].Reg64;
993 IdRegs.u64RegIdAa64Dfr1El1 = aValues[1].Reg64;
994 IdRegs.u64RegIdAa64Isar0El1 = aValues[2].Reg64;
995 IdRegs.u64RegIdAa64Isar1El1 = aValues[3].Reg64;
996 IdRegs.u64RegIdAa64Isar2El1 = aValues[4].Reg64;
997 IdRegs.u64RegIdAa64Mmfr0El1 = aValues[5].Reg64;
998 IdRegs.u64RegIdAa64Mmfr1El1 = aValues[6].Reg64;
999 IdRegs.u64RegIdAa64Mmfr2El1 = aValues[7].Reg64;
1000
1001 rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
1002 if (RT_FAILURE(rc))
1003 return rc;
1004
1005 /* Apply any overrides to the partition. */
1006 PCCPUMARMV8IDREGS pIdRegsGst = NULL;
1007 rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
1008 AssertRCReturn(rc, rc);
1009
1010 aValues[0].Reg64 = pIdRegsGst->u64RegIdAa64Dfr0El1;
1011 aValues[1].Reg64 = pIdRegsGst->u64RegIdAa64Dfr1El1;
1012 aValues[2].Reg64 = pIdRegsGst->u64RegIdAa64Isar0El1;
1013 aValues[3].Reg64 = pIdRegsGst->u64RegIdAa64Isar1El1;
1014 aValues[4].Reg64 = pIdRegsGst->u64RegIdAa64Isar2El1;
1015 aValues[5].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr0El1;
1016 aValues[6].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr1El1;
1017 aValues[7].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr2El1;
1018 aValues[8].Reg64 = pIdRegsGst->u64RegIdAa64Pfr0El1;
1019 aValues[9].Reg64 = pIdRegsGst->u64RegIdAa64Pfr1El1;
1020
1021 hrc = WHvSetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
1022 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1023 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1024 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1025 , VERR_NEM_SET_REGISTERS_FAILED);
1026
1027 /* Save the amount of break-/watchpoints supported for syncing the guest register state later. */
1028 pVM->nem.s.cBreakpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_BRPS) + 1;
1029 pVM->nem.s.cWatchpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_WRPS) + 1;
1030 }
1031
1032 /* Configure the GIC re-distributor region for the GIC. */
1033 WHV_REGISTER_NAME enmName = My_WHvArm64RegisterGicrBaseGpa;
1034 WHV_REGISTER_VALUE Value;
1035 Value.Reg64 = pVM->nem.s.GCPhysMmioBaseReDist + idCpu * _128K;
1036
1037 hrc = WHvSetVirtualProcessorRegisters(hPartition, idCpu, &enmName, 1, &Value);
1038 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1039 ("WHvSetVirtualProcessorRegisters(%p, %u, WHvArm64RegisterGicrBaseGpa,) -> %Rhrc (Last=%#x/%u)\n",
1040 hPartition, idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1041 , VERR_NEM_SET_REGISTERS_FAILED);
1042 }
1043
1044 pVM->nem.s.fCreatedEmts = true;
1045
1046 LogRel(("NEM: Successfully set up partition\n"));
1047 return VINF_SUCCESS;
1048}
1049
1050
1051/**
1052 * Try initialize the native API.
1053 *
1054 * This may only do part of the job; more can be done in
1055 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
1056 *
1057 * @returns VBox status code.
1058 * @param pVM The cross context VM structure.
1059 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
1060 * the latter we'll fail if we cannot initialize.
1061 * @param fForced Whether the HMForced flag is set and we should
1062 * fail if we cannot initialize.
1063 */
1064int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
1065{
1066 g_uBuildNo = RTSystemGetNtBuildNo();
1067
1068 /*
1069 * Error state.
1070 * The error message will be non-empty on failure and 'rc' will be set too.
1071 */
1072 RTERRINFOSTATIC ErrInfo;
1073 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
1074 int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
1075 if (RT_SUCCESS(rc))
1076 {
1077 /*
1078 * Check the capabilities of the hypervisor, starting with whether it's present.
1079 */
1080 rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
1081 if (RT_SUCCESS(rc))
1082 {
1083 /*
1084 * Create and initialize a partition.
1085 */
1086 rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
1087 if (RT_SUCCESS(rc))
1088 {
1089 rc = nemR3NativeInitSetupVm(pVM);
1090 if (RT_SUCCESS(rc))
1091 {
1092 /*
1093 * Set ourselves as the execution engine and make config adjustments.
1094 */
1095 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
1096 Log(("NEM: Marked active!\n"));
1097 PGMR3EnableNemMode(pVM);
1098
1099 /*
1100 * Register release statistics
1101 */
1102 STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
1103 "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number guest pages currently mapped by the VM");
1104 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
1105 "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
1106 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
1107 "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
1108 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
1109 "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
1110 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
1111 "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
1112 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
1113 "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
1114 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
1115 "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
1116 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
1117 "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
1118 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
1119 "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");
1120
1121 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1122 {
1123 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
1124 STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
1125 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
1126 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
1127 STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
1128 STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
1129 STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
1130 STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
1131 STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
1132 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
1133 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
1134 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
1135 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
1136 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
1137 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
1138 STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
1139 STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
1140 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
1141 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
1142 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
1143 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
1144 STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
1145 STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
1146 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
1147 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
1148 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
1149 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
1150 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
1151 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
1152 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
1153 STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
1154 }
1155
1156#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
1157 if (!SUPR3IsDriverless())
1158 {
1159 PUVM pUVM = pVM->pUVM;
1160 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1161 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
1162 "/NEM/R0Stats/cPagesAvailable");
1163 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1164 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
1165 "/NEM/R0Stats/cPagesInUse");
1166 }
1167#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
1168 }
1169 }
1170 }
1171 }
1172
1173 /*
1174 * We only fail if in forced mode, otherwise just log the complaint and return.
1175 */
1176 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
1177 if ( (fForced || !fFallback)
1178 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
1179 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1180
1181 if (RTErrInfoIsSet(pErrInfo))
1182 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
1183 return VINF_SUCCESS;
1184}
1185
1186
1187/**
1188 * This is called after CPUMR3Init is done.
1189 *
1190 * @returns VBox status code.
1191 * @param pVM The cross context VM structure.
1192 */
1193int nemR3NativeInitAfterCPUM(PVM pVM)
1194{
1195 /*
1196 * Validate sanity.
1197 */
1198 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1199
1200 /** @todo */
1201
1202 /*
1203 * Any hyper-v statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
1204 */
1205 /** @todo stats */
1206
1207 /*
1208 * Adjust features.
1209 *
1210 * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
1211 * the first init call.
1212 */
1213
1214 return VINF_SUCCESS;
1215}
1216
1217
1218/**
1219 * Execute state save operation.
1220 *
1221 * @returns VBox status code.
1222 * @param pVM The cross context VM structure.
1223 * @param pSSM SSM operation handle.
1224 */
1225static DECLCALLBACK(int) nemR3Save(PVM pVM, PSSMHANDLE pSSM)
1226{
1227 /*
1228 * Save the Hyper-V activity state for all CPUs.
1229 */
1230 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1231 {
1232 PVMCPUCC pVCpu = pVM->apCpusR3[i];
1233
1234 static const WHV_REGISTER_NAME s_Name = WHvRegisterInternalActivityState;
1235 WHV_REGISTER_VALUE Reg;
1236
1237 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &s_Name, 1, &Reg);
1238 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1239 ("WHvSetVirtualProcessorRegisters(%p, 0,{WHvRegisterInternalActivityState}, 1,) -> %Rhrc (Last=%#x/%u)\n",
1240 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1241 , VERR_NEM_IPE_9);
1242
1243 SSMR3PutU64(pSSM, Reg.Reg64);
1244 }
1245
1246 return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
1247}
1248
1249
1250/**
1251 * Execute state load operation.
1252 *
1253 * @returns VBox status code.
1254 * @param pVM The cross context VM structure.
1255 * @param pSSM SSM operation handle.
1256 * @param uVersion Data layout version.
1257 * @param uPass The data pass.
1258 */
1259static DECLCALLBACK(int) nemR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1260{
1261 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1262
1263 /*
1264 * Validate version.
1265 */
1266 if (uVersion != 1)
1267 {
1268 AssertMsgFailed(("nemR3Load: Invalid version uVersion=%u!\n", uVersion));
1269 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1270 }
1271
1272 /*
1273 * Restore the Hyper-V activity states for all vCPUs.
1274 */
1275 VMCPU_SET_STATE(pVM->apCpusR3[0], VMCPUSTATE_STARTED);
1276 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1277 {
1278 PVMCPUCC pVCpu = pVM->apCpusR3[i];
1279
1280 static const WHV_REGISTER_NAME s_Name = WHvRegisterInternalActivityState;
1281 WHV_REGISTER_VALUE Reg;
1282 int rc = SSMR3GetU64(pSSM, &Reg.Reg64);
1283 if (RT_FAILURE(rc))
1284 return rc;
1285
1286 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &s_Name, 1, &Reg);
1287 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1288 ("WHvSetVirtualProcessorRegisters(%p, 0,{WHvRegisterInternalActivityState}, 1,) -> %Rhrc (Last=%#x/%u)\n",
1289 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1290 , VERR_NEM_IPE_9);
1291 }
1292
1293 /* terminator */
1294 uint32_t u32;
1295 int rc = SSMR3GetU32(pSSM, &u32);
1296 if (RT_FAILURE(rc))
1297 return rc;
1298 if (u32 != UINT32_MAX)
1299 {
1300 AssertMsgFailed(("u32=%#x\n", u32));
1301 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1302 }
1303 return VINF_SUCCESS;
1304}
1305
1306
1307int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1308{
1309 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
1310 //AssertLogRel(fRet);
1311
1312 if (enmWhat == VMINITCOMPLETED_RING3)
1313 {
1314 /*
1315 * Register the saved state data unit.
1316 */
1317 int rc = SSMR3RegisterInternal(pVM, "nem-win", 1, NEM_HV_SAVED_STATE_VERSION,
1318 sizeof(uint64_t),
1319 NULL, NULL, NULL,
1320 NULL, nemR3Save, NULL,
1321 NULL, nemR3Load, NULL);
1322 if (RT_FAILURE(rc))
1323 return rc;
1324 }
1325
1326 NOREF(pVM); NOREF(enmWhat);
1327 return VINF_SUCCESS;
1328}
1329
1330
1331int nemR3NativeTerm(PVM pVM)
1332{
1333 /*
1334 * Delete the partition.
1335 */
1336 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
1337 pVM->nem.s.hPartition = NULL;
1338 pVM->nem.s.hPartitionDevice = NULL;
1339 if (hPartition != NULL)
1340 {
1341 VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
1342 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
1343 while (idCpu-- > 0)
1344 {
1345 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
1346 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
1347 hPartition, idCpu, hrc, RTNtLastStatusValue(),
1348 RTNtLastErrorValue()));
1349 }
1350 WHvDeletePartition(hPartition);
1351 }
1352 pVM->nem.s.fCreatedEmts = false;
1353 return VINF_SUCCESS;
1354}
1355
1356
1357/**
1358 * VM reset notification.
1359 *
1360 * @param pVM The cross context VM structure.
1361 */
1362void nemR3NativeReset(PVM pVM)
1363{
1364 RT_NOREF(pVM);
1365}
1366
1367
1368/**
1369 * Reset CPU due to INIT IPI or hot (un)plugging.
1370 *
1371 * @param pVCpu The cross context virtual CPU structure of the CPU being
1372 * reset.
1373 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1374 */
1375void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1376{
1377 RT_NOREF(pVCpu, fInitIpi);
1378}
1379
1380
1381NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
1382{
1383 WHV_REGISTER_NAME aenmNames[128];
1384 WHV_REGISTER_VALUE aValues[128];
1385
1386 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
1387 if (!fWhat)
1388 return VINF_SUCCESS;
1389 uintptr_t iReg = 0;
1390
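    /* The state is pushed in one batch: the macros below append a register name/value pair
       to the parallel aenmNames/aValues arrays, and the whole set is written with a single
       WHvSetVirtualProcessorRegisters() call at the end of this function. */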
1391#define ADD_REG64(a_enmName, a_uValue) do { \
1392 aenmNames[iReg] = (a_enmName); \
1393 aValues[iReg].Reg128.High64 = 0; \
1394 aValues[iReg].Reg64 = (a_uValue).x; \
1395 iReg++; \
1396 } while (0)
1397#define ADD_REG64_RAW(a_enmName, a_uValue) do { \
1398 aenmNames[iReg] = (a_enmName); \
1399 aValues[iReg].Reg128.High64 = 0; \
1400 aValues[iReg].Reg64 = (a_uValue); \
1401 iReg++; \
1402 } while (0)
1403#define ADD_SYSREG64(a_enmName, a_uValue) do { \
1404 aenmNames[iReg] = (a_enmName); \
1405 aValues[iReg].Reg128.High64 = 0; \
1406 aValues[iReg].Reg64 = (a_uValue).u64; \
1407 iReg++; \
1408 } while (0)
1409#define ADD_REG128(a_enmName, a_uValue) do { \
1410 aenmNames[iReg] = (a_enmName); \
1411 aValues[iReg].Reg128.Low64 = (a_uValue).au64[0]; \
1412 aValues[iReg].Reg128.High64 = (a_uValue).au64[1]; \
1413 iReg++; \
1414 } while (0)
1415
1416 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
1417 != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
1418 {
1419 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
1420 {
1421 if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
1422 {
1423 const CPUMCTXGREG *pReg = (const CPUMCTXGREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
1424 ADD_REG64(s_aCpumRegs[i].enmWHvReg, *pReg);
1425 }
1426 }
1427 }
1428
1429 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1430 {
1431 /* SIMD/FP registers. */
1432 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
1433 {
1434 PCCPUMCTXVREG pVReg = (PCCPUMCTXVREG)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
1435 ADD_REG128(s_aCpumFpRegs[i].enmWHvReg, *pVReg);
1436 }
1437 }
1438
1439 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1440 {
1441 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1442 {
1443 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Ctrl);
1444 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Value);
1445 }
1446
1447 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1448 {
1449 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Ctrl);
1450 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Value);
1451 }
1452
1453 ADD_SYSREG64(WHvArm64RegisterMdscrEl1, pVCpu->cpum.GstCtx.Mdscr);
1454 }
1455
1456 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1457 {
1458 /* PAuth registers. */
1459 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
1460 {
1461 const CPUMCTXSYSREG *pReg = (const CPUMCTXSYSREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
1462 ADD_SYSREG64(s_aCpumPAuthKeyRegs[i].enmWHvReg, *pReg);
1463 }
1464 }
1465
1466 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
1467 != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
1468 {
1469 /* System registers. */
1470 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
1471 {
1472 if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
1473 {
1474 const CPUMCTXSYSREG *pReg = (const CPUMCTXSYSREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
1475 ADD_SYSREG64(s_aCpumSysRegs[i].enmWHvReg, *pReg);
1476 }
1477 }
1478 }
1479
1480 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1481 {
1482 /* Paging related system registers. */
1483 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegsPg); i++)
1484 {
1485 const CPUMCTXSYSREG *pReg = (const CPUMCTXSYSREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegsPg[i].offCpumCtx);
1486 ADD_SYSREG64(s_aCpumSysRegsPg[i].enmWHvReg, *pReg);
1487 }
1488 }
1489
1490 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1491 ADD_REG64_RAW(WHvArm64RegisterPstate, pVCpu->cpum.GstCtx.fPState);
1492
1493#undef ADD_REG64
1494#undef ADD_REG64_RAW
1495#undef ADD_REG128
1496
1497 /*
1498 * Set the registers.
1499 */
1500 Assert(iReg < RT_ELEMENTS(aValues));
1501 Assert(iReg < RT_ELEMENTS(aenmNames));
1502 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
1503 if (SUCCEEDED(hrc))
1504 {
1505 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
1506 return VINF_SUCCESS;
1507 }
1508 AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1509 pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
1510 hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
1511 return VERR_INTERNAL_ERROR;
1512}
1513
1514
1515NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
1516{
1517 WHV_REGISTER_NAME aenmNames[256];
1518
1519 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
1520 if (!fWhat)
1521 return VINF_SUCCESS;
1522
1523 uintptr_t iReg = 0;
1524
1525 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
1526 != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
1527 {
1528 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
1529 {
1530 if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
1531 aenmNames[iReg++] = s_aCpumRegs[i].enmWHvReg;
1532 }
1533 }
1534
1535 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1536 {
1537 /* SIMD/FP registers. */
1538 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
1539 {
1540 aenmNames[iReg++] = s_aCpumFpRegs[i].enmWHvReg;
1541 }
1542 }
1543
1544 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1545 {
1546 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1547 {
1548 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i);
1549 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i);
1550 }
1551
1552 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1553 {
1554 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i);
1555 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i);
1556 }
1557
1558 aenmNames[iReg++] = WHvArm64RegisterMdscrEl1;
1559 }
1560
1561 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1562 {
1563 /* PAuth registers. */
1564 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
1565 {
1566 aenmNames[iReg++] = s_aCpumPAuthKeyRegs[i].enmWHvReg;
1567 }
1568 }
1569
1570 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
1571 != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
1572 {
1573 /* System registers. */
1574 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
1575 {
1576 if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
1577 aenmNames[iReg++] = s_aCpumSysRegs[i].enmWHvReg;
1578 }
1579 }
1580
1581 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1582 {
1583 /* Paging related system registers. */
1584 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegsPg); i++)
1585 aenmNames[iReg++] = s_aCpumSysRegsPg[i].enmWHvReg;
1586 }
1587
1588 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1589 aenmNames[iReg++] = WHvArm64RegisterPstate;
1590
1591 size_t const cRegs = iReg;
1592 Assert(cRegs < RT_ELEMENTS(aenmNames));
1593
1594 /*
1595 * Get the registers.
1596 */
1597 WHV_REGISTER_VALUE aValues[256];
1598 RT_ZERO(aValues);
1599 Assert(RT_ELEMENTS(aValues) >= cRegs);
1600 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
1601 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
1602 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1603 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1604 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1605 , VERR_NEM_GET_REGISTERS_FAILED);
1606
1607 iReg = 0;
1608#define GET_REG64(a_DstVar, a_enmName) do { \
1609 Assert(aenmNames[iReg] == (a_enmName)); \
1610 (a_DstVar)->x = aValues[iReg].Reg64; \
1611 iReg++; \
1612 } while (0)
1613#define GET_REG64_RAW(a_DstVar, a_enmName) do { \
1614 Assert(aenmNames[iReg] == (a_enmName)); \
1615 *(a_DstVar) = aValues[iReg].Reg64; \
1616 iReg++; \
1617 } while (0)
1618#define GET_SYSREG64(a_DstVar, a_enmName) do { \
1619 Assert(aenmNames[iReg] == (a_enmName)); \
1620 (a_DstVar)->u64 = aValues[iReg].Reg64; \
1621 iReg++; \
1622 } while (0)
1623#define GET_REG128(a_DstVar, a_enmName) do { \
1624 Assert(aenmNames[iReg] == a_enmName); \
1625 (a_DstVar)->au64[0] = aValues[iReg].Reg128.Low64; \
1626 (a_DstVar)->au64[1] = aValues[iReg].Reg128.High64; \
1627 iReg++; \
1628 } while (0)
1629
1630 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
1631 != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
1632 {
1633 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
1634 {
1635 if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
1636 {
1637 CPUMCTXGREG *pReg = (CPUMCTXGREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
1638 GET_REG64(pReg, s_aCpumRegs[i].enmWHvReg);
1639 }
1640 }
1641 }
1642
1643 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1644 {
1645 /* SIMD/FP registers. */
1646 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
1647 {
1648 PCPUMCTXVREG pVReg = (PCPUMCTXVREG)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
1649 GET_REG128(pVReg, s_aCpumFpRegs[i].enmWHvReg);
1650 }
1651 }
1652
1653 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1654 {
1655 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1656 {
1657 GET_SYSREG64(&pVCpu->cpum.GstCtx.aBp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i));
1658 GET_SYSREG64(&pVCpu->cpum.GstCtx.aBp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i));
1659 }
1660
1661 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1662 {
1663 GET_SYSREG64(&pVCpu->cpum.GstCtx.aWp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i));
1664 GET_SYSREG64(&pVCpu->cpum.GstCtx.aWp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i));
1665 }
1666
1667 GET_SYSREG64(&pVCpu->cpum.GstCtx.Mdscr, WHvArm64RegisterMdscrEl1);
1668 }
1669
1670 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1671 {
1672 /* PAuth registers. */
1673 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
1674 {
1675 CPUMCTXSYSREG *pReg = (CPUMCTXSYSREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
1676 GET_SYSREG64(pReg, s_aCpumPAuthKeyRegs[i].enmWHvReg);
1677 }
1678 }
1679
1680 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
1681 != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
1682 {
1683 /* System registers. */
1684 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
1685 {
1686 if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
1687 {
1688 CPUMCTXSYSREG *pReg = (CPUMCTXSYSREG *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
1689 GET_SYSREG64(pReg, s_aCpumSysRegs[i].enmWHvReg);
1690 }
1691 }
1692 }
1693
1694 /* The paging related system registers need to be treated differently as they might invoke a PGM mode change. */
1695 uint64_t u64RegSctlrEl1;
1696 uint64_t u64RegTcrEl1;
1697 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1698 {
1699 GET_REG64_RAW(&u64RegSctlrEl1, WHvArm64RegisterSctlrEl1);
1700 GET_REG64_RAW(&u64RegTcrEl1, WHvArm64RegisterTcrEl1);
1701 GET_SYSREG64(&pVCpu->cpum.GstCtx.Ttbr0, WHvArm64RegisterTtbr0El1);
1702 GET_SYSREG64(&pVCpu->cpum.GstCtx.Ttbr1, WHvArm64RegisterTtbr1El1);
1703 if ( u64RegSctlrEl1 != pVCpu->cpum.GstCtx.Sctlr.u64
1704 || u64RegTcrEl1 != pVCpu->cpum.GstCtx.Tcr.u64)
1705 {
1706 pVCpu->cpum.GstCtx.Sctlr.u64 = u64RegSctlrEl1;
1707 pVCpu->cpum.GstCtx.Tcr.u64 = u64RegTcrEl1;
1708 int rc = PGMChangeMode(pVCpu, 1 /*bEl*/, u64RegSctlrEl1, u64RegTcrEl1);
1709 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
1710 }
1711 }
1712
1713 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1714 GET_REG64_RAW(&pVCpu->cpum.GstCtx.fPState, WHvArm64RegisterPstate);
1715
1716 /* Almost done, just update extrn flags. */
1717 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1718 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1719 pVCpu->cpum.GstCtx.fExtrn = 0;
1720
1721 return VINF_SUCCESS;
1722}
1723
1724
1725/**
1726 * Interface for importing state on demand (used by IEM).
1727 *
1728 * @returns VBox status code.
1729 * @param pVCpu The cross context CPU structure.
1730 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1731 */
1732VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1733{
1734 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1735 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1736}
1737
1738
1739/**
1740 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1741 *
1742 * @returns VBox status code.
1743 * @param pVCpu The cross context CPU structure.
1744 * @param pcTicks Where to return the CPU tick count.
1745 * @param puAux Where to return the TSC_AUX register value.
1746 */
1747VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1748{
1749 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1750
1751 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1752 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1753 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1754
1755 /* Ensure time for the partition is suspended - it will be resumed as soon as a vCPU starts executing. */
1756 HRESULT hrc = WHvSuspendPartitionTime(pVM->nem.s.hPartition);
1757 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1758 ("WHvSuspendPartitionTime(%p) -> %Rhrc (Last=%#x/%u)\n",
1759 pVM->nem.s.hPartition, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1760 , VERR_NEM_GET_REGISTERS_FAILED);
1761
1762 /* Call the official API. */
1763 WHV_REGISTER_NAME enmName = WHvArm64RegisterCntvctEl0;
1764 WHV_REGISTER_VALUE Value = { { {0, 0} } };
1765 hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &enmName, 1, &Value);
1766 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1767 ("WHvGetVirtualProcessorRegisters(%p, %u,{CNTVCT_EL0},1,) -> %Rhrc (Last=%#x/%u)\n",
1768 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1769 , VERR_NEM_GET_REGISTERS_FAILED);
1770 *pcTicks = Value.Reg64;
1771 LogFlow(("NEMHCQueryCpuTick: %#RX64 (host: %#RX64)\n", *pcTicks, ASMReadTSC()));
1772 if (puAux)
1773 *puAux = 0;
1774
1775 return VINF_SUCCESS;
1776}
1777
1778
1779/**
1780 * Resumes CPU clock (TSC) on all virtual CPUs.
1781 *
1782 * This is called by TM when the VM is started, restored, resumed or similar.
1783 *
1784 * @returns VBox status code.
1785 * @param pVM The cross context VM structure.
1786 * @param pVCpu The cross context CPU structure of the calling EMT.
1787 * @param uPausedTscValue The TSC value at the time of pausing.
1788 */
1789VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1790{
1791 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1792 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1793
1794 /*
1795 * Call the official API to do the job.
1796 */
1797 LogFlow(("NEMHCResumeCpuTickOnAll: %#RX64 (host: %#RX64)\n", uPausedTscValue, ASMReadTSC()));
1798
1799 /*
1800 * Now set the CNTVCT_EL0 register for each vCPU; Hyper-V will program the timer offset in
1801 * CNTVOFF_EL2 accordingly. ARM guarantees that CNTVCT_EL0 is synchronised across all CPUs,
1802 * as long as CNTVOFF_EL2 is the same everywhere. Let's just hope scheduling will not affect it
1803 * if the partition time is suspended.
1804 */
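    /* Architecturally CNTVCT_EL0 = CNTPCT_EL0 - CNTVOFF_EL2, so writing the same CNTVCT_EL0
       value on every vCPU should make Hyper-V derive one common CNTVOFF_EL2 for the partition. */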
1805 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1806 {
1807 WHV_REGISTER_NAME enmName = WHvArm64RegisterCntvctEl0;
1808 WHV_REGISTER_VALUE Value;
1809 Value.Reg64 = uPausedTscValue;
1810 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, idCpu, &enmName, 1, &Value);
1811 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1812 ("WHvSetVirtualProcessorRegisters(%p, 0,{CNTVCT_EL0},1,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1813 pVM->nem.s.hPartition, idCpu, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1814 , VERR_NEM_SET_TSC);
1815
1816 /* Make sure the CNTV_CTL_EL0 and CNTV_CVAL_EL0 registers are up to date after resuming (saved state load). */
1817 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
1818 pVCpuDst->nem.s.fSyncCntvRegs = true;
1819 }
1820
1821 HRESULT hrc = WHvResumePartitionTime(pVM->nem.s.hPartition);
1822 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1823 ("WHvResumePartitionTime(%p) -> %Rhrc (Last=%#x/%u)\n",
1824 pVM->nem.s.hPartition, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1825 , VERR_NEM_SET_TSC);
1826
1827 return VINF_SUCCESS;
1828}
1829
1830
1831#ifdef LOG_ENABLED
1832/**
1833 * Logs the current CPU state.
1834 */
1835static void nemR3WinLogState(PVMCC pVM, PVMCPUCC pVCpu)
1836{
1837 if (LogIs3Enabled())
1838 {
1839 char szRegs[4096];
1840 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1841 "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
1842 "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
1843 "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
1844 "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
1845 "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
1846 "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
1847 "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
1848 "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
1849 "pc=%016VR{pc} pstate=%016VR{pstate}\n"
1850 "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
1851 "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
1852 "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
1853 "vbar_el1=%016VR{vbar_el1}\n"
1854 );
1855 char szInstr[256]; RT_ZERO(szInstr);
1856 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1857 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1858 szInstr, sizeof(szInstr), NULL);
1859 Log3(("%s%s\n", szRegs, szInstr));
1860 }
1861}
1862#endif /* LOG_ENABLED */
1863
1864
1865/**
1866 * Copies register state from the (common) exit context.
1867 *
1868 * ASSUMES no state copied yet.
1869 *
1870 * @param pVCpu The cross context per CPU structure.
1871 * @param pMsgHdr The common message header.
1872 */
1873DECLINLINE(void) nemR3WinCopyStateFromArmHeader(PVMCPUCC pVCpu, WHV_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1874{
1875#ifdef LOG_ENABLED /* When state logging is enabled the state is synced completely upon VM exit. */
1876 if (!LogIs3Enabled())
1877#endif
1878 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
1879 == (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE));
1880
1881 pVCpu->cpum.GstCtx.Pc.u64 = pMsgHdr->Pc;
1882 pVCpu->cpum.GstCtx.fPState = pMsgHdr->Cpsr;
1883
1884 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
1885}
1886
1887
1888/**
1889 * Returns the byte size from the given access SAS value.
1890 *
1891 * @returns Number of bytes to transfer.
1892 * @param uSas The SAS value to convert.
1893 */
1894DECLINLINE(size_t) nemR3WinGetByteCountFromSas(uint8_t uSas)
1895{
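    /* SAS is the 2-bit access size field of the data abort ISS (ESR_ELx bits 23:22):
       0 = byte, 1 = halfword, 2 = word, 3 = doubleword. */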
1896 switch (uSas)
1897 {
1898 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1899 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1900 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1901 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1902 default:
1903 AssertReleaseFailed();
1904 }
1905
1906 return 0;
1907}
1908
1909
1910/**
1911 * Sets the given general purpose register to the given value.
1912 *
1913 * @param pVCpu The cross context virtual CPU structure of the
1914 * calling EMT.
1915 * @param uReg The register index.
1916 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1917 * @param fSignExtend Flag whether to sign extend the value.
1918 * @param u64Val The value.
1919 */
1920DECLINLINE(void) nemR3WinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1921{
1922 AssertReturnVoid(uReg < 31);
1923
1924 if (f64BitReg)
1925 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1926 else
1927 pVCpu->cpum.GstCtx.aGRegs[uReg].x = (uint64_t)(fSignExtend ? (int32_t)u64Val : (uint32_t)u64Val);
1928
1929 /* Mark the register as not extern anymore. */
1930 switch (uReg)
1931 {
1932 case 0:
1933 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1934 break;
1935 case 1:
1936 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1937 break;
1938 case 2:
1939 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1940 break;
1941 case 3:
1942 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1943 break;
1944 default:
1945 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1946 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1947 }
1948}
1949
1950
1951/**
1952 * Gets the given general purpose register and returns the value.
1953 *
1954 * @returns Value from the given register.
1955 * @param pVCpu The cross context virtual CPU structure of the
1956 * calling EMT.
1957 * @param uReg The register index.
1958 */
1959DECLINLINE(uint64_t) nemR3WinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1960{
1961 AssertReturn(uReg <= ARMV8_A64_REG_XZR, 0);
1962
1963 if (uReg == ARMV8_A64_REG_XZR)
1964 return 0;
1965
1966 /** @todo Import the register if extern. */
1967 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1968
1969 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1970}
1971
1972
1973/**
1974 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
1975 *
1976 * @returns Strict VBox status code.
1977 * @param pVM The cross context VM structure.
1978 * @param pVCpu The cross context per CPU structure.
1979 * @param pExit The VM exit information to handle.
1980 * @sa nemHCWinHandleMessageMemory
1981 */
1982NEM_TMPL_STATIC VBOXSTRICTRC
1983nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
1984{
1985 uint64_t const uHostTsc = ASMReadTSC();
1986 Assert(pExit->MemoryAccess.Header.InterceptAccessType != 3);
1987
1988 /*
1989 * Emulate the memory access, either access handler or special memory.
1990 */
1991 WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
1992 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
1993 pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
1994 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1995 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1996 pHdr->Pc, uHostTsc);
1997 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
1998 RT_NOREF_PV(pExitRec);
1999 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
2000 AssertRCReturn(rc, rc);
2001
2002#ifdef LOG_ENABLED
2003 uint8_t const cbInstr = pExit->MemoryAccess.InstructionByteCount;
2004 RTGCPTR const GCPtrVa = pExit->MemoryAccess.Gva;
2005#endif
2006 RTGCPHYS const GCPhys = pExit->MemoryAccess.Gpa;
2007 uint64_t const uIss = pExit->MemoryAccess.Syndrome;
2008 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
2009 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
2010 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
2011 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
2012 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
2013 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
2014 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
2015 size_t cbAcc = nemR3WinGetByteCountFromSas(uAcc);
2016 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhys=%RGp cbInstr=%u\n",
2017 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrVa, GCPhys, cbInstr));
2018
2019 RT_NOREF(fL2Fault);
2020
2021 VBOXSTRICTRC rcStrict;
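    /* When ISV is set the syndrome above fully describes the access (direction, size, target
       register and sign extension), so it can be emulated without fetching the instruction;
       otherwise we have to read and disassemble the instruction ourselves further down. */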
2022 if (fIsv)
2023 {
2024 EMHistoryAddExit(pVCpu,
2025 fWrite
2026 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2027 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2028 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
2029
2030 uint64_t u64Val = 0;
2031 if (fWrite)
2032 {
2033 u64Val = nemR3WinGetGReg(pVCpu, uReg);
2034 rcStrict = PGMPhysWrite(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2035 Log4(("MmioExit/%u: %08RX64: WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
2036 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2037 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2038 }
2039 else
2040 {
2041 rcStrict = PGMPhysRead(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2042 Log4(("MmioExit/%u: %08RX64: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2043 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2044 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2045 if (rcStrict == VINF_SUCCESS)
2046 nemR3WinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
2047 }
2048 }
2049 else
2050 {
2051 /** @todo Our UEFI firmware accesses the flash region with the following instruction
2052 * when the NVRAM actually contains data:
2053 * ldrb w9, [x6, #-0x0001]!
2054 * This is too complicated for the hardware so the ISV bit is not set. Until there
2055 * is a proper IEM implementation we just handle this here for now to avoid annoying
2056 * users too much.
2057 */
2058 /* The following ASSUMES that the vCPU state is completely synced. */
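        /* Fallback path: fetch the faulting instruction via PGM, disassemble it with DIS and
           hand-emulate the two access patterns seen so far (pre-indexed LDRB from the UEFI
           flash driver and 32-bit LDP from the Windows TPM driver); anything else fails with
           VERR_NOT_SUPPORTED below. */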
2059
2060 /* Read instruction. */
2061 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2062 const void *pvPageR3 = NULL;
2063 PGMPAGEMAPLOCK PageMapLock;
2064
2065 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
2066 if (rcStrict == VINF_SUCCESS)
2067 {
2068 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
2069 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
2070
2071 DISSTATE Dis;
2072 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
2073 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
2074 if (rcStrict == VINF_SUCCESS)
2075 {
2076 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
2077 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2078 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2079 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
2080 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
2081 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
2082 {
2083 /* The fault address is already the final address. */
2084 uint8_t bVal = 0;
2085 rcStrict = PGMPhysRead(pVM, GCPhys, &bVal, 1, PGMACCESSORIGIN_HM);
2086 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2087 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, sizeof(bVal), sizeof(bVal),
2088 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
2089 if (rcStrict == VINF_SUCCESS)
2090 {
2091 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
2092 /* Update the indexed register. */
2093 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
2094 }
2095 }
2096 /*
2097 * Seeing the following with the Windows 11/ARM TPM driver:
2098 * %fffff800e5342888 48 25 45 29 ldp w8, w9, [x10, #+0x0028]
2099 */
2100 else if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDP
2101 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2102 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2103 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmReg
2104 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2105 && Dis.aParams[2].armv8.enmType == kDisArmv8OpParmAddrInGpr
2106 && Dis.aParams[2].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit)
2107 {
2108 /** @todo This is tricky to handle if the first register read returns something else than VINF_SUCCESS... */
2109 /* The fault address is already the final address. */
2110 uint32_t u32Val1 = 0;
2111 uint32_t u32Val2 = 0;
2112 rcStrict = PGMPhysRead(pVM, GCPhys, &u32Val1, sizeof(u32Val1), PGMACCESSORIGIN_HM);
2113 if (rcStrict == VINF_SUCCESS)
2114 rcStrict = PGMPhysRead(pVM, GCPhys + sizeof(uint32_t), &u32Val2, sizeof(u32Val2), PGMACCESSORIGIN_HM);
2115 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs %.*Rhxs rcStrict=%Rrc\n",
2116 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, 2 * sizeof(uint32_t), sizeof(u32Val1),
2117 &u32Val1, sizeof(u32Val2), &u32Val2, VBOXSTRICTRC_VAL(rcStrict) ));
2118 if (rcStrict == VINF_SUCCESS)
2119 {
2120 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val1);
2121 nemR3WinSetGReg(pVCpu, Dis.aParams[1].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val2);
2122 }
2123 }
2124 else
2125 AssertFailedReturn(VERR_NOT_SUPPORTED);
2126 }
2127 }
2128 }
2129
2130 if (rcStrict == VINF_SUCCESS)
2131 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t); /** @todo Why is InstructionByteCount always 0? */
2132
2133 return rcStrict;
2134}
2135
2136
2137/**
2138 * Deals with hypercall exits (WHvRunVpExitReasonHypercall).
2139 *
2140 * @returns Strict VBox status code.
2141 * @param pVM The cross context VM structure.
2142 * @param pVCpu The cross context per CPU structure.
2143 * @param pExit The VM exit information to handle.
2144 * @sa nemR3WinHandleExit
2145 */
2146NEM_TMPL_STATIC VBOXSTRICTRC
2147nemR3WinHandleExitHypercall(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2148{
2149 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2150
2151 /** @todo Raise exception to EL1 if PSCI not configured. */
2152 /** @todo Need a generic mechanism here to pass this on to, GIM maybe? */
2153 uint32_t uFunId = pExit->Hypercall.Immediate;
2154 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
2155 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
2156 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
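    /* SMCCC function ID layout: bit 31 = fast call, bit 30 = 64-bit (SMC64/HVC64) convention,
       bits 29:24 = owning entity (4 = standard secure services, which is where PSCI lives),
       bits 15:0 = function number. */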
2157 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
2158 {
2159 switch (uFunNum)
2160 {
2161 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2162 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
2163 break;
2164 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2165 rcStrict = VMR3PowerOff(pVM->pUVM);
2166 break;
2167 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2168 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2169 {
2170 bool fHaltOnReset;
2171 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
2172 if (RT_SUCCESS(rc) && fHaltOnReset)
2173 {
2174 Log(("nemHCLnxHandleExitHypercall: Halt On Reset!\n"));
2175 rcStrict = VINF_EM_HALT;
2176 }
2177 else
2178 {
2179 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
2180 VM_FF_SET(pVM, VM_FF_RESET);
2181 rcStrict = VINF_EM_RESET;
2182 }
2183 break;
2184 }
2185 case ARM_PSCI_FUNC_ID_CPU_ON:
2186 {
2187 uint64_t u64TgtCpu = pExit->Hypercall.X[1];
2188 RTGCPHYS GCPhysExecAddr = pExit->Hypercall.X[2];
2189 uint64_t u64CtxId = pExit->Hypercall.X[3];
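                /* X1 holds the target MPIDR; only affinity level 0 is used to derive the
                   VCpu id here, i.e. a flat single-cluster topology is assumed. */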
2190 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
2191 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
2192 break;
2193 }
2194 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
2195 {
2196 uint32_t u32FunNum = (uint32_t)pExit->Hypercall.X[1];
2197 switch (u32FunNum)
2198 {
2199 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2200 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2201 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2202 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2203 case ARM_PSCI_FUNC_ID_CPU_ON:
2204 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2205 false /*f64BitReg*/, false /*fSignExtend*/,
2206 (uint64_t)ARM_PSCI_STS_SUCCESS);
2207 break;
2208 default:
2209 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2210 false /*f64BitReg*/, false /*fSignExtend*/,
2211 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2212 }
2213 break;
2214 }
2215 default:
2216 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2217 }
2218 }
2219 else
2220 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2221
2222 /** @todo What to do if immediate is != 0? */
2223
2224 if (rcStrict == VINF_SUCCESS)
2225 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
2226
2227 return rcStrict;
2228}
2229
2230
2231/**
2232 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
2233 *
2234 * @returns Strict VBox status code.
2235 * @param pVM The cross context VM structure.
2236 * @param pVCpu The cross context per CPU structure.
2237 * @param pExit The VM exit information to handle.
2238 * @sa nemHCWinHandleMessageUnrecoverableException
2239 */
2240NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2241{
2242#if 0
2243 /*
2244 * Just copy the state we've got and handle it in the loop for now.
2245 */
2246 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2247 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2248 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
2249 RT_NOREF_PV(pVM);
2250 return VINF_EM_TRIPLE_FAULT;
2251#else
2252 /*
2253 * Let IEM decide whether this is really it.
2254 */
2255 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
2256 pExit->UnrecoverableException.Header.Pc, ASMReadTSC());
2257 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->UnrecoverableException.Header);
2258 AssertReleaseFailed();
2259 RT_NOREF_PV(pVM);
2260 return VINF_SUCCESS;
2261#endif
2262}
2263
2264
2265/**
2266 * Handles VM exits.
2267 *
2268 * @returns Strict VBox status code.
2269 * @param pVM The cross context VM structure.
2270 * @param pVCpu The cross context per CPU structure.
2271 * @param pExit The VM exit information to handle.
2272 * @sa nemHCWinHandleMessage
2273 */
2274NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2275{
2276#ifdef LOG_ENABLED
2277 if (LogIs3Enabled())
2278 {
2279 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
2280 AssertRCReturn(rc, rc);
2281
2282 nemR3WinLogState(pVM, pVCpu);
2283 }
2284#endif
2285
2286 switch (pExit->ExitReason)
2287 {
2288 case WHvRunVpExitReasonUnmappedGpa:
2289 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
2290 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);
2291
2292 case WHvRunVpExitReasonCanceled:
2293 Log4(("CanceledExit/%u\n", pVCpu->idCpu));
2294 return VINF_SUCCESS;
2295
2296 case WHvRunVpExitReasonHypercall:
2297 return nemR3WinHandleExitHypercall(pVM, pVCpu, pExit);
2298
2299 case 0x8001000c: /* WHvRunVpExitReasonArm64Reset */
2300 {
2301 if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF)
2302 return VMR3PowerOff(pVM->pUVM);
2303 else if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_RESET)
2304 {
2305 VM_FF_SET(pVM, VM_FF_RESET);
2306 return VINF_EM_RESET;
2307 }
2308 else
2309 AssertLogRelFailedReturn(VERR_NEM_IPE_3);
2310 }
2311
2312 case WHvRunVpExitReasonUnrecoverableException:
2313 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
2314 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);
2315
2316 case WHvRunVpExitReasonUnsupportedFeature:
2317 case WHvRunVpExitReasonInvalidVpRegisterValue:
2318 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
2319 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
2320 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
2321
2322 /* Undesired exits: */
2323 case WHvRunVpExitReasonNone:
2324 default:
2325 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
2326 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
2327 }
2328}
2329
2330
2331VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2332{
2333 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState));
2334#ifdef LOG_ENABLED
2335 if (LogIs3Enabled())
2336 nemR3WinLogState(pVM, pVCpu);
2337#endif
2338
2339 /*
2340 * Try switch to NEM runloop state.
2341 */
2342 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2343 { /* likely */ }
2344 else
2345 {
2346 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2347 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2348 return VINF_SUCCESS;
2349 }
2350
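    /* Push CNTV_CTL_EL0/CNTV_CVAL_EL0 back to Hyper-V if they were changed while the vCPU
       wasn't executing (the flag is set by NEMHCResumeCpuTickOnAll(), e.g. after a saved
       state load), so the virtual timer fires as the guest expects. */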
2351 if (pVCpu->nem.s.fSyncCntvRegs)
2352 {
2353 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterCntvCtlEl0, WHvArm64RegisterCntvCvalEl0 };
2354 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)];
2355 aRegs[0].Reg64 = pVCpu->cpum.GstCtx.CntvCtlEl0;
2356 aRegs[1].Reg64 = pVCpu->cpum.GstCtx.CntvCValEl0;
2357
2358 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2359 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2360 ("WHvSetVirtualProcessorRegisters(%p, 0,{CNTV_CTL_EL0, CNTV_CVAL_EL0}, 2,) -> %Rhrc (Last=%#x/%u)\n",
2361 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2362 , VERR_NEM_IPE_9);
2363 pVCpu->nem.s.fSyncCntvRegs = false;
2364 }
2365
2366
2367 /*
2368 * The run loop.
2369 *
2370 * The current approach to state updating is to use the sledgehammer and sync
2371 * everything every time. This will be optimized later.
2372 */
2373 const bool fSingleStepping = DBGFIsStepping(pVCpu);
2374 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2375 for (unsigned iLoop = 0;; iLoop++)
2376 {
2377 /*
2378 * Poll timers and run for a bit.
2379 *
2380 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
2381 * so we take the time of the next timer event and use that as a deadline.
2382 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
2383 */
2384 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2385 * the whole polling job when timers have changed... */
2386 uint64_t offDeltaIgnored;
2387 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2388 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2389 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2390 {
2391 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
2392 {
2393 /* Ensure that Hyper-V has the whole state. */
2394 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
2395 AssertRCReturn(rc2, rc2);
2396
2397#ifdef LOG_ENABLED
2398 if (LogIsFlowEnabled())
2399 {
2400 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2401 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2402 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2403 LogFlow(("NEM/%u: Entry @ %08RX64 pstate=%#RX64\n", pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64));
2404 }
2405#endif
2406
2407 MY_WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
2408 TMNotifyStartOfExecution(pVM, pVCpu);
2409
2410 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
2411
2412 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
2413 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2414#ifdef LOG_ENABLED
2415 if (LogIsFlowEnabled())
2416 {
2417 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2418 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2419 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2420 LogFlow(("NEM/%u: Exit @ %08RX64 pstate=%#RX64 Reason=%#x\n",
2421 pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64, ExitReason.ExitReason));
2422 }
2423#endif
2424 if (SUCCEEDED(hrc))
2425 {
2426 /* Always sync the CNTV_CTL_EL0/CNTV_CVAL_EL0 registers, just like we do on macOS. */
2427 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterCntvCtlEl0, WHvArm64RegisterCntvCvalEl0 };
2428 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2429 hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2430 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2431 ("WHvGetVirtualProcessorRegisters(%p, 0,{CNTV_CTL_EL0, CNTV_CVAL_EL0}, 2,) -> %Rhrc (Last=%#x/%u)\n",
2432 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2433 , VERR_NEM_IPE_9);
2434
2435 pVCpu->cpum.GstCtx.CntvCtlEl0 = aRegs[0].Reg64;
2436 pVCpu->cpum.GstCtx.CntvCValEl0 = aRegs[1].Reg64;
2437
2438 /*
2439 * Deal with the message.
2440 */
2441 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
2442 if (rcStrict == VINF_SUCCESS)
2443 { /* hopefully likely */ }
2444 else
2445 {
2446 LogFlow(("NEM/%u: breaking: nemR3WinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2447 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2448 break;
2449 }
2450 }
2451 else
2452 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
2453 pVCpu->idCpu, hrc, GetLastError()),
2454 VERR_NEM_IPE_0);
2455
2456 /*
2457 * If no relevant FFs are pending, loop.
2458 */
2459 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2460 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2461 continue;
2462
2463 /** @todo Try handle pending flags, not just return to EM loops. Take care
2464 * not to set important RCs here unless we've handled a message. */
2465 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
2466 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
2467 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
2468 }
2469 else
2470 {
2471 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
2472 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
2473 }
2474 }
2475 else
2476 {
2477 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
2478 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
2479 }
2480 break;
2481 } /* the run loop */
2482
2483
2484 /*
2485 * If the CPU is running, make sure to stop it before we try to sync back the
2486 * state and return to EM. We don't sync back the whole state if we can help it.
2487 */
2488 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2489 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2490
2491 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
2492 {
2493 /* Try anticipate what we might need. */
2494 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
2495 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2496 || RT_FAILURE(rcStrict))
2497 fImport = CPUMCTX_EXTRN_ALL;
2498 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
2499 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2500
2501 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2502 {
2503 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport);
2504 if (RT_SUCCESS(rc2))
2505 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2506 else if (RT_SUCCESS(rcStrict))
2507 rcStrict = rc2;
2508 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2509 pVCpu->cpum.GstCtx.fExtrn = 0;
2510 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2511 }
2512 else
2513 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2514 }
2515 else
2516 {
2517 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2518 pVCpu->cpum.GstCtx.fExtrn = 0;
2519 }
2520
2521#if 0
2522 UINT32 cbWritten;
2523 WHV_ARM64_LOCAL_INTERRUPT_CONTROLLER_STATE IntrState;
2524 HRESULT hrc = WHvGetVirtualProcessorState(pVM->nem.s.hPartition, pVCpu->idCpu, WHvVirtualProcessorStateTypeInterruptControllerState2,
2525 &IntrState, sizeof(IntrState), &cbWritten);
2526 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2527 ("WHvGetVirtualProcessorState(%p, %u,WHvVirtualProcessorStateTypeInterruptControllerState2,) -> %Rhrc (Last=%#x/%u)\n",
2528 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2529 , VERR_NEM_GET_REGISTERS_FAILED);
2530 LogFlowFunc(("IntrState: cbWritten=%u\n", cbWritten));
2531 for (uint32_t i = 0; i < RT_ELEMENTS(IntrState.BankedInterruptState); i++)
2532 {
2533 WHV_ARM64_INTERRUPT_STATE *pState = &IntrState.BankedInterruptState[i];
2534 LogFlowFunc(("IntrState: Intr %u:\n"
2535 " Enabled=%RTbool\n"
2536 " EdgeTriggered=%RTbool\n"
2537 " Asserted=%RTbool\n"
2538 " SetPending=%RTbool\n"
2539 " Active=%RTbool\n"
2540 " Direct=%RTbool\n"
2541 " GicrIpriorityrConfigured=%u\n"
2542 " GicrIpriorityrActive=%u\n",
2543 i, pState->Enabled, pState->EdgeTriggered, pState->Asserted, pState->SetPending, pState->Active, pState->Direct,
2544 pState->GicrIpriorityrConfigured, pState->GicrIpriorityrActive));
2545 }
2546#endif
2547
2548 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64,
2549 pVCpu->cpum.GstCtx.fPState, VBOXSTRICTRC_VAL(rcStrict) ));
2550 return rcStrict;
2551}
2552
2553
2554VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2555{
2556 Assert(VM_IS_NEM_ENABLED(pVM));
2557 RT_NOREF(pVM, pVCpu);
2558 return true;
2559}
2560
2561
2562VMMR3_INT_DECL(int) NEMR3Halt(PVM pVM, PVMCPU pVCpu)
2563{
2564 Assert(EMGetState(pVCpu) == EMSTATE_WAIT_SIPI);
2565
2566 /*
2567 * Force the vCPU to get out of the SIPI state and into the normal runloop
2568 * as Hyper-V doesn't cause VM exits for PSCI calls, so we wouldn't notice when
2569 * the guest brings APs online.
2570 * Instead we force the EMT to run the vCPU through Hyper-V which manages the state.
2571 */
2572 RT_NOREF(pVM);
2573 EMSetState(pVCpu, EMSTATE_HALTED);
2574 return VINF_EM_RESCHEDULE;
2575}
2576
2577
2578bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2579{
2580 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2581 return false;
2582}
2583
2584
2585void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2586{
2587 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
2588 if (pVM->nem.s.fCreatedEmts)
2589 {
2590 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2591 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2592 RT_NOREF_PV(hrc);
2593 }
2594 RT_NOREF_PV(fFlags);
2595}
2596
2597
2598DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2599{
2600 RT_NOREF(pVM, fUseDebugLoop);
2601 return false;
2602}
2603
2604
2605DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2606{
2607 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2608 return false;
2609}
2610
2611
2612DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2613{
2614 PGMPAGEMAPLOCK Lock;
2615 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2616 if (RT_SUCCESS(rc))
2617 PGMPhysReleasePageMappingLock(pVM, &Lock);
2618 return rc;
2619}
2620
2621
2622DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2623{
2624 PGMPAGEMAPLOCK Lock;
2625 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2626 if (RT_SUCCESS(rc))
2627 PGMPhysReleasePageMappingLock(pVM, &Lock);
2628 return rc;
2629}
2630
2631
2632VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2633 uint8_t *pu2State, uint32_t *puNemRange)
2634{
2635 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
2636 GCPhys, cb, pvR3, pu2State, *pu2State, puNemRange, *puNemRange));
2637
2638 *pu2State = UINT8_MAX;
2639 RT_NOREF(puNemRange);
2640
2641 if (pvR3)
2642 {
2643 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2644 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
2645 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2646 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2647 if (SUCCEEDED(hrc))
2648 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2649 else
2650 {
2651 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2652 GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2653 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2654 return VERR_NEM_MAP_PAGES_FAILED;
2655 }
2656 }
2657 return VINF_SUCCESS;
2658}
2659
2660
2661VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2662{
2663 RT_NOREF(pVM);
2664 return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
2665}
2666
2667
2668VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2669 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2670{
2671 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
2672 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
2673 RT_NOREF(puNemRange);
2674
2675 /*
2676 * Unmap the RAM we're replacing.
2677 */
2678 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2679 {
2680 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
2681 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
2682 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
2683 if (SUCCEEDED(hrc))
2684 { /* likely */ }
2685 else if (pvMmio2)
2686 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
2687 GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2688 else
2689 {
2690 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
2691 GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2692 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2693 return VERR_NEM_UNMAP_PAGES_FAILED;
2694 }
2695 }
2696
2697 /*
2698 * Map MMIO2 if any.
2699 */
2700 if (pvMmio2)
2701 {
2702 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
2703 WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
2704 if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
2705 fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
2706 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2707 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
2708 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2709 if (SUCCEEDED(hrc))
2710 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2711 else
2712 {
2713 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
2714 GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2715 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2716 return VERR_NEM_MAP_PAGES_FAILED;
2717 }
2718 }
2719 else
2720 {
2721 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
2722 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2723 }
2724 RT_NOREF(pvRam);
2725 return VINF_SUCCESS;
2726}
2727
2728
2729VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2730 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2731{
2732 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2733 return VINF_SUCCESS;
2734}
2735
2736
2737VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2738 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2739{
2740 int rc = VINF_SUCCESS;
2741 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2742 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2743
2744 /*
2745 * Unmap the MMIO2 pages.
2746 */
2747 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2748 * we may have more stuff to unmap even in case of pure MMIO... */
2749 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2750 {
2751 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
2752 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
2753 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
2754 if (FAILED(hrc))
2755 {
2756 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
2757 GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2758 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2759 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2760 }
2761 }
2762
2763 /*
2764 * Restore the RAM we replaced.
2765 */
2766 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2767 {
2768 AssertPtr(pvRam);
2769 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2770 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
2771 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2772 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2773 if (SUCCEEDED(hrc))
2774 { /* likely */ }
2775 else
2776 {
2777 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2778 GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2779 rc = VERR_NEM_MAP_PAGES_FAILED;
2780 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2781 }
2782 if (pu2State)
2783 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2784 }
2785 /* Mark the pages as unmapped if relevant. */
2786 else if (pu2State)
2787 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2788
2789 RT_NOREF(pvMmio2, puNemRange);
2790 return rc;
2791}
2792
2793
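/**
 * Queries and resets the dirty page tracking bitmap of an MMIO2 range via
 * WHvQueryGpaRangeDirtyBitmap (one bit per guest page).
 *
 * A minimal caller sketch, for illustration only, assuming a bitmap sized to
 * one bit per page rounded up to a multiple of 64 pages; HandleDirtyPage is a
 * hypothetical callback, the real caller is PGM (see the profiling note in the
 * function body):
 * @code
 *     size_t const    cbBitmap = RT_ALIGN_Z(cb / GUEST_PAGE_SIZE, 64) / 8;
 *     uint64_t *const pbmDirty = (uint64_t *)RTMemAllocZ(cbBitmap);
 *     int rc = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, GCPhys, cb, uNemRange, pbmDirty, cbBitmap);
 *     if (RT_SUCCESS(rc))
 *         for (uint32_t iPage = 0; iPage < cb / GUEST_PAGE_SIZE; iPage++)
 *             if (ASMBitTest(pbmDirty, iPage))
 *                 HandleDirtyPage(GCPhys + (RTGCPHYS)iPage * GUEST_PAGE_SIZE); // hypothetical handler
 *     RTMemFree(pbmDirty);
 * @endcode
 */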
VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    Assert(VM_IS_NEM_ENABLED(pVM));
    AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
    Assert(cbBitmap == (uint32_t)cbBitmap);
    RT_NOREF(uNemRange);

    /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
    HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
    if (SUCCEEDED(hrc))
        return VINF_SUCCESS;

    AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
                           GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
}


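/**
 * Early ROM registration notification; nothing is mapped here, the actual WHv
 * mapping happens in the late notification below.
 */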
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);

    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;
    return VINF_SUCCESS;
}


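/**
 * Late ROM registration notification: (re-)maps the ROM pages into the
 * partition with read and execute access only.
 */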
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
                                                   uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
    *pu2State = UINT8_MAX;

    /*
     * (Re-)map readonly.
     */
    AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
    STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
    HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
    STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
    if (SUCCEEDED(hrc))
        *pu2State = NEM_WIN_PAGE_STATE_READABLE;
    else
    {
        LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
                GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
        return VERR_NEM_MAP_PAGES_FAILED;
    }
    RT_NOREF(fFlags, puNemRange);
    return VINF_SUCCESS;
}


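/**
 * A20 gate change notification; the A20 gate is an x86 concept, so this is a
 * no-op for the ARMv8 backend.
 */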
VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
    Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
    RT_NOREF(pVCpu, fEnabled);
}


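/**
 * Physical access handler registration notification; currently only logged by
 * this backend.
 */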
void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
}


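/**
 * Physical access handler deregistration notification; if backing RAM is given
 * (pvMemR3), it is mapped back into the partition with full access.
 */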
VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
    if (pvMemR3)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
                                     WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
            AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
                                   pvMemR3, GCPhys, cb, hrc));
    }
    RT_NOREF(enmKind);
}


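/**
 * Physical access handler modification notification; currently only logged by
 * this backend.
 */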
void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
}


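/**
 * Page allocation notification; not expected to be reached by this backend
 * yet, hence the AssertFailed below.
 */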
int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);

    AssertFailed();
    return VINF_SUCCESS;
}


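/**
 * Page protection change notification; currently only logged, the partition
 * mapping is left as-is.
 */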
VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
                                                  PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
}


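/**
 * Notification that the physical backing of a guest page changed; not expected
 * to be reached by this backend yet, hence the AssertFailed below.
 */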
VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                              RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);

    AssertFailed();
}


/**
 * Returns features supported by the NEM backend.
 *
 * @returns Flags of features supported by the native NEM backend.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
{
    RT_NOREF(pVM);
    /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
}


/** @page pg_nem_win_aarmv8 NEM/win - Native Execution Manager, Windows (ARMv8).
 *
 * Open questions:
 *   - InstructionByteCount and InstructionBytes for unmapped GPA exit are zero...
 */
