VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux-armv8.cpp

Last change on this file was 104399, checked in by vboxsync, 3 weeks ago

VMM/NEMR3Native-linux-armv8.cpp: Add PSCI handling which enables SMP + power off + reset, and implement vCPU state syncing between the VMM and KVM, bugref:10391

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 98.1 KB
1/* $Id: NEMR3Native-linux-armv8.cpp 104399 2024-04-22 20:30:25Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Linux backend arm64 version.
4 */
5
6/*
7 * Copyright (C) 2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_NEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/nem.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/em.h>
37#include <VBox/vmm/gic.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/trpm.h>
40#include "NEMInternal.h"
41#include <VBox/vmm/vmcc.h>
42
43#include <iprt/alloca.h>
44#include <iprt/string.h>
45#include <iprt/system.h>
46#include <iprt/armv8.h>
47
48#include <iprt/formats/arm-psci.h>
49
50#include <errno.h>
51#include <unistd.h>
52#include <sys/ioctl.h>
53#include <sys/fcntl.h>
54#include <sys/mman.h>
55#include <linux/kvm.h>
56
57/** @note This is an experiment right now and therefore kept separate from the amd64 KVM NEM backend.
58 * We'll see whether it makes sense to merge the two later on once things have settled.
59 */
60
61
62/*********************************************************************************************************************************
63* Defined Constants And Macros *
64*********************************************************************************************************************************/
65
66/** Core register group. */
67#define KVM_ARM64_REG_CORE_GROUP UINT64_C(0x6030000000100000)
68/** System register group. */
69#define KVM_ARM64_REG_SYS_GROUP UINT64_C(0x6030000000130000)
70/** SIMD register group. */
71#define KVM_ARM64_REG_SIMD_GROUP UINT64_C(0x6040000000100050)
72/** FP register group. */
73#define KVM_ARM64_REG_FP_GROUP UINT64_C(0x6020000000100000)
74
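/* Note: A sketch of how these constants are put together (assuming the standard KVM ONE_REG
   encoding from linux/kvm.h): the upper bits select the architecture (KVM_REG_ARM64) and the
   access size (U32/U64, or U128 for the SIMD group), the middle bits select the register group
   (core, system register, etc.), and the low 16 bits - filled in by the *_CREATE macros
   below - select the individual register. */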
75#define KVM_ARM64_REG_CORE_CREATE(a_idReg) (KVM_ARM64_REG_CORE_GROUP | ((uint64_t)(a_idReg) & 0xffff))
76#define KVM_ARM64_REG_GPR(a_iGpr) KVM_ARM64_REG_CORE_CREATE((a_iGpr) << 1)
77#define KVM_ARM64_REG_SP_EL0 KVM_ARM64_REG_CORE_CREATE(0x3e)
78#define KVM_ARM64_REG_PC KVM_ARM64_REG_CORE_CREATE(0x40)
79#define KVM_ARM64_REG_PSTATE KVM_ARM64_REG_CORE_CREATE(0x42)
80#define KVM_ARM64_REG_SP_EL1 KVM_ARM64_REG_CORE_CREATE(0x44)
81#define KVM_ARM64_REG_ELR_EL1 KVM_ARM64_REG_CORE_CREATE(0x46)
82#define KVM_ARM64_REG_SPSR_EL1 KVM_ARM64_REG_CORE_CREATE(0x48)
83#define KVM_ARM64_REG_SPSR_ABT KVM_ARM64_REG_CORE_CREATE(0x4a)
84#define KVM_ARM64_REG_SPSR_UND KVM_ARM64_REG_CORE_CREATE(0x4c)
85#define KVM_ARM64_REG_SPSR_IRQ KVM_ARM64_REG_CORE_CREATE(0x4e)
86#define KVM_ARM64_REG_SPSR_FIQ KVM_ARM64_REG_CORE_CREATE(0x50)
87
88/** This maps to our IPRT representation of system register IDs, yay! */
89#define KVM_ARM64_REG_SYS_CREATE(a_idSysReg) (KVM_ARM64_REG_SYS_GROUP | ((uint64_t)(a_idSysReg) & 0xffff))
90
91#define KVM_ARM64_REG_SIMD_CREATE(a_iVecReg) (KVM_ARM64_REG_SIMD_GROUP | (((uint64_t)(a_iVecReg) << 2) & 0xffff))
92
93#define KVM_ARM64_REG_FP_CREATE(a_idReg) (KVM_ARM64_REG_FP_GROUP | ((uint64_t)(a_idReg) & 0xffff))
94#define KVM_ARM64_REG_FP_FPSR KVM_ARM64_REG_FP_CREATE(0xd4)
95#define KVM_ARM64_REG_FP_FPCR KVM_ARM64_REG_FP_CREATE(0xd5)
96
97
98/*********************************************************************************************************************************
99* Structures and Typedefs *
100*********************************************************************************************************************************/
101
102
103/*********************************************************************************************************************************
104* Global Variables *
105*********************************************************************************************************************************/
106/** The general registers. */
107static const struct
108{
109 uint64_t idKvmReg;
110 uint32_t fCpumExtrn;
111 uint32_t offCpumCtx;
112} s_aCpumRegs[] =
113{
114#define CPUM_GREG_EMIT_X0_X3(a_Idx) { KVM_ARM64_REG_GPR(a_Idx), CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
115#define CPUM_GREG_EMIT_X4_X28(a_Idx) { KVM_ARM64_REG_GPR(a_Idx), CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
116 CPUM_GREG_EMIT_X0_X3(0),
117 CPUM_GREG_EMIT_X0_X3(1),
118 CPUM_GREG_EMIT_X0_X3(2),
119 CPUM_GREG_EMIT_X0_X3(3),
120 CPUM_GREG_EMIT_X4_X28(4),
121 CPUM_GREG_EMIT_X4_X28(5),
122 CPUM_GREG_EMIT_X4_X28(6),
123 CPUM_GREG_EMIT_X4_X28(7),
124 CPUM_GREG_EMIT_X4_X28(8),
125 CPUM_GREG_EMIT_X4_X28(9),
126 CPUM_GREG_EMIT_X4_X28(10),
127 CPUM_GREG_EMIT_X4_X28(11),
128 CPUM_GREG_EMIT_X4_X28(12),
129 CPUM_GREG_EMIT_X4_X28(13),
130 CPUM_GREG_EMIT_X4_X28(14),
131 CPUM_GREG_EMIT_X4_X28(15),
132 CPUM_GREG_EMIT_X4_X28(16),
133 CPUM_GREG_EMIT_X4_X28(17),
134 CPUM_GREG_EMIT_X4_X28(18),
135 CPUM_GREG_EMIT_X4_X28(19),
136 CPUM_GREG_EMIT_X4_X28(20),
137 CPUM_GREG_EMIT_X4_X28(21),
138 CPUM_GREG_EMIT_X4_X28(22),
139 CPUM_GREG_EMIT_X4_X28(23),
140 CPUM_GREG_EMIT_X4_X28(24),
141 CPUM_GREG_EMIT_X4_X28(25),
142 CPUM_GREG_EMIT_X4_X28(26),
143 CPUM_GREG_EMIT_X4_X28(27),
144 CPUM_GREG_EMIT_X4_X28(28),
145 { KVM_ARM64_REG_GPR(29), CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
146 { KVM_ARM64_REG_GPR(30), CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
147 { KVM_ARM64_REG_PC, CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
148#undef CPUM_GREG_EMIT_X0_X3
149#undef CPUM_GREG_EMIT_X4_X28
150};
151/** SIMD/FP registers. */
152static const struct
153{
154 uint64_t idKvmReg;
155 uint32_t offCpumCtx;
156} s_aCpumFpRegs[] =
157{
158#define CPUM_VREG_EMIT(a_Idx) { KVM_ARM64_REG_SIMD_CREATE(a_Idx), RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
159 CPUM_VREG_EMIT(0),
160 CPUM_VREG_EMIT(1),
161 CPUM_VREG_EMIT(2),
162 CPUM_VREG_EMIT(3),
163 CPUM_VREG_EMIT(4),
164 CPUM_VREG_EMIT(5),
165 CPUM_VREG_EMIT(6),
166 CPUM_VREG_EMIT(7),
167 CPUM_VREG_EMIT(8),
168 CPUM_VREG_EMIT(9),
169 CPUM_VREG_EMIT(10),
170 CPUM_VREG_EMIT(11),
171 CPUM_VREG_EMIT(12),
172 CPUM_VREG_EMIT(13),
173 CPUM_VREG_EMIT(14),
174 CPUM_VREG_EMIT(15),
175 CPUM_VREG_EMIT(16),
176 CPUM_VREG_EMIT(17),
177 CPUM_VREG_EMIT(18),
178 CPUM_VREG_EMIT(19),
179 CPUM_VREG_EMIT(20),
180 CPUM_VREG_EMIT(21),
181 CPUM_VREG_EMIT(22),
182 CPUM_VREG_EMIT(23),
183 CPUM_VREG_EMIT(24),
184 CPUM_VREG_EMIT(25),
185 CPUM_VREG_EMIT(26),
186 CPUM_VREG_EMIT(27),
187 CPUM_VREG_EMIT(28),
188 CPUM_VREG_EMIT(29),
189 CPUM_VREG_EMIT(30),
190 CPUM_VREG_EMIT(31)
191#undef CPUM_VREG_EMIT
192};
193/** System registers. */
194static const struct
195{
196 uint64_t idKvmReg;
197 uint32_t fCpumExtrn;
198 uint32_t offCpumCtx;
199} s_aCpumSysRegs[] =
200{
201 { KVM_ARM64_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
202 { KVM_ARM64_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
203 { KVM_ARM64_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
204 { KVM_ARM64_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
205 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_SCTRL_EL1), CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
206 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TCR_EL1), CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
207 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TTBR0_EL1), CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
208 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TTBR1_EL1), CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
209 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_VBAR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, VBar.u64) },
210 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_AFSR0_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr0.u64) },
211 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_AFSR1_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr1.u64) },
212 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_AMAIR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Amair.u64) },
213 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_CNTKCTL_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, CntKCtl.u64) },
214 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_CONTEXTIDR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, ContextIdr.u64) },
215 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_CPACR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Cpacr.u64) },
216 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_CSSELR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Csselr.u64) },
217 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ESR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Esr.u64) },
218 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_FAR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Far.u64) },
219 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_MAIR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Mair.u64) },
220 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_PAR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Par.u64) },
221 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TPIDRRO_EL0), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64) },
222 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TPIDR_EL0), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64) },
223 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TPIDR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64) },
224 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_MDCCINT_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, MDccInt.u64) }
225};
226/** ID registers. */
227static const struct
228{
229 uint64_t idKvmReg;
230 uint32_t offIdStruct;
231} s_aIdRegs[] =
232{
233 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1) },
234 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1) },
235 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
236 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
237 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
238 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
239 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR2_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
240 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1) },
241 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1) }
242};
243
244
245/**
246 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
247 *
248 * @returns VBox status code.
249 * @param pVM The cross context VM structure.
250 * @param pErrInfo Where to always return error info.
251 */
252static int nemR3LnxInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
253{
254 AssertReturn(pVM->nem.s.fdKvm != -1, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
255
256 /*
257 * Capabilities.
258 */
259 static const struct
260 {
261 const char *pszName;
262 int iCap;
263 uint32_t offNem : 24;
264 uint32_t cbNem : 3;
265 uint32_t fReqNonZero : 1;
266 uint32_t uReserved : 4;
267 } s_aCaps[] =
268 {
269#define CAP_ENTRY__L(a_Define) { #a_Define, a_Define, UINT32_C(0x00ffffff), 0, 0, 0 }
270#define CAP_ENTRY__S(a_Define, a_Member) { #a_Define, a_Define, RT_UOFFSETOF(NEM, a_Member), RT_SIZEOFMEMB(NEM, a_Member), 0, 0 }
271#define CAP_ENTRY_MS(a_Define, a_Member) { #a_Define, a_Define, RT_UOFFSETOF(NEM, a_Member), RT_SIZEOFMEMB(NEM, a_Member), 1, 0 }
272#define CAP_ENTRY__U(a_Number) { "KVM_CAP_" #a_Number, a_Number, UINT32_C(0x00ffffff), 0, 0, 0 }
273#define CAP_ENTRY_ML(a_Number) { "KVM_CAP_" #a_Number, a_Number, UINT32_C(0x00ffffff), 0, 1, 0 }
274
275 CAP_ENTRY__L(KVM_CAP_IRQCHIP), /* 0 */
276 CAP_ENTRY__L(KVM_CAP_HLT),
277 CAP_ENTRY__L(KVM_CAP_MMU_SHADOW_CACHE_CONTROL),
278 CAP_ENTRY_ML(KVM_CAP_USER_MEMORY),
279 CAP_ENTRY__L(KVM_CAP_SET_TSS_ADDR),
280 CAP_ENTRY__U(5),
281 CAP_ENTRY__L(KVM_CAP_VAPIC),
282 CAP_ENTRY__L(KVM_CAP_EXT_CPUID),
283 CAP_ENTRY__L(KVM_CAP_CLOCKSOURCE),
284 CAP_ENTRY__L(KVM_CAP_NR_VCPUS),
285 CAP_ENTRY_MS(KVM_CAP_NR_MEMSLOTS, cMaxMemSlots), /* 10 */
286 CAP_ENTRY__L(KVM_CAP_PIT),
287 CAP_ENTRY__L(KVM_CAP_NOP_IO_DELAY),
288 CAP_ENTRY__L(KVM_CAP_PV_MMU),
289 CAP_ENTRY__L(KVM_CAP_MP_STATE),
290 CAP_ENTRY__L(KVM_CAP_COALESCED_MMIO),
291 CAP_ENTRY__L(KVM_CAP_SYNC_MMU),
292 CAP_ENTRY__U(17),
293 CAP_ENTRY__L(KVM_CAP_IOMMU),
294 CAP_ENTRY__U(19), /* Buggy KVM_CAP_JOIN_MEMORY_REGIONS? */
295 CAP_ENTRY__U(20), /* Non-working KVM_CAP_DESTROY_MEMORY_REGION? */
296 CAP_ENTRY__L(KVM_CAP_DESTROY_MEMORY_REGION_WORKS), /* 21 */
297 CAP_ENTRY__L(KVM_CAP_USER_NMI),
298#ifdef __KVM_HAVE_GUEST_DEBUG
299 CAP_ENTRY__L(KVM_CAP_SET_GUEST_DEBUG),
300#endif
301#ifdef __KVM_HAVE_PIT
302 CAP_ENTRY__L(KVM_CAP_REINJECT_CONTROL),
303#endif
304 CAP_ENTRY__L(KVM_CAP_IRQ_ROUTING),
305 CAP_ENTRY__L(KVM_CAP_IRQ_INJECT_STATUS),
306 CAP_ENTRY__U(27),
307 CAP_ENTRY__U(28),
308 CAP_ENTRY__L(KVM_CAP_ASSIGN_DEV_IRQ),
309 CAP_ENTRY__L(KVM_CAP_JOIN_MEMORY_REGIONS_WORKS), /* 30 */
310#ifdef __KVM_HAVE_MCE
311 CAP_ENTRY__L(KVM_CAP_MCE),
312#endif
313 CAP_ENTRY__L(KVM_CAP_IRQFD),
314#ifdef __KVM_HAVE_PIT
315 CAP_ENTRY__L(KVM_CAP_PIT2),
316#endif
317 CAP_ENTRY__L(KVM_CAP_SET_BOOT_CPU_ID),
318#ifdef __KVM_HAVE_PIT_STATE2
319 CAP_ENTRY__L(KVM_CAP_PIT_STATE2),
320#endif
321 CAP_ENTRY__L(KVM_CAP_IOEVENTFD),
322 CAP_ENTRY__L(KVM_CAP_SET_IDENTITY_MAP_ADDR),
323#ifdef __KVM_HAVE_XEN_HVM
324 CAP_ENTRY__L(KVM_CAP_XEN_HVM),
325#endif
326 CAP_ENTRY__L(KVM_CAP_ADJUST_CLOCK),
327 CAP_ENTRY__L(KVM_CAP_INTERNAL_ERROR_DATA), /* 40 */
328#ifdef __KVM_HAVE_VCPU_EVENTS
329 CAP_ENTRY_ML(KVM_CAP_VCPU_EVENTS),
330#else
331 CAP_ENTRY_ML(41),
332#endif
333 CAP_ENTRY__L(KVM_CAP_S390_PSW),
334 CAP_ENTRY__L(KVM_CAP_PPC_SEGSTATE),
335 CAP_ENTRY__L(KVM_CAP_HYPERV),
336 CAP_ENTRY__L(KVM_CAP_HYPERV_VAPIC),
337 CAP_ENTRY__L(KVM_CAP_HYPERV_SPIN),
338 CAP_ENTRY__L(KVM_CAP_PCI_SEGMENT),
339 CAP_ENTRY__L(KVM_CAP_PPC_PAIRED_SINGLES),
340 CAP_ENTRY__L(KVM_CAP_INTR_SHADOW),
341#ifdef __KVM_HAVE_DEBUGREGS
342 CAP_ENTRY__L(KVM_CAP_DEBUGREGS), /* 50 */
343#endif
344 CAP_ENTRY__L(KVM_CAP_X86_ROBUST_SINGLESTEP),
345 CAP_ENTRY__L(KVM_CAP_PPC_OSI),
346 CAP_ENTRY__L(KVM_CAP_PPC_UNSET_IRQ),
347 CAP_ENTRY__L(KVM_CAP_ENABLE_CAP),
348 CAP_ENTRY__L(KVM_CAP_PPC_GET_PVINFO),
349 CAP_ENTRY__L(KVM_CAP_PPC_IRQ_LEVEL),
350 CAP_ENTRY__L(KVM_CAP_ASYNC_PF),
351 CAP_ENTRY__L(KVM_CAP_TSC_CONTROL), /* 60 */
352 CAP_ENTRY__L(KVM_CAP_GET_TSC_KHZ),
353 CAP_ENTRY__L(KVM_CAP_PPC_BOOKE_SREGS),
354 CAP_ENTRY__L(KVM_CAP_SPAPR_TCE),
355 CAP_ENTRY__L(KVM_CAP_PPC_SMT),
356 CAP_ENTRY__L(KVM_CAP_PPC_RMA),
357 CAP_ENTRY__L(KVM_CAP_MAX_VCPUS),
358 CAP_ENTRY__L(KVM_CAP_PPC_HIOR),
359 CAP_ENTRY__L(KVM_CAP_PPC_PAPR),
360 CAP_ENTRY__L(KVM_CAP_SW_TLB),
361 CAP_ENTRY__L(KVM_CAP_ONE_REG), /* 70 */
362 CAP_ENTRY__L(KVM_CAP_S390_GMAP),
363 CAP_ENTRY__L(KVM_CAP_TSC_DEADLINE_TIMER),
364 CAP_ENTRY__L(KVM_CAP_S390_UCONTROL),
365 CAP_ENTRY__L(KVM_CAP_SYNC_REGS),
366 CAP_ENTRY__L(KVM_CAP_PCI_2_3),
367 CAP_ENTRY__L(KVM_CAP_KVMCLOCK_CTRL),
368 CAP_ENTRY__L(KVM_CAP_SIGNAL_MSI),
369 CAP_ENTRY__L(KVM_CAP_PPC_GET_SMMU_INFO),
370 CAP_ENTRY__L(KVM_CAP_S390_COW),
371 CAP_ENTRY__L(KVM_CAP_PPC_ALLOC_HTAB), /* 80 */
372 CAP_ENTRY__L(KVM_CAP_READONLY_MEM),
373 CAP_ENTRY__L(KVM_CAP_IRQFD_RESAMPLE),
374 CAP_ENTRY__L(KVM_CAP_PPC_BOOKE_WATCHDOG),
375 CAP_ENTRY__L(KVM_CAP_PPC_HTAB_FD),
376 CAP_ENTRY__L(KVM_CAP_S390_CSS_SUPPORT),
377 CAP_ENTRY__L(KVM_CAP_PPC_EPR),
378 CAP_ENTRY_ML(KVM_CAP_ARM_PSCI),
379 CAP_ENTRY_ML(KVM_CAP_ARM_SET_DEVICE_ADDR),
380 CAP_ENTRY_ML(KVM_CAP_DEVICE_CTRL),
381 CAP_ENTRY__L(KVM_CAP_IRQ_MPIC), /* 90 */
382 CAP_ENTRY__L(KVM_CAP_PPC_RTAS),
383 CAP_ENTRY__L(KVM_CAP_IRQ_XICS),
384 CAP_ENTRY__L(KVM_CAP_ARM_EL1_32BIT),
385 CAP_ENTRY__L(KVM_CAP_SPAPR_MULTITCE),
386 CAP_ENTRY__L(KVM_CAP_EXT_EMUL_CPUID),
387 CAP_ENTRY__L(KVM_CAP_HYPERV_TIME),
388 CAP_ENTRY__L(KVM_CAP_IOAPIC_POLARITY_IGNORED),
389 CAP_ENTRY__L(KVM_CAP_ENABLE_CAP_VM),
390 CAP_ENTRY__L(KVM_CAP_S390_IRQCHIP),
391 CAP_ENTRY__L(KVM_CAP_IOEVENTFD_NO_LENGTH), /* 100 */
392 CAP_ENTRY__L(KVM_CAP_VM_ATTRIBUTES),
393 CAP_ENTRY_ML(KVM_CAP_ARM_PSCI_0_2),
394 CAP_ENTRY__L(KVM_CAP_PPC_FIXUP_HCALL),
395 CAP_ENTRY__L(KVM_CAP_PPC_ENABLE_HCALL),
396 CAP_ENTRY__L(KVM_CAP_CHECK_EXTENSION_VM),
397 CAP_ENTRY__L(KVM_CAP_S390_USER_SIGP),
398 CAP_ENTRY__L(KVM_CAP_S390_VECTOR_REGISTERS),
399 CAP_ENTRY__L(KVM_CAP_S390_MEM_OP),
400 CAP_ENTRY__L(KVM_CAP_S390_USER_STSI),
401 CAP_ENTRY__L(KVM_CAP_S390_SKEYS), /* 110 */
402 CAP_ENTRY__L(KVM_CAP_MIPS_FPU),
403 CAP_ENTRY__L(KVM_CAP_MIPS_MSA),
404 CAP_ENTRY__L(KVM_CAP_S390_INJECT_IRQ),
405 CAP_ENTRY__L(KVM_CAP_S390_IRQ_STATE),
406 CAP_ENTRY__L(KVM_CAP_PPC_HWRNG),
407 CAP_ENTRY__L(KVM_CAP_DISABLE_QUIRKS),
408 CAP_ENTRY__L(KVM_CAP_X86_SMM),
409 CAP_ENTRY__L(KVM_CAP_MULTI_ADDRESS_SPACE),
410 CAP_ENTRY__L(KVM_CAP_GUEST_DEBUG_HW_BPS),
411 CAP_ENTRY__L(KVM_CAP_GUEST_DEBUG_HW_WPS), /* 120 */
412 CAP_ENTRY__L(KVM_CAP_SPLIT_IRQCHIP),
413 CAP_ENTRY__L(KVM_CAP_IOEVENTFD_ANY_LENGTH),
414 CAP_ENTRY__L(KVM_CAP_HYPERV_SYNIC),
415 CAP_ENTRY__L(KVM_CAP_S390_RI),
416 CAP_ENTRY__L(KVM_CAP_SPAPR_TCE_64),
417 CAP_ENTRY__L(KVM_CAP_ARM_PMU_V3),
418 CAP_ENTRY__L(KVM_CAP_VCPU_ATTRIBUTES),
419 CAP_ENTRY__L(KVM_CAP_MAX_VCPU_ID),
420 CAP_ENTRY__L(KVM_CAP_X2APIC_API),
421 CAP_ENTRY__L(KVM_CAP_S390_USER_INSTR0), /* 130 */
422 CAP_ENTRY__L(KVM_CAP_MSI_DEVID),
423 CAP_ENTRY__L(KVM_CAP_PPC_HTM),
424 CAP_ENTRY__L(KVM_CAP_SPAPR_RESIZE_HPT),
425 CAP_ENTRY__L(KVM_CAP_PPC_MMU_RADIX),
426 CAP_ENTRY__L(KVM_CAP_PPC_MMU_HASH_V3),
427 CAP_ENTRY__L(KVM_CAP_IMMEDIATE_EXIT),
428 CAP_ENTRY__L(KVM_CAP_MIPS_VZ),
429 CAP_ENTRY__L(KVM_CAP_MIPS_TE),
430 CAP_ENTRY__L(KVM_CAP_MIPS_64BIT),
431 CAP_ENTRY__L(KVM_CAP_S390_GS), /* 140 */
432 CAP_ENTRY__L(KVM_CAP_S390_AIS),
433 CAP_ENTRY__L(KVM_CAP_SPAPR_TCE_VFIO),
434 CAP_ENTRY__L(KVM_CAP_X86_DISABLE_EXITS),
435 CAP_ENTRY_ML(KVM_CAP_ARM_USER_IRQ),
436 CAP_ENTRY__L(KVM_CAP_S390_CMMA_MIGRATION),
437 CAP_ENTRY__L(KVM_CAP_PPC_FWNMI),
438 CAP_ENTRY__L(KVM_CAP_PPC_SMT_POSSIBLE),
439 CAP_ENTRY__L(KVM_CAP_HYPERV_SYNIC2),
440 CAP_ENTRY__L(KVM_CAP_HYPERV_VP_INDEX),
441 CAP_ENTRY__L(KVM_CAP_S390_AIS_MIGRATION), /* 150 */
442 CAP_ENTRY__L(KVM_CAP_PPC_GET_CPU_CHAR),
443 CAP_ENTRY__L(KVM_CAP_S390_BPB),
444 CAP_ENTRY__L(KVM_CAP_GET_MSR_FEATURES),
445 CAP_ENTRY__L(KVM_CAP_HYPERV_EVENTFD),
446 CAP_ENTRY__L(KVM_CAP_HYPERV_TLBFLUSH),
447 CAP_ENTRY__L(KVM_CAP_S390_HPAGE_1M),
448 CAP_ENTRY__L(KVM_CAP_NESTED_STATE),
449 CAP_ENTRY__L(KVM_CAP_ARM_INJECT_SERROR_ESR),
450 CAP_ENTRY__L(KVM_CAP_MSR_PLATFORM_INFO),
451 CAP_ENTRY__L(KVM_CAP_PPC_NESTED_HV), /* 160 */
452 CAP_ENTRY__L(KVM_CAP_HYPERV_SEND_IPI),
453 CAP_ENTRY__L(KVM_CAP_COALESCED_PIO),
454 CAP_ENTRY__L(KVM_CAP_HYPERV_ENLIGHTENED_VMCS),
455 CAP_ENTRY__L(KVM_CAP_EXCEPTION_PAYLOAD),
456 CAP_ENTRY_MS(KVM_CAP_ARM_VM_IPA_SIZE, cIpaBits),
457 CAP_ENTRY__L(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT),
458 CAP_ENTRY__L(KVM_CAP_HYPERV_CPUID),
459 CAP_ENTRY__L(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2),
460 CAP_ENTRY__L(KVM_CAP_PPC_IRQ_XIVE),
461 CAP_ENTRY__L(KVM_CAP_ARM_SVE), /* 170 */
462 CAP_ENTRY__L(KVM_CAP_ARM_PTRAUTH_ADDRESS),
463 CAP_ENTRY__L(KVM_CAP_ARM_PTRAUTH_GENERIC),
464 CAP_ENTRY__L(KVM_CAP_PMU_EVENT_FILTER),
465 CAP_ENTRY__L(KVM_CAP_ARM_IRQ_LINE_LAYOUT_2),
466 CAP_ENTRY__L(KVM_CAP_HYPERV_DIRECT_TLBFLUSH),
467 CAP_ENTRY__L(KVM_CAP_PPC_GUEST_DEBUG_SSTEP),
468 CAP_ENTRY__L(KVM_CAP_ARM_NISV_TO_USER),
469 CAP_ENTRY__L(KVM_CAP_ARM_INJECT_EXT_DABT),
470 CAP_ENTRY__L(KVM_CAP_S390_VCPU_RESETS),
471 CAP_ENTRY__L(KVM_CAP_S390_PROTECTED), /* 180 */
472 CAP_ENTRY__L(KVM_CAP_PPC_SECURE_GUEST),
473 CAP_ENTRY__L(KVM_CAP_HALT_POLL),
474 CAP_ENTRY__L(KVM_CAP_ASYNC_PF_INT),
475 CAP_ENTRY__L(KVM_CAP_LAST_CPU),
476 CAP_ENTRY__L(KVM_CAP_SMALLER_MAXPHYADDR),
477 CAP_ENTRY__L(KVM_CAP_S390_DIAG318),
478 CAP_ENTRY__L(KVM_CAP_STEAL_TIME),
479 CAP_ENTRY__L(KVM_CAP_X86_USER_SPACE_MSR), /* (since 5.10) */
480 CAP_ENTRY__L(KVM_CAP_X86_MSR_FILTER),
481 CAP_ENTRY__L(KVM_CAP_ENFORCE_PV_FEATURE_CPUID), /* 190 */
482 CAP_ENTRY__L(KVM_CAP_SYS_HYPERV_CPUID),
483 CAP_ENTRY__L(KVM_CAP_DIRTY_LOG_RING),
484 CAP_ENTRY__L(KVM_CAP_X86_BUS_LOCK_EXIT),
485 CAP_ENTRY__L(KVM_CAP_PPC_DAWR1),
486 CAP_ENTRY__L(KVM_CAP_SET_GUEST_DEBUG2),
487 CAP_ENTRY__L(KVM_CAP_SGX_ATTRIBUTE),
488 CAP_ENTRY__L(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM),
489 CAP_ENTRY__L(KVM_CAP_PTP_KVM),
490 CAP_ENTRY__U(199),
491 CAP_ENTRY__U(200),
492 CAP_ENTRY__U(201),
493 CAP_ENTRY__U(202),
494 CAP_ENTRY__U(203),
495 CAP_ENTRY__U(204),
496 CAP_ENTRY__U(205),
497 CAP_ENTRY__U(206),
498 CAP_ENTRY__U(207),
499 CAP_ENTRY__U(208),
500 CAP_ENTRY__U(209),
501 CAP_ENTRY__U(210),
502 CAP_ENTRY__U(211),
503 CAP_ENTRY__U(212),
504 CAP_ENTRY__U(213),
505 CAP_ENTRY__U(214),
506 CAP_ENTRY__U(215),
507 CAP_ENTRY__U(216),
508 };
509
510 LogRel(("NEM: KVM capabilities (system):\n"));
511 int rcRet = VINF_SUCCESS;
512 for (unsigned i = 0; i < RT_ELEMENTS(s_aCaps); i++)
513 {
514 int rc = ioctl(pVM->nem.s.fdKvm, KVM_CHECK_EXTENSION, s_aCaps[i].iCap);
515 if (rc >= 10)
516 LogRel(("NEM: %36s: %#x (%d)\n", s_aCaps[i].pszName, rc, rc));
517 else if (rc >= 0)
518 LogRel(("NEM: %36s: %d\n", s_aCaps[i].pszName, rc));
519 else
520 LogRel(("NEM: %s failed: %d/%d\n", s_aCaps[i].pszName, rc, errno));
521 switch (s_aCaps[i].cbNem)
522 {
523 case 0:
524 break;
525 case 1:
526 {
527 uint8_t *puValue = (uint8_t *)&pVM->nem.padding[s_aCaps[i].offNem];
528 AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
529 *puValue = (uint8_t)rc;
530 AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
531 break;
532 }
533 case 2:
534 {
535 uint16_t *puValue = (uint16_t *)&pVM->nem.padding[s_aCaps[i].offNem];
536 AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
537 *puValue = (uint16_t)rc;
538 AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
539 break;
540 }
541 case 4:
542 {
543 uint32_t *puValue = (uint32_t *)&pVM->nem.padding[s_aCaps[i].offNem];
544 AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
545 *puValue = (uint32_t)rc;
546 AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
547 break;
548 }
549 default:
550 rcRet = RTErrInfoSetF(pErrInfo, VERR_NEM_IPE_0, "s_aCaps[%u] is bad: cbNem=%#x - %s",
551 i, s_aCaps[i].cbNem, s_aCaps[i].pszName);
552 AssertFailedReturn(rcRet);
553 }
554
555 /*
556 * Is a required-non-zero entry zero or failing?
557 */
558 if (s_aCaps[i].fReqNonZero && rc <= 0)
559 rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_MISSING_FEATURE,
560 "Required capability '%s' is missing!", s_aCaps[i].pszName);
561 }
562
563 /*
564 * Get per VCpu KVM_RUN MMAP area size.
565 */
566 int rc = ioctl(pVM->nem.s.fdKvm, KVM_GET_VCPU_MMAP_SIZE, 0UL);
567 if ((unsigned)rc < _64M)
568 {
569 pVM->nem.s.cbVCpuMmap = (uint32_t)rc;
570 LogRel(("NEM: %36s: %#x (%d)\n", "KVM_GET_VCPU_MMAP_SIZE", rc, rc));
571 }
572 else if (rc < 0)
573 rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_MISSING_FEATURE, "KVM_GET_VCPU_MMAP_SIZE failed: %d", errno);
574 else
575 rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_INIT_FAILED, "Odd KVM_GET_VCPU_MMAP_SIZE value: %#x (%d)", rc, rc);
576
577 /*
578 * Init the slot ID bitmap.
579 */
580 ASMBitSet(&pVM->nem.s.bmSlotIds[0], 0); /* don't use slot 0 */
581 if (pVM->nem.s.cMaxMemSlots < _32K)
582 ASMBitSetRange(&pVM->nem.s.bmSlotIds[0], pVM->nem.s.cMaxMemSlots, _32K);
583 ASMBitSet(&pVM->nem.s.bmSlotIds[0], _32K - 1); /* don't use the last slot */
584
585 return rcRet;
586}
587
588
589/**
590 * Queries and logs the supported register list from KVM.
591 *
592 * @returns VBox status code.
593 * @param fdVCpu The file descriptor number of vCPU 0.
594 */
595static int nemR3LnxLogRegList(int fdVCpu)
596{
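 /* Local stand-in for struct kvm_reg_list with a fixed-size array: cRegs is set to the array
 capacity on input, the kernel fills aRegs with the supported register IDs and writes the
 total count back to cRegs (failing with E2BIG if the array is too small). */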
597 struct KVM_REG_LIST
598 {
599 uint64_t cRegs;
600 uint64_t aRegs[1024];
601 } RegList; RT_ZERO(RegList);
602
603 RegList.cRegs = RT_ELEMENTS(RegList.aRegs);
604 int rcLnx = ioctl(fdVCpu, KVM_GET_REG_LIST, &RegList);
605 if (rcLnx != 0)
606 return RTErrConvertFromErrno(errno);
607
608 LogRel(("NEM: KVM vCPU registers:\n"));
609
610 for (uint32_t i = 0; i < RegList.cRegs; i++)
611 LogRel(("NEM: %36s: %#RX64\n", "Unknown" /** @todo */, RegList.aRegs[i]));
612
613 return VINF_SUCCESS;
614}
615
616
617/**
618 * Sets the given attribute in KVM to the given value.
619 *
620 * @returns VBox status code.
621 * @param pVM The VM instance.
622 * @param u32Grp The device attribute group being set.
623 * @param u32Attr The actual attribute inside the group being set.
624 * @param pvAttrVal Pointer to the attribute value to set.
625 * @param pszAttribute Attribute description for logging.
626 * @param pErrInfo Optional error information.
627 */
628static int nemR3LnxSetAttribute(PVM pVM, uint32_t u32Grp, uint32_t u32Attr, const void *pvAttrVal, const char *pszAttribute,
629 PRTERRINFO pErrInfo)
630{
631 struct kvm_device_attr DevAttr;
632
633 DevAttr.flags = 0;
634 DevAttr.group = u32Grp;
635 DevAttr.attr = u32Attr;
636 DevAttr.addr = (uintptr_t)pvAttrVal;
637 int rcLnx = ioctl(pVM->nem.s.fdVm, KVM_HAS_DEVICE_ATTR, &DevAttr);
638 if (rcLnx < 0)
639 return RTErrInfoSetF(pErrInfo, RTErrConvertFromErrno(errno),
640 N_("KVM error: KVM doesn't support setting the attribute \"%s\" (%d)"),
641 pszAttribute, errno);
642
643 rcLnx = ioctl(pVM->nem.s.fdVm, KVM_SET_DEVICE_ATTR, &DevAttr);
644 if (rcLnx < 0)
645 return RTErrInfoSetF(pErrInfo, RTErrConvertFromErrno(errno),
646 N_("KVM error: Setting the attribute \"%s\" for KVM failed (%d)"),
647 pszAttribute, errno);
648
649 return VINF_SUCCESS;
650}
651
652
653DECL_FORCE_INLINE(int) nemR3LnxKvmSetQueryReg(PVMCPUCC pVCpu, bool fQuery, uint64_t idKvmReg, const void *pv)
654{
655 struct kvm_one_reg Reg;
656 Reg.id = idKvmReg;
657 Reg.addr = (uintptr_t)pv;
658
659 /*
660 * Who thought that this API was a good idea? Supporting to query/set just one register
661 * at a time is horribly inefficient.
662 */
663 int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, fQuery ? KVM_GET_ONE_REG : KVM_SET_ONE_REG, &Reg);
664 if (!rcLnx)
665 return 0;
666
667 return RTErrConvertFromErrno(-rcLnx);
668}
669
670DECLINLINE(int) nemR3LnxKvmQueryRegU64(PVMCPUCC pVCpu, uint64_t idKvmReg, uint64_t *pu64)
671{
672 return nemR3LnxKvmSetQueryReg(pVCpu, true /*fQuery*/, idKvmReg, pu64);
673}
674
675
676DECLINLINE(int) nemR3LnxKvmQueryRegU32(PVMCPUCC pVCpu, uint64_t idKvmReg, uint32_t *pu32)
677{
678 return nemR3LnxKvmSetQueryReg(pVCpu, true /*fQuery*/, idKvmReg, pu32);
679}
680
681
682DECLINLINE(int) nemR3LnxKvmQueryRegPV(PVMCPUCC pVCpu, uint64_t idKvmReg, void *pv)
683{
684 return nemR3LnxKvmSetQueryReg(pVCpu, true /*fQuery*/, idKvmReg, pv);
685}
686
687
688DECLINLINE(int) nemR3LnxKvmSetRegU64(PVMCPUCC pVCpu, uint64_t idKvmReg, const uint64_t *pu64)
689{
690 return nemR3LnxKvmSetQueryReg(pVCpu, false /*fQuery*/, idKvmReg, pu64);
691}
692
693
694DECLINLINE(int) nemR3LnxKvmSetRegU32(PVMCPUCC pVCpu, uint64_t idKvmReg, const uint32_t *pu32)
695{
696 return nemR3LnxKvmSetQueryReg(pVCpu, false /*fQuery*/, idKvmReg, pu32);
697}
698
699
700DECLINLINE(int) nemR3LnxKvmSetRegPV(PVMCPUCC pVCpu, uint64_t idKvmReg, const void *pv)
701{
702 return nemR3LnxKvmSetQueryReg(pVCpu, false /*fQuery*/, idKvmReg, pv);
703}
704
705
706/**
707 * Does the early setup of a KVM VM.
708 *
709 * @returns VBox status code.
710 * @param pVM The cross context VM structure.
711 * @param pErrInfo Where to always return error info.
712 */
713static int nemR3LnxInitSetupVm(PVM pVM, PRTERRINFO pErrInfo)
714{
715 AssertReturn(pVM->nem.s.fdVm != -1, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
716
717 /*
718 * Create the VCpus.
719 */
720 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
721 {
722 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
723
724 /* Create it. */
725 pVCpu->nem.s.fdVCpu = ioctl(pVM->nem.s.fdVm, KVM_CREATE_VCPU, (unsigned long)idCpu);
726 if (pVCpu->nem.s.fdVCpu < 0)
727 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_CREATE_VCPU failed for VCpu #%u: %d", idCpu, errno);
728
729 /* Map the KVM_RUN area. */
730 pVCpu->nem.s.pRun = (struct kvm_run *)mmap(NULL, pVM->nem.s.cbVCpuMmap, PROT_READ | PROT_WRITE, MAP_SHARED,
731 pVCpu->nem.s.fdVCpu, 0 /*offset*/);
732 if ((void *)pVCpu->nem.s.pRun == MAP_FAILED)
733 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "mmap failed for VCpu #%u: %d", idCpu, errno);
734
735 /* Initialize the vCPU. */
736 struct kvm_vcpu_init VCpuInit; RT_ZERO(VCpuInit);
737 VCpuInit.target = KVM_ARM_TARGET_GENERIC_V8;
738 /** @todo Enable features. */
739 if (ioctl(pVCpu->nem.s.fdVCpu, KVM_ARM_VCPU_INIT, &VCpuInit) != 0)
740 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_ARM_VCPU_INIT failed for VCpu #%u: %d", idCpu, errno);
741
742#if 0
743 uint32_t fFeatures = 0; /** @todo SVE */
744 if (ioctl(pVCpu->nem.s.fdVCpu, KVM_ARM_VCPU_FINALIZE, &fFeatures) != 0)
745 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_ARM_VCPU_FINALIZE failed for VCpu #%u: %d", idCpu, errno);
746#endif
747
748 if (idCpu == 0)
749 {
750 /* Query the supported register list and log it. */
751 int rc = nemR3LnxLogRegList(pVCpu->nem.s.fdVCpu);
752 if (RT_FAILURE(rc))
753 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "Querying the supported register list failed with %Rrc", rc);
754
755 /* Need to query the ID registers and populate CPUM. */
756 CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
757 for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
758 {
759 uint64_t *pu64 = (uint64_t *)((uint8_t *)&IdRegs + s_aIdRegs[i].offIdStruct);
760 rc = nemR3LnxKvmQueryRegU64(pVCpu, s_aIdRegs[i].idKvmReg, pu64);
761 if (RT_FAILURE(rc))
762 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
763 "Querying register %#x failed: %Rrc", s_aIdRegs[i].idKvmReg, rc);
764 }
765
766 rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
767 if (RT_FAILURE(rc))
768 return rc;
769 }
770 }
771
772 /*
773 * Set up the SMCCC filter to get exits for PSCI-related
774 * guest calls (to support SMP, power off and reset).
775 */
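 /* The two filter ranges below cover PSCI_VERSION up to SYSTEM_RESET in the SMC64 and SMC32
 function ID spaces respectively; matching calls are forwarded to userspace (KVM_EXIT_HYPERCALL)
 instead of being handled by KVM's in-kernel PSCI emulation. */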
776 struct kvm_smccc_filter SmcccPsciFilter; RT_ZERO(SmcccPsciFilter);
777 SmcccPsciFilter.base = ARM_PSCI_FUNC_ID_CREATE_FAST_64(ARM_PSCI_FUNC_ID_PSCI_VERSION);
778 SmcccPsciFilter.nr_functions = ARM_PSCI_FUNC_ID_CREATE_FAST_64(ARM_PSCI_FUNC_ID_SYSTEM_RESET) - SmcccPsciFilter.base + 1;
779 SmcccPsciFilter.action = KVM_SMCCC_FILTER_FWD_TO_USER;
780 int rc = nemR3LnxSetAttribute(pVM, KVM_ARM_VM_SMCCC_CTRL, KVM_ARM_VM_SMCCC_FILTER, &SmcccPsciFilter,
781 "KVM_ARM_VM_SMCCC_FILTER", pErrInfo);
782 if (RT_FAILURE(rc))
783 return rc;
784
785 SmcccPsciFilter.base = ARM_PSCI_FUNC_ID_CREATE_FAST_32(ARM_PSCI_FUNC_ID_PSCI_VERSION);
786 SmcccPsciFilter.nr_functions = ARM_PSCI_FUNC_ID_CREATE_FAST_32(ARM_PSCI_FUNC_ID_SYSTEM_RESET) - SmcccPsciFilter.base + 1;
787 SmcccPsciFilter.action = KVM_SMCCC_FILTER_FWD_TO_USER;
788 rc = nemR3LnxSetAttribute(pVM, KVM_ARM_VM_SMCCC_CTRL, KVM_ARM_VM_SMCCC_FILTER, &SmcccPsciFilter,
789 "KVM_ARM_VM_SMCCC_FILTER", pErrInfo);
790 if (RT_FAILURE(rc))
791 return rc;
792
793 return VINF_SUCCESS;
794}
795
796
797/** @callback_method_impl{FNVMMEMTRENDEZVOUS} */
798static DECLCALLBACK(VBOXSTRICTRC) nemR3LnxFixThreadPoke(PVM pVM, PVMCPU pVCpu, void *pvUser)
799{
800 RT_NOREF(pVM, pvUser);
801 int rc = RTThreadControlPokeSignal(pVCpu->hThread, true /*fEnable*/);
802 AssertLogRelRC(rc);
803 return VINF_SUCCESS;
804}
805
806
807/**
808 * Try initialize the native API.
809 *
810 * This may only do part of the job, more can be done in
811 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
812 *
813 * @returns VBox status code.
814 * @param pVM The cross context VM structure.
815 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
816 * the latter we'll fail if we cannot initialize.
817 * @param fForced Whether the HMForced flag is set and we should
818 * fail if we cannot initialize.
819 */
820int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
821{
822 RT_NOREF(pVM, fFallback, fForced);
823 /*
824 * Some state init.
825 */
826 pVM->nem.s.fdKvm = -1;
827 pVM->nem.s.fdVm = -1;
828 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
829 {
830 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
831 pNemCpu->fdVCpu = -1;
832 }
833
834 /*
835 * Error state.
836 * The error message will be non-empty on failure and 'rc' will be set too.
837 */
838 RTERRINFOSTATIC ErrInfo;
839 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
840
841 /*
842 * Open kvm subsystem so we can issue system ioctls.
843 */
844 int rc;
845 int fdKvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
846 if (fdKvm >= 0)
847 {
848 pVM->nem.s.fdKvm = fdKvm;
849
850 /*
851 * Check capabilities.
852 */
853 rc = nemR3LnxInitCheckCapabilities(pVM, pErrInfo);
854 if (RT_SUCCESS(rc))
855 {
856 /*
857 * Create an empty VM since it is recommended we check capabilities on
858 * the VM rather than the system descriptor.
859 */
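 /* Note: On arm64 the machine type argument to KVM_CREATE_VM encodes the requested IPA
 (guest physical address space) width, cf. KVM_VM_TYPE_ARM_IPA_SIZE in the KVM API docs;
 passing 0 would select the 40 bit default. */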
860 int fdVm = ioctl(fdKvm, KVM_CREATE_VM, pVM->nem.s.cIpaBits);
861 if (fdVm >= 0)
862 {
863 pVM->nem.s.fdVm = fdVm;
864
865 /*
866 * Set up the VM (more on this later).
867 */
868 rc = nemR3LnxInitSetupVm(pVM, pErrInfo);
869 if (RT_SUCCESS(rc))
870 {
871 /*
872 * Set ourselves as the execution engine and make config adjustments.
873 */
874 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
875 Log(("NEM: Marked active!\n"));
876 PGMR3EnableNemMode(pVM);
877
878 /*
879 * Register release statistics
880 */
881 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
882 {
883 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
884 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
885 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
886 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
887 STAMR3RegisterF(pVM, &pNemCpu->StatImportPendingInterrupt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times an interrupt was pending when importing from KVM", "/NEM/CPU%u/ImportPendingInterrupt", idCpu);
888 STAMR3RegisterF(pVM, &pNemCpu->StatExportPendingInterrupt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times an interrupt was pending when exporting to KVM", "/NEM/CPU%u/ExportPendingInterrupt", idCpu);
889 STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn", idCpu);
890 STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn1Loop, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-01-loop", idCpu);
891 STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn2Loops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-02-loops", idCpu);
892 STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn3Loops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-03-loops", idCpu);
893 STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn4PlusLoops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-04-to-7-loops", idCpu);
894 STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
895 STAMR3RegisterF(pVM, &pNemCpu->StatExitTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "All exits", "/NEM/CPU%u/Exit", idCpu);
896 STAMR3RegisterF(pVM, &pNemCpu->StatExitIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_IO", "/NEM/CPU%u/Exit/Io", idCpu);
897 STAMR3RegisterF(pVM, &pNemCpu->StatExitMmio, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_MMIO", "/NEM/CPU%u/Exit/Mmio", idCpu);
898 STAMR3RegisterF(pVM, &pNemCpu->StatExitIntr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTR", "/NEM/CPU%u/Exit/Intr", idCpu);
899 STAMR3RegisterF(pVM, &pNemCpu->StatExitHypercall, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_HYPERCALL", "/NEM/CPU%u/Exit/Hypercall", idCpu);
900 STAMR3RegisterF(pVM, &pNemCpu->StatExitDebug, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_DEBUG", "/NEM/CPU%u/Exit/Debug", idCpu);
901 STAMR3RegisterF(pVM, &pNemCpu->StatExitBusLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_BUS_LOCK", "/NEM/CPU%u/Exit/BusLock", idCpu);
902 STAMR3RegisterF(pVM, &pNemCpu->StatExitInternalErrorEmulation, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTERNAL_ERROR/EMULATION", "/NEM/CPU%u/Exit/InternalErrorEmulation", idCpu);
903 STAMR3RegisterF(pVM, &pNemCpu->StatExitInternalErrorFatal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTERNAL_ERROR/*", "/NEM/CPU%u/Exit/InternalErrorFatal", idCpu);
904 }
905
906 /*
907 * Success.
908 */
909 return VINF_SUCCESS;
910 }
911 close(fdVm);
912 pVM->nem.s.fdVm = -1;
913 }
914 else
915 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_CREATE_VM failed: %u", errno);
916 }
917 close(fdKvm);
918 pVM->nem.s.fdKvm = -1;
919 }
920 else if (errno == EACCES)
921 rc = RTErrInfoSet(pErrInfo, VERR_ACCESS_DENIED, "Do not have access to open /dev/kvm for reading & writing.");
922 else if (errno == ENOENT)
923 rc = RTErrInfoSet(pErrInfo, VERR_NOT_SUPPORTED, "KVM is not available (/dev/kvm does not exist)");
924 else
925 rc = RTErrInfoSetF(pErrInfo, RTErrConvertFromErrno(errno), "Failed to open '/dev/kvm': %u", errno);
926
927 /*
928 * We only fail if in forced mode, otherwise just log the complaint and return.
929 */
930 Assert(RTErrInfoIsSet(pErrInfo));
931 if ( (fForced || !fFallback)
932 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
933 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
934 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
935 return VINF_SUCCESS;
936}
937
938
939/**
940 * This is called after CPUMR3Init is done.
941 *
942 * @returns VBox status code.
943 * @param pVM The VM handle.
944 */
945int nemR3NativeInitAfterCPUM(PVM pVM)
946{
947 /*
948 * Validate sanity.
949 */
950 AssertReturn(pVM->nem.s.fdKvm >= 0, VERR_WRONG_ORDER);
951 AssertReturn(pVM->nem.s.fdVm >= 0, VERR_WRONG_ORDER);
952 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
953
954 /** @todo */
955
956 return VINF_SUCCESS;
957}
958
959
960int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
961{
962 /*
963 * Make RTThreadPoke work again (disabled to avoid unnecessary
964 * critical section issues in ring-0).
965 */
966 if (enmWhat == VMINITCOMPLETED_RING3)
967 VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, nemR3LnxFixThreadPoke, NULL);
968
969 return VINF_SUCCESS;
970}
971
972
973int nemR3NativeTerm(PVM pVM)
974{
975 /*
976 * Per-cpu data
977 */
978 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
979 {
980 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
981
982 if (pVCpu->nem.s.fdVCpu != -1)
983 {
984 close(pVCpu->nem.s.fdVCpu);
985 pVCpu->nem.s.fdVCpu = -1;
986 }
987 if (pVCpu->nem.s.pRun)
988 {
989 munmap(pVCpu->nem.s.pRun, pVM->nem.s.cbVCpuMmap);
990 pVCpu->nem.s.pRun = NULL;
991 }
992 }
993
994 /*
995 * Global data.
996 */
997 if (pVM->nem.s.fdVm != -1)
998 {
999 close(pVM->nem.s.fdVm);
1000 pVM->nem.s.fdVm = -1;
1001 }
1002
1003 if (pVM->nem.s.fdKvm != -1)
1004 {
1005 close(pVM->nem.s.fdKvm);
1006 pVM->nem.s.fdKvm = -1;
1007 }
1008 return VINF_SUCCESS;
1009}
1010
1011
1012/**
1013 * VM reset notification.
1014 *
1015 * @param pVM The cross context VM structure.
1016 */
1017void nemR3NativeReset(PVM pVM)
1018{
1019 RT_NOREF(pVM);
1020}
1021
1022
1023/**
1024 * Reset CPU due to INIT IPI or hot (un)plugging.
1025 *
1026 * @param pVCpu The cross context virtual CPU structure of the CPU being
1027 * reset.
1028 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1029 */
1030void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1031{
1032 RT_NOREF(pVCpu, fInitIpi);
1033}
1034
1035
1036/*********************************************************************************************************************************
1037* Memory management *
1038*********************************************************************************************************************************/
1039
1040
1041/**
1042 * Allocates a memory slot ID.
1043 *
1044 * @returns Slot ID on success, UINT16_MAX on failure.
1045 */
1046static uint16_t nemR3LnxMemSlotIdAlloc(PVM pVM)
1047{
1048 /* Use the hint first. */
1049 uint16_t idHint = pVM->nem.s.idPrevSlot;
1050 if (idHint < _32K - 1)
1051 {
1052 int32_t idx = ASMBitNextClear(&pVM->nem.s.bmSlotIds, _32K, idHint);
1053 Assert(idx < _32K);
1054 if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx))
1055 return pVM->nem.s.idPrevSlot = (uint16_t)idx;
1056 }
1057
1058 /*
1059 * Search the whole map from the start.
1060 */
1061 int32_t idx = ASMBitFirstClear(&pVM->nem.s.bmSlotIds, _32K);
1062 Assert(idx < _32K);
1063 if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx))
1064 return pVM->nem.s.idPrevSlot = (uint16_t)idx;
1065
1066 Assert(idx < 0 /*shouldn't trigger unless there is a race */);
1067 return UINT16_MAX; /* caller is expected to assert. */
1068}
1069
1070
1071/**
1072 * Frees a memory slot ID
1073 */
1074static void nemR3LnxMemSlotIdFree(PVM pVM, uint16_t idSlot)
1075{
1076 if (RT_LIKELY(idSlot < _32K && ASMAtomicBitTestAndClear(&pVM->nem.s.bmSlotIds, idSlot)))
1077 { /*likely*/ }
1078 else
1079 AssertMsgFailed(("idSlot=%u (%#x)\n", idSlot, idSlot));
1080}
1081
1082
1083
1084VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
1085 uint8_t *pu2State, uint32_t *puNemRange)
1086{
1087 uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
1088 AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);
1089
1090 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d) - idSlot=%#x\n",
1091 GCPhys, cb, pvR3, pu2State, *pu2State, puNemRange, *puNemRange, idSlot));
1092
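 /* Back the guest physical range with a single KVM memory slot: guest_phys_addr/memory_size
 describe the guest physical window and userspace_addr the ring-3 mapping providing the pages. */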
1093 struct kvm_userspace_memory_region Region;
1094 Region.slot = idSlot;
1095 Region.flags = 0;
1096 Region.guest_phys_addr = GCPhys;
1097 Region.memory_size = cb;
1098 Region.userspace_addr = (uintptr_t)pvR3;
1099
1100 int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
1101 if (rc == 0)
1102 {
1103 *pu2State = 0;
1104 *puNemRange = idSlot;
1105 return VINF_SUCCESS;
1106 }
1107
1108 LogRel(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p, idSlot=%#x failed: %u/%u\n", GCPhys, cb, pvR3, idSlot, rc, errno));
1109 nemR3LnxMemSlotIdFree(pVM, idSlot);
1110 return VERR_NEM_MAP_PAGES_FAILED;
1111}
1112
1113
1114VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
1115{
1116 RT_NOREF(pVM);
1117 return true;
1118}
1119
1120
1121VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
1122 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
1123{
1124 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
1125 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
1126 RT_NOREF(pvRam);
1127
1128 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
1129 {
1130 /** @todo implement splitting and whatnot of ranges if we want to be 100%
1131 * conforming (just modify RAM registrations in MM.cpp to test). */
1132 AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2),
1133 VERR_NEM_MAP_PAGES_FAILED);
1134 }
1135
1136 /*
1137 * Register MMIO2.
1138 */
1139 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
1140 {
1141 AssertReturn(pvMmio2, VERR_NEM_MAP_PAGES_FAILED);
1142 AssertReturn(puNemRange, VERR_NEM_MAP_PAGES_FAILED);
1143
1144 uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
1145 AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);
1146
1147 struct kvm_userspace_memory_region Region;
1148 Region.slot = idSlot;
1149 Region.flags = fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES ? KVM_MEM_LOG_DIRTY_PAGES : 0;
1150 Region.guest_phys_addr = GCPhys;
1151 Region.memory_size = cb;
1152 Region.userspace_addr = (uintptr_t)pvMmio2;
1153
1154 int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
1155 if (rc == 0)
1156 {
1157 *pu2State = 0;
1158 *puNemRange = idSlot;
1159 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvMmio2=%p - idSlot=%#x\n",
1160 GCPhys, cb, fFlags, pvMmio2, idSlot));
1161 return VINF_SUCCESS;
1162 }
1163
1164 nemR3LnxMemSlotIdFree(pVM, idSlot);
1165 AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n",
1166 GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc),
1167 VERR_NEM_MAP_PAGES_FAILED);
1168 }
1169
1170 /* MMIO, don't care. */
1171 *pu2State = 0;
1172 *puNemRange = UINT32_MAX;
1173 return VINF_SUCCESS;
1174}
1175
1176
1177VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
1178 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
1179{
1180 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
1181 return VINF_SUCCESS;
1182}
1183
1184
1185VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
1186 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
1187{
1188 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p puNemRange=%p (%#x)\n",
1189 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
1190 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
1191
1192 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
1193 {
1194 /** @todo implement splitting and whatnot of ranges if we want to be 100%
1195 * conforming (just modify RAM registrations in MM.cpp to test). */
1196 AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2),
1197 VERR_NEM_UNMAP_PAGES_FAILED);
1198 }
1199
1200 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
1201 {
1202 uint32_t const idSlot = *puNemRange;
1203 AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4);
1204 AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4);
1205
1206 struct kvm_userspace_memory_region Region;
1207 Region.slot = idSlot;
1208 Region.flags = 0;
1209 Region.guest_phys_addr = GCPhys;
1210 Region.memory_size = 0; /* this deregisters it. */
1211 Region.userspace_addr = (uintptr_t)pvMmio2;
1212
1213 int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
1214 if (rc == 0)
1215 {
1216 if (pu2State)
1217 *pu2State = 0;
1218 *puNemRange = UINT32_MAX;
1219 nemR3LnxMemSlotIdFree(pVM, idSlot);
1220 return VINF_SUCCESS;
1221 }
1222
1223 AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n",
1224 GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc),
1225 VERR_NEM_UNMAP_PAGES_FAILED);
1226 }
1227
1228 if (pu2State)
1229 *pu2State = UINT8_MAX;
1230 return VINF_SUCCESS;
1231}
1232
1233
1234VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
1235 void *pvBitmap, size_t cbBitmap)
1236{
1237 AssertReturn(uNemRange > 0 && uNemRange < _32K, VERR_NEM_IPE_4);
1238 AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, uNemRange), VERR_NEM_IPE_4);
1239
1240 RT_NOREF(GCPhys, cbBitmap);
1241
1242 struct kvm_dirty_log DirtyLog;
1243 DirtyLog.slot = uNemRange;
1244 DirtyLog.padding1 = 0;
1245 DirtyLog.dirty_bitmap = pvBitmap;
1246
1247 int rc = ioctl(pVM->nem.s.fdVm, KVM_GET_DIRTY_LOG, &DirtyLog);
1248 AssertLogRelMsgReturn(rc == 0, ("%RGp LB %RGp idSlot=%#x failed: %u/%u\n", GCPhys, cb, uNemRange, errno, rc),
1249 VERR_NEM_QUERY_DIRTY_BITMAP_FAILED);
1250
1251 return VINF_SUCCESS;
1252}
1253
1254
1255VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
1256 uint8_t *pu2State, uint32_t *puNemRange)
1257{
1258 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
1259 *pu2State = UINT8_MAX;
1260
1261 /* We don't support putting ROM where there is already RAM. For
1262 now just shuffle the registrations till it works... */
1263 AssertLogRelMsgReturn(!(fFlags & NEM_NOTIFY_PHYS_ROM_F_REPLACE), ("%RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags),
1264 VERR_NEM_MAP_PAGES_FAILED);
1265
1266 /** @todo figure out how to do shadow ROMs. */
1267
1268 /*
1269 * We only allocate a slot number here in case we need to use it to
1270 * fend off physical handler fun.
1271 */
1272 uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
1273 AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);
1274
1275 *pu2State = 0;
1276 *puNemRange = idSlot;
1277 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n",
1278 GCPhys, cb, fFlags, pvPages, idSlot));
1279 RT_NOREF(GCPhys, cb, fFlags, pvPages);
1280 return VINF_SUCCESS;
1281}
1282
1283
1284VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
1285 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
1286{
1287 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
1288 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
1289
1290 AssertPtrReturn(pvPages, VERR_NEM_IPE_5);
1291
1292 uint32_t const idSlot = *puNemRange;
1293 AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4);
1294 AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4);
1295
1296 *pu2State = UINT8_MAX;
1297
1298 /*
1299 * Do the actual setting of the user pages here now that we've
1300 * got a valid pvPages (typically isn't available during the early
1301 * notification, unless we're replacing RAM).
1302 */
1303 struct kvm_userspace_memory_region Region;
1304 Region.slot = idSlot;
1305 Region.flags = 0;
1306 Region.guest_phys_addr = GCPhys;
1307 Region.memory_size = cb;
1308 Region.userspace_addr = (uintptr_t)pvPages;
1309
1310 int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
1311 if (rc == 0)
1312 {
1313 *pu2State = 0;
1314 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n",
1315 GCPhys, cb, fFlags, pvPages, idSlot));
1316 return VINF_SUCCESS;
1317 }
1318 AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvPages=%p, idSlot=%#x failed: %u/%u\n",
1319 GCPhys, cb, fFlags, pvPages, idSlot, errno, rc),
1320 VERR_NEM_MAP_PAGES_FAILED);
1321}
1322
1323
1324VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
1325{
1326 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
1327 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
1328 RT_NOREF(pVCpu, fEnabled);
1329}
1330
1331
1332VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
1333 RTR3PTR pvMemR3, uint8_t *pu2State)
1334{
1335 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
1336 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
1337
1338 *pu2State = UINT8_MAX;
1339 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
1340}
1341
1342
1343void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
1344{
1345 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
1346 RT_NOREF(pVM, enmKind, GCPhys, cb);
1347}
1348
1349
1350void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
1351 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
1352{
1353 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
1354 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
1355 RT_NOREF(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fRestoreAsRAM);
1356}
1357
1358
1359int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
1360 PGMPAGETYPE enmType, uint8_t *pu2State)
1361{
1362 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1363 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
1364 RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
1365 return VINF_SUCCESS;
1366}
1367
1368
1369VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
1370 PGMPAGETYPE enmType, uint8_t *pu2State)
1371{
1372 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1373 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
1374 Assert(VM_IS_NEM_ENABLED(pVM));
1375 RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
1376
1377}
1378
1379
1380VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
1381 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
1382{
1383 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
1384 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
1385 Assert(VM_IS_NEM_ENABLED(pVM));
1386 RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
1387}
1388
1389
1390/*********************************************************************************************************************************
1391* CPU State *
1392*********************************************************************************************************************************/
1393
1394/**
1395 * Sets the given general purpose register to the given value.
1396 *
1397 * @param pVCpu The cross context virtual CPU structure of the
1398 * calling EMT.
1399 * @param uReg The register index.
1400 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1401 * @param fSignExtend Flag whether to sign extend the value.
1402 * @param u64Val The value.
1403 */
1404DECLINLINE(void) nemR3LnxSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1405{
1406 AssertReturnVoid(uReg < 31);
1407
1408 if (f64BitReg)
1409 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1410 else
1411 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
1412
1413 /* Mark the register as not extern anymore. */
1414 switch (uReg)
1415 {
1416 case 0:
1417 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1418 break;
1419 case 1:
1420 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1421 break;
1422 case 2:
1423 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1424 break;
1425 case 3:
1426 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1427 break;
1428 default:
1429 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1430            /** @todo We need to import all missing registers in order to clear this flag (or just set it in KVM from here). */
1431 }
1432}
1433
1434
1435/**
1436 * Gets the given general purpose register and returns the value.
1437 *
1438 * @returns Value from the given register.
1439 * @param pVCpu The cross context virtual CPU structure of the
1440 * calling EMT.
1441 * @param uReg The register index.
1442 */
1443DECLINLINE(uint64_t) nemR3LnxGetGReg(PVMCPU pVCpu, uint8_t uReg)
1444{
1445 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
1446
1447 if (uReg == ARMV8_AARCH64_REG_ZR)
1448 return 0;
1449
1450 /** @todo Import the register if extern. */
1451 //AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1452
1453 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1454}
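

/*
 * Illustrative sketch (hypothetical helper, kept disabled): the pair of accessors above is
 * what the exit handlers further down use to complete guest accesses, e.g. fetching an
 * argument from X1 and writing a 32-bit status back into X0 the way the PSCI handling does.
 * ARMV8_AARCH64_REG_ZR reads as zero.
 */
#if 0
static void nemR3LnxExampleReturnStatus(PVMCPU pVCpu, uint32_t uSts)
{
    uint64_t const uArg = nemR3LnxGetGReg(pVCpu, ARMV8_AARCH64_REG_X1); /* First argument lives in X1. */
    RT_NOREF(uArg);                                                     /* (A real handler would act on it.) */
    nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,                        /* Return value goes back in X0 ... */
                    false /*f64BitReg*/, false /*fSignExtend*/, uSts);  /* ... as a 32-bit quantity. */
}
#endif
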
1455
1456/**
1457 * Worker that imports selected state from KVM.
1458 */
1459static int nemHCLnxImportState(PVMCPUCC pVCpu, uint64_t fWhat, PCPUMCTX pCtx)
1460{
1461 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
1462 if (!fWhat)
1463 return VINF_SUCCESS;
1464
1465#if 0
1466 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
1467 if (hrc == HV_SUCCESS)
1468 hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);
1469#endif
1470
1471 int rc = VINF_SUCCESS;
1472 if (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_FP | CPUMCTX_EXTRN_LR | CPUMCTX_EXTRN_PC))
1473 {
1474 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
1475 {
1476 if (s_aCpumRegs[i].fCpumExtrn & fWhat)
1477 {
1478 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
1479 rc |= nemR3LnxKvmQueryRegU64(pVCpu, s_aCpumRegs[i].idKvmReg, pu64);
1480 }
1481 }
1482 }
1483
1484 if ( rc == VINF_SUCCESS
1485 && (fWhat & CPUMCTX_EXTRN_FPCR))
1486 {
1487 uint32_t u32Tmp;
1488 rc |= nemR3LnxKvmQueryRegU32(pVCpu, KVM_ARM64_REG_FP_FPCR, &u32Tmp);
1489 if (rc == VINF_SUCCESS)
1490 pVCpu->cpum.GstCtx.fpcr = u32Tmp;
1491 }
1492
1493 if ( rc == VINF_SUCCESS
1494 && (fWhat & CPUMCTX_EXTRN_FPSR))
1495 {
1496 uint32_t u32Tmp;
1497 rc |= nemR3LnxKvmQueryRegU32(pVCpu, KVM_ARM64_REG_FP_FPSR, &u32Tmp);
1498 if (rc == VINF_SUCCESS)
1499 pVCpu->cpum.GstCtx.fpsr = u32Tmp;
1500 }
1501
1502 if ( rc == VINF_SUCCESS
1503 && (fWhat & CPUMCTX_EXTRN_V0_V31))
1504 {
1505 /* SIMD/FP registers. */
1506 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
1507 {
1508 void *pu128 = (void *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
1509 rc |= nemR3LnxKvmQueryRegPV(pVCpu, s_aCpumFpRegs[i].idKvmReg, pu128);
1510 }
1511 }
1512
1513 if ( rc == VINF_SUCCESS
1514 && (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG))
1515 {
1516#if 0
1517 /* Debug registers. */
1518 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
1519 {
1520 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
1521            rc |= nemR3LnxKvmQueryRegU64(pVCpu, s_aCpumDbgRegs[i].idKvmReg, pu64);
1522 }
1523#endif
1524 }
1525
1526 if ( rc == VINF_SUCCESS
1527 && (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
1528 {
1529#if 0
1530 /* PAuth registers. */
1531 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
1532 {
1533 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
1534            rc |= nemR3LnxKvmQueryRegU64(pVCpu, s_aCpumPAuthKeyRegs[i].idKvmReg, pu64);
1535 }
1536#endif
1537 }
1538
1539 if ( rc == VINF_SUCCESS
1540 && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC)))
1541 {
1542 /* System registers. */
1543 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
1544 {
1545 if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
1546 {
1547 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
1548 rc |= nemR3LnxKvmQueryRegU64(pVCpu, s_aCpumSysRegs[i].idKvmReg, pu64);
1549 }
1550 }
1551 }
1552
1553 if ( rc == VINF_SUCCESS
1554 && (fWhat & CPUMCTX_EXTRN_PSTATE))
1555 {
1556 uint64_t u64Tmp;
1557 rc |= nemR3LnxKvmQueryRegU64(pVCpu, KVM_ARM64_REG_PSTATE, &u64Tmp);
1558 if (rc == VINF_SUCCESS)
1559 pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
1560
1561 }
1562
1563 /*
1564 * Update the external mask.
1565 */
1566 pCtx->fExtrn &= ~fWhat;
1567 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1568 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1569 pVCpu->cpum.GstCtx.fExtrn = 0;
1570
1571 return VINF_SUCCESS;
1572}
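

/*
 * Minimal sketch of the underlying KVM protocol, assuming the nemR3LnxKvmQueryRegU64/U32/PV
 * helpers used above are thin wrappers (which is how the register tables are consumed here):
 * each register travels individually through the KVM_GET_ONE_REG ioctl using the
 * KVM_ARM64_REG_* identifiers defined at the top of this file, and the export path mirrors
 * this with KVM_SET_ONE_REG.
 */
#if 0
static int nemR3LnxExampleGetOneReg(PVMCPUCC pVCpu, uint64_t idKvmReg, uint64_t *pu64)
{
    struct kvm_one_reg Reg;
    Reg.id   = idKvmReg;            /* E.g. KVM_ARM64_REG_PC. */
    Reg.addr = (uintptr_t)pu64;     /* KVM copies the register value into this user buffer. */
    int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_ONE_REG, &Reg);
    return rcLnx == 0 ? VINF_SUCCESS : RTErrConvertFromErrno(errno);
}
#endif
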
1573
1574
1575/**
1576 * Interface for importing state on demand (used by IEM).
1577 *
1578 * @returns VBox status code.
1579 * @param pVCpu The cross context CPU structure.
1580 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1581 */
1582VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1583{
1584 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1585 return nemHCLnxImportState(pVCpu, fWhat, &pVCpu->cpum.GstCtx);
1586}
1587
1588
1589/**
1590 * Exports state to KVM.
1591 */
1592static int nemHCLnxExportState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1593{
1594 uint64_t const fExtrn = ~pCtx->fExtrn & CPUMCTX_EXTRN_ALL;
1595 Assert((~fExtrn & CPUMCTX_EXTRN_ALL) != CPUMCTX_EXTRN_ALL); RT_NOREF(fExtrn);
1596
1597 RT_NOREF(pVM);
1598 int rc = VINF_SUCCESS;
1599 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_FP | CPUMCTX_EXTRN_LR | CPUMCTX_EXTRN_PC))
1600 != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_FP | CPUMCTX_EXTRN_LR | CPUMCTX_EXTRN_PC))
1601 {
1602 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
1603 {
1604 if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
1605 {
1606 const uint64_t *pu64 = (const uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
1607 rc |= nemR3LnxKvmSetRegU64(pVCpu, s_aCpumRegs[i].idKvmReg, pu64);
1608 }
1609 }
1610 }
1611
1612 if ( rc == VINF_SUCCESS
1613 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_FPCR))
1614 {
1615 uint32_t u32Tmp = pVCpu->cpum.GstCtx.fpcr;
1616 rc |= nemR3LnxKvmSetRegU32(pVCpu, KVM_ARM64_REG_FP_FPCR, &u32Tmp);
1617 }
1618
1619 if ( rc == VINF_SUCCESS
1620 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_FPSR))
1621 {
1622 uint32_t u32Tmp = pVCpu->cpum.GstCtx.fpsr;
1623 rc |= nemR3LnxKvmSetRegU32(pVCpu, KVM_ARM64_REG_FP_FPSR, &u32Tmp);
1624 }
1625
1626 if ( rc == VINF_SUCCESS
1627 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
1628 {
1629 /* SIMD/FP registers. */
1630 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
1631 {
1632 void *pu128 = (void *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
1633 rc |= nemR3LnxKvmSetRegPV(pVCpu, s_aCpumFpRegs[i].idKvmReg, pu128);
1634 }
1635 }
1636
1637 if ( rc == VINF_SUCCESS
1638 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_DEBUG))
1639 {
1640#if 0
1641 /* Debug registers. */
1642 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
1643 {
1644 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
1645            rc |= nemR3LnxKvmSetRegU64(pVCpu, s_aCpumDbgRegs[i].idKvmReg, pu64);
1646 }
1647#endif
1648 }
1649
1650 if ( rc == VINF_SUCCESS
1651 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
1652 {
1653#if 0
1654            /* PAuth key registers. */
1655 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
1656 {
1657 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
1658            rc |= nemR3LnxKvmSetRegU64(pVCpu, s_aCpumPAuthKeyRegs[i].idKvmReg, pu64);
1659 }
1660#endif
1661 }
1662
1663 if ( rc == VINF_SUCCESS
1664 && (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
1665 != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
1666 {
1667 /* System registers. */
1668 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
1669 {
1670 if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
1671 {
1672 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
1673 rc |= nemR3LnxKvmSetRegU64(pVCpu, s_aCpumSysRegs[i].idKvmReg, pu64);
1674 }
1675 }
1676 }
1677
1678 if ( rc == VINF_SUCCESS
1679 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
1680 {
1681 uint64_t u64Tmp = pVCpu->cpum.GstCtx.fPState;
1682 rc = nemR3LnxKvmSetRegU64(pVCpu, KVM_ARM64_REG_PSTATE, &u64Tmp);
1683 }
1684
1685 /*
1686 * KVM now owns all the state.
1687 */
1688 pCtx->fExtrn = CPUMCTX_EXTRN_KEEPER_NEM | CPUMCTX_EXTRN_ALL;
1689 return VINF_SUCCESS;
1690}
1691
1692
1693/**
1694 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1695 *
1696 * @returns VBox status code.
1697 * @param pVCpu The cross context CPU structure.
1698 * @param pcTicks Where to return the CPU tick count.
1699 * @param puAux Where to return the TSC_AUX register value.
1700 */
1701VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1702{
1703 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1704 // KVM_GET_CLOCK?
1705 RT_NOREF(pVCpu, pcTicks, puAux);
1706 return VINF_SUCCESS;
1707}
1708
1709
1710/**
1711 * Resumes CPU clock (TSC) on all virtual CPUs.
1712 *
1713 * This is called by TM when the VM is started, restored, resumed or similar.
1714 *
1715 * @returns VBox status code.
1716 * @param pVM The cross context VM structure.
1717 * @param pVCpu The cross context CPU structure of the calling EMT.
1718 * @param uPausedTscValue The TSC value at the time of pausing.
1719 */
1720VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1721{
1722 // KVM_SET_CLOCK?
1723 RT_NOREF(pVM, pVCpu, uPausedTscValue);
1724 return VINF_SUCCESS;
1725}
1726
1727
1728VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
1729{
1730 RT_NOREF(pVM);
1731 return NEM_FEAT_F_NESTED_PAGING
1732 | NEM_FEAT_F_FULL_GST_EXEC;
1733}
1734
1735
1736
1737/*********************************************************************************************************************************
1738* Execution *
1739*********************************************************************************************************************************/
1740
1741
1742VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
1743{
1744 RT_NOREF(pVM, pVCpu);
1745 Assert(VM_IS_NEM_ENABLED(pVM));
1746 return true;
1747}
1748
1749
1750bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
1751{
1752 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
1753 return false;
1754}
1755
1756
1757void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
1758{
1759 int rc = RTThreadPoke(pVCpu->hThread);
1760 LogFlow(("nemR3NativeNotifyFF: #%u -> %Rrc\n", pVCpu->idCpu, rc));
1761 AssertRC(rc);
1762 RT_NOREF(pVM, fFlags);
1763}
1764
1765
1766DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
1767{
1768 RT_NOREF(pVM, fUseDebugLoop);
1769 return false;
1770}
1771
1772
1773DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
1774{
1775 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
1776 return false;
1777}
1778
1779
1780DECL_FORCE_INLINE(int) nemR3LnxKvmUpdateIntrState(PVM pVM, PVMCPU pVCpu, bool fIrq, bool fAsserted)
1781{
1782 struct kvm_irq_level IrqLvl;
1783
1784 LogFlowFunc(("pVM=%p pVCpu=%p fIrq=%RTbool fAsserted=%RTbool\n",
1785 pVM, pVCpu, fIrq, fAsserted));
1786
1787 IrqLvl.irq = ((uint32_t)KVM_ARM_IRQ_TYPE_CPU << 24) /* Directly drives CPU interrupt lines. */
1788 | (pVCpu->idCpu & 0xff) << 16
1789 | (fIrq ? 0 : 1);
1790 IrqLvl.level = fAsserted ? 1 : 0;
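    /* The 32-bit kvm_irq_level::irq value assembled above follows the arm64 KVM_IRQ_LINE
       layout (Documentation/virt/kvm/api.rst): irq_type in bits [31:24] (KVM_ARM_IRQ_TYPE_CPU,
       i.e. the core interrupt pins), the target vCPU index in bits [23:16] and the line id in
       bits [15:0] (KVM_ARM_IRQ_CPU_IRQ = 0, KVM_ARM_IRQ_CPU_FIQ = 1).  Asserting the FIQ line
       of vCPU 2, for example, uses irq = 0x00020001 with level = 1. */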
1791 int rcLnx = ioctl(pVM->nem.s.fdVm, KVM_IRQ_LINE, &IrqLvl);
1792 AssertReturn(rcLnx == 0, VERR_NEM_IPE_9);
1793
1794 return VINF_SUCCESS;
1795}
1796
1797
1798/**
1799 * Deals with pending interrupt FFs prior to executing guest code.
1800 */
1801static VBOXSTRICTRC nemHCLnxHandleInterruptFF(PVM pVM, PVMCPU pVCpu)
1802{
1803 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n",
1804 pVCpu, pVCpu->idCpu,
1805 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ),
1806 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ)));
1807
1808 bool fIrq = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
1809 bool fFiq = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
1810
1811 /* Update the pending interrupt state. */
1812 if (fIrq != pVCpu->nem.s.fIrqLastSeen)
1813 {
1814 int rc = nemR3LnxKvmUpdateIntrState(pVM, pVCpu, true /*fIrq*/, fIrq);
1815 AssertRCReturn(rc, VERR_NEM_IPE_9);
1816 pVCpu->nem.s.fIrqLastSeen = fIrq;
1817 }
1818
1819    if (fFiq != pVCpu->nem.s.fFiqLastSeen)
1820 {
1821 int rc = nemR3LnxKvmUpdateIntrState(pVM, pVCpu, false /*fIrq*/, fFiq);
1822 AssertRCReturn(rc, VERR_NEM_IPE_9);
1823 pVCpu->nem.s.fFiqLastSeen = fFiq;
1824 }
1825
1826 return VINF_SUCCESS;
1827}
1828
1829
1830#if 0
1831/**
1832 * Handles KVM_EXIT_INTERNAL_ERROR.
1833 */
1834static VBOXSTRICTRC nemR3LnxHandleInternalError(PVMCPU pVCpu, struct kvm_run *pRun)
1835{
1836 Log(("NEM: KVM_EXIT_INTERNAL_ERROR! suberror=%#x (%d) ndata=%u data=%.*Rhxs\n", pRun->internal.suberror,
1837 pRun->internal.suberror, pRun->internal.ndata, sizeof(pRun->internal.data), &pRun->internal.data[0]));
1838
1839 /*
1840 * Deal with each suberror, returning if we don't want IEM to handle it.
1841 */
1842 switch (pRun->internal.suberror)
1843 {
1844 case KVM_INTERNAL_ERROR_EMULATION:
1845 {
1846 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTERNAL_ERROR_EMULATION),
1847 pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
1848 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInternalErrorEmulation);
1849 break;
1850 }
1851
1852 case KVM_INTERNAL_ERROR_SIMUL_EX:
1853 case KVM_INTERNAL_ERROR_DELIVERY_EV:
1854 case KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON:
1855 default:
1856 {
1857 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTERNAL_ERROR_FATAL),
1858 pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
1859 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInternalErrorFatal);
1860 const char *pszName;
1861 switch (pRun->internal.suberror)
1862 {
1863 case KVM_INTERNAL_ERROR_EMULATION: pszName = "KVM_INTERNAL_ERROR_EMULATION"; break;
1864 case KVM_INTERNAL_ERROR_SIMUL_EX: pszName = "KVM_INTERNAL_ERROR_SIMUL_EX"; break;
1865 case KVM_INTERNAL_ERROR_DELIVERY_EV: pszName = "KVM_INTERNAL_ERROR_DELIVERY_EV"; break;
1866 case KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON: pszName = "KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON"; break;
1867 default: pszName = "unknown"; break;
1868 }
1869 LogRel(("NEM: KVM_EXIT_INTERNAL_ERROR! suberror=%#x (%s) ndata=%u data=%.*Rhxs\n", pRun->internal.suberror, pszName,
1870 pRun->internal.ndata, sizeof(pRun->internal.data), &pRun->internal.data[0]));
1871 return VERR_NEM_IPE_0;
1872 }
1873 }
1874
1875 /*
1876 * Execute instruction in IEM and try get on with it.
1877 */
1878 Log2(("nemR3LnxHandleInternalError: Executing instruction at %04x:%08RX64 in IEM\n",
1879 pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip));
1880 VBOXSTRICTRC rcStrict = nemHCLnxImportState(pVCpu,
1881 IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_INHIBIT_INT
1882 | CPUMCTX_EXTRN_INHIBIT_NMI,
1883 &pVCpu->cpum.GstCtx, pRun);
1884 if (RT_SUCCESS(rcStrict))
1885 rcStrict = IEMExecOne(pVCpu);
1886 return rcStrict;
1887}
1888#endif
1889
1890
1891/**
1892 * Handles KVM_EXIT_MMIO.
1893 */
1894static VBOXSTRICTRC nemHCLnxHandleExitMmio(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun)
1895{
1896 /*
1897 * Input validation.
1898 */
1899 Assert(pRun->mmio.len <= sizeof(pRun->mmio.data));
1900 Assert(pRun->mmio.is_write <= 1);
1901
1902#if 0
1903 /*
1904 * We cannot easily act on the exit history here, because the MMIO port
1905 * exit is stateful and the instruction will be completed in the next
1906 * KVM_RUN call. There seems no way to circumvent this.
1907 */
1908 EMHistoryAddExit(pVCpu,
1909 pRun->mmio.is_write
1910 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1911 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1912 pRun->s.regs.regs.pc, ASMReadTSC());
1913#else
1914 RT_NOREF(pVCpu);
1915#endif
1916
1917 /*
1918 * Do the requested job.
1919 */
1920 VBOXSTRICTRC rcStrict;
1921 if (pRun->mmio.is_write)
1922 {
1923 rcStrict = PGMPhysWrite(pVM, pRun->mmio.phys_addr, pRun->mmio.data, pRun->mmio.len, PGMACCESSORIGIN_HM);
1924        Log4(("MmioExit/%u:WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
1925 pVCpu->idCpu,
1926 pRun->mmio.phys_addr, pRun->mmio.len, pRun->mmio.len, pRun->mmio.data, VBOXSTRICTRC_VAL(rcStrict) ));
1927 }
1928 else
1929 {
1930 rcStrict = PGMPhysRead(pVM, pRun->mmio.phys_addr, pRun->mmio.data, pRun->mmio.len, PGMACCESSORIGIN_HM);
1931        Log4(("MmioExit/%u: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1932 pVCpu->idCpu,
1933 pRun->mmio.phys_addr, pRun->mmio.len, pRun->mmio.len, pRun->mmio.data, VBOXSTRICTRC_VAL(rcStrict) ));
1934 }
1935 return rcStrict;
1936}
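
/*
 * Note that KVM_EXIT_MMIO is a stateful exit: for a read the data produced by PGMPhysRead
 * above is left in pRun->mmio.data in the shared kvm_run page and the kernel only completes
 * the interrupted load/store on the next KVM_RUN.  This is why nemHCLnxHandleExit flags the
 * exit through pfStatefulExit and nemR3NativeRunGC flushes it (immediate_exit = 1) before
 * going back to EM.
 */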
1937
1938
1939/**
1940 * Handles KVM_EXIT_HYPERCALL.
1941 */
1942static VBOXSTRICTRC nemHCLnxHandleExitHypercall(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun)
1943{
1944#if 0
1945 /*
1946 * We cannot easily act on the exit history here, because the MMIO port
1947 * exit is stateful and the instruction will be completed in the next
1948 * KVM_RUN call. There seems no way to circumvent this.
1949 */
1950 EMHistoryAddExit(pVCpu,
1951 pRun->mmio.is_write
1952 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1953 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1954 pRun->s.regs.regs.pc, ASMReadTSC());
1955#else
1956 RT_NOREF(pVCpu);
1957#endif
1958
1959 /*
1960 * Do the requested job.
1961 */
1962 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1963
1964 /** @todo Raise exception to EL1 if PSCI not configured. */
1965 /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
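    /* SMCCC function identifier layout (Arm SMC Calling Convention): bit 31 = fast call,
       bit 30 = 64-bit (SMC64/HVC64) calling convention, bits [29:24] = owning entity
       (4 = standard secure service, which is where PSCI lives), bits [15:0] = function number.
       PSCI_VERSION is 0x84000000, for instance, and the 64-bit CPU_ON is 0xC4000003. */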
1966 uint32_t uFunId = pRun->hypercall.nr;
1967 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
1968 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
1969 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
1970 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
1971 {
1972 rcStrict = nemHCLnxImportState(pVCpu, CPUMCTX_EXTRN_X0 | CPUMCTX_EXTRN_X1 | CPUMCTX_EXTRN_X2 | CPUMCTX_EXTRN_X3,
1973 &pVCpu->cpum.GstCtx);
1974 if (rcStrict != VINF_SUCCESS)
1975 return rcStrict;
1976
1977 switch (uFunNum)
1978 {
1979 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1980 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
1981 break;
1982 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1983 rcStrict = VMR3PowerOff(pVM->pUVM);
1984 break;
1985 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1986 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1987 {
1988 bool fHaltOnReset;
1989 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
1990 if (RT_SUCCESS(rc) && fHaltOnReset)
1991 {
1992 Log(("nemHCLnxHandleExitHypercall: Halt On Reset!\n"));
1993 rcStrict = VINF_EM_HALT;
1994 }
1995 else
1996 {
1997 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
1998 VM_FF_SET(pVM, VM_FF_RESET);
1999 rcStrict = VINF_EM_RESET;
2000 }
2001 break;
2002 }
2003 case ARM_PSCI_FUNC_ID_CPU_ON:
2004 {
2005 uint64_t u64TgtCpu = nemR3LnxGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
2006 RTGCPHYS GCPhysExecAddr = nemR3LnxGetGReg(pVCpu, ARMV8_AARCH64_REG_X2);
2007 uint64_t u64CtxId = nemR3LnxGetGReg(pVCpu, ARMV8_AARCH64_REG_X3);
2008 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
2009 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
2010 break;
2011 }
2012 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
2013 {
2014 uint32_t u32FunNum = (uint32_t)nemR3LnxGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
2015 switch (u32FunNum)
2016 {
2017 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2018 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2019 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2020 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2021 case ARM_PSCI_FUNC_ID_CPU_ON:
2022 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
2023 false /*f64BitReg*/, false /*fSignExtend*/,
2024 (uint64_t)ARM_PSCI_STS_SUCCESS);
2025 break;
2026 default:
2027 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
2028 false /*f64BitReg*/, false /*fSignExtend*/,
2029 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2030 }
2031 break;
2032 }
2033 default:
2034 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2035 }
2036 }
2037 else
2038 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2039
2040
2041 return rcStrict;
2042}
2043
2044
2045static VBOXSTRICTRC nemHCLnxHandleExit(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun, bool *pfStatefulExit)
2046{
2047 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitTotal);
2048
2049 if (pVCpu->nem.s.fIrqDeviceLvls != pRun->s.regs.device_irq_level)
2050 {
2051 uint64_t fChanged = pVCpu->nem.s.fIrqDeviceLvls ^ pRun->s.regs.device_irq_level;
2052
2053 if (fChanged & KVM_ARM_DEV_EL1_VTIMER)
2054 {
2055 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
2056 GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, RT_BOOL(pRun->s.regs.device_irq_level & KVM_ARM_DEV_EL1_VTIMER));
2057 }
2058
2059 if (fChanged & KVM_ARM_DEV_EL1_PTIMER)
2060 {
2061 //TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
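            /** @todo This raises the vtimer PPI for the EL1 physical timer as well; presumably
             *        the ptimer needs its own PPI wired up here. */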
2062 GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, RT_BOOL(pRun->s.regs.device_irq_level & KVM_ARM_DEV_EL1_PTIMER));
2063 }
2064
2065 pVCpu->nem.s.fIrqDeviceLvls = pRun->s.regs.device_irq_level;
2066 }
2067
2068 switch (pRun->exit_reason)
2069 {
2070 case KVM_EXIT_EXCEPTION:
2071 AssertFailed();
2072 break;
2073
2074 case KVM_EXIT_MMIO:
2075 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMmio);
2076 *pfStatefulExit = true;
2077 return nemHCLnxHandleExitMmio(pVM, pVCpu, pRun);
2078
2079 case KVM_EXIT_INTR: /* EINTR */
2080 //EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTERRUPTED),
2081 // pRun->s.regs.regs.pc, ASMReadTSC());
2082 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitIntr);
2083 Log5(("Intr/%u\n", pVCpu->idCpu));
2084 return VINF_SUCCESS;
2085
2086 case KVM_EXIT_HYPERCALL:
2087 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHypercall);
2088 return nemHCLnxHandleExitHypercall(pVM, pVCpu, pRun);
2089
2090#if 0
2091 case KVM_EXIT_DEBUG:
2092 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitDebug);
2093 AssertFailed();
2094 break;
2095
2096 case KVM_EXIT_SYSTEM_EVENT:
2097 AssertFailed();
2098 break;
2099
2100 case KVM_EXIT_DIRTY_RING_FULL:
2101 AssertFailed();
2102 break;
2103 case KVM_EXIT_AP_RESET_HOLD:
2104 AssertFailed();
2105 break;
2106
2107
2108 case KVM_EXIT_SHUTDOWN:
2109 AssertFailed();
2110 break;
2111
2112 case KVM_EXIT_FAIL_ENTRY:
2113 LogRel(("NEM: KVM_EXIT_FAIL_ENTRY! hardware_entry_failure_reason=%#x cpu=%#x\n",
2114 pRun->fail_entry.hardware_entry_failure_reason, pRun->fail_entry.cpu));
2115 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_FAILED_ENTRY),
2116 pRun->s.regs.regs.pc, ASMReadTSC());
2117 return VERR_NEM_IPE_1;
2118
2119 case KVM_EXIT_INTERNAL_ERROR:
2120 /* we're counting sub-reasons inside the function. */
2121 return nemR3LnxHandleInternalError(pVCpu, pRun);
2122#endif
2123
2124 /*
2125 * Foreign and unknowns.
2126 */
2127#if 0
2128 case KVM_EXIT_IO:
2129 AssertLogRelMsgFailedReturn(("KVM_EXIT_IO on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2130 case KVM_EXIT_NMI:
2131 AssertLogRelMsgFailedReturn(("KVM_EXIT_NMI on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2132 case KVM_EXIT_EPR:
2133 AssertLogRelMsgFailedReturn(("KVM_EXIT_EPR on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2134 case KVM_EXIT_WATCHDOG:
2135 AssertLogRelMsgFailedReturn(("KVM_EXIT_WATCHDOG on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2136 case KVM_EXIT_ARM_NISV:
2137 AssertLogRelMsgFailedReturn(("KVM_EXIT_ARM_NISV on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2138 case KVM_EXIT_S390_STSI:
2139 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_STSI on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2140 case KVM_EXIT_S390_TSCH:
2141 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_TSCH on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2142 case KVM_EXIT_OSI:
2143 AssertLogRelMsgFailedReturn(("KVM_EXIT_OSI on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2144 case KVM_EXIT_PAPR_HCALL:
2145 AssertLogRelMsgFailedReturn(("KVM_EXIT_PAPR_HCALL on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2146 case KVM_EXIT_S390_UCONTROL:
2147 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_UCONTROL on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2148 case KVM_EXIT_DCR:
2149 AssertLogRelMsgFailedReturn(("KVM_EXIT_DCR on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2150 case KVM_EXIT_S390_SIEIC:
2151 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_SIEIC on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2152 case KVM_EXIT_S390_RESET:
2153 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_RESET on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2154 case KVM_EXIT_UNKNOWN:
2155 AssertLogRelMsgFailedReturn(("KVM_EXIT_UNKNOWN on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2156 case KVM_EXIT_XEN:
2157 AssertLogRelMsgFailedReturn(("KVM_EXIT_XEN on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
2158#endif
2159 default:
2160 AssertLogRelMsgFailedReturn(("Unknown exit reason %u on VCpu #%u!\n", pRun->exit_reason, pVCpu->idCpu), VERR_NEM_IPE_1);
2161 }
2162 RT_NOREF(pVM, pVCpu);
2163 return VERR_NOT_IMPLEMENTED;
2164}
2165
2166
2167VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2168{
2169 /*
2170 * Try switch to NEM runloop state.
2171 */
2172 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2173 { /* likely */ }
2174 else
2175 {
2176 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2177 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2178 return VINF_SUCCESS;
2179 }
2180
2181 /*
2182 * The run loop.
2183 */
2184 struct kvm_run * const pRun = pVCpu->nem.s.pRun;
2185 const bool fSingleStepping = DBGFIsStepping(pVCpu);
2186 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2187 bool fStatefulExit = false; /* For MMIO and IO exits. */
2188 for (unsigned iLoop = 0;; iLoop++)
2189 {
2190 /*
2191 * Sync the interrupt state.
2192 */
2193 rcStrict = nemHCLnxHandleInterruptFF(pVM, pVCpu);
2194 if (rcStrict == VINF_SUCCESS)
2195 { /* likely */ }
2196 else
2197 {
2198 LogFlow(("NEM/%u: breaking: nemHCLnxHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2199 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2200 break;
2201 }
2202
2203 /*
2204 * Ensure KVM has the whole state.
2205 */
2206 if ((pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL) != CPUMCTX_EXTRN_ALL)
2207 {
2208 int rc2 = nemHCLnxExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx);
2209 AssertRCReturn(rc2, rc2);
2210 }
2211
2212 /*
2213 * Poll timers and run for a bit.
2214 *
2215 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
2216     * so we take the time of the next timer event and use that as a deadline.
2217 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
2218 */
2219 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2220 * the whole polling job when timers have changed... */
2221 uint64_t offDeltaIgnored;
2222 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2223 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2224 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2225 {
2226 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
2227 {
2228 //LogFlow(("NEM/%u: Entry @ %04x:%08RX64 IF=%d EFL=%#RX64 SS:RSP=%04x:%08RX64 cr0=%RX64\n",
2229 // pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
2230 // !!(pRun->s.regs.regs.rflags & X86_EFL_IF), pRun->s.regs.regs.rflags,
2231 // pRun->s.regs.sregs.ss.selector, pRun->s.regs.regs.rsp, pRun->s.regs.sregs.cr0));
2232 TMNotifyStartOfExecution(pVM, pVCpu);
2233
2234 int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_RUN, 0UL);
2235
2236 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
2237 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2238
2239#if 0 //def LOG_ENABLED
2240 if (LogIsFlowEnabled())
2241 {
2242 struct kvm_mp_state MpState = {UINT32_MAX};
2243 ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_MP_STATE, &MpState);
2244 LogFlow(("NEM/%u: Exit @ %04x:%08RX64 IF=%d EFL=%#RX64 CR8=%#x Reason=%#x IrqReady=%d Flags=%#x %#lx\n", pVCpu->idCpu,
2245 pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, pRun->if_flag,
2246 pRun->s.regs.regs.rflags, pRun->s.regs.sregs.cr8, pRun->exit_reason,
2247 pRun->ready_for_interrupt_injection, pRun->flags, MpState.mp_state));
2248 }
2249#endif
2250 fStatefulExit = false;
2251 if (RT_LIKELY(rcLnx == 0 || errno == EINTR))
2252 {
2253 /*
2254 * Deal with the exit.
2255 */
2256 rcStrict = nemHCLnxHandleExit(pVM, pVCpu, pRun, &fStatefulExit);
2257 if (rcStrict == VINF_SUCCESS)
2258 { /* hopefully likely */ }
2259 else
2260 {
2261 LogFlow(("NEM/%u: breaking: nemHCLnxHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2262 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2263 break;
2264 }
2265 }
2266 else
2267 {
2268 int rc2 = RTErrConvertFromErrno(errno);
2269 AssertLogRelMsgFailedReturn(("KVM_RUN failed: rcLnx=%d errno=%u rc=%Rrc\n", rcLnx, errno, rc2), rc2);
2270 }
2271
2272 /*
2273 * If no relevant FFs are pending, loop.
2274 */
2275 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2276 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2277 { /* likely */ }
2278 else
2279 {
2280
2281 /** @todo Try handle pending flags, not just return to EM loops. Take care
2282 * not to set important RCs here unless we've handled an exit. */
2283 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
2284 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
2285 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
2286 break;
2287 }
2288 }
2289 else
2290 {
2291 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
2292 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
2293 break;
2294 }
2295 }
2296 else
2297 {
2298 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
2299 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
2300 break;
2301 }
2302 } /* the run loop */
2303
2304
2305 /*
2306 * If the last exit was stateful, commit the state we provided before
2307 * returning to the EM loop so we have a consistent state and can safely
2308 * be rescheduled and whatnot. This may require us to make multiple runs
2309 * for larger MMIO and I/O operations. Sigh^3.
2310 *
2311 * Note! There is no 'ing way to reset the kernel side completion callback
2312 * for these stateful i/o exits. Very annoying interface.
2313 */
2314 /** @todo check how this works with string I/O and string MMIO. */
2315 if (fStatefulExit && RT_SUCCESS(rcStrict))
2316 {
2317 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn);
2318 uint32_t const uOrgExit = pRun->exit_reason;
2319 for (uint32_t i = 0; ; i++)
2320 {
2321 pRun->immediate_exit = 1;
2322 int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_RUN, 0UL);
2323 Log(("NEM/%u: Flushed stateful exit -> %d/%d exit_reason=%d\n", pVCpu->idCpu, rcLnx, errno, pRun->exit_reason));
2324 if (rcLnx == -1 && errno == EINTR)
2325 {
2326 switch (i)
2327 {
2328 case 0: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn1Loop); break;
2329 case 1: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn2Loops); break;
2330 case 2: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn3Loops); break;
2331 default: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn4PlusLoops); break;
2332 }
2333 break;
2334 }
2335 AssertLogRelMsgBreakStmt(rcLnx == 0 && pRun->exit_reason == uOrgExit,
2336 ("rcLnx=%d errno=%d exit_reason=%d uOrgExit=%d\n", rcLnx, errno, pRun->exit_reason, uOrgExit),
2337 rcStrict = VERR_NEM_IPE_6);
2338 VBOXSTRICTRC rcStrict2 = nemHCLnxHandleExit(pVM, pVCpu, pRun, &fStatefulExit);
2339 if (rcStrict2 == VINF_SUCCESS || rcStrict2 == rcStrict)
2340 { /* likely */ }
2341 else if (RT_FAILURE(rcStrict2))
2342 {
2343 rcStrict = rcStrict2;
2344 break;
2345 }
2346 else
2347 {
2348 AssertLogRelMsgBreakStmt(rcStrict == VINF_SUCCESS,
2349 ("rcStrict=%Rrc rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict), VBOXSTRICTRC_VAL(rcStrict2)),
2350 rcStrict = VERR_NEM_IPE_7);
2351 rcStrict = rcStrict2;
2352 }
2353 }
2354 pRun->immediate_exit = 0;
2355 }
2356
2357 /*
2358 * If the CPU is running, make sure to stop it before we try sync back the
2359 * state and return to EM. We don't sync back the whole state if we can help it.
2360 */
2361 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2362 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2363
2364 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
2365 {
2366 /* Try anticipate what we might need. */
2367 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK /*?*/;
2368 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2369 || RT_FAILURE(rcStrict))
2370 fImport = CPUMCTX_EXTRN_ALL;
2371 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
2372 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2373
2374 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2375 {
2376 int rc2 = nemHCLnxImportState(pVCpu, fImport, &pVCpu->cpum.GstCtx);
2377 if (RT_SUCCESS(rc2))
2378 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2379 else if (RT_SUCCESS(rcStrict))
2380 rcStrict = rc2;
2381 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2382 pVCpu->cpum.GstCtx.fExtrn = 0;
2383 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2384 }
2385 else
2386 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2387 }
2388 else
2389 {
2390 pVCpu->cpum.GstCtx.fExtrn = 0;
2391 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2392 }
2393
2394 LogFlow(("NEM/%u: %08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, VBOXSTRICTRC_VAL(rcStrict) ));
2395 return rcStrict;
2396}
2397
2398
2399/** @page pg_nem_linux NEM/linux - Native Execution Manager, Linux.
2400 *
2401 * This backend uses KVM.
2402 *
2403 */
2404