VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 35740

Last change on this file since 35740 was 35696, checked in by vboxsync, 13 years ago

PCDBGCCMD & PFNDBGCCMD: Drop the return type & variable. Functions will be added separately from commands (superset of DBGCCMD).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 170.9 KB
Line 
1/* $Id: VBoxRecompiler.c 35696 2011-01-24 18:03:33Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include "vl.h"
24#include "osdep.h"
25#include "exec-all.h"
26#include "config.h"
27#include "cpu-all.h"
28
29#include <VBox/vmm/rem.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/iom.h>
36#include <VBox/vmm/mm.h>
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/vmm/hwaccm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include "REMInternal.h"
45#include <VBox/vmm/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
75/** Copy 80-bit fpu register at pSrc to pDst.
76 * This is probably faster than *calling* memcpy.
77 */
78#define REM_COPY_FPU_REG(pDst, pSrc) \
79 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
80
81
82/*******************************************************************************
83* Internal Functions *
84*******************************************************************************/
85static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
88static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
89
90static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
91static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
92static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
93static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
94static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
95static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
96
97static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
98static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
99static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
100static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
101static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
102static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
103
104static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
105static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
106static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
107
108/*******************************************************************************
109* Global Variables *
110*******************************************************************************/
111
112/** @todo Move stats to REM::s some rainy day we have nothing do to. */
113#ifdef VBOX_WITH_STATISTICS
114static STAMPROFILEADV gStatExecuteSingleInstr;
115static STAMPROFILEADV gStatCompilationQEmu;
116static STAMPROFILEADV gStatRunCodeQEmu;
117static STAMPROFILEADV gStatTotalTimeQEmu;
118static STAMPROFILEADV gStatTimers;
119static STAMPROFILEADV gStatTBLookup;
120static STAMPROFILEADV gStatIRQ;
121static STAMPROFILEADV gStatRawCheck;
122static STAMPROFILEADV gStatMemRead;
123static STAMPROFILEADV gStatMemWrite;
124static STAMPROFILE gStatGCPhys2HCVirt;
125static STAMPROFILE gStatHCVirt2GCPhys;
126static STAMCOUNTER gStatCpuGetTSC;
127static STAMCOUNTER gStatRefuseTFInhibit;
128static STAMCOUNTER gStatRefuseVM86;
129static STAMCOUNTER gStatRefusePaging;
130static STAMCOUNTER gStatRefusePAE;
131static STAMCOUNTER gStatRefuseIOPLNot0;
132static STAMCOUNTER gStatRefuseIF0;
133static STAMCOUNTER gStatRefuseCode16;
134static STAMCOUNTER gStatRefuseWP0;
135static STAMCOUNTER gStatRefuseRing1or2;
136static STAMCOUNTER gStatRefuseCanExecute;
137static STAMCOUNTER gStatREMGDTChange;
138static STAMCOUNTER gStatREMIDTChange;
139static STAMCOUNTER gStatREMLDTRChange;
140static STAMCOUNTER gStatREMTRChange;
141static STAMCOUNTER gStatSelOutOfSync[6];
142static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
143static STAMCOUNTER gStatFlushTBs;
144#endif
145/* in exec.c */
146extern uint32_t tlb_flush_count;
147extern uint32_t tb_flush_count;
148extern uint32_t tb_phys_invalidate_count;
149
150/*
151 * Global stuff.
152 */
153
154/** MMIO read callbacks. */
155CPUReadMemoryFunc *g_apfnMMIORead[3] =
156{
157 remR3MMIOReadU8,
158 remR3MMIOReadU16,
159 remR3MMIOReadU32
160};
161
162/** MMIO write callbacks. */
163CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
164{
165 remR3MMIOWriteU8,
166 remR3MMIOWriteU16,
167 remR3MMIOWriteU32
168};
169
170/** Handler read callbacks. */
171CPUReadMemoryFunc *g_apfnHandlerRead[3] =
172{
173 remR3HandlerReadU8,
174 remR3HandlerReadU16,
175 remR3HandlerReadU32
176};
177
178/** Handler write callbacks. */
179CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
180{
181 remR3HandlerWriteU8,
182 remR3HandlerWriteU16,
183 remR3HandlerWriteU32
184};
185
186
187#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
188/*
189 * Debugger commands.
190 */
191static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
192
193/** '.remstep' arguments. */
194static const DBGCVARDESC g_aArgRemStep[] =
195{
196 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
197 { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
198};
199
200/** Command descriptors. */
201static const DBGCCMD g_aCmds[] =
202{
203 {
204 .pszCmd ="remstep",
205 .cArgsMin = 0,
206 .cArgsMax = 1,
207 .paArgDescs = &g_aArgRemStep[0],
208 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
209 .fFlags = 0,
210 .pfnHandler = remR3CmdDisasEnableStepping,
211 .pszSyntax = "[on/off]",
212 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
213 "If no arguments show the current state."
214 }
215};
216#endif
217
218/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
219uint8_t *code_gen_prologue;
220
221
222/*******************************************************************************
223* Internal Functions *
224*******************************************************************************/
225void remAbort(int rc, const char *pszTip);
226extern int testmath(void);
227
228/* Put them here to avoid unused variable warning. */
229AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
230#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
231//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
232/* Why did this have to be identical?? */
233AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
234#else
235AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
236#endif
237
238
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment, registers the MMIO/handler memory
 * types, the saved state unit, debugger commands and statistics, and chains
 * the handler notification records into a free list.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue code must live in the lower 4G (see the comment on the
       code_gen_prologue global). */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest CPUID feature bits (std leaf 1 and ext leaf 0x80000001)
       into the recompiler CPU state. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* Intentionally shadows the outer rc: a command registration failure
           is non-fatal and must not affect the function's return value. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",          STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",    STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",        STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",      STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",         STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",    STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",         STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",      STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",         STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",    STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",            STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",     STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",        STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",     STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",          STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_REG(pVM, &tb_flush_count,              STAMTYPE_U32_RESET, "/REM/TbFlushCount",      STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,    STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount",  STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,             STAMTYPE_U32_RESET, "/REM/TlbFlushCount",     STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Init the handler notification lists.
     * Every record is chained into one big free list; the pending list
     * starts out empty.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    /* rc is the (success) status of SSMR3RegisterInternal at this point. */
    return rc;
}
436
437
438/**
439 * Finalizes the REM initialization.
440 *
441 * This is called after all components, devices and drivers has
442 * been initialized. Its main purpose it to finish the RAM related
443 * initialization.
444 *
445 * @returns VBox status code.
446 *
447 * @param pVM The VM handle.
448 */
449REMR3DECL(int) REMR3InitFinalize(PVM pVM)
450{
451 int rc;
452
453 /*
454 * Ram size & dirty bit map.
455 */
456 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
457 pVM->rem.s.fGCPhysLastRamFixed = true;
458#ifdef RT_STRICT
459 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
460#else
461 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
462#endif
463 return rc;
464}
465
466
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map holds one byte per RAM page (cb >> PAGE_SHIFT bytes total).
 *
 * @returns VBox status code.
 * @param   pVM      The VM handle.
 * @param   fGuarded Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* cb wraps to 0 if GCPhysLastRam is the maximum address, hence the
       range assertion below. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Plain heap allocation, no guard region. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail of the allocation inaccessible so writes past the
           page-aligned bitmap end fault immediately. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map pointer so the usable phys_ram_dirty_size bytes end
           exactly at the guard region. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - all pages marked dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
521
522
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Deregisters every statistics sample registered by REMR3Init.
 * NOTE(review): the exec buffers and the critical section allocated in
 * REMR3Init are not freed here - presumably released elsewhere; confirm.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics. (Mirrors the STAM_REG calls in REMR3Init.)
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    /* The release-build counters registered at the end of REMR3Init. */
    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
593
594
595/**
596 * The VM is being reset.
597 *
598 * For the REM component this means to call the cpu_reset() and
599 * reinitialize some state variables.
600 *
601 * @param pVM VM handle.
602 */
603REMR3DECL(void) REMR3Reset(PVM pVM)
604{
605 /*
606 * Reset the REM cpu.
607 */
608 Assert(pVM->rem.s.cIgnoreAll == 0);
609 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
610 cpu_reset(&pVM->rem.s.Env);
611 pVM->rem.s.cInvalidatedPages = 0;
612 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
613 Assert(pVM->rem.s.cIgnoreAll == 0);
614
615 /* Clear raw ring 0 init state */
616 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
617
618 /* Flush the TBs the next time we execute code here. */
619 pVM->rem.s.fFlushTBs = true;
620}
621
622
623/**
624 * Execute state save operation.
625 *
626 * @returns VBox status code.
627 * @param pVM VM Handle.
628 * @param pSSM SSM operation handle.
629 */
630static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
631{
632 PREM pRem = &pVM->rem.s;
633
634 /*
635 * Save the required CPU Env bits.
636 * (Not much because we're never in REM when doing the save.)
637 */
638 LogFlow(("remR3Save:\n"));
639 Assert(!pRem->fInREM);
640 SSMR3PutU32(pSSM, pRem->Env.hflags);
641 SSMR3PutU32(pSSM, ~0); /* separator */
642
643 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
644 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
645 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
646
647 return SSMR3PutU32(pSSM, ~0); /* terminator */
648}
649
650
/**
 * Execute state load operation.
 *
 * Reads back the fields written by remR3Save (plus extra legacy fields for
 * version 1.6 states), then flushes the TLB and marks the full CPU state as
 * changed so it gets resynced before the recompiler runs again.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   uVersion        Data layout version.
 * @param   uPass           The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;     /* uint32_t because it is read via SSMR3GetUInt. */
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     * (Same refresh as done in REMR3Init, except ext3 features are not
     * fetched here.)
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        /* Intentionally shadows the pVCpu above - iterates all VCPUs. */
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
781
782
783
784#undef LOG_GROUP
785#define LOG_GROUP LOG_GROUP_REM_RUN
786
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (cpu_breakpoint_remove returns 0 on success, so fBp is true when a
     * breakpoint was actually removed and must be re-inserted below.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Flip the timer on and off so the virtual clock still advances. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* Consume the status stashed in pVM->rem.s.rc and reset it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
871
872
873/**
874 * Set a breakpoint using the REM facilities.
875 *
876 * @returns VBox status code.
877 * @param pVM The VM handle.
878 * @param Address The breakpoint address.
879 * @thread The emulation thread.
880 */
881REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
882{
883 VM_ASSERT_EMT(pVM);
884 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
885 {
886 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
887 return VINF_SUCCESS;
888 }
889 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
890 return VERR_REM_NO_MORE_BP_SLOTS;
891}
892
893
894/**
895 * Clears a breakpoint set by REMR3BreakpointSet().
896 *
897 * @returns VBox status code.
898 * @param pVM The VM handle.
899 * @param Address The breakpoint address.
900 * @thread The emulation thread.
901 */
902REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
903{
904 VM_ASSERT_EMT(pVM);
905 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
906 {
907 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
908 return VINF_SUCCESS;
909 }
910 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
911 return VERR_REM_BP_NOT_FOUND;
912}
913
914
915/**
916 * Emulate an instruction.
917 *
918 * This function executes one instruction without letting anyone
919 * interrupt it. This is intended for being called while being in
920 * raw mode and thus will take care of all the state syncing between
921 * REM and the rest.
922 *
923 * @returns VBox status code.
924 * @param pVM VM handle.
925 * @param pVCpu VMCPU Handle.
926 */
927REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
928{
929 bool fFlushTBs;
930
931 int rc, rc2;
932 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
933
934 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
935 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
936 */
937 if (HWACCMIsEnabled(pVM))
938 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
939
940 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
941 fFlushTBs = pVM->rem.s.fFlushTBs;
942 pVM->rem.s.fFlushTBs = false;
943
944 /*
945 * Sync the state and enable single instruction / single stepping.
946 */
947 rc = REMR3State(pVM, pVCpu);
948 pVM->rem.s.fFlushTBs = fFlushTBs;
949 if (RT_SUCCESS(rc))
950 {
951 int interrupt_request = pVM->rem.s.Env.interrupt_request;
952 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
953 Assert(!pVM->rem.s.Env.singlestep_enabled);
954 /*
955 * Now we set the execute single instruction flag and enter the cpu_exec loop.
956 */
957 TMNotifyStartOfExecution(pVCpu);
958 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
959 rc = cpu_exec(&pVM->rem.s.Env);
960 TMNotifyEndOfExecution(pVCpu);
961 switch (rc)
962 {
963 /*
964 * Executed without anything out of the way happening.
965 */
966 case EXCP_SINGLE_INSTR:
967 rc = VINF_EM_RESCHEDULE;
968 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
969 break;
970
971 /*
972 * If we take a trap or start servicing a pending interrupt, we might end up here.
973 * (Timer thread or some other thread wishing EMT's attention.)
974 */
975 case EXCP_INTERRUPT:
976 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
977 rc = VINF_EM_RESCHEDULE;
978 break;
979
980 /*
981 * Single step, we assume!
982 * If there was a breakpoint there we're fucked now.
983 */
984 case EXCP_DEBUG:
985 {
986 /* breakpoint or single step? */
987 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
988 int iBP;
989 rc = VINF_EM_DBG_STEPPED;
990 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
991 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
992 {
993 rc = VINF_EM_DBG_BREAKPOINT;
994 break;
995 }
996 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
997 break;
998 }
999
1000 /*
1001 * hlt instruction.
1002 */
1003 case EXCP_HLT:
1004 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1005 rc = VINF_EM_HALT;
1006 break;
1007
1008 /*
1009 * The VM has halted.
1010 */
1011 case EXCP_HALTED:
1012 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1013 rc = VINF_EM_HALT;
1014 break;
1015
1016 /*
1017 * Switch to RAW-mode.
1018 */
1019 case EXCP_EXECUTE_RAW:
1020 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1021 rc = VINF_EM_RESCHEDULE_RAW;
1022 break;
1023
1024 /*
1025 * Switch to hardware accelerated RAW-mode.
1026 */
1027 case EXCP_EXECUTE_HWACC:
1028 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1029 rc = VINF_EM_RESCHEDULE_HWACC;
1030 break;
1031
1032 /*
1033 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1034 */
1035 case EXCP_RC:
1036 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1037 rc = pVM->rem.s.rc;
1038 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1039 break;
1040
1041 /*
1042 * Figure out the rest when they arrive....
1043 */
1044 default:
1045 AssertMsgFailed(("rc=%d\n", rc));
1046 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1047 rc = VINF_EM_RESCHEDULE;
1048 break;
1049 }
1050
1051 /*
1052 * Switch back the state.
1053 */
1054 pVM->rem.s.Env.interrupt_request = interrupt_request;
1055 rc2 = REMR3StateBack(pVM, pVCpu);
1056 AssertRC(rc2);
1057 }
1058
1059 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1060 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1061 return rc;
1062}
1063
1064
1065/**
1066 * Runs code in recompiled mode.
1067 *
1068 * Before calling this function the REM state needs to be in sync with
1069 * the VM. Call REMR3State() to perform the sync. It's only necessary
1070 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1071 * and after calling REMR3StateBack().
1072 *
1073 * @returns VBox status code.
1074 *
1075 * @param pVM VM Handle.
1076 * @param pVCpu VMCPU Handle.
1077 */
1078REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1079{
1080 int rc;
1081 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1082 Assert(pVM->rem.s.fInREM);
1083
1084 TMNotifyStartOfExecution(pVCpu);
1085 rc = cpu_exec(&pVM->rem.s.Env);
1086 TMNotifyEndOfExecution(pVCpu);
1087 switch (rc)
1088 {
1089 /*
1090 * This happens when the execution was interrupted
1091 * by an external event, like pending timers.
1092 */
1093 case EXCP_INTERRUPT:
1094 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1095 rc = VINF_SUCCESS;
1096 break;
1097
1098 /*
1099 * hlt instruction.
1100 */
1101 case EXCP_HLT:
1102 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1103 rc = VINF_EM_HALT;
1104 break;
1105
1106 /*
1107 * The VM has halted.
1108 */
1109 case EXCP_HALTED:
1110 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1111 rc = VINF_EM_HALT;
1112 break;
1113
1114 /*
1115 * Breakpoint/single step.
1116 */
1117 case EXCP_DEBUG:
1118 {
1119#if 0//def DEBUG_bird
1120 static int iBP = 0;
1121 printf("howdy, breakpoint! iBP=%d\n", iBP);
1122 switch (iBP)
1123 {
1124 case 0:
1125 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1126 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1127 //pVM->rem.s.Env.interrupt_request = 0;
1128 //pVM->rem.s.Env.exception_index = -1;
1129 //g_fInterruptDisabled = 1;
1130 rc = VINF_SUCCESS;
1131 asm("int3");
1132 break;
1133 default:
1134 asm("int3");
1135 break;
1136 }
1137 iBP++;
1138#else
1139 /* breakpoint or single step? */
1140 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1141 int iBP;
1142 rc = VINF_EM_DBG_STEPPED;
1143 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1144 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1145 {
1146 rc = VINF_EM_DBG_BREAKPOINT;
1147 break;
1148 }
1149 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1150#endif
1151 break;
1152 }
1153
1154 /*
1155 * Switch to RAW-mode.
1156 */
1157 case EXCP_EXECUTE_RAW:
1158 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1159 rc = VINF_EM_RESCHEDULE_RAW;
1160 break;
1161
1162 /*
1163 * Switch to hardware accelerated RAW-mode.
1164 */
1165 case EXCP_EXECUTE_HWACC:
1166 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1167 rc = VINF_EM_RESCHEDULE_HWACC;
1168 break;
1169
1170 /*
1171 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1172 */
1173 case EXCP_RC:
1174 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1175 rc = pVM->rem.s.rc;
1176 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1177 break;
1178
1179 /*
1180 * Figure out the rest when they arrive....
1181 */
1182 default:
1183 AssertMsgFailed(("rc=%d\n", rc));
1184 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1185 rc = VINF_SUCCESS;
1186 break;
1187 }
1188
1189 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1190 return rc;
1191}
1192
1193
1194/**
1195 * Check if the cpu state is suitable for Raw execution.
1196 *
1197 * @returns boolean
1198 * @param env The CPU env struct.
1199 * @param eip The EIP to check this for (might differ from env->eip).
1200 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1201 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1202 *
1203 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1204 */
1205bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1206{
1207 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1208 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1209 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1210 uint32_t u32CR0;
1211
1212 /* Update counter. */
1213 env->pVM->rem.s.cCanExecuteRaw++;
1214
1215 if (HWACCMIsEnabled(env->pVM))
1216 {
1217 CPUMCTX Ctx;
1218
1219 env->state |= CPU_RAW_HWACC;
1220
1221 /*
1222 * Create partial context for HWACCMR3CanExecuteGuest
1223 */
1224 Ctx.cr0 = env->cr[0];
1225 Ctx.cr3 = env->cr[3];
1226 Ctx.cr4 = env->cr[4];
1227
1228 Ctx.tr = env->tr.selector;
1229 Ctx.trHid.u64Base = env->tr.base;
1230 Ctx.trHid.u32Limit = env->tr.limit;
1231 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1232
1233 Ctx.ldtr = env->ldt.selector;
1234 Ctx.ldtrHid.u64Base = env->ldt.base;
1235 Ctx.ldtrHid.u32Limit = env->ldt.limit;
1236 Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1237
1238 Ctx.idtr.cbIdt = env->idt.limit;
1239 Ctx.idtr.pIdt = env->idt.base;
1240
1241 Ctx.gdtr.cbGdt = env->gdt.limit;
1242 Ctx.gdtr.pGdt = env->gdt.base;
1243
1244 Ctx.rsp = env->regs[R_ESP];
1245 Ctx.rip = env->eip;
1246
1247 Ctx.eflags.u32 = env->eflags;
1248
1249 Ctx.cs = env->segs[R_CS].selector;
1250 Ctx.csHid.u64Base = env->segs[R_CS].base;
1251 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1252 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1253
1254 Ctx.ds = env->segs[R_DS].selector;
1255 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1256 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1257 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1258
1259 Ctx.es = env->segs[R_ES].selector;
1260 Ctx.esHid.u64Base = env->segs[R_ES].base;
1261 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1262 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1263
1264 Ctx.fs = env->segs[R_FS].selector;
1265 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1266 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1267 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1268
1269 Ctx.gs = env->segs[R_GS].selector;
1270 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1271 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1272 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1273
1274 Ctx.ss = env->segs[R_SS].selector;
1275 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1276 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1277 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1278
1279 Ctx.msrEFER = env->efer;
1280
1281 /* Hardware accelerated raw-mode:
1282 *
1283 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1284 */
1285 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1286 {
1287 *piException = EXCP_EXECUTE_HWACC;
1288 return true;
1289 }
1290 return false;
1291 }
1292
1293 /*
1294 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1295 * or 32 bits protected mode ring 0 code
1296 *
1297 * The tests are ordered by the likelihood of being true during normal execution.
1298 */
1299 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1300 {
1301 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1302 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1303 return false;
1304 }
1305
1306#ifndef VBOX_RAW_V86
1307 if (fFlags & VM_MASK) {
1308 STAM_COUNTER_INC(&gStatRefuseVM86);
1309 Log2(("raw mode refused: VM_MASK\n"));
1310 return false;
1311 }
1312#endif
1313
1314 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1315 {
1316#ifndef DEBUG_bird
1317 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1318#endif
1319 return false;
1320 }
1321
1322 if (env->singlestep_enabled)
1323 {
1324 //Log2(("raw mode refused: Single step\n"));
1325 return false;
1326 }
1327
1328 if (env->nb_breakpoints > 0)
1329 {
1330 //Log2(("raw mode refused: Breakpoints\n"));
1331 return false;
1332 }
1333
1334 u32CR0 = env->cr[0];
1335 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1336 {
1337 STAM_COUNTER_INC(&gStatRefusePaging);
1338 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1339 return false;
1340 }
1341
1342 if (env->cr[4] & CR4_PAE_MASK)
1343 {
1344 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1345 {
1346 STAM_COUNTER_INC(&gStatRefusePAE);
1347 return false;
1348 }
1349 }
1350
1351 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1352 {
1353 if (!EMIsRawRing3Enabled(env->pVM))
1354 return false;
1355
1356 if (!(env->eflags & IF_MASK))
1357 {
1358 STAM_COUNTER_INC(&gStatRefuseIF0);
1359 Log2(("raw mode refused: IF (RawR3)\n"));
1360 return false;
1361 }
1362
1363 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1364 {
1365 STAM_COUNTER_INC(&gStatRefuseWP0);
1366 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1367 return false;
1368 }
1369 }
1370 else
1371 {
1372 if (!EMIsRawRing0Enabled(env->pVM))
1373 return false;
1374
1375 // Let's start with pure 32 bits ring 0 code first
1376 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1377 {
1378 STAM_COUNTER_INC(&gStatRefuseCode16);
1379 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1380 return false;
1381 }
1382
1383 // Only R0
1384 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1385 {
1386 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1387 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1388 return false;
1389 }
1390
1391 if (!(u32CR0 & CR0_WP_MASK))
1392 {
1393 STAM_COUNTER_INC(&gStatRefuseWP0);
1394 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1395 return false;
1396 }
1397
1398 if (PATMIsPatchGCAddr(env->pVM, eip))
1399 {
1400 Log2(("raw r0 mode forced: patch code\n"));
1401 *piException = EXCP_EXECUTE_RAW;
1402 return true;
1403 }
1404
1405#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1406 if (!(env->eflags & IF_MASK))
1407 {
1408 STAM_COUNTER_INC(&gStatRefuseIF0);
1409 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1410 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1411 return false;
1412 }
1413#endif
1414
1415 env->state |= CPU_RAW_RING0;
1416 }
1417
1418 /*
1419 * Don't reschedule the first time we're called, because there might be
1420 * special reasons why we're here that is not covered by the above checks.
1421 */
1422 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1423 {
1424 Log2(("raw mode refused: first scheduling\n"));
1425 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1426 return false;
1427 }
1428
1429 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1430 *piException = EXCP_EXECUTE_RAW;
1431 return true;
1432}
1433
1434
1435/**
1436 * Fetches a code byte.
1437 *
1438 * @returns Success indicator (bool) for ease of use.
1439 * @param env The CPU environment structure.
1440 * @param GCPtrInstr Where to fetch code.
1441 * @param pu8Byte Where to store the byte on success
1442 */
1443bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1444{
1445 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1446 if (RT_SUCCESS(rc))
1447 return true;
1448 return false;
1449}
1450
1451
1452/**
1453 * Flush (or invalidate if you like) page table/dir entry.
1454 *
1455 * (invlpg instruction; tlb_flush_page)
1456 *
1457 * @param env Pointer to cpu environment.
1458 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1459 */
1460void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1461{
1462 PVM pVM = env->pVM;
1463 PCPUMCTX pCtx;
1464 int rc;
1465
1466 /*
1467 * When we're replaying invlpg instructions or restoring a saved
1468 * state we disable this path.
1469 */
1470 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1471 return;
1472 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1473 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1474
1475 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1476
1477 /*
1478 * Update the control registers before calling PGMFlushPage.
1479 */
1480 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1481 Assert(pCtx);
1482 pCtx->cr0 = env->cr[0];
1483 pCtx->cr3 = env->cr[3];
1484 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1485 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1486 pCtx->cr4 = env->cr[4];
1487
1488 /*
1489 * Let PGM do the rest.
1490 */
1491 Assert(env->pVCpu);
1492 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1493 if (RT_FAILURE(rc))
1494 {
1495 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1496 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1497 }
1498 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1499}
1500
1501
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address into an R3 pointer, encoding failure
 * and write-monitoring information in the low bits of the returned pointer:
 * 1 = inaccessible/unassigned, bit 1 set = write access must be caught.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored here; a writable
       mapping is always requested and write monitoring is instead signalled
       via bit 1 of the returned pointer below.  This looks deliberate, but
       confirm against the callers. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1524
1525
1526/**
1527 * Called from tlb_protect_code in order to write monitor a code page.
1528 *
1529 * @param env Pointer to the CPU environment.
1530 * @param GCPtr Code page to monitor
1531 */
1532void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1533{
1534#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1535 Assert(env->pVM->rem.s.fInREM);
1536 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1537 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1538 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1539 && !(env->eflags & VM_MASK) /* no V86 mode */
1540 && !HWACCMIsEnabled(env->pVM))
1541 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1542#endif
1543}
1544
1545
1546/**
1547 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1548 *
1549 * @param env Pointer to the CPU environment.
1550 * @param GCPtr Code page to monitor
1551 */
1552void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1553{
1554 Assert(env->pVM->rem.s.fInREM);
1555#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1556 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1557 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1558 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1559 && !(env->eflags & VM_MASK) /* no V86 mode */
1560 && !HWACCMIsEnabled(env->pVM))
1561 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1562#endif
1563}
1564
1565
1566/**
1567 * Called when the CPU is initialized, any of the CRx registers are changed or
1568 * when the A20 line is modified.
1569 *
1570 * @param env Pointer to the CPU environment.
1571 * @param fGlobal Set if the flush is global.
1572 */
1573void remR3FlushTLB(CPUState *env, bool fGlobal)
1574{
1575 PVM pVM = env->pVM;
1576 PCPUMCTX pCtx;
1577
1578 /*
1579 * When we're replaying invlpg instructions or restoring a saved
1580 * state we disable this path.
1581 */
1582 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1583 return;
1584 Assert(pVM->rem.s.fInREM);
1585
1586 /*
1587 * The caller doesn't check cr4, so we have to do that for ourselves.
1588 */
1589 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1590 fGlobal = true;
1591 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1592
1593 /*
1594 * Update the control registers before calling PGMR3FlushTLB.
1595 */
1596 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1597 Assert(pCtx);
1598 pCtx->cr0 = env->cr[0];
1599 pCtx->cr3 = env->cr[3];
1600 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1601 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1602 pCtx->cr4 = env->cr[4];
1603
1604 /*
1605 * Let PGM do the rest.
1606 */
1607 Assert(env->pVCpu);
1608 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1609}
1610
1611
1612/**
1613 * Called when any of the cr0, cr4 or efer registers is updated.
1614 *
1615 * @param env Pointer to the CPU environment.
1616 */
1617void remR3ChangeCpuMode(CPUState *env)
1618{
1619 PVM pVM = env->pVM;
1620 uint64_t efer;
1621 PCPUMCTX pCtx;
1622 int rc;
1623
1624 /*
1625 * When we're replaying loads or restoring a saved
1626 * state this path is disabled.
1627 */
1628 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1629 return;
1630 Assert(pVM->rem.s.fInREM);
1631
1632 /*
1633 * Update the control registers before calling PGMChangeMode()
1634 * as it may need to map whatever cr3 is pointing to.
1635 */
1636 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1637 Assert(pCtx);
1638 pCtx->cr0 = env->cr[0];
1639 pCtx->cr3 = env->cr[3];
1640 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1641 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1642 pCtx->cr4 = env->cr[4];
1643
1644#ifdef TARGET_X86_64
1645 efer = env->efer;
1646#else
1647 efer = 0;
1648#endif
1649 Assert(env->pVCpu);
1650 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1651 if (rc != VINF_SUCCESS)
1652 {
1653 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1654 {
1655 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1656 remR3RaiseRC(env->pVM, rc);
1657 }
1658 else
1659 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1660 }
1661}
1662
1663
1664/**
1665 * Called from compiled code to run dma.
1666 *
1667 * @param env Pointer to the CPU environment.
1668 */
1669void remR3DmaRun(CPUState *env)
1670{
1671 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1672 PDMR3DmaRun(env->pVM);
1673 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1674}
1675
1676
1677/**
1678 * Called from compiled code to schedule pending timers in VMM
1679 *
1680 * @param env Pointer to the CPU environment.
1681 */
1682void remR3TimersRun(CPUState *env)
1683{
1684 LogFlow(("remR3TimersRun:\n"));
1685 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1686 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1687 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1688 TMR3TimerQueuesDo(env->pVM);
1689 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1690 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1691}
1692
1693
1694/**
1695 * Record trap occurrence
1696 *
1697 * @returns VBox status code
1698 * @param env Pointer to the CPU environment.
1699 * @param uTrap Trap nr
1700 * @param uErrorCode Error code
1701 * @param pvNextEIP Next EIP
1702 */
1703int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1704{
1705 PVM pVM = env->pVM;
1706#ifdef VBOX_WITH_STATISTICS
1707 static STAMCOUNTER s_aStatTrap[255];
1708 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1709#endif
1710
1711#ifdef VBOX_WITH_STATISTICS
1712 if (uTrap < 255)
1713 {
1714 if (!s_aRegisters[uTrap])
1715 {
1716 char szStatName[64];
1717 s_aRegisters[uTrap] = true;
1718 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1719 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1720 }
1721 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1722 }
1723#endif
1724 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1725 if( uTrap < 0x20
1726 && (env->cr[0] & X86_CR0_PE)
1727 && !(env->eflags & X86_EFL_VM))
1728 {
1729#ifdef DEBUG
1730 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1731#endif
1732 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1733 {
1734 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1735 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1736 return VERR_REM_TOO_MANY_TRAPS;
1737 }
1738 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1739 pVM->rem.s.cPendingExceptions = 1;
1740 pVM->rem.s.uPendingException = uTrap;
1741 pVM->rem.s.uPendingExcptEIP = env->eip;
1742 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1743 }
1744 else
1745 {
1746 pVM->rem.s.cPendingExceptions = 0;
1747 pVM->rem.s.uPendingException = uTrap;
1748 pVM->rem.s.uPendingExcptEIP = env->eip;
1749 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1750 }
1751 return VINF_SUCCESS;
1752}
1753
1754
1755/*
1756 * Clear current active trap
1757 *
1758 * @param pVM VM Handle.
1759 */
1760void remR3TrapClear(PVM pVM)
1761{
1762 pVM->rem.s.cPendingExceptions = 0;
1763 pVM->rem.s.uPendingException = 0;
1764 pVM->rem.s.uPendingExcptEIP = 0;
1765 pVM->rem.s.uPendingExcptCR2 = 0;
1766}
1767
1768
1769/*
1770 * Record previous call instruction addresses
1771 *
1772 * @param env Pointer to the CPU environment.
1773 */
1774void remR3RecordCall(CPUState *env)
1775{
1776 CSAMR3RecordCallAddress(env->pVM, env->eip);
1777}
1778
1779
1780/**
1781 * Syncs the internal REM state with the VM.
1782 *
1783 * This must be called before REMR3Run() is invoked whenever when the REM
1784 * state is not up to date. Calling it several times in a row is not
1785 * permitted.
1786 *
1787 * @returns VBox status code.
1788 *
1789 * @param pVM VM Handle.
1790 * @param pVCpu VMCPU Handle.
1791 *
1792 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1793 * no do this since the majority of the callers don't want any unnecessary of events
1794 * pending that would immediately interrupt execution.
1795 */
1796REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1797{
1798 register const CPUMCTX *pCtx;
1799 register unsigned fFlags;
1800 bool fHiddenSelRegsValid;
1801 unsigned i;
1802 TRPMEVENT enmType;
1803 uint8_t u8TrapNo;
1804 uint32_t uCpl;
1805 int rc;
1806
1807 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1808 Log2(("REMR3State:\n"));
1809
1810 pVM->rem.s.Env.pVCpu = pVCpu;
1811 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1812 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
1813
1814 Assert(!pVM->rem.s.fInREM);
1815 pVM->rem.s.fInStateSync = true;
1816
1817 /*
1818 * If we have to flush TBs, do that immediately.
1819 */
1820 if (pVM->rem.s.fFlushTBs)
1821 {
1822 STAM_COUNTER_INC(&gStatFlushTBs);
1823 tb_flush(&pVM->rem.s.Env);
1824 pVM->rem.s.fFlushTBs = false;
1825 }
1826
1827 /*
1828 * Copy the registers which require no special handling.
1829 */
1830#ifdef TARGET_X86_64
1831 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1832 Assert(R_EAX == 0);
1833 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1834 Assert(R_ECX == 1);
1835 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1836 Assert(R_EDX == 2);
1837 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1838 Assert(R_EBX == 3);
1839 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1840 Assert(R_ESP == 4);
1841 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1842 Assert(R_EBP == 5);
1843 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1844 Assert(R_ESI == 6);
1845 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1846 Assert(R_EDI == 7);
1847 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1848 pVM->rem.s.Env.regs[8] = pCtx->r8;
1849 pVM->rem.s.Env.regs[9] = pCtx->r9;
1850 pVM->rem.s.Env.regs[10] = pCtx->r10;
1851 pVM->rem.s.Env.regs[11] = pCtx->r11;
1852 pVM->rem.s.Env.regs[12] = pCtx->r12;
1853 pVM->rem.s.Env.regs[13] = pCtx->r13;
1854 pVM->rem.s.Env.regs[14] = pCtx->r14;
1855 pVM->rem.s.Env.regs[15] = pCtx->r15;
1856
1857 pVM->rem.s.Env.eip = pCtx->rip;
1858
1859 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1860#else
1861 Assert(R_EAX == 0);
1862 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1863 Assert(R_ECX == 1);
1864 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1865 Assert(R_EDX == 2);
1866 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1867 Assert(R_EBX == 3);
1868 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1869 Assert(R_ESP == 4);
1870 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1871 Assert(R_EBP == 5);
1872 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1873 Assert(R_ESI == 6);
1874 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1875 Assert(R_EDI == 7);
1876 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1877 pVM->rem.s.Env.eip = pCtx->eip;
1878
1879 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1880#endif
1881
1882 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1883
1884 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1885 for (i=0;i<8;i++)
1886 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1887
1888 /*
1889 * Clear the halted hidden flag (the interrupt waking up the CPU can
1890 * have been dispatched in raw mode).
1891 */
1892 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1893
1894 /*
1895 * Replay invlpg?
1896 */
1897 if (pVM->rem.s.cInvalidatedPages)
1898 {
1899 RTUINT i;
1900
1901 pVM->rem.s.fIgnoreInvlPg = true;
1902 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1903 {
1904 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1905 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1906 }
1907 pVM->rem.s.fIgnoreInvlPg = false;
1908 pVM->rem.s.cInvalidatedPages = 0;
1909 }
1910
1911 /* Replay notification changes. */
1912 REMR3ReplayHandlerNotifications(pVM);
1913
1914 /* Update MSRs; before CRx registers! */
1915 pVM->rem.s.Env.efer = pCtx->msrEFER;
1916 pVM->rem.s.Env.star = pCtx->msrSTAR;
1917 pVM->rem.s.Env.pat = pCtx->msrPAT;
1918#ifdef TARGET_X86_64
1919 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1920 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1921 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1922 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1923
1924 /* Update the internal long mode activate flag according to the new EFER value. */
1925 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1926 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1927 else
1928 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1929#endif
1930
1931 /*
1932 * Registers which are rarely changed and require special handling / order when changed.
1933 */
1934 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
1935 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
1936 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1937 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1938 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1939 {
1940 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1941 {
1942 pVM->rem.s.fIgnoreCR3Load = true;
1943 tlb_flush(&pVM->rem.s.Env, true);
1944 pVM->rem.s.fIgnoreCR3Load = false;
1945 }
1946
1947 /* CR4 before CR0! */
1948 if (fFlags & CPUM_CHANGED_CR4)
1949 {
1950 pVM->rem.s.fIgnoreCR3Load = true;
1951 pVM->rem.s.fIgnoreCpuMode = true;
1952 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1953 pVM->rem.s.fIgnoreCpuMode = false;
1954 pVM->rem.s.fIgnoreCR3Load = false;
1955 }
1956
1957 if (fFlags & CPUM_CHANGED_CR0)
1958 {
1959 pVM->rem.s.fIgnoreCR3Load = true;
1960 pVM->rem.s.fIgnoreCpuMode = true;
1961 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1962 pVM->rem.s.fIgnoreCpuMode = false;
1963 pVM->rem.s.fIgnoreCR3Load = false;
1964 }
1965
1966 if (fFlags & CPUM_CHANGED_CR3)
1967 {
1968 pVM->rem.s.fIgnoreCR3Load = true;
1969 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1970 pVM->rem.s.fIgnoreCR3Load = false;
1971 }
1972
1973 if (fFlags & CPUM_CHANGED_GDTR)
1974 {
1975 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1976 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1977 }
1978
1979 if (fFlags & CPUM_CHANGED_IDTR)
1980 {
1981 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1982 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1983 }
1984
1985 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1986 {
1987 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1988 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1989 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1990 }
1991
1992 if (fFlags & CPUM_CHANGED_LDTR)
1993 {
1994 if (fHiddenSelRegsValid)
1995 {
1996 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1997 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1998 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1999 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2000 }
2001 else
2002 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2003 }
2004
2005 if (fFlags & CPUM_CHANGED_CPUID)
2006 {
2007 uint32_t u32Dummy;
2008
2009 /*
2010 * Get the CPUID features.
2011 */
2012 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2013 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2014 }
2015
2016 /* Sync FPU state after CR4, CPUID and EFER (!). */
2017 if (fFlags & CPUM_CHANGED_FPU_REM)
2018 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2019 }
2020
2021 /*
2022 * Sync TR unconditionally to make life simpler.
2023 */
2024 pVM->rem.s.Env.tr.selector = pCtx->tr;
2025 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2026 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2027 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2028 /* Note! do_interrupt will fault if the busy flag is still set... */
2029 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2030
2031 /*
2032 * Update selector registers.
2033 * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
2035 * saved state which takes a quick dip into rawmode for instance.
2036 */
2037 /*
2038 * Stack; Note first check this one as the CPL might have changed. The
2039 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2040 */
2041
2042 if (fHiddenSelRegsValid)
2043 {
2044 /* The hidden selector registers are valid in the CPU context. */
2045 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2046
2047 /* Set current CPL */
2048 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2049
2050 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2051 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2052 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2053 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2054 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2055 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2056 }
2057 else
2058 {
2059 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2060 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2061 {
2062 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2063
2064 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2065 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2066#ifdef VBOX_WITH_STATISTICS
2067 if (pVM->rem.s.Env.segs[R_SS].newselector)
2068 {
2069 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2070 }
2071#endif
2072 }
2073 else
2074 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2075
2076 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2077 {
2078 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2079 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2080#ifdef VBOX_WITH_STATISTICS
2081 if (pVM->rem.s.Env.segs[R_ES].newselector)
2082 {
2083 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2084 }
2085#endif
2086 }
2087 else
2088 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2089
2090 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2091 {
2092 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2093 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2094#ifdef VBOX_WITH_STATISTICS
2095 if (pVM->rem.s.Env.segs[R_CS].newselector)
2096 {
2097 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2098 }
2099#endif
2100 }
2101 else
2102 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2103
2104 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2105 {
2106 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2107 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2108#ifdef VBOX_WITH_STATISTICS
2109 if (pVM->rem.s.Env.segs[R_DS].newselector)
2110 {
2111 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2112 }
2113#endif
2114 }
2115 else
2116 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2117
2118 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2119 * be the same but not the base/limit. */
2120 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2121 {
2122 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2123 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2124#ifdef VBOX_WITH_STATISTICS
2125 if (pVM->rem.s.Env.segs[R_FS].newselector)
2126 {
2127 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2128 }
2129#endif
2130 }
2131 else
2132 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2133
2134 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2135 {
2136 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2137 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2138#ifdef VBOX_WITH_STATISTICS
2139 if (pVM->rem.s.Env.segs[R_GS].newselector)
2140 {
2141 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2142 }
2143#endif
2144 }
2145 else
2146 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2147 }
2148
2149 /*
2150 * Check for traps.
2151 */
2152 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2153 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2154 if (RT_SUCCESS(rc))
2155 {
2156#ifdef DEBUG
2157 if (u8TrapNo == 0x80)
2158 {
2159 remR3DumpLnxSyscall(pVCpu);
2160 remR3DumpOBsdSyscall(pVCpu);
2161 }
2162#endif
2163
2164 pVM->rem.s.Env.exception_index = u8TrapNo;
2165 if (enmType != TRPM_SOFTWARE_INT)
2166 {
2167 pVM->rem.s.Env.exception_is_int = 0;
2168 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2169 }
2170 else
2171 {
2172 /*
             * There are two one-byte opcodes and one two-byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and set the default to 2 bytes, checking
2175 * for int03 and into.
2176 */
2177 pVM->rem.s.Env.exception_is_int = 1;
2178 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2179 /* int 3 may be generated by one-byte 0xcc */
2180 if (u8TrapNo == 3)
2181 {
2182 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2183 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2184 }
2185 /* int 4 may be generated by one-byte 0xce */
2186 else if (u8TrapNo == 4)
2187 {
2188 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2189 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2190 }
2191 }
2192
2193 /* get error code and cr2 if needed. */
2194 switch (u8TrapNo)
2195 {
2196 case 0x0e:
2197 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2198 /* fallthru */
2199 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2200 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2201 break;
2202
2203 case 0x11: case 0x08:
2204 default:
2205 pVM->rem.s.Env.error_code = 0;
2206 break;
2207 }
2208
2209 /*
2210 * We can now reset the active trap since the recompiler is gonna have a go at it.
2211 */
2212 rc = TRPMResetTrap(pVCpu);
2213 AssertRC(rc);
2214 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2215 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2216 }
2217
2218 /*
2219 * Clear old interrupt request flags; Check for pending hardware interrupts.
2220 * (See @remark for why we don't check for other FFs.)
2221 */
2222 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2223 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2224 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2225 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2226
2227 /*
2228 * We're now in REM mode.
2229 */
2230 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2231 pVM->rem.s.fInREM = true;
2232 pVM->rem.s.fInStateSync = false;
2233 pVM->rem.s.cCanExecuteRaw = 0;
2234 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2235 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2236 return VINF_SUCCESS;
2237}
2238
2239
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    /* 'restore' here means copying QEMU's FPU/XMM state back into the CPUMCTX fpu area. */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors QEMU left in the lazily-loaded ("new selector") state. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME change affects the interrupt redirection bitmap, so force a TSS resync. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: only raise the sync FFs when the base actually moved. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* LDTR: the 0xF0FF mask keeps only the attribute bits of descriptor dword 2
       (drops limit 19:16); same mask in both the comparison and the store. */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        /* Re-set the busy bit that REMR3State cleared before entering REM. */
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * If REM left a pending exception behind, hand it back to TRPM so the
     * VMM can dispatch (or re-inject) it.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Only the exceptions below carry an error code (and #PF also a fault address). */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     * The second CPUMR3RemLeave argument indicates whether the segment registers
     * are fully resolved (no lazily-loaded "new selectors" outstanding).
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                       |  pVM->rem.s.Env.segs[R_GS].newselector
                       |  pVM->rem.s.Env.segs[R_FS].newselector
                       |  pVM->rem.s.Env.segs[R_ES].newselector
                       |  pVM->rem.s.Env.segs[R_DS].newselector
                       |  pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2499
2500
2501/**
2502 * This is called by the disassembler when it wants to update the cpu state
2503 * before for instance doing a register dump.
2504 */
2505static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2506{
2507 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2508 unsigned i;
2509
2510 Assert(pVM->rem.s.fInREM);
2511
2512 /*
2513 * Copy back the registers.
2514 * This is done in the order they are declared in the CPUMCTX structure.
2515 */
2516
2517 /** @todo FOP */
2518 /** @todo FPUIP */
2519 /** @todo CS */
2520 /** @todo FPUDP */
2521 /** @todo DS */
2522 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2523 pCtx->fpu.MXCSR = 0;
2524 pCtx->fpu.MXCSR_MASK = 0;
2525
2526 /** @todo check if FPU/XMM was actually used in the recompiler */
2527 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2528//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2529
2530#ifdef TARGET_X86_64
2531 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2532 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2533 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2534 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2535 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2536 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2537 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2538 pCtx->r8 = pVM->rem.s.Env.regs[8];
2539 pCtx->r9 = pVM->rem.s.Env.regs[9];
2540 pCtx->r10 = pVM->rem.s.Env.regs[10];
2541 pCtx->r11 = pVM->rem.s.Env.regs[11];
2542 pCtx->r12 = pVM->rem.s.Env.regs[12];
2543 pCtx->r13 = pVM->rem.s.Env.regs[13];
2544 pCtx->r14 = pVM->rem.s.Env.regs[14];
2545 pCtx->r15 = pVM->rem.s.Env.regs[15];
2546
2547 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2548#else
2549 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2550 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2551 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2552 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2553 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2554 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2555 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2556
2557 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2558#endif
2559
2560 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2561
2562 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2563 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2564 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2565 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2566 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2567
2568#ifdef TARGET_X86_64
2569 pCtx->rip = pVM->rem.s.Env.eip;
2570 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2571#else
2572 pCtx->eip = pVM->rem.s.Env.eip;
2573 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2574#endif
2575
2576 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2577 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2578 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2579 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2580 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2581 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2582
2583 for (i = 0; i < 8; i++)
2584 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2585
2586 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2587 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2588 {
2589 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2590 STAM_COUNTER_INC(&gStatREMGDTChange);
2591 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2592 }
2593
2594 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2595 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2596 {
2597 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2598 STAM_COUNTER_INC(&gStatREMIDTChange);
2599 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2600 }
2601
2602 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2603 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2604 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2605 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2606 {
2607 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2608 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2609 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2610 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2611 STAM_COUNTER_INC(&gStatREMLDTRChange);
2612 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2613 }
2614
2615 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2616 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2617 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2618 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2619 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2620 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2621 : 0) )
2622 {
2623 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2624 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2625 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2626 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2627 pCtx->tr = pVM->rem.s.Env.tr.selector;
2628 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2629 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2630 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2631 if (pCtx->trHid.Attr.u)
2632 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2633 STAM_COUNTER_INC(&gStatREMTRChange);
2634 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2635 }
2636
2637 /** @todo These values could still be out of sync! */
2638 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2639 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2640 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2641 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2642
2643 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2644 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2645 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2646
2647 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2648 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2649 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2650
2651 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2652 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2653 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2654
2655 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2656 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2657 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2658
2659 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2660 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2661 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2662
2663 /* Sysenter MSR */
2664 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2665 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2666 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2667
2668 /* System MSRs. */
2669 pCtx->msrEFER = pVM->rem.s.Env.efer;
2670 pCtx->msrSTAR = pVM->rem.s.Env.star;
2671 pCtx->msrPAT = pVM->rem.s.Env.pat;
2672#ifdef TARGET_X86_64
2673 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2674 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2675 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2676 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2677#endif
2678
2679}
2680
2681
2682/**
2683 * Update the VMM state information if we're currently in REM.
2684 *
2685 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2686 * we're currently executing in REM and the VMM state is invalid. This method will of
2687 * course check that we're executing in REM before syncing any data over to the VMM.
2688 *
2689 * @param pVM The VM handle.
2690 * @param pVCpu The VMCPU handle.
2691 */
2692REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2693{
2694 if (pVM->rem.s.fInREM)
2695 remR3StateUpdate(pVM, pVCpu);
2696}
2697
2698
2699#undef LOG_GROUP
2700#define LOG_GROUP LOG_GROUP_REM
2701
2702
2703/**
2704 * Notify the recompiler about Address Gate 20 state change.
2705 *
2706 * This notification is required since A20 gate changes are
2707 * initialized from a device driver and the VM might just as
2708 * well be in REM mode as in RAW mode.
2709 *
2710 * @param pVM VM handle.
2711 * @param pVCpu VMCPU handle.
2712 * @param fEnable True if the gate should be enabled.
2713 * False if the gate should be disabled.
2714 */
2715REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2716{
2717 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2718 VM_ASSERT_EMT(pVM);
2719
2720 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2721 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2722 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2723}
2724
2725
2726/**
 * Replays the handler notification changes.
2728 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2729 *
2730 * @param pVM VM handle.
2731 */
2732REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2733{
2734 /*
2735 * Replay the flushes.
2736 */
2737 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2738 VM_ASSERT_EMT(pVM);
2739
2740 /** @todo this isn't ensuring correct replay order. */
2741 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
2742 {
2743 uint32_t idxNext;
2744 uint32_t idxRevHead;
2745 uint32_t idxHead;
2746#ifdef VBOX_STRICT
2747 int32_t c = 0;
2748#endif
2749
2750 /* Lockless purging of pending notifications. */
2751 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2752 if (idxHead == UINT32_MAX)
2753 return;
2754 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2755
2756 /*
2757 * Reverse the list to process it in FIFO order.
2758 */
2759 idxRevHead = UINT32_MAX;
2760 do
2761 {
2762 /* Save the index of the next rec. */
2763 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
2764 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
2765 /* Push the record onto the reversed list. */
2766 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
2767 idxRevHead = idxHead;
2768 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2769 /* Advance. */
2770 idxHead = idxNext;
2771 } while (idxHead != UINT32_MAX);
2772
2773 /*
2774 * Loop thru the list, reinserting the record into the free list as they are
2775 * processed to avoid having other EMTs running out of entries while we're flushing.
2776 */
2777 idxHead = idxRevHead;
2778 do
2779 {
2780 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
2781 uint32_t idxCur;
2782 Assert(--c >= 0);
2783
2784 switch (pCur->enmKind)
2785 {
2786 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2787 remR3NotifyHandlerPhysicalRegister(pVM,
2788 pCur->u.PhysicalRegister.enmType,
2789 pCur->u.PhysicalRegister.GCPhys,
2790 pCur->u.PhysicalRegister.cb,
2791 pCur->u.PhysicalRegister.fHasHCHandler);
2792 break;
2793
2794 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2795 remR3NotifyHandlerPhysicalDeregister(pVM,
2796 pCur->u.PhysicalDeregister.enmType,
2797 pCur->u.PhysicalDeregister.GCPhys,
2798 pCur->u.PhysicalDeregister.cb,
2799 pCur->u.PhysicalDeregister.fHasHCHandler,
2800 pCur->u.PhysicalDeregister.fRestoreAsRAM);
2801 break;
2802
2803 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2804 remR3NotifyHandlerPhysicalModify(pVM,
2805 pCur->u.PhysicalModify.enmType,
2806 pCur->u.PhysicalModify.GCPhysOld,
2807 pCur->u.PhysicalModify.GCPhysNew,
2808 pCur->u.PhysicalModify.cb,
2809 pCur->u.PhysicalModify.fHasHCHandler,
2810 pCur->u.PhysicalModify.fRestoreAsRAM);
2811 break;
2812
2813 default:
2814 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
2815 break;
2816 }
2817
2818 /*
2819 * Advance idxHead.
2820 */
2821 idxCur = idxHead;
2822 idxHead = pCur->idxNext;
2823 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
2824
2825 /*
2826 * Put the record back into the free list.
2827 */
2828 do
2829 {
2830 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
2831 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
2832 ASMCompilerBarrier();
2833 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
2834 } while (idxHead != UINT32_MAX);
2835
2836#ifdef VBOX_STRICT
2837 if (pVM->cCpus == 1)
2838 {
2839 unsigned c;
2840 /* Check that all records are now on the free list. */
2841 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
2842 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
2843 c++;
2844 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
2845 }
2846#endif
2847 }
2848}
2849
2850
2851/**
2852 * Notify REM about changed code page.
2853 *
2854 * @returns VBox status code.
2855 * @param pVM VM handle.
2856 * @param pVCpu VMCPU handle.
2857 * @param pvCodePage Code page address
2858 */
2859REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
2860{
2861#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2862 int rc;
2863 RTGCPHYS PhysGC;
2864 uint64_t flags;
2865
2866 VM_ASSERT_EMT(pVM);
2867
2868 /*
2869 * Get the physical page address.
2870 */
2871 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2872 if (rc == VINF_SUCCESS)
2873 {
2874 /*
2875 * Sync the required registers and flush the whole page.
2876 * (Easier to do the whole page than notifying it about each physical
2877 * byte that was changed.
2878 */
2879 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2880 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2881 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2882 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2883
2884 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2885 }
2886#endif
2887 return VINF_SUCCESS;
2888}
2889
2890
2891/**
2892 * Notification about a successful MMR3PhysRegister() call.
2893 *
2894 * @param pVM VM handle.
2895 * @param GCPhys The physical address the RAM.
2896 * @param cb Size of the memory.
2897 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2898 */
2899REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2900{
2901 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2902 VM_ASSERT_EMT(pVM);
2903
2904 /*
2905 * Validate input - we trust the caller.
2906 */
2907 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2908 Assert(cb);
2909 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2910 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2911
2912 /*
2913 * Base ram? Update GCPhysLastRam.
2914 */
2915 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2916 {
2917 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2918 {
2919 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2920 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2921 }
2922 }
2923
2924 /*
2925 * Register the ram.
2926 */
2927 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2928
2929 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2930 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2931 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2932
2933 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2934}
2935
2936
2937/**
2938 * Notification about a successful MMR3PhysRomRegister() call.
2939 *
2940 * @param pVM VM handle.
2941 * @param GCPhys The physical address of the ROM.
2942 * @param cb The size of the ROM.
2943 * @param pvCopy Pointer to the ROM copy.
2944 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2945 * This function will be called when ever the protection of the
2946 * shadow ROM changes (at reset and end of POST).
2947 */
2948REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2949{
2950 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2951 VM_ASSERT_EMT(pVM);
2952
2953 /*
2954 * Validate input - we trust the caller.
2955 */
2956 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2957 Assert(cb);
2958 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2959
2960 /*
2961 * Register the rom.
2962 */
2963 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2964
2965 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2966 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2967 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2968
2969 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2970}
2971
2972
2973/**
2974 * Notification about a successful memory deregistration or reservation.
2975 *
2976 * @param pVM VM Handle.
2977 * @param GCPhys Start physical address.
2978 * @param cb The size of the range.
2979 */
2980REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2981{
2982 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2983 VM_ASSERT_EMT(pVM);
2984
2985 /*
2986 * Validate input - we trust the caller.
2987 */
2988 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2989 Assert(cb);
2990 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2991
2992 /*
2993 * Unassigning the memory.
2994 */
2995 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2996
2997 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2998 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2999 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3000
3001 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3002}
3003
3004
3005/**
3006 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3007 *
3008 * @param pVM VM Handle.
3009 * @param enmType Handler type.
3010 * @param GCPhys Handler range address.
3011 * @param cb Size of the handler range.
3012 * @param fHasHCHandler Set if the handler has a HC callback function.
3013 *
3014 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3015 * Handler memory type to memory which has no HC handler.
3016 */
3017static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3018{
3019 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3020 enmType, GCPhys, cb, fHasHCHandler));
3021
3022 VM_ASSERT_EMT(pVM);
3023 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3024 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3025
3026
3027 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3028
3029 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3030 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3031 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3032 else if (fHasHCHandler)
3033 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3034 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3035
3036 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3037}
3038
3039/**
3040 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3041 *
3042 * @param pVM VM Handle.
3043 * @param enmType Handler type.
3044 * @param GCPhys Handler range address.
3045 * @param cb Size of the handler range.
3046 * @param fHasHCHandler Set if the handler has a HC callback function.
3047 *
3048 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3049 * Handler memory type to memory which has no HC handler.
3050 */
3051REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3052{
3053 REMR3ReplayHandlerNotifications(pVM);
3054
3055 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3056}
3057
3058/**
3059 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3060 *
3061 * @param pVM VM Handle.
3062 * @param enmType Handler type.
3063 * @param GCPhys Handler range address.
3064 * @param cb Size of the handler range.
3065 * @param fHasHCHandler Set if the handler has a HC callback function.
3066 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3067 */
3068static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3069{
3070 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3071 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3072 VM_ASSERT_EMT(pVM);
3073
3074
3075 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3076
3077 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3078 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3079 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3080 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3081 else if (fHasHCHandler)
3082 {
3083 if (!fRestoreAsRAM)
3084 {
3085 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3086 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3087 }
3088 else
3089 {
3090 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3091 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3092 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3093 }
3094 }
3095 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3096
3097 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3098}
3099
3100/**
3101 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3102 *
3103 * @param pVM VM Handle.
3104 * @param enmType Handler type.
3105 * @param GCPhys Handler range address.
3106 * @param cb Size of the handler range.
3107 * @param fHasHCHandler Set if the handler has a HC callback function.
3108 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3109 */
3110REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3111{
3112 REMR3ReplayHandlerNotifications(pVM);
3113 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3114}
3115
3116
3117/**
3118 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3119 *
3120 * @param pVM VM Handle.
3121 * @param enmType Handler type.
3122 * @param GCPhysOld Old handler range address.
3123 * @param GCPhysNew New handler range address.
3124 * @param cb Size of the handler range.
3125 * @param fHasHCHandler Set if the handler has a HC callback function.
3126 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3127 */
3128static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3129{
3130 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3131 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3132 VM_ASSERT_EMT(pVM);
3133 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3134
3135 if (fHasHCHandler)
3136 {
3137 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3138
3139 /*
3140 * Reset the old page.
3141 */
3142 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3143 if (!fRestoreAsRAM)
3144 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3145 else
3146 {
3147 /* This is not perfect, but it'll do for PD monitoring... */
3148 Assert(cb == PAGE_SIZE);
3149 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3150 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3151 }
3152
3153 /*
3154 * Update the new page.
3155 */
3156 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3157 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3158 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3159 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3160
3161 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3162 }
3163}
3164
3165/**
3166 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3167 *
3168 * @param pVM VM Handle.
3169 * @param enmType Handler type.
3170 * @param GCPhysOld Old handler range address.
3171 * @param GCPhysNew New handler range address.
3172 * @param cb Size of the handler range.
3173 * @param fHasHCHandler Set if the handler has a HC callback function.
3174 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3175 */
3176REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3177{
3178 REMR3ReplayHandlerNotifications(pVM);
3179
3180 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3181}
3182
3183/**
3184 * Checks if we're handling access to this page or not.
3185 *
3186 * @returns true if we're trapping access.
3187 * @returns false if we aren't.
3188 * @param pVM The VM handle.
3189 * @param GCPhys The physical address.
3190 *
3191 * @remark This function will only work correctly in VBOX_STRICT builds!
3192 */
3193REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3194{
3195#ifdef VBOX_STRICT
3196 unsigned long off;
3197 REMR3ReplayHandlerNotifications(pVM);
3198
3199 off = get_phys_page_offset(GCPhys);
3200 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3201 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3202 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3203#else
3204 return false;
3205#endif
3206}
3207
3208
3209/**
3210 * Deals with a rare case in get_phys_addr_code where the code
3211 * is being monitored.
3212 *
3213 * It could also be an MMIO page, in which case we will raise a fatal error.
3214 *
3215 * @returns The physical address corresponding to addr.
3216 * @param env The cpu environment.
3217 * @param addr The virtual address.
3218 * @param pTLBEntry The TLB entry.
3219 */
3220target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3221 target_ulong addr,
3222 CPUTLBEntry* pTLBEntry,
3223 target_phys_addr_t ioTLBEntry)
3224{
3225 PVM pVM = env->pVM;
3226
3227 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3228 {
3229 /* If code memory is being monitored, appropriate IOTLB entry will have
3230 handler IO type, and addend will provide real physical address, no
3231 matter if we store VA in TLB or not, as handlers are always passed PA */
3232 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3233 return ret;
3234 }
3235 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3236 "*** handlers\n",
3237 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3238 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3239 LogRel(("*** mmio\n"));
3240 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3241 LogRel(("*** phys\n"));
3242 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3243 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3244 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3245 AssertFatalFailed();
3246}
3247
3248/**
3249 * Read guest RAM and ROM.
3250 *
3251 * @param SrcGCPhys The source address (guest physical).
3252 * @param pvDst The destination address.
3253 * @param cb Number of bytes
3254 */
3255void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3256{
3257 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3258 VBOX_CHECK_ADDR(SrcGCPhys);
3259 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3260#ifdef VBOX_DEBUG_PHYS
3261 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3262#endif
3263 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3264}
3265
3266
3267/**
3268 * Read guest RAM and ROM, unsigned 8-bit.
3269 *
3270 * @param SrcGCPhys The source address (guest physical).
3271 */
3272RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3273{
3274 uint8_t val;
3275 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3276 VBOX_CHECK_ADDR(SrcGCPhys);
3277 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3278 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3279#ifdef VBOX_DEBUG_PHYS
3280 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3281#endif
3282 return val;
3283}
3284
3285
3286/**
3287 * Read guest RAM and ROM, signed 8-bit.
3288 *
3289 * @param SrcGCPhys The source address (guest physical).
3290 */
3291RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3292{
3293 int8_t val;
3294 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3295 VBOX_CHECK_ADDR(SrcGCPhys);
3296 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3297 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3298#ifdef VBOX_DEBUG_PHYS
3299 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3300#endif
3301 return val;
3302}
3303
3304
3305/**
3306 * Read guest RAM and ROM, unsigned 16-bit.
3307 *
3308 * @param SrcGCPhys The source address (guest physical).
3309 */
3310RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3311{
3312 uint16_t val;
3313 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3314 VBOX_CHECK_ADDR(SrcGCPhys);
3315 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3316 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3317#ifdef VBOX_DEBUG_PHYS
3318 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3319#endif
3320 return val;
3321}
3322
3323
3324/**
3325 * Read guest RAM and ROM, signed 16-bit.
3326 *
3327 * @param SrcGCPhys The source address (guest physical).
3328 */
3329RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3330{
3331 int16_t val;
3332 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3333 VBOX_CHECK_ADDR(SrcGCPhys);
3334 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3335 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3336#ifdef VBOX_DEBUG_PHYS
3337 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3338#endif
3339 return val;
3340}
3341
3342
3343/**
3344 * Read guest RAM and ROM, unsigned 32-bit.
3345 *
3346 * @param SrcGCPhys The source address (guest physical).
3347 */
3348RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3349{
3350 uint32_t val;
3351 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3352 VBOX_CHECK_ADDR(SrcGCPhys);
3353 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3354 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3355#ifdef VBOX_DEBUG_PHYS
3356 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3357#endif
3358 return val;
3359}
3360
3361
3362/**
3363 * Read guest RAM and ROM, signed 32-bit.
3364 *
3365 * @param SrcGCPhys The source address (guest physical).
3366 */
3367RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3368{
3369 int32_t val;
3370 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3371 VBOX_CHECK_ADDR(SrcGCPhys);
3372 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3373 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3374#ifdef VBOX_DEBUG_PHYS
3375 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3376#endif
3377 return val;
3378}
3379
3380
3381/**
3382 * Read guest RAM and ROM, unsigned 64-bit.
3383 *
3384 * @param SrcGCPhys The source address (guest physical).
3385 */
3386uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3387{
3388 uint64_t val;
3389 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3390 VBOX_CHECK_ADDR(SrcGCPhys);
3391 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3392 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3393#ifdef VBOX_DEBUG_PHYS
3394 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3395#endif
3396 return val;
3397}
3398
3399
3400/**
3401 * Read guest RAM and ROM, signed 64-bit.
3402 *
3403 * @param SrcGCPhys The source address (guest physical).
3404 */
3405int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3406{
3407 int64_t val;
3408 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3409 VBOX_CHECK_ADDR(SrcGCPhys);
3410 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3411 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3412#ifdef VBOX_DEBUG_PHYS
3413 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3414#endif
3415 return val;
3416}
3417
3418
3419/**
3420 * Write guest RAM.
3421 *
3422 * @param DstGCPhys The destination address (guest physical).
3423 * @param pvSrc The source address.
3424 * @param cb Number of bytes to write
3425 */
3426void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3427{
3428 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3429 VBOX_CHECK_ADDR(DstGCPhys);
3430 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3431 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3432#ifdef VBOX_DEBUG_PHYS
3433 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3434#endif
3435}
3436
3437
3438/**
3439 * Write guest RAM, unsigned 8-bit.
3440 *
3441 * @param DstGCPhys The destination address (guest physical).
3442 * @param val Value
3443 */
3444void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3445{
3446 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3447 VBOX_CHECK_ADDR(DstGCPhys);
3448 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3449 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3450#ifdef VBOX_DEBUG_PHYS
3451 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3452#endif
3453}
3454
3455
3456/**
3457 * Write guest RAM, unsigned 8-bit.
3458 *
3459 * @param DstGCPhys The destination address (guest physical).
3460 * @param val Value
3461 */
3462void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3463{
3464 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3465 VBOX_CHECK_ADDR(DstGCPhys);
3466 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3467 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3468#ifdef VBOX_DEBUG_PHYS
3469 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3470#endif
3471}
3472
3473
3474/**
3475 * Write guest RAM, unsigned 32-bit.
3476 *
3477 * @param DstGCPhys The destination address (guest physical).
3478 * @param val Value
3479 */
3480void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3481{
3482 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3483 VBOX_CHECK_ADDR(DstGCPhys);
3484 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3485 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3486#ifdef VBOX_DEBUG_PHYS
3487 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3488#endif
3489}
3490
3491
3492/**
3493 * Write guest RAM, unsigned 64-bit.
3494 *
3495 * @param DstGCPhys The destination address (guest physical).
3496 * @param val Value
3497 */
3498void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3499{
3500 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3501 VBOX_CHECK_ADDR(DstGCPhys);
3502 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3503 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3504#ifdef VBOX_DEBUG_PHYS
3505 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3506#endif
3507}
3508
3509#undef LOG_GROUP
3510#define LOG_GROUP LOG_GROUP_REM_MMIO
3511
3512/** Read MMIO memory. */
3513static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3514{
3515 uint32_t u32 = 0;
3516 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3517 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3518 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3519 return u32;
3520}
3521
3522/** Read MMIO memory. */
3523static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3524{
3525 uint32_t u32 = 0;
3526 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3527 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3528 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3529 return u32;
3530}
3531
3532/** Read MMIO memory. */
3533static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3534{
3535 uint32_t u32 = 0;
3536 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3537 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3538 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3539 return u32;
3540}
3541
3542/** Write to MMIO memory. */
3543static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3544{
3545 int rc;
3546 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3547 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3548 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3549}
3550
3551/** Write to MMIO memory. */
3552static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3553{
3554 int rc;
3555 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3556 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3557 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3558}
3559
3560/** Write to MMIO memory. */
3561static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3562{
3563 int rc;
3564 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3565 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3566 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3567}
3568
3569
3570#undef LOG_GROUP
3571#define LOG_GROUP LOG_GROUP_REM_HANDLER
3572
3573/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3574
3575static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3576{
3577 uint8_t u8;
3578 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3579 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3580 return u8;
3581}
3582
3583static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3584{
3585 uint16_t u16;
3586 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3587 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3588 return u16;
3589}
3590
3591static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3592{
3593 uint32_t u32;
3594 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3595 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3596 return u32;
3597}
3598
/** Handler-memory write, 8-bit: forward to PGM for access-handler dispatch.
 *  Note: passes &u32 and relies on little-endian layout for the low byte. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3604
/** Handler-memory write, 16-bit: forward to PGM for access-handler dispatch.
 *  Note: passes &u32 and relies on little-endian layout for the low word. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3610
/** Handler-memory write, 32-bit: forward to PGM for access-handler dispatch. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3616
3617/* -+- disassembly -+- */
3618
3619#undef LOG_GROUP
3620#define LOG_GROUP LOG_GROUP_REM_DISAS
3621
3622
3623/**
3624 * Enables or disables singled stepped disassembly.
3625 *
3626 * @returns VBox status code.
3627 * @param pVM VM handle.
3628 * @param fEnable To enable set this flag, to disable clear it.
3629 */
3630static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3631{
3632 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3633 VM_ASSERT_EMT(pVM);
3634
3635 if (fEnable)
3636 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3637 else
3638 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3639 return VINF_SUCCESS;
3640}
3641
3642
3643/**
3644 * Enables or disables singled stepped disassembly.
3645 *
3646 * @returns VBox status code.
3647 * @param pVM VM handle.
3648 * @param fEnable To enable set this flag, to disable clear it.
3649 */
3650REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3651{
3652 int rc;
3653
3654 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3655 if (VM_IS_EMT(pVM))
3656 return remR3DisasEnableStepping(pVM, fEnable);
3657
3658 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3659 AssertRC(rc);
3660 return rc;
3661}
3662
3663
3664#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3665/**
3666 * External Debugger Command: .remstep [on|off|1|0]
3667 */
3668static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3669{
3670 int rc;
3671
3672 if (cArgs == 0)
3673 /*
3674 * Print the current status.
3675 */
3676 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3677 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3678 else
3679 {
3680 /*
3681 * Convert the argument and change the mode.
3682 */
3683 bool fEnable;
3684 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3685 if (RT_SUCCESS(rc))
3686 {
3687 rc = REMR3DisasEnableStepping(pVM, fEnable);
3688 if (RT_SUCCESS(rc))
3689 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3690 else
3691 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3692 }
3693 else
3694 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3695 }
3696 return rc;
3697}
3698#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3699
3700
3701/**
3702 * Disassembles one instruction and prints it to the log.
3703 *
3704 * @returns Success indicator.
3705 * @param env Pointer to the recompiler CPU structure.
3706 * @param f32BitCode Indicates that whether or not the code should
3707 * be disassembled as 16 or 32 bit. If -1 the CS
3708 * selector will be inspected.
3709 * @param pszPrefix
3710 */
3711bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3712{
3713 PVM pVM = env->pVM;
3714 const bool fLog = LogIsEnabled();
3715 const bool fLog2 = LogIs2Enabled();
3716 int rc = VINF_SUCCESS;
3717
3718 /*
3719 * Don't bother if there ain't any log output to do.
3720 */
3721 if (!fLog && !fLog2)
3722 return true;
3723
3724 /*
3725 * Update the state so DBGF reads the correct register values.
3726 */
3727 remR3StateUpdate(pVM, env->pVCpu);
3728
3729 /*
3730 * Log registers if requested.
3731 */
3732 if (!fLog2)
3733 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3734
3735 /*
3736 * Disassemble to log.
3737 */
3738 if (fLog)
3739 {
3740 PVMCPU pVCpu = VMMGetCpu(pVM);
3741 char szBuf[256];
3742 szBuf[0] = '\0';
3743 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3744 pVCpu->idCpu,
3745 0, /* Sel */
3746 0, /* GCPtr */
3747 DBGF_DISAS_FLAGS_CURRENT_GUEST
3748 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3749 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3750 szBuf,
3751 sizeof(szBuf),
3752 NULL);
3753 if (RT_FAILURE(rc))
3754 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3755 if (pszPrefix && *pszPrefix)
3756 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3757 else
3758 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3759 }
3760
3761 return RT_SUCCESS(rc);
3762}
3763
3764
3765/**
3766 * Disassemble recompiled code.
3767 *
3768 * @param phFileIgnored Ignored, logfile usually.
3769 * @param pvCode Pointer to the code block.
3770 * @param cb Size of the code block.
3771 */
3772void disas(FILE *phFile, void *pvCode, unsigned long cb)
3773{
3774#ifdef DEBUG_TMP_LOGGING
3775# define DISAS_PRINTF(x...) fprintf(phFile, x)
3776#else
3777# define DISAS_PRINTF(x...) RTLogPrintf(x)
3778 if (LogIs2Enabled())
3779#endif
3780 {
3781 unsigned off = 0;
3782 char szOutput[256];
3783 DISCPUSTATE Cpu;
3784
3785 memset(&Cpu, 0, sizeof(Cpu));
3786#ifdef RT_ARCH_X86
3787 Cpu.mode = CPUMODE_32BIT;
3788#else
3789 Cpu.mode = CPUMODE_64BIT;
3790#endif
3791
3792 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3793 while (off < cb)
3794 {
3795 uint32_t cbInstr;
3796 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3797 DISAS_PRINTF("%s", szOutput);
3798 else
3799 {
3800 DISAS_PRINTF("disas error\n");
3801 cbInstr = 1;
3802#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
3803 break;
3804#endif
3805 }
3806 off += cbInstr;
3807 }
3808 }
3809
3810#undef DISAS_PRINTF
3811}
3812
3813
3814/**
3815 * Disassemble guest code.
3816 *
3817 * @param phFileIgnored Ignored, logfile usually.
3818 * @param uCode The guest address of the code to disassemble. (flat?)
3819 * @param cb Number of bytes to disassemble.
3820 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3821 */
3822void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3823{
3824#ifdef DEBUG_TMP_LOGGING
3825# define DISAS_PRINTF(x...) fprintf(phFile, x)
3826#else
3827# define DISAS_PRINTF(x...) RTLogPrintf(x)
3828 if (LogIs2Enabled())
3829#endif
3830 {
3831 PVM pVM = cpu_single_env->pVM;
3832 PVMCPU pVCpu = cpu_single_env->pVCpu;
3833 RTSEL cs;
3834 RTGCUINTPTR eip;
3835
3836 Assert(pVCpu);
3837
3838 /*
3839 * Update the state so DBGF reads the correct register values (flags).
3840 */
3841 remR3StateUpdate(pVM, pVCpu);
3842
3843 /*
3844 * Do the disassembling.
3845 */
3846 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3847 cs = cpu_single_env->segs[R_CS].selector;
3848 eip = uCode - cpu_single_env->segs[R_CS].base;
3849 for (;;)
3850 {
3851 char szBuf[256];
3852 uint32_t cbInstr;
3853 int rc = DBGFR3DisasInstrEx(pVM,
3854 pVCpu->idCpu,
3855 cs,
3856 eip,
3857 DBGF_DISAS_FLAGS_DEFAULT_MODE,
3858 szBuf, sizeof(szBuf),
3859 &cbInstr);
3860 if (RT_SUCCESS(rc))
3861 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3862 else
3863 {
3864 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3865 cbInstr = 1;
3866 }
3867
3868 /* next */
3869 if (cb <= cbInstr)
3870 break;
3871 cb -= cbInstr;
3872 uCode += cbInstr;
3873 eip += cbInstr;
3874 }
3875 }
3876#undef DISAS_PRINTF
3877}
3878
3879
3880/**
3881 * Looks up a guest symbol.
3882 *
3883 * @returns Pointer to symbol name. This is a static buffer.
3884 * @param orig_addr The address in question.
3885 */
3886const char *lookup_symbol(target_ulong orig_addr)
3887{
3888 PVM pVM = cpu_single_env->pVM;
3889 RTGCINTPTR off = 0;
3890 RTDBGSYMBOL Sym;
3891 DBGFADDRESS Addr;
3892
3893 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
3894 if (RT_SUCCESS(rc))
3895 {
3896 static char szSym[sizeof(Sym.szName) + 48];
3897 if (!off)
3898 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3899 else if (off > 0)
3900 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3901 else
3902 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3903 return szSym;
3904 }
3905 return "<N/A>";
3906}
3907
3908
3909#undef LOG_GROUP
3910#define LOG_GROUP LOG_GROUP_REM
3911
3912
3913/* -+- FF notifications -+- */
3914
3915
3916/**
3917 * Notification about a pending interrupt.
3918 *
3919 * @param pVM VM Handle.
3920 * @param pVCpu VMCPU Handle.
3921 * @param u8Interrupt Interrupt
3922 * @thread The emulation thread.
3923 */
3924REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
3925{
3926 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3927 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3928}
3929
3930/**
3931 * Notification about a pending interrupt.
3932 *
3933 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3934 * @param pVM VM Handle.
3935 * @param pVCpu VMCPU Handle.
3936 * @thread The emulation thread.
3937 */
3938REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
3939{
3940 return pVM->rem.s.u32PendingInterrupt;
3941}
3942
3943/**
3944 * Notification about the interrupt FF being set.
3945 *
3946 * @param pVM VM Handle.
3947 * @param pVCpu VMCPU Handle.
3948 * @thread The emulation thread.
3949 */
3950REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3951{
3952 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3953 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3954 if (pVM->rem.s.fInREM)
3955 {
3956 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3957 CPU_INTERRUPT_EXTERNAL_HARD);
3958 }
3959}
3960
3961
3962/**
3963 * Notification about the interrupt FF being set.
3964 *
3965 * @param pVM VM Handle.
3966 * @param pVCpu VMCPU Handle.
3967 * @thread Any.
3968 */
3969REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3970{
3971 LogFlow(("REMR3NotifyInterruptClear:\n"));
3972 if (pVM->rem.s.fInREM)
3973 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3974}
3975
3976
3977/**
3978 * Notification about pending timer(s).
3979 *
3980 * @param pVM VM Handle.
3981 * @param pVCpuDst The target cpu for this notification.
3982 * TM will not broadcast pending timer events, but use
3983 * a dedicated EMT for them. So, only interrupt REM
3984 * execution if the given CPU is executing in REM.
3985 * @thread Any.
3986 */
3987REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3988{
3989#ifndef DEBUG_bird
3990 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3991#endif
3992 if (pVM->rem.s.fInREM)
3993 {
3994 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3995 {
3996 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3997 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3998 CPU_INTERRUPT_EXTERNAL_TIMER);
3999 }
4000 else
4001 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4002 }
4003 else
4004 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4005}
4006
4007
4008/**
4009 * Notification about pending DMA transfers.
4010 *
4011 * @param pVM VM Handle.
4012 * @thread Any.
4013 */
4014REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4015{
4016 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4017 if (pVM->rem.s.fInREM)
4018 {
4019 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4020 CPU_INTERRUPT_EXTERNAL_DMA);
4021 }
4022}
4023
4024
4025/**
4026 * Notification about pending timer(s).
4027 *
4028 * @param pVM VM Handle.
4029 * @thread Any.
4030 */
4031REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4032{
4033 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4034 if (pVM->rem.s.fInREM)
4035 {
4036 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4037 CPU_INTERRUPT_EXTERNAL_EXIT);
4038 }
4039}
4040
4041
4042/**
4043 * Notification about pending FF set by an external thread.
4044 *
4045 * @param pVM VM handle.
4046 * @thread Any.
4047 */
4048REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4049{
4050 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4051 if (pVM->rem.s.fInREM)
4052 {
4053 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4054 CPU_INTERRUPT_EXTERNAL_EXIT);
4055 }
4056}
4057
4058
4059#ifdef VBOX_WITH_STATISTICS
4060void remR3ProfileStart(int statcode)
4061{
4062 STAMPROFILEADV *pStat;
4063 switch(statcode)
4064 {
4065 case STATS_EMULATE_SINGLE_INSTR:
4066 pStat = &gStatExecuteSingleInstr;
4067 break;
4068 case STATS_QEMU_COMPILATION:
4069 pStat = &gStatCompilationQEmu;
4070 break;
4071 case STATS_QEMU_RUN_EMULATED_CODE:
4072 pStat = &gStatRunCodeQEmu;
4073 break;
4074 case STATS_QEMU_TOTAL:
4075 pStat = &gStatTotalTimeQEmu;
4076 break;
4077 case STATS_QEMU_RUN_TIMERS:
4078 pStat = &gStatTimers;
4079 break;
4080 case STATS_TLB_LOOKUP:
4081 pStat= &gStatTBLookup;
4082 break;
4083 case STATS_IRQ_HANDLING:
4084 pStat= &gStatIRQ;
4085 break;
4086 case STATS_RAW_CHECK:
4087 pStat = &gStatRawCheck;
4088 break;
4089
4090 default:
4091 AssertMsgFailed(("unknown stat %d\n", statcode));
4092 return;
4093 }
4094 STAM_PROFILE_ADV_START(pStat, a);
4095}
4096
4097
4098void remR3ProfileStop(int statcode)
4099{
4100 STAMPROFILEADV *pStat;
4101 switch(statcode)
4102 {
4103 case STATS_EMULATE_SINGLE_INSTR:
4104 pStat = &gStatExecuteSingleInstr;
4105 break;
4106 case STATS_QEMU_COMPILATION:
4107 pStat = &gStatCompilationQEmu;
4108 break;
4109 case STATS_QEMU_RUN_EMULATED_CODE:
4110 pStat = &gStatRunCodeQEmu;
4111 break;
4112 case STATS_QEMU_TOTAL:
4113 pStat = &gStatTotalTimeQEmu;
4114 break;
4115 case STATS_QEMU_RUN_TIMERS:
4116 pStat = &gStatTimers;
4117 break;
4118 case STATS_TLB_LOOKUP:
4119 pStat= &gStatTBLookup;
4120 break;
4121 case STATS_IRQ_HANDLING:
4122 pStat= &gStatIRQ;
4123 break;
4124 case STATS_RAW_CHECK:
4125 pStat = &gStatRawCheck;
4126 break;
4127 default:
4128 AssertMsgFailed(("unknown stat %d\n", statcode));
4129 return;
4130 }
4131 STAM_PROFILE_ADV_STOP(pStat, a);
4132}
4133#endif
4134
4135/**
4136 * Raise an RC, force rem exit.
4137 *
4138 * @param pVM VM handle.
4139 * @param rc The rc.
4140 */
4141void remR3RaiseRC(PVM pVM, int rc)
4142{
4143 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4144 Assert(pVM->rem.s.fInREM);
4145 VM_ASSERT_EMT(pVM);
4146 pVM->rem.s.rc = rc;
4147 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4148}
4149
4150
4151/* -+- timers -+- */
4152
4153uint64_t cpu_get_tsc(CPUX86State *env)
4154{
4155 STAM_COUNTER_INC(&gStatCpuGetTSC);
4156 return TMCpuTickGet(env->pVCpu);
4157}
4158
4159
4160/* -+- interrupts -+- */
4161
4162void cpu_set_ferr(CPUX86State *env)
4163{
4164 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4165 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4166}
4167
/**
 * Fetches the next pending hardware interrupt vector for the recompiler.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the previously recorded vector (see REMR3NotifyPendingInterrupt). */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep CPU_INTERRUPT_HARD set while more APIC/PIC interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4201
4202
4203/* -+- local apic -+- */
4204
4205#if 0 /* CPUMSetGuestMsr does this now. */
4206void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4207{
4208 int rc = PDMApicSetBase(env->pVM, val);
4209 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4210}
4211#endif
4212
4213uint64_t cpu_get_apic_base(CPUX86State *env)
4214{
4215 uint64_t u64;
4216 int rc = PDMApicGetBase(env->pVM, &u64);
4217 if (RT_SUCCESS(rc))
4218 {
4219 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4220 return u64;
4221 }
4222 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4223 return 0;
4224}
4225
4226void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4227{
4228 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4229 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4230}
4231
4232uint8_t cpu_get_apic_tpr(CPUX86State *env)
4233{
4234 uint8_t u8;
4235 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4236 if (RT_SUCCESS(rc))
4237 {
4238 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4239 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4240 }
4241 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4242 return 0;
4243}
4244
4245/**
4246 * Read an MSR.
4247 *
4248 * @retval 0 success.
4249 * @retval -1 failure, raise \#GP(0).
4250 * @param env The cpu state.
4251 * @param idMsr The MSR to read.
4252 * @param puValue Where to return the value.
4253 */
4254int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4255{
4256 Assert(env->pVCpu);
4257 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4258}
4259
4260/**
4261 * Write to an MSR.
4262 *
4263 * @retval 0 success.
4264 * @retval -1 failure, raise \#GP(0).
4265 * @param env The cpu state.
4266 * @param idMsr The MSR to read.
4267 * @param puValue Where to return the value.
4268 */
4269int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4270{
4271 Assert(env->pVCpu);
4272 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4273}
4274
4275/* -+- I/O Ports -+- */
4276
4277#undef LOG_GROUP
4278#define LOG_GROUP LOG_GROUP_REM_IOPORT
4279
4280void cpu_outb(CPUState *env, int addr, int val)
4281{
4282 int rc;
4283
4284 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4285 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4286
4287 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4288 if (RT_LIKELY(rc == VINF_SUCCESS))
4289 return;
4290 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4291 {
4292 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4293 remR3RaiseRC(env->pVM, rc);
4294 return;
4295 }
4296 remAbort(rc, __FUNCTION__);
4297}
4298
4299void cpu_outw(CPUState *env, int addr, int val)
4300{
4301 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4302 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4303 if (RT_LIKELY(rc == VINF_SUCCESS))
4304 return;
4305 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4306 {
4307 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4308 remR3RaiseRC(env->pVM, rc);
4309 return;
4310 }
4311 remAbort(rc, __FUNCTION__);
4312}
4313
4314void cpu_outl(CPUState *env, int addr, int val)
4315{
4316 int rc;
4317 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4318 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4319 if (RT_LIKELY(rc == VINF_SUCCESS))
4320 return;
4321 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4322 {
4323 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4324 remR3RaiseRC(env->pVM, rc);
4325 return;
4326 }
4327 remAbort(rc, __FUNCTION__);
4328}
4329
4330int cpu_inb(CPUState *env, int addr)
4331{
4332 uint32_t u32 = 0;
4333 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4334 if (RT_LIKELY(rc == VINF_SUCCESS))
4335 {
4336 if (/*addr != 0x61 && */addr != 0x71)
4337 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4338 return (int)u32;
4339 }
4340 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4341 {
4342 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4343 remR3RaiseRC(env->pVM, rc);
4344 return (int)u32;
4345 }
4346 remAbort(rc, __FUNCTION__);
4347 return 0xff;
4348}
4349
4350int cpu_inw(CPUState *env, int addr)
4351{
4352 uint32_t u32 = 0;
4353 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4354 if (RT_LIKELY(rc == VINF_SUCCESS))
4355 {
4356 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4357 return (int)u32;
4358 }
4359 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4360 {
4361 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4362 remR3RaiseRC(env->pVM, rc);
4363 return (int)u32;
4364 }
4365 remAbort(rc, __FUNCTION__);
4366 return 0xffff;
4367}
4368
4369int cpu_inl(CPUState *env, int addr)
4370{
4371 uint32_t u32 = 0;
4372 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4373 if (RT_LIKELY(rc == VINF_SUCCESS))
4374 {
4375//if (addr==0x01f0 && u32 == 0x6b6d)
4376// loglevel = ~0;
4377 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4378 return (int)u32;
4379 }
4380 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4381 {
4382 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4383 remR3RaiseRC(env->pVM, rc);
4384 return (int)u32;
4385 }
4386 remAbort(rc, __FUNCTION__);
4387 return 0xffffffff;
4388}
4389
4390#undef LOG_GROUP
4391#define LOG_GROUP LOG_GROUP_REM
4392
4393
4394/* -+- helpers and misc other interfaces -+- */
4395
4396/**
4397 * Perform the CPUID instruction.
4398 *
4399 * ASMCpuId cannot be invoked from some source files where this is used because of global
4400 * register allocations.
4401 *
4402 * @param env Pointer to the recompiler CPU structure.
4403 * @param uOperator CPUID operation (eax).
4404 * @param pvEAX Where to store eax.
4405 * @param pvEBX Where to store ebx.
4406 * @param pvECX Where to store ecx.
4407 * @param pvEDX Where to store edx.
4408 */
4409void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4410{
4411 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4412}
4413
4414
4415#if 0 /* not used */
4416/**
4417 * Interface for qemu hardware to report back fatal errors.
4418 */
4419void hw_error(const char *pszFormat, ...)
4420{
4421 /*
4422 * Bitch about it.
4423 */
4424 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4425 * this in my Odin32 tree at home! */
4426 va_list args;
4427 va_start(args, pszFormat);
4428 RTLogPrintf("fatal error in virtual hardware:");
4429 RTLogPrintfV(pszFormat, args);
4430 va_end(args);
4431 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4432
4433 /*
4434 * If we're in REM context we'll sync back the state before 'jumping' to
4435 * the EMs failure handling.
4436 */
4437 PVM pVM = cpu_single_env->pVM;
4438 if (pVM->rem.s.fInREM)
4439 REMR3StateBack(pVM);
4440 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4441 AssertMsgFailed(("EMR3FatalError returned!\n"));
4442}
4443#endif
4444
4445/**
4446 * Interface for the qemu cpu to report unhandled situation
4447 * raising a fatal VM error.
4448 */
4449void cpu_abort(CPUState *env, const char *pszFormat, ...)
4450{
4451 va_list va;
4452 PVM pVM;
4453 PVMCPU pVCpu;
4454 char szMsg[256];
4455
4456 /*
4457 * Bitch about it.
4458 */
4459 RTLogFlags(NULL, "nodisabled nobuffered");
4460 RTLogFlush(NULL);
4461
4462 va_start(va, pszFormat);
4463#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4464 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4465 unsigned cArgs = 0;
4466 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4467 const char *psz = strchr(pszFormat, '%');
4468 while (psz && cArgs < 6)
4469 {
4470 auArgs[cArgs++] = va_arg(va, uintptr_t);
4471 psz = strchr(psz + 1, '%');
4472 }
4473 switch (cArgs)
4474 {
4475 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4476 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4477 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4478 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4479 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4480 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4481 default:
4482 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4483 }
4484#else
4485 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4486#endif
4487 va_end(va);
4488
4489 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4490 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4491
4492 /*
4493 * If we're in REM context we'll sync back the state before 'jumping' to
4494 * the EMs failure handling.
4495 */
4496 pVM = cpu_single_env->pVM;
4497 pVCpu = cpu_single_env->pVCpu;
4498 Assert(pVCpu);
4499
4500 if (pVM->rem.s.fInREM)
4501 REMR3StateBack(pVM, pVCpu);
4502 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4503 AssertMsgFailed(("EMR3FatalError returned!\n"));
4504}
4505
4506
4507/**
4508 * Aborts the VM.
4509 *
4510 * @param rc VBox error code.
4511 * @param pszTip Hint about why/when this happened.
4512 */
4513void remAbort(int rc, const char *pszTip)
4514{
4515 PVM pVM;
4516 PVMCPU pVCpu;
4517
4518 /*
4519 * Bitch about it.
4520 */
4521 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4522 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4523
4524 /*
4525 * Jump back to where we entered the recompiler.
4526 */
4527 pVM = cpu_single_env->pVM;
4528 pVCpu = cpu_single_env->pVCpu;
4529 Assert(pVCpu);
4530
4531 if (pVM->rem.s.fInREM)
4532 REMR3StateBack(pVM, pVCpu);
4533
4534 EMR3FatalError(pVCpu, rc);
4535 AssertMsgFailed(("EMR3FatalError returned!\n"));
4536}
4537
4538
4539/**
4540 * Dumps a linux system call.
4541 * @param pVCpu VMCPU handle.
4542 */
4543void remR3DumpLnxSyscall(PVMCPU pVCpu)
4544{
4545 static const char *apsz[] =
4546 {
4547 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4548 "sys_exit",
4549 "sys_fork",
4550 "sys_read",
4551 "sys_write",
4552 "sys_open", /* 5 */
4553 "sys_close",
4554 "sys_waitpid",
4555 "sys_creat",
4556 "sys_link",
4557 "sys_unlink", /* 10 */
4558 "sys_execve",
4559 "sys_chdir",
4560 "sys_time",
4561 "sys_mknod",
4562 "sys_chmod", /* 15 */
4563 "sys_lchown16",
4564 "sys_ni_syscall", /* old break syscall holder */
4565 "sys_stat",
4566 "sys_lseek",
4567 "sys_getpid", /* 20 */
4568 "sys_mount",
4569 "sys_oldumount",
4570 "sys_setuid16",
4571 "sys_getuid16",
4572 "sys_stime", /* 25 */
4573 "sys_ptrace",
4574 "sys_alarm",
4575 "sys_fstat",
4576 "sys_pause",
4577 "sys_utime", /* 30 */
4578 "sys_ni_syscall", /* old stty syscall holder */
4579 "sys_ni_syscall", /* old gtty syscall holder */
4580 "sys_access",
4581 "sys_nice",
4582 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4583 "sys_sync",
4584 "sys_kill",
4585 "sys_rename",
4586 "sys_mkdir",
4587 "sys_rmdir", /* 40 */
4588 "sys_dup",
4589 "sys_pipe",
4590 "sys_times",
4591 "sys_ni_syscall", /* old prof syscall holder */
4592 "sys_brk", /* 45 */
4593 "sys_setgid16",
4594 "sys_getgid16",
4595 "sys_signal",
4596 "sys_geteuid16",
4597 "sys_getegid16", /* 50 */
4598 "sys_acct",
4599 "sys_umount", /* recycled never used phys() */
4600 "sys_ni_syscall", /* old lock syscall holder */
4601 "sys_ioctl",
4602 "sys_fcntl", /* 55 */
4603 "sys_ni_syscall", /* old mpx syscall holder */
4604 "sys_setpgid",
4605 "sys_ni_syscall", /* old ulimit syscall holder */
4606 "sys_olduname",
4607 "sys_umask", /* 60 */
4608 "sys_chroot",
4609 "sys_ustat",
4610 "sys_dup2",
4611 "sys_getppid",
4612 "sys_getpgrp", /* 65 */
4613 "sys_setsid",
4614 "sys_sigaction",
4615 "sys_sgetmask",
4616 "sys_ssetmask",
4617 "sys_setreuid16", /* 70 */
4618 "sys_setregid16",
4619 "sys_sigsuspend",
4620 "sys_sigpending",
4621 "sys_sethostname",
4622 "sys_setrlimit", /* 75 */
4623 "sys_old_getrlimit",
4624 "sys_getrusage",
4625 "sys_gettimeofday",
4626 "sys_settimeofday",
4627 "sys_getgroups16", /* 80 */
4628 "sys_setgroups16",
4629 "old_select",
4630 "sys_symlink",
4631 "sys_lstat",
4632 "sys_readlink", /* 85 */
4633 "sys_uselib",
4634 "sys_swapon",
4635 "sys_reboot",
4636 "old_readdir",
4637 "old_mmap", /* 90 */
4638 "sys_munmap",
4639 "sys_truncate",
4640 "sys_ftruncate",
4641 "sys_fchmod",
4642 "sys_fchown16", /* 95 */
4643 "sys_getpriority",
4644 "sys_setpriority",
4645 "sys_ni_syscall", /* old profil syscall holder */
4646 "sys_statfs",
4647 "sys_fstatfs", /* 100 */
4648 "sys_ioperm",
4649 "sys_socketcall",
4650 "sys_syslog",
4651 "sys_setitimer",
4652 "sys_getitimer", /* 105 */
4653 "sys_newstat",
4654 "sys_newlstat",
4655 "sys_newfstat",
4656 "sys_uname",
4657 "sys_iopl", /* 110 */
4658 "sys_vhangup",
4659 "sys_ni_syscall", /* old "idle" system call */
4660 "sys_vm86old",
4661 "sys_wait4",
4662 "sys_swapoff", /* 115 */
4663 "sys_sysinfo",
4664 "sys_ipc",
4665 "sys_fsync",
4666 "sys_sigreturn",
4667 "sys_clone", /* 120 */
4668 "sys_setdomainname",
4669 "sys_newuname",
4670 "sys_modify_ldt",
4671 "sys_adjtimex",
4672 "sys_mprotect", /* 125 */
4673 "sys_sigprocmask",
4674 "sys_ni_syscall", /* old "create_module" */
4675 "sys_init_module",
4676 "sys_delete_module",
4677 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4678 "sys_quotactl",
4679 "sys_getpgid",
4680 "sys_fchdir",
4681 "sys_bdflush",
4682 "sys_sysfs", /* 135 */
4683 "sys_personality",
4684 "sys_ni_syscall", /* reserved for afs_syscall */
4685 "sys_setfsuid16",
4686 "sys_setfsgid16",
4687 "sys_llseek", /* 140 */
4688 "sys_getdents",
4689 "sys_select",
4690 "sys_flock",
4691 "sys_msync",
4692 "sys_readv", /* 145 */
4693 "sys_writev",
4694 "sys_getsid",
4695 "sys_fdatasync",
4696 "sys_sysctl",
4697 "sys_mlock", /* 150 */
4698 "sys_munlock",
4699 "sys_mlockall",
4700 "sys_munlockall",
4701 "sys_sched_setparam",
4702 "sys_sched_getparam", /* 155 */
4703 "sys_sched_setscheduler",
4704 "sys_sched_getscheduler",
4705 "sys_sched_yield",
4706 "sys_sched_get_priority_max",
4707 "sys_sched_get_priority_min", /* 160 */
4708 "sys_sched_rr_get_interval",
4709 "sys_nanosleep",
4710 "sys_mremap",
4711 "sys_setresuid16",
4712 "sys_getresuid16", /* 165 */
4713 "sys_vm86",
4714 "sys_ni_syscall", /* Old sys_query_module */
4715 "sys_poll",
4716 "sys_nfsservctl",
4717 "sys_setresgid16", /* 170 */
4718 "sys_getresgid16",
4719 "sys_prctl",
4720 "sys_rt_sigreturn",
4721 "sys_rt_sigaction",
4722 "sys_rt_sigprocmask", /* 175 */
4723 "sys_rt_sigpending",
4724 "sys_rt_sigtimedwait",
4725 "sys_rt_sigqueueinfo",
4726 "sys_rt_sigsuspend",
4727 "sys_pread64", /* 180 */
4728 "sys_pwrite64",
4729 "sys_chown16",
4730 "sys_getcwd",
4731 "sys_capget",
4732 "sys_capset", /* 185 */
4733 "sys_sigaltstack",
4734 "sys_sendfile",
4735 "sys_ni_syscall", /* reserved for streams1 */
4736 "sys_ni_syscall", /* reserved for streams2 */
4737 "sys_vfork", /* 190 */
4738 "sys_getrlimit",
4739 "sys_mmap2",
4740 "sys_truncate64",
4741 "sys_ftruncate64",
4742 "sys_stat64", /* 195 */
4743 "sys_lstat64",
4744 "sys_fstat64",
4745 "sys_lchown",
4746 "sys_getuid",
4747 "sys_getgid", /* 200 */
4748 "sys_geteuid",
4749 "sys_getegid",
4750 "sys_setreuid",
4751 "sys_setregid",
4752 "sys_getgroups", /* 205 */
4753 "sys_setgroups",
4754 "sys_fchown",
4755 "sys_setresuid",
4756 "sys_getresuid",
4757 "sys_setresgid", /* 210 */
4758 "sys_getresgid",
4759 "sys_chown",
4760 "sys_setuid",
4761 "sys_setgid",
4762 "sys_setfsuid", /* 215 */
4763 "sys_setfsgid",
4764 "sys_pivot_root",
4765 "sys_mincore",
4766 "sys_madvise",
4767 "sys_getdents64", /* 220 */
4768 "sys_fcntl64",
4769 "sys_ni_syscall", /* reserved for TUX */
4770 "sys_ni_syscall",
4771 "sys_gettid",
4772 "sys_readahead", /* 225 */
4773 "sys_setxattr",
4774 "sys_lsetxattr",
4775 "sys_fsetxattr",
4776 "sys_getxattr",
4777 "sys_lgetxattr", /* 230 */
4778 "sys_fgetxattr",
4779 "sys_listxattr",
4780 "sys_llistxattr",
4781 "sys_flistxattr",
4782 "sys_removexattr", /* 235 */
4783 "sys_lremovexattr",
4784 "sys_fremovexattr",
4785 "sys_tkill",
4786 "sys_sendfile64",
4787 "sys_futex", /* 240 */
4788 "sys_sched_setaffinity",
4789 "sys_sched_getaffinity",
4790 "sys_set_thread_area",
4791 "sys_get_thread_area",
4792 "sys_io_setup", /* 245 */
4793 "sys_io_destroy",
4794 "sys_io_getevents",
4795 "sys_io_submit",
4796 "sys_io_cancel",
4797 "sys_fadvise64", /* 250 */
4798 "sys_ni_syscall",
4799 "sys_exit_group",
4800 "sys_lookup_dcookie",
4801 "sys_epoll_create",
4802 "sys_epoll_ctl", /* 255 */
4803 "sys_epoll_wait",
4804 "sys_remap_file_pages",
4805 "sys_set_tid_address",
4806 "sys_timer_create",
4807 "sys_timer_settime", /* 260 */
4808 "sys_timer_gettime",
4809 "sys_timer_getoverrun",
4810 "sys_timer_delete",
4811 "sys_clock_settime",
4812 "sys_clock_gettime", /* 265 */
4813 "sys_clock_getres",
4814 "sys_clock_nanosleep",
4815 "sys_statfs64",
4816 "sys_fstatfs64",
4817 "sys_tgkill", /* 270 */
4818 "sys_utimes",
4819 "sys_fadvise64_64",
4820 "sys_ni_syscall" /* sys_vserver */
4821 };
4822
4823 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
4824 switch (uEAX)
4825 {
4826 default:
4827 if (uEAX < RT_ELEMENTS(apsz))
4828 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4829 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
4830 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
4831 else
4832 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
4833 break;
4834
4835 }
4836}
4837
4838
4839/**
4840 * Dumps an OpenBSD system call.
4841 * @param pVCpu VMCPU handle.
4842 */
4843void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4844{
4845 static const char *apsz[] =
4846 {
4847 "SYS_syscall", //0
4848 "SYS_exit", //1
4849 "SYS_fork", //2
4850 "SYS_read", //3
4851 "SYS_write", //4
4852 "SYS_open", //5
4853 "SYS_close", //6
4854 "SYS_wait4", //7
4855 "SYS_8",
4856 "SYS_link", //9
4857 "SYS_unlink", //10
4858 "SYS_11",
4859 "SYS_chdir", //12
4860 "SYS_fchdir", //13
4861 "SYS_mknod", //14
4862 "SYS_chmod", //15
4863 "SYS_chown", //16
4864 "SYS_break", //17
4865 "SYS_18",
4866 "SYS_19",
4867 "SYS_getpid", //20
4868 "SYS_mount", //21
4869 "SYS_unmount", //22
4870 "SYS_setuid", //23
4871 "SYS_getuid", //24
4872 "SYS_geteuid", //25
4873 "SYS_ptrace", //26
4874 "SYS_recvmsg", //27
4875 "SYS_sendmsg", //28
4876 "SYS_recvfrom", //29
4877 "SYS_accept", //30
4878 "SYS_getpeername", //31
4879 "SYS_getsockname", //32
4880 "SYS_access", //33
4881 "SYS_chflags", //34
4882 "SYS_fchflags", //35
4883 "SYS_sync", //36
4884 "SYS_kill", //37
4885 "SYS_38",
4886 "SYS_getppid", //39
4887 "SYS_40",
4888 "SYS_dup", //41
4889 "SYS_opipe", //42
4890 "SYS_getegid", //43
4891 "SYS_profil", //44
4892 "SYS_ktrace", //45
4893 "SYS_sigaction", //46
4894 "SYS_getgid", //47
4895 "SYS_sigprocmask", //48
4896 "SYS_getlogin", //49
4897 "SYS_setlogin", //50
4898 "SYS_acct", //51
4899 "SYS_sigpending", //52
4900 "SYS_osigaltstack", //53
4901 "SYS_ioctl", //54
4902 "SYS_reboot", //55
4903 "SYS_revoke", //56
4904 "SYS_symlink", //57
4905 "SYS_readlink", //58
4906 "SYS_execve", //59
4907 "SYS_umask", //60
4908 "SYS_chroot", //61
4909 "SYS_62",
4910 "SYS_63",
4911 "SYS_64",
4912 "SYS_65",
4913 "SYS_vfork", //66
4914 "SYS_67",
4915 "SYS_68",
4916 "SYS_sbrk", //69
4917 "SYS_sstk", //70
4918 "SYS_61",
4919 "SYS_vadvise", //72
4920 "SYS_munmap", //73
4921 "SYS_mprotect", //74
4922 "SYS_madvise", //75
4923 "SYS_76",
4924 "SYS_77",
4925 "SYS_mincore", //78
4926 "SYS_getgroups", //79
4927 "SYS_setgroups", //80
4928 "SYS_getpgrp", //81
4929 "SYS_setpgid", //82
4930 "SYS_setitimer", //83
4931 "SYS_84",
4932 "SYS_85",
4933 "SYS_getitimer", //86
4934 "SYS_87",
4935 "SYS_88",
4936 "SYS_89",
4937 "SYS_dup2", //90
4938 "SYS_91",
4939 "SYS_fcntl", //92
4940 "SYS_select", //93
4941 "SYS_94",
4942 "SYS_fsync", //95
4943 "SYS_setpriority", //96
4944 "SYS_socket", //97
4945 "SYS_connect", //98
4946 "SYS_99",
4947 "SYS_getpriority", //100
4948 "SYS_101",
4949 "SYS_102",
4950 "SYS_sigreturn", //103
4951 "SYS_bind", //104
4952 "SYS_setsockopt", //105
4953 "SYS_listen", //106
4954 "SYS_107",
4955 "SYS_108",
4956 "SYS_109",
4957 "SYS_110",
4958 "SYS_sigsuspend", //111
4959 "SYS_112",
4960 "SYS_113",
4961 "SYS_114",
4962 "SYS_115",
4963 "SYS_gettimeofday", //116
4964 "SYS_getrusage", //117
4965 "SYS_getsockopt", //118
4966 "SYS_119",
4967 "SYS_readv", //120
4968 "SYS_writev", //121
4969 "SYS_settimeofday", //122
4970 "SYS_fchown", //123
4971 "SYS_fchmod", //124
4972 "SYS_125",
4973 "SYS_setreuid", //126
4974 "SYS_setregid", //127
4975 "SYS_rename", //128
4976 "SYS_129",
4977 "SYS_130",
4978 "SYS_flock", //131
4979 "SYS_mkfifo", //132
4980 "SYS_sendto", //133
4981 "SYS_shutdown", //134
4982 "SYS_socketpair", //135
4983 "SYS_mkdir", //136
4984 "SYS_rmdir", //137
4985 "SYS_utimes", //138
4986 "SYS_139",
4987 "SYS_adjtime", //140
4988 "SYS_141",
4989 "SYS_142",
4990 "SYS_143",
4991 "SYS_144",
4992 "SYS_145",
4993 "SYS_146",
4994 "SYS_setsid", //147
4995 "SYS_quotactl", //148
4996 "SYS_149",
4997 "SYS_150",
4998 "SYS_151",
4999 "SYS_152",
5000 "SYS_153",
5001 "SYS_154",
5002 "SYS_nfssvc", //155
5003 "SYS_156",
5004 "SYS_157",
5005 "SYS_158",
5006 "SYS_159",
5007 "SYS_160",
5008 "SYS_getfh", //161
5009 "SYS_162",
5010 "SYS_163",
5011 "SYS_164",
5012 "SYS_sysarch", //165
5013 "SYS_166",
5014 "SYS_167",
5015 "SYS_168",
5016 "SYS_169",
5017 "SYS_170",
5018 "SYS_171",
5019 "SYS_172",
5020 "SYS_pread", //173
5021 "SYS_pwrite", //174
5022 "SYS_175",
5023 "SYS_176",
5024 "SYS_177",
5025 "SYS_178",
5026 "SYS_179",
5027 "SYS_180",
5028 "SYS_setgid", //181
5029 "SYS_setegid", //182
5030 "SYS_seteuid", //183
5031 "SYS_lfs_bmapv", //184
5032 "SYS_lfs_markv", //185
5033 "SYS_lfs_segclean", //186
5034 "SYS_lfs_segwait", //187
5035 "SYS_188",
5036 "SYS_189",
5037 "SYS_190",
5038 "SYS_pathconf", //191
5039 "SYS_fpathconf", //192
5040 "SYS_swapctl", //193
5041 "SYS_getrlimit", //194
5042 "SYS_setrlimit", //195
5043 "SYS_getdirentries", //196
5044 "SYS_mmap", //197
5045 "SYS___syscall", //198
5046 "SYS_lseek", //199
5047 "SYS_truncate", //200
5048 "SYS_ftruncate", //201
5049 "SYS___sysctl", //202
5050 "SYS_mlock", //203
5051 "SYS_munlock", //204
5052 "SYS_205",
5053 "SYS_futimes", //206
5054 "SYS_getpgid", //207
5055 "SYS_xfspioctl", //208
5056 "SYS_209",
5057 "SYS_210",
5058 "SYS_211",
5059 "SYS_212",
5060 "SYS_213",
5061 "SYS_214",
5062 "SYS_215",
5063 "SYS_216",
5064 "SYS_217",
5065 "SYS_218",
5066 "SYS_219",
5067 "SYS_220",
5068 "SYS_semget", //221
5069 "SYS_222",
5070 "SYS_223",
5071 "SYS_224",
5072 "SYS_msgget", //225
5073 "SYS_msgsnd", //226
5074 "SYS_msgrcv", //227
5075 "SYS_shmat", //228
5076 "SYS_229",
5077 "SYS_shmdt", //230
5078 "SYS_231",
5079 "SYS_clock_gettime", //232
5080 "SYS_clock_settime", //233
5081 "SYS_clock_getres", //234
5082 "SYS_235",
5083 "SYS_236",
5084 "SYS_237",
5085 "SYS_238",
5086 "SYS_239",
5087 "SYS_nanosleep", //240
5088 "SYS_241",
5089 "SYS_242",
5090 "SYS_243",
5091 "SYS_244",
5092 "SYS_245",
5093 "SYS_246",
5094 "SYS_247",
5095 "SYS_248",
5096 "SYS_249",
5097 "SYS_minherit", //250
5098 "SYS_rfork", //251
5099 "SYS_poll", //252
5100 "SYS_issetugid", //253
5101 "SYS_lchown", //254
5102 "SYS_getsid", //255
5103 "SYS_msync", //256
5104 "SYS_257",
5105 "SYS_258",
5106 "SYS_259",
5107 "SYS_getfsstat", //260
5108 "SYS_statfs", //261
5109 "SYS_fstatfs", //262
5110 "SYS_pipe", //263
5111 "SYS_fhopen", //264
5112 "SYS_265",
5113 "SYS_fhstatfs", //266
5114 "SYS_preadv", //267
5115 "SYS_pwritev", //268
5116 "SYS_kqueue", //269
5117 "SYS_kevent", //270
5118 "SYS_mlockall", //271
5119 "SYS_munlockall", //272
5120 "SYS_getpeereid", //273
5121 "SYS_274",
5122 "SYS_275",
5123 "SYS_276",
5124 "SYS_277",
5125 "SYS_278",
5126 "SYS_279",
5127 "SYS_280",
5128 "SYS_getresuid", //281
5129 "SYS_setresuid", //282
5130 "SYS_getresgid", //283
5131 "SYS_setresgid", //284
5132 "SYS_285",
5133 "SYS_mquery", //286
5134 "SYS_closefrom", //287
5135 "SYS_sigaltstack", //288
5136 "SYS_shmget", //289
5137 "SYS_semop", //290
5138 "SYS_stat", //291
5139 "SYS_fstat", //292
5140 "SYS_lstat", //293
5141 "SYS_fhstat", //294
5142 "SYS___semctl", //295
5143 "SYS_shmctl", //296
5144 "SYS_msgctl", //297
5145 "SYS_MAXSYSCALL", //298
5146 //299
5147 //300
5148 };
5149 uint32_t uEAX;
5150 if (!LogIsEnabled())
5151 return;
5152 uEAX = CPUMGetGuestEAX(pVCpu);
5153 switch (uEAX)
5154 {
5155 default:
5156 if (uEAX < RT_ELEMENTS(apsz))
5157 {
5158 uint32_t au32Args[8] = {0};
5159 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5160 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5161 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5162 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5163 }
5164 else
5165 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5166 break;
5167 }
5168}
5169
5170
5171#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5172/**
5173 * The Dll main entry point (stub).
5174 */
5175bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5176{
5177 return true;
5178}
5179
/**
 * Minimal memcpy for the no-CRT build.
 *
 * Simple byte-by-byte copy; the regions must not overlap (standard memcpy
 * contract).
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src; /* keep the const qualifier of src instead of discarding it */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5187
5188#endif
5189
/**
 * Intentionally empty stub; presumably invoked by the QEMU core on SMM state
 * changes — the VBox recompiler takes no action here.
 *
 * @param   env     The CPU state (unused).
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use