VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@96104

Last change on this file since 96104 was 95575, checked in by vboxsync, 3 years ago

VMM/IEM: Log the setting of unmasked FPU exceptions. bugref:9898

1/* $Id: IEMAll.cpp 95575 2022-07-08 21:54:36Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow) : Basic enter/exit IEM state info.
65 * - Level 2 (Log2) : ?
66 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
67 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5) : Decoding details.
69 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7) : iret++ execution logging.
71 * - Level 8 (Log8) : Memory writes.
72 * - Level 9 (Log9) : Memory reads.
73 * - Level 10 (Log10): TLBs.
74 * - Level 11 (Log11): Unmasked FPU exceptions.
75 */
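/*
 * Illustrative sketch of the level split above; the call sites and format
 * strings here are made up, only the Log/LogN macros and the double
 * parenthesis calling convention are the ones actually used in this file:
 *
 *      Log(("IEM: raising #GP(0)\n"));                                - level 1, exceptions and such.
 *      Log4(("decode - %04x:%08RX64 %s\n", uCs, uRip, pszMnemonic));  - level 4, mnemonics w/ EIP.
 *      Log8(("write %RGv LB %#x\n", GCPtrMem, cbMem));                - level 8, memory writes.
 *
 * Which levels actually produce output is selected at runtime through the
 * "IEM" log group (see the LOG_GROUP define below).
 */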
76
77/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
78#ifdef _MSC_VER
79# pragma warning(disable:4505)
80#endif
81
82
83/*********************************************************************************************************************************
84* Header Files *
85*********************************************************************************************************************************/
86#define LOG_GROUP LOG_GROUP_IEM
87#define VMCPU_INCL_CPUM_GST_CTX
88#include <VBox/vmm/iem.h>
89#include <VBox/vmm/cpum.h>
90#include <VBox/vmm/apic.h>
91#include <VBox/vmm/pdm.h>
92#include <VBox/vmm/pgm.h>
93#include <VBox/vmm/iom.h>
94#include <VBox/vmm/em.h>
95#include <VBox/vmm/hm.h>
96#include <VBox/vmm/nem.h>
97#include <VBox/vmm/gim.h>
98#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
99# include <VBox/vmm/em.h>
100# include <VBox/vmm/hm_svm.h>
101#endif
102#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
103# include <VBox/vmm/hmvmxinline.h>
104#endif
105#include <VBox/vmm/tm.h>
106#include <VBox/vmm/dbgf.h>
107#include <VBox/vmm/dbgftrace.h>
108#include "IEMInternal.h"
109#include <VBox/vmm/vmcc.h>
110#include <VBox/log.h>
111#include <VBox/err.h>
112#include <VBox/param.h>
113#include <VBox/dis.h>
114#include <VBox/disopcode.h>
115#include <iprt/asm-math.h>
116#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
117# include <iprt/asm-amd64-x86.h>
118#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
119# include <iprt/asm-arm.h>
120#endif
121#include <iprt/assert.h>
122#include <iprt/string.h>
123#include <iprt/x86.h>
124
125#include "IEMInline.h"
126
127
128/*********************************************************************************************************************************
129* Structures and Typedefs *
130*********************************************************************************************************************************/
131/**
132 * CPU exception classes.
133 */
134typedef enum IEMXCPTCLASS
135{
136 IEMXCPTCLASS_BENIGN,
137 IEMXCPTCLASS_CONTRIBUTORY,
138 IEMXCPTCLASS_PAGE_FAULT,
139 IEMXCPTCLASS_DOUBLE_FAULT
140} IEMXCPTCLASS;
141
142
143/*********************************************************************************************************************************
144* Global Variables *
145*********************************************************************************************************************************/
146#if defined(IEM_LOG_MEMORY_WRITES)
147/** What IEM just wrote. */
148uint8_t g_abIemWrote[256];
149/** How much IEM just wrote. */
150size_t g_cbIemWrote;
151#endif
152
153
154/*********************************************************************************************************************************
155* Internal Functions *
156*********************************************************************************************************************************/
157static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
158 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
159
160
161/**
162 * Initializes the decoder state.
163 *
164 * iemReInitDecoder is mostly a copy of this function.
165 *
166 * @param pVCpu The cross context virtual CPU structure of the
167 * calling thread.
168 * @param fBypassHandlers Whether to bypass access handlers.
169 * @param fDisregardLock Whether to disregard the LOCK prefix.
170 */
171DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
172{
173 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
174 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
177 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
178 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
179 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
180 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
181 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
182 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
183
184 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
185 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
186 pVCpu->iem.s.enmCpuMode = enmMode;
187 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
188 pVCpu->iem.s.enmEffAddrMode = enmMode;
189 if (enmMode != IEMMODE_64BIT)
190 {
191 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
192 pVCpu->iem.s.enmEffOpSize = enmMode;
193 }
194 else
195 {
196 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
197 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
198 }
199 pVCpu->iem.s.fPrefixes = 0;
200 pVCpu->iem.s.uRexReg = 0;
201 pVCpu->iem.s.uRexB = 0;
202 pVCpu->iem.s.uRexIndex = 0;
203 pVCpu->iem.s.idxPrefix = 0;
204 pVCpu->iem.s.uVex3rdReg = 0;
205 pVCpu->iem.s.uVexLength = 0;
206 pVCpu->iem.s.fEvexStuff = 0;
207 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
208#ifdef IEM_WITH_CODE_TLB
209 pVCpu->iem.s.pbInstrBuf = NULL;
210 pVCpu->iem.s.offInstrNextByte = 0;
211 pVCpu->iem.s.offCurInstrStart = 0;
212# ifdef VBOX_STRICT
213 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
214 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
215 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
216# endif
217#else
218 pVCpu->iem.s.offOpcode = 0;
219 pVCpu->iem.s.cbOpcode = 0;
220#endif
221 pVCpu->iem.s.offModRm = 0;
222 pVCpu->iem.s.cActiveMappings = 0;
223 pVCpu->iem.s.iNextMapping = 0;
224 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
225 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
226 pVCpu->iem.s.fDisregardLock = fDisregardLock;
227
228#ifdef DBGFTRACE_ENABLED
229 switch (enmMode)
230 {
231 case IEMMODE_64BIT:
232 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
233 break;
234 case IEMMODE_32BIT:
235 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
236 break;
237 case IEMMODE_16BIT:
238 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
239 break;
240 }
241#endif
242}
243
244
245/**
246 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
247 *
248 * This is mostly a copy of iemInitDecoder.
249 *
250 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
251 */
252DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
253{
254 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
259 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
263
264 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
265 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
266 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
267 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
268 pVCpu->iem.s.enmEffAddrMode = enmMode;
269 if (enmMode != IEMMODE_64BIT)
270 {
271 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
272 pVCpu->iem.s.enmEffOpSize = enmMode;
273 }
274 else
275 {
276 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
277 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
278 }
279 pVCpu->iem.s.fPrefixes = 0;
280 pVCpu->iem.s.uRexReg = 0;
281 pVCpu->iem.s.uRexB = 0;
282 pVCpu->iem.s.uRexIndex = 0;
283 pVCpu->iem.s.idxPrefix = 0;
284 pVCpu->iem.s.uVex3rdReg = 0;
285 pVCpu->iem.s.uVexLength = 0;
286 pVCpu->iem.s.fEvexStuff = 0;
287 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
288#ifdef IEM_WITH_CODE_TLB
289 if (pVCpu->iem.s.pbInstrBuf)
290 {
291 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
292 - pVCpu->iem.s.uInstrBufPc;
293 if (off < pVCpu->iem.s.cbInstrBufTotal)
294 {
295 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
296 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
297 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
298 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
299 else
300 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
301 }
302 else
303 {
304 pVCpu->iem.s.pbInstrBuf = NULL;
305 pVCpu->iem.s.offInstrNextByte = 0;
306 pVCpu->iem.s.offCurInstrStart = 0;
307 pVCpu->iem.s.cbInstrBuf = 0;
308 pVCpu->iem.s.cbInstrBufTotal = 0;
309 }
310 }
311 else
312 {
313 pVCpu->iem.s.offInstrNextByte = 0;
314 pVCpu->iem.s.offCurInstrStart = 0;
315 pVCpu->iem.s.cbInstrBuf = 0;
316 pVCpu->iem.s.cbInstrBufTotal = 0;
317 }
318#else
319 pVCpu->iem.s.cbOpcode = 0;
320 pVCpu->iem.s.offOpcode = 0;
321#endif
322 pVCpu->iem.s.offModRm = 0;
323 Assert(pVCpu->iem.s.cActiveMappings == 0);
324 pVCpu->iem.s.iNextMapping = 0;
325 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
326 Assert(pVCpu->iem.s.fBypassHandlers == false);
327
328#ifdef DBGFTRACE_ENABLED
329 switch (enmMode)
330 {
331 case IEMMODE_64BIT:
332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
333 break;
334 case IEMMODE_32BIT:
335 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
336 break;
337 case IEMMODE_16BIT:
338 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
339 break;
340 }
341#endif
342}
343
344
345
346/**
347 * Prefetch opcodes the first time when starting executing.
348 *
349 * @returns Strict VBox status code.
350 * @param pVCpu The cross context virtual CPU structure of the
351 * calling thread.
352 * @param fBypassHandlers Whether to bypass access handlers.
353 * @param fDisregardLock Whether to disregard LOCK prefixes.
354 *
355 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
356 * store them as such.
357 */
358static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
359{
360 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
361
362#ifdef IEM_WITH_CODE_TLB
363 /** @todo Do ITLB lookup here. */
364
365#else /* !IEM_WITH_CODE_TLB */
366
367 /*
368 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
369 *
370 * First translate CS:rIP to a physical address.
371 */
372 uint32_t cbToTryRead;
373 RTGCPTR GCPtrPC;
374 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
375 {
376 cbToTryRead = GUEST_PAGE_SIZE;
377 GCPtrPC = pVCpu->cpum.GstCtx.rip;
378 if (IEM_IS_CANONICAL(GCPtrPC))
379 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
380 else
381 return iemRaiseGeneralProtectionFault0(pVCpu);
382 }
383 else
384 {
385 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
386 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
387 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
388 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
389 else
390 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
391 if (cbToTryRead) { /* likely */ }
392 else /* overflowed */
393 {
394 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
395 cbToTryRead = UINT32_MAX;
396 }
397 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
398 Assert(GCPtrPC <= UINT32_MAX);
399 }
400
401 PGMPTWALK Walk;
402 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
403 if (RT_SUCCESS(rc))
404 Assert(Walk.fSucceeded); /* probable. */
405 else
406 {
407 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
408#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
409 if (Walk.fFailed & PGM_WALKFAIL_EPT)
410 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
411#endif
412 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
413 }
414 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
415 else
416 {
417 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
418#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
419 if (Walk.fFailed & PGM_WALKFAIL_EPT)
420 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
421#endif
422 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
423 }
424 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
425 else
426 {
427 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
428#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
429 if (Walk.fFailed & PGM_WALKFAIL_EPT)
430 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
431#endif
432 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
433 }
434 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
435 /** @todo Check reserved bits and such stuff. PGM is better at doing
436 * that, so do it when implementing the guest virtual address
437 * TLB... */
438
439 /*
440 * Read the bytes at this address.
441 */
442 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
443 if (cbToTryRead > cbLeftOnPage)
444 cbToTryRead = cbLeftOnPage;
445 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
446 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
447
448 if (!pVCpu->iem.s.fBypassHandlers)
449 {
450 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
451 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
452 { /* likely */ }
453 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
454 {
455 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
456 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
457 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
458 }
459 else
460 {
461 Log((RT_SUCCESS(rcStrict)
462 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
463 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
464 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
465 return rcStrict;
466 }
467 }
468 else
469 {
470 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
471 if (RT_SUCCESS(rc))
472 { /* likely */ }
473 else
474 {
475 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
476 GCPtrPC, GCPhys, cbToTryRead, rc));
477 return rc;
478 }
479 }
480 pVCpu->iem.s.cbOpcode = cbToTryRead;
481#endif /* !IEM_WITH_CODE_TLB */
482 return VINF_SUCCESS;
483}
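/*
 * Worked example of the non-TLB path above, for 32-bit protected mode with
 * purely illustrative numbers: with CS.base=0x00010000 and EIP=0x00000ffc,
 * GCPtrPC becomes 0x00010ffc; cbToTryRead is clamped first by the CS limit,
 * then to the 4 bytes left on the 4 KiB page, and finally to
 * sizeof(abOpcode), before the bytes are pulled in with PGMPhysRead (or
 * PGMPhysSimpleReadGCPhys when bypassing access handlers).
 */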
484
485
486/**
487 * Invalidates the IEM TLBs.
488 *
489 * This is called internally as well as by PGM when moving GC mappings.
490 *
491 *
492 * @param pVCpu The cross context virtual CPU structure of the calling
493 * thread.
494 */
495VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
496{
497#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
498 Log10(("IEMTlbInvalidateAll\n"));
499# ifdef IEM_WITH_CODE_TLB
500 pVCpu->iem.s.cbInstrBufTotal = 0;
501 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
502 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
503 { /* very likely */ }
504 else
505 {
506 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
507 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
508 while (i-- > 0)
509 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
510 }
511# endif
512
513# ifdef IEM_WITH_DATA_TLB
514 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
515 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
516 { /* very likely */ }
517 else
518 {
519 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
520 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
521 while (i-- > 0)
522 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
523 }
524# endif
525#else
526 RT_NOREF(pVCpu);
527#endif
528}
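/*
 * Sketch of the revision trick used above: an entry only counts as a hit
 * when its uTag equals the page tag OR'ed with the current uTlbRevision
 * (see IEMTlbInvalidatePage below), i.e. roughly
 *
 *      fHit = pTlb->aEntries[idx].uTag == (uTagNoRev | pTlb->uTlbRevision);
 *
 * so bumping the revision invalidates every entry at once, and only when the
 * revision counter wraps around to zero do we have to zero all the tags.
 * (fHit and uTagNoRev are just illustrative names for this sketch.)
 */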
529
530
531/**
532 * Invalidates a page in the TLBs.
533 *
534 * @param pVCpu The cross context virtual CPU structure of the calling
535 * thread.
536 * @param GCPtr The address of the page to invalidate
537 * @thread EMT(pVCpu)
538 */
539VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
540{
541#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
542 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
543 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
544 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
545 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
546
547# ifdef IEM_WITH_CODE_TLB
548 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
549 {
550 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
551 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
552 pVCpu->iem.s.cbInstrBufTotal = 0;
553 }
554# endif
555
556# ifdef IEM_WITH_DATA_TLB
557 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
558 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
559# endif
560#else
561 NOREF(pVCpu); NOREF(GCPtr);
562#endif
563}
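/*
 * The lookup above is direct mapped and page granular: IEMTLB_CALC_TAG_NO_REV
 * strips the page offset from the linear address and IEMTLB_TAG_TO_INDEX
 * picks the one slot that page can occupy, conceptually something like
 *
 *      uTagNoRev = GCPtr >> GUEST_PAGE_SHIFT;
 *      idx       = uTagNoRev % RT_ELEMENTS(pTlb->aEntries);
 *
 * (illustrative only; the real macros are defined in the IEM internals), so
 * invalidating a page never needs to touch more than one entry per TLB.
 */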
564
565
566#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
567/**
568 * Invalidates both TLBs the slow way following a rollover.
569 *
570 * Worker for IEMTlbInvalidateAllPhysical,
571 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
572 * iemMemMapJmp and others.
573 *
574 * @thread EMT(pVCpu)
575 */
576static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
577{
578 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
579 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
580 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
581
582 unsigned i;
583# ifdef IEM_WITH_CODE_TLB
584 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
585 while (i-- > 0)
586 {
587 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
588 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
589 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
590 }
591# endif
592# ifdef IEM_WITH_DATA_TLB
593 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
594 while (i-- > 0)
595 {
596 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
597 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
598 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
599 }
600# endif
601
602}
603#endif
604
605
606/**
607 * Invalidates the host physical aspects of the IEM TLBs.
608 *
609 * This is called internally as well as by PGM when moving GC mappings.
610 *
611 * @param pVCpu The cross context virtual CPU structure of the calling
612 * thread.
613 * @note Currently not used.
614 */
615VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
616{
617#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
618 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
619 Log10(("IEMTlbInvalidateAllPhysical\n"));
620
621# ifdef IEM_WITH_CODE_TLB
622 pVCpu->iem.s.cbInstrBufTotal = 0;
623# endif
624 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
625 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
626 {
627 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
628 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
629 }
630 else
631 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
632#else
633 NOREF(pVCpu);
634#endif
635}
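/*
 * Same revision idea as the virtual tags, but for the host-physical half of
 * each entry: users of pbMappingR3 first check, roughly,
 *
 *      (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev
 *
 * (see iemOpcodeFetchBytesJmp below), so bumping uTlbPhysRev here lazily
 * forces a PGMPhysIemGCPhys2PtrNoLock revalidation on next use instead of
 * clearing every entry up front.
 */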
636
637
638/**
639 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
640 *
641 * This is called internally as well as by PGM when moving GC mappings.
642 *
643 * @param pVM The cross context VM structure.
644 * @param idCpuCaller The ID of the calling EMT if available to the caller,
645 * otherwise NIL_VMCPUID.
646 *
647 * @remarks Caller holds the PGM lock.
648 */
649VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
653 if (pVCpuCaller)
654 VMCPU_ASSERT_EMT(pVCpuCaller);
655 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
656
657 VMCC_FOR_EACH_VMCPU(pVM)
658 {
659# ifdef IEM_WITH_CODE_TLB
660 if (pVCpuCaller == pVCpu)
661 pVCpu->iem.s.cbInstrBufTotal = 0;
662# endif
663
664 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
665 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
666 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
667 { /* likely */}
668 else if (pVCpuCaller == pVCpu)
669 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
670 else
671 {
672 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
673 continue;
674 }
675 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
676 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
677 }
678 VMCC_FOR_EACH_VMCPU_END(pVM);
679
680#else
681 RT_NOREF(pVM, idCpuCaller);
682#endif
683}
684
685#ifdef IEM_WITH_CODE_TLB
686
687/**
688 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
689 * failure and jumps.
690 *
691 * We end up here for a number of reasons:
692 * - pbInstrBuf isn't yet initialized.
693 * - Advancing beyond the buffer boundary (e.g. cross page).
694 * - Advancing beyond the CS segment limit.
695 * - Fetching from non-mappable page (e.g. MMIO).
696 *
697 * @param pVCpu The cross context virtual CPU structure of the
698 * calling thread.
699 * @param pvDst Where to return the bytes.
700 * @param cbDst Number of bytes to read.
701 *
702 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
703 */
704void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
705{
706#ifdef IN_RING3
707 for (;;)
708 {
709 Assert(cbDst <= 8);
710 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
711
712 /*
713 * We might have a partial buffer match, deal with that first to make the
714 * rest simpler. This is the first part of the cross page/buffer case.
715 */
716 if (pVCpu->iem.s.pbInstrBuf != NULL)
717 {
718 if (offBuf < pVCpu->iem.s.cbInstrBuf)
719 {
720 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
721 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
722 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
723
724 cbDst -= cbCopy;
725 pvDst = (uint8_t *)pvDst + cbCopy;
726 offBuf += cbCopy;
727 pVCpu->iem.s.offInstrNextByte += offBuf;
728 }
729 }
730
731 /*
732 * Check segment limit, figuring how much we're allowed to access at this point.
733 *
734 * We will fault immediately if RIP is past the segment limit / in non-canonical
735 * territory. If we do continue, there are one or more bytes to read before we
736 * end up in trouble and we need to do that first before faulting.
737 */
738 RTGCPTR GCPtrFirst;
739 uint32_t cbMaxRead;
740 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
741 {
742 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
743 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
744 { /* likely */ }
745 else
746 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
747 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
748 }
749 else
750 {
751 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
752 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
753 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
754 { /* likely */ }
755 else
756 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
757 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
758 if (cbMaxRead != 0)
759 { /* likely */ }
760 else
761 {
762 /* Overflowed because address is 0 and limit is max. */
763 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
764 cbMaxRead = X86_PAGE_SIZE;
765 }
766 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
767 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
768 if (cbMaxRead2 < cbMaxRead)
769 cbMaxRead = cbMaxRead2;
770 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
771 }
772
773 /*
774 * Get the TLB entry for this piece of code.
775 */
776 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
777 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
778 if (pTlbe->uTag == uTag)
779 {
780 /* likely when executing lots of code, otherwise unlikely */
781# ifdef VBOX_WITH_STATISTICS
782 pVCpu->iem.s.CodeTlb.cTlbHits++;
783# endif
784 }
785 else
786 {
787 pVCpu->iem.s.CodeTlb.cTlbMisses++;
788 PGMPTWALK Walk;
789 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
790 if (RT_FAILURE(rc))
791 {
792#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
793 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
794 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
795#endif
796 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
797 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
798 }
799
800 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
801 Assert(Walk.fSucceeded);
802 pTlbe->uTag = uTag;
803 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
804 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
805 pTlbe->GCPhys = Walk.GCPhys;
806 pTlbe->pbMappingR3 = NULL;
807 }
808
809 /*
810 * Check TLB page table level access flags.
811 */
812 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
813 {
814 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
815 {
816 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
817 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
818 }
819 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
820 {
821 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
822 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
823 }
824 }
825
826 /*
827 * Look up the physical page info if necessary.
828 */
829 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
830 { /* not necessary */ }
831 else
832 {
833 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
834 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
835 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
836 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
837 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
838 { /* likely */ }
839 else
840 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
841 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
842 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
843 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
844 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
845 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
846 }
847
848# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
849 /*
850 * Try do a direct read using the pbMappingR3 pointer.
851 */
852 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
853 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
854 {
855 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
856 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
857 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
858 {
859 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
860 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
861 }
862 else
863 {
864 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
865 Assert(cbInstr < cbMaxRead);
866 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
867 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
868 }
869 if (cbDst <= cbMaxRead)
870 {
871 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
872 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
873 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
874 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
875 return;
876 }
877 pVCpu->iem.s.pbInstrBuf = NULL;
878
879 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
880 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
881 }
882 else
883# endif
884#if 0
885 /*
886 * If there is no special read handling, we can read a bit more and
887 * put it in the prefetch buffer.
888 */
889 if ( cbDst < cbMaxRead
890 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
891 {
892 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
893 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
894 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
895 { /* likely */ }
896 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
897 {
898 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
899 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
900 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
901 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
902 }
903 else
904 {
905 Log((RT_SUCCESS(rcStrict)
906 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
907 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
908 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
909 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
910 }
911 }
912 /*
913 * Special read handling, so only read exactly what's needed.
914 * This is a highly unlikely scenario.
915 */
916 else
917#endif
918 {
919 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
920 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
921 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
922 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
923 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
924 { /* likely */ }
925 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
926 {
927 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
928 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
929 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
930 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
931 }
932 else
933 {
934 Log((RT_SUCCESS(rcStrict)
935 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
936 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
937 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
938 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
939 }
940 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
941 if (cbToRead == cbDst)
942 return;
943 }
944
945 /*
946 * More to read, loop.
947 */
948 cbDst -= cbMaxRead;
949 pvDst = (uint8_t *)pvDst + cbMaxRead;
950 }
951#else
952 RT_NOREF(pvDst, cbDst);
953 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
954#endif
955}
956
957#else
958
959/**
960 * Tries to fetch at least @a cbMin bytes more opcodes, raising the appropriate
961 * exception if it fails.
962 *
963 * @returns Strict VBox status code.
964 * @param pVCpu The cross context virtual CPU structure of the
965 * calling thread.
966 * @param cbMin The minimum number of bytes relative to offOpcode
967 * that must be read.
968 */
969VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
970{
971 /*
972 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
973 *
974 * First translate CS:rIP to a physical address.
975 */
976 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
977 uint32_t cbToTryRead;
978 RTGCPTR GCPtrNext;
979 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
980 {
981 cbToTryRead = GUEST_PAGE_SIZE;
982 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
983 if (!IEM_IS_CANONICAL(GCPtrNext))
984 return iemRaiseGeneralProtectionFault0(pVCpu);
985 }
986 else
987 {
988 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
989 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
990 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
991 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
992 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
993 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
994 if (!cbToTryRead) /* overflowed */
995 {
996 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
997 cbToTryRead = UINT32_MAX;
998 /** @todo check out wrapping around the code segment. */
999 }
1000 if (cbToTryRead < cbMin - cbLeft)
1001 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1002 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1003 }
1004
1005 /* Only read up to the end of the page, and make sure we don't read more
1006 than the opcode buffer can hold. */
1007 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1008 if (cbToTryRead > cbLeftOnPage)
1009 cbToTryRead = cbLeftOnPage;
1010 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1011 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1012/** @todo r=bird: Convert assertion into undefined opcode exception? */
1013 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1014
1015 PGMPTWALK Walk;
1016 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1017 if (RT_FAILURE(rc))
1018 {
1019 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1020#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1021 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1022 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1023#endif
1024 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1025 }
1026 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1027 {
1028 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1029#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1030 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1031 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1032#endif
1033 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1034 }
1035 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1036 {
1037 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1038#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1039 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1040 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1041#endif
1042 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1043 }
1044 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1045 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1046 /** @todo Check reserved bits and such stuff. PGM is better at doing
1047 * that, so do it when implementing the guest virtual address
1048 * TLB... */
1049
1050 /*
1051 * Read the bytes at this address.
1052 *
1053 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1054 * and since PATM should only patch the start of an instruction there
1055 * should be no need to check again here.
1056 */
1057 if (!pVCpu->iem.s.fBypassHandlers)
1058 {
1059 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1060 cbToTryRead, PGMACCESSORIGIN_IEM);
1061 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1062 { /* likely */ }
1063 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1064 {
1065 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1066 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1067 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1068 }
1069 else
1070 {
1071 Log((RT_SUCCESS(rcStrict)
1072 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1073 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1074 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1075 return rcStrict;
1076 }
1077 }
1078 else
1079 {
1080 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1081 if (RT_SUCCESS(rc))
1082 { /* likely */ }
1083 else
1084 {
1085 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1086 return rc;
1087 }
1088 }
1089 pVCpu->iem.s.cbOpcode += cbToTryRead;
1090 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1091
1092 return VINF_SUCCESS;
1093}
1094
1095#endif /* !IEM_WITH_CODE_TLB */
1096#ifndef IEM_WITH_SETJMP
1097
1098/**
1099 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1100 *
1101 * @returns Strict VBox status code.
1102 * @param pVCpu The cross context virtual CPU structure of the
1103 * calling thread.
1104 * @param pb Where to return the opcode byte.
1105 */
1106VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1107{
1108 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1109 if (rcStrict == VINF_SUCCESS)
1110 {
1111 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1112 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1113 pVCpu->iem.s.offOpcode = offOpcode + 1;
1114 }
1115 else
1116 *pb = 0;
1117 return rcStrict;
1118}
1119
1120#else /* IEM_WITH_SETJMP */
1121
1122/**
1123 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1124 *
1125 * @returns The opcode byte.
1126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1127 */
1128uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1129{
1130# ifdef IEM_WITH_CODE_TLB
1131 uint8_t u8;
1132 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1133 return u8;
1134# else
1135 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1136 if (rcStrict == VINF_SUCCESS)
1137 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1138 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1139# endif
1140}
1141
1142#endif /* IEM_WITH_SETJMP */
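/*
 * Note on the fast/slow split: the common case is handled by the
 * corresponding inline getters (iemOpcodeGetNextU8 and friends, not in this
 * file), which serve bytes straight out of the already fetched opcode buffer
 * and only fall back to the *Slow/*SlowJmp workers here when it runs dry.
 * Conceptually (illustrative sketch, not the actual inline code):
 *
 *      if (RT_LIKELY(pVCpu->iem.s.offOpcode < pVCpu->iem.s.cbOpcode))
 *          u8 = pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
 *      else
 *          rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
 */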
1143
1144#ifndef IEM_WITH_SETJMP
1145
1146/**
1147 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1148 *
1149 * @returns Strict VBox status code.
1150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1151 * @param pu16 Where to return the opcode word.
1152 */
1153VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1154{
1155 uint8_t u8;
1156 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1157 if (rcStrict == VINF_SUCCESS)
1158 *pu16 = (int8_t)u8;
1159 return rcStrict;
1160}
1161
1162
1163/**
1164 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1165 *
1166 * @returns Strict VBox status code.
1167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1168 * @param pu32 Where to return the opcode dword.
1169 */
1170VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1171{
1172 uint8_t u8;
1173 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1174 if (rcStrict == VINF_SUCCESS)
1175 *pu32 = (int8_t)u8;
1176 return rcStrict;
1177}
1178
1179
1180/**
1181 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1182 *
1183 * @returns Strict VBox status code.
1184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1185 * @param pu64 Where to return the opcode qword.
1186 */
1187VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1188{
1189 uint8_t u8;
1190 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1191 if (rcStrict == VINF_SUCCESS)
1192 *pu64 = (int8_t)u8;
1193 return rcStrict;
1194}
1195
1196#endif /* !IEM_WITH_SETJMP */
1197
1198
1199#ifndef IEM_WITH_SETJMP
1200
1201/**
1202 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1203 *
1204 * @returns Strict VBox status code.
1205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1206 * @param pu16 Where to return the opcode word.
1207 */
1208VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1209{
1210 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1211 if (rcStrict == VINF_SUCCESS)
1212 {
1213 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1214# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1215 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1216# else
1217 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1218# endif
1219 pVCpu->iem.s.offOpcode = offOpcode + 2;
1220 }
1221 else
1222 *pu16 = 0;
1223 return rcStrict;
1224}
1225
1226#else /* IEM_WITH_SETJMP */
1227
1228/**
1229 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1230 *
1231 * @returns The opcode word.
1232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1233 */
1234uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1235{
1236# ifdef IEM_WITH_CODE_TLB
1237 uint16_t u16;
1238 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1239 return u16;
1240# else
1241 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1242 if (rcStrict == VINF_SUCCESS)
1243 {
1244 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1245 pVCpu->iem.s.offOpcode += 2;
1246# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1247 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1248# else
1249 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1250# endif
1251 }
1252 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1253# endif
1254}
1255
1256#endif /* IEM_WITH_SETJMP */
1257
1258#ifndef IEM_WITH_SETJMP
1259
1260/**
1261 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1262 *
1263 * @returns Strict VBox status code.
1264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1265 * @param pu32 Where to return the opcode double word.
1266 */
1267VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1268{
1269 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1270 if (rcStrict == VINF_SUCCESS)
1271 {
1272 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1273 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1274 pVCpu->iem.s.offOpcode = offOpcode + 2;
1275 }
1276 else
1277 *pu32 = 0;
1278 return rcStrict;
1279}
1280
1281
1282/**
1283 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1284 *
1285 * @returns Strict VBox status code.
1286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1287 * @param pu64 Where to return the opcode quad word.
1288 */
1289VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1290{
1291 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1292 if (rcStrict == VINF_SUCCESS)
1293 {
1294 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1295 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1296 pVCpu->iem.s.offOpcode = offOpcode + 2;
1297 }
1298 else
1299 *pu64 = 0;
1300 return rcStrict;
1301}
1302
1303#endif /* !IEM_WITH_SETJMP */
1304
1305#ifndef IEM_WITH_SETJMP
1306
1307/**
1308 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1309 *
1310 * @returns Strict VBox status code.
1311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1312 * @param pu32 Where to return the opcode dword.
1313 */
1314VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1315{
1316 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1317 if (rcStrict == VINF_SUCCESS)
1318 {
1319 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1320# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1321 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1322# else
1323 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1324 pVCpu->iem.s.abOpcode[offOpcode + 1],
1325 pVCpu->iem.s.abOpcode[offOpcode + 2],
1326 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1327# endif
1328 pVCpu->iem.s.offOpcode = offOpcode + 4;
1329 }
1330 else
1331 *pu32 = 0;
1332 return rcStrict;
1333}
1334
1335#else /* IEM_WITH_SETJMP */
1336
1337/**
1338 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1339 *
1340 * @returns The opcode dword.
1341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1342 */
1343uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1344{
1345# ifdef IEM_WITH_CODE_TLB
1346 uint32_t u32;
1347 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1348 return u32;
1349# else
1350 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1351 if (rcStrict == VINF_SUCCESS)
1352 {
1353 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1354 pVCpu->iem.s.offOpcode = offOpcode + 4;
1355# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1356 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1357# else
1358 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1359 pVCpu->iem.s.abOpcode[offOpcode + 1],
1360 pVCpu->iem.s.abOpcode[offOpcode + 2],
1361 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1362# endif
1363 }
1364 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1365# endif
1366}
1367
1368#endif /* IEM_WITH_SETJMP */
1369
1370#ifndef IEM_WITH_SETJMP
1371
1372/**
1373 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1374 *
1375 * @returns Strict VBox status code.
1376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1377 * @param pu64 Where to return the opcode qword.
1378 */
1379VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1380{
1381 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1382 if (rcStrict == VINF_SUCCESS)
1383 {
1384 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1385 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1386 pVCpu->iem.s.abOpcode[offOpcode + 1],
1387 pVCpu->iem.s.abOpcode[offOpcode + 2],
1388 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1389 pVCpu->iem.s.offOpcode = offOpcode + 4;
1390 }
1391 else
1392 *pu64 = 0;
1393 return rcStrict;
1394}
1395
1396
1397/**
1398 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1399 *
1400 * @returns Strict VBox status code.
1401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1402 * @param pu64 Where to return the opcode qword.
1403 */
1404VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1405{
1406 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1407 if (rcStrict == VINF_SUCCESS)
1408 {
1409 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1410 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1411 pVCpu->iem.s.abOpcode[offOpcode + 1],
1412 pVCpu->iem.s.abOpcode[offOpcode + 2],
1413 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1414 pVCpu->iem.s.offOpcode = offOpcode + 4;
1415 }
1416 else
1417 *pu64 = 0;
1418 return rcStrict;
1419}
1420
1421#endif /* !IEM_WITH_SETJMP */
1422
1423#ifndef IEM_WITH_SETJMP
1424
1425/**
1426 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1427 *
1428 * @returns Strict VBox status code.
1429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1430 * @param pu64 Where to return the opcode qword.
1431 */
1432VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1433{
1434 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1435 if (rcStrict == VINF_SUCCESS)
1436 {
1437 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1438# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1439 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1440# else
1441 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1442 pVCpu->iem.s.abOpcode[offOpcode + 1],
1443 pVCpu->iem.s.abOpcode[offOpcode + 2],
1444 pVCpu->iem.s.abOpcode[offOpcode + 3],
1445 pVCpu->iem.s.abOpcode[offOpcode + 4],
1446 pVCpu->iem.s.abOpcode[offOpcode + 5],
1447 pVCpu->iem.s.abOpcode[offOpcode + 6],
1448 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1449# endif
1450 pVCpu->iem.s.offOpcode = offOpcode + 8;
1451 }
1452 else
1453 *pu64 = 0;
1454 return rcStrict;
1455}
1456
1457#else /* IEM_WITH_SETJMP */
1458
1459/**
1460 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1461 *
1462 * @returns The opcode qword.
1463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1464 */
1465uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1466{
1467# ifdef IEM_WITH_CODE_TLB
1468 uint64_t u64;
1469 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1470 return u64;
1471# else
1472 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1473 if (rcStrict == VINF_SUCCESS)
1474 {
1475 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1476 pVCpu->iem.s.offOpcode = offOpcode + 8;
1477# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1478 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1479# else
1480 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1481 pVCpu->iem.s.abOpcode[offOpcode + 1],
1482 pVCpu->iem.s.abOpcode[offOpcode + 2],
1483 pVCpu->iem.s.abOpcode[offOpcode + 3],
1484 pVCpu->iem.s.abOpcode[offOpcode + 4],
1485 pVCpu->iem.s.abOpcode[offOpcode + 5],
1486 pVCpu->iem.s.abOpcode[offOpcode + 6],
1487 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1488# endif
1489 }
1490 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1491# endif
1492}
1493
1494#endif /* IEM_WITH_SETJMP */
1495
1496
1497
1498/** @name Misc Worker Functions.
1499 * @{
1500 */
1501
1502/**
1503 * Gets the exception class for the specified exception vector.
1504 *
1505 * @returns The class of the specified exception.
1506 * @param uVector The exception vector.
1507 */
1508static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1509{
1510 Assert(uVector <= X86_XCPT_LAST);
1511 switch (uVector)
1512 {
1513 case X86_XCPT_DE:
1514 case X86_XCPT_TS:
1515 case X86_XCPT_NP:
1516 case X86_XCPT_SS:
1517 case X86_XCPT_GP:
1518 case X86_XCPT_SX: /* AMD only */
1519 return IEMXCPTCLASS_CONTRIBUTORY;
1520
1521 case X86_XCPT_PF:
1522 case X86_XCPT_VE: /* Intel only */
1523 return IEMXCPTCLASS_PAGE_FAULT;
1524
1525 case X86_XCPT_DF:
1526 return IEMXCPTCLASS_DOUBLE_FAULT;
1527 }
1528 return IEMXCPTCLASS_BENIGN;
1529}
1530
1531
1532/**
1533 * Evaluates how to handle an exception caused during delivery of another event
1534 * (exception / interrupt).
1535 *
1536 * @returns How to handle the recursive exception.
1537 * @param pVCpu The cross context virtual CPU structure of the
1538 * calling thread.
1539 * @param fPrevFlags The flags of the previous event.
1540 * @param uPrevVector The vector of the previous event.
1541 * @param fCurFlags The flags of the current exception.
1542 * @param uCurVector The vector of the current exception.
1543 * @param pfXcptRaiseInfo Where to store additional information about the
1544 * exception condition. Optional.
1545 */
1546VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1547 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1548{
1549 /*
1550 * Only CPU exceptions can be raised while delivering other events; software interrupt
1551 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1552 */
1553 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1554 Assert(pVCpu); RT_NOREF(pVCpu);
1555 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1556
1557 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1558 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1559 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1560 {
1561 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1562 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1563 {
1564 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1565 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1566 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1567 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1568 {
1569 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1570 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1571 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1572 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1573 uCurVector, pVCpu->cpum.GstCtx.cr2));
1574 }
1575 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1576 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1577 {
1578 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1579 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1580 }
1581 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1582 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1583 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1584 {
1585 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1586 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1587 }
1588 }
1589 else
1590 {
1591 if (uPrevVector == X86_XCPT_NMI)
1592 {
1593 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1594 if (uCurVector == X86_XCPT_PF)
1595 {
1596 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1597 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1598 }
1599 }
1600 else if ( uPrevVector == X86_XCPT_AC
1601 && uCurVector == X86_XCPT_AC)
1602 {
1603 enmRaise = IEMXCPTRAISE_CPU_HANG;
1604 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1605 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1606 }
1607 }
1608 }
1609 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1610 {
1611 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1612 if (uCurVector == X86_XCPT_PF)
1613 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1614 }
1615 else
1616 {
1617 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1618 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1619 }
1620
1621 if (pfXcptRaiseInfo)
1622 *pfXcptRaiseInfo = fRaiseInfo;
1623 return enmRaise;
1624}
1625
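/*
 * Illustrative sketch only (not part of the build): how a delivery path might
 * act on the verdict above.  The scenario (a #GP raised while a #PF was being
 * delivered, which the logic above classifies as IEMXCPTRAISE_DOUBLE_FAULT)
 * and the local variable names are hypothetical.
 *
 *    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *    IEMXCPTRAISE     enmRaise   = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                           &fRaiseInfo);
 *    switch (enmRaise)
 *    {
 *        case IEMXCPTRAISE_CURRENT_XCPT: break;  // deliver the second event as-is
 *        case IEMXCPTRAISE_DOUBLE_FAULT: break;  // deliver #DF instead
 *        case IEMXCPTRAISE_TRIPLE_FAULT: break;  // initiate CPU shutdown
 *        case IEMXCPTRAISE_CPU_HANG:     break;  // stop executing the guest
 *        default: AssertFailed();        break;
 *    }
 */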
1626
1627/**
1628 * Enters the CPU shutdown state initiated by a triple fault or other
1629 * unrecoverable conditions.
1630 *
1631 * @returns Strict VBox status code.
1632 * @param pVCpu The cross context virtual CPU structure of the
1633 * calling thread.
1634 */
1635static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1636{
1637 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1638 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1639
1640 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1641 {
1642 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1643 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1644 }
1645
1646 RT_NOREF(pVCpu);
1647 return VINF_EM_TRIPLE_FAULT;
1648}
1649
1650
1651/**
1652 * Validates a new SS segment.
1653 *
1654 * @returns VBox strict status code.
1655 * @param pVCpu The cross context virtual CPU structure of the
1656 * calling thread.
1657 * @param NewSS The new SS selector.
1658 * @param uCpl The CPL to load the stack for.
1659 * @param pDesc Where to return the descriptor.
1660 */
1661static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1662{
1663 /* Null selectors are not allowed (we're not called for dispatching
1664 interrupts with SS=0 in long mode). */
1665 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1666 {
1667 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1668 return iemRaiseTaskSwitchFault0(pVCpu);
1669 }
1670
1671 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1672 if ((NewSS & X86_SEL_RPL) != uCpl)
1673 {
1674 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1675 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1676 }
1677
1678 /*
1679 * Read the descriptor.
1680 */
1681 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1682 if (rcStrict != VINF_SUCCESS)
1683 return rcStrict;
1684
1685 /*
1686 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1687 */
1688 if (!pDesc->Legacy.Gen.u1DescType)
1689 {
1690 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1691 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1692 }
1693
1694 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1695 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1696 {
1697 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1698 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1699 }
1700 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1701 {
1702 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1703 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1704 }
1705
1706 /* Is it there? */
1707 /** @todo testcase: Is this checked before the canonical / limit check below? */
1708 if (!pDesc->Legacy.Gen.u1Present)
1709 {
1710 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1711 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1712 }
1713
1714 return VINF_SUCCESS;
1715}
1716
1717/** @} */
1718
1719
1720/** @name Raising Exceptions.
1721 *
1722 * @{
1723 */
1724
1725
1726/**
1727 * Loads the specified stack far pointer from the TSS.
1728 *
1729 * @returns VBox strict status code.
1730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1731 * @param uCpl The CPL to load the stack for.
1732 * @param pSelSS Where to return the new stack segment.
1733 * @param puEsp Where to return the new stack pointer.
1734 */
1735static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1736{
1737 VBOXSTRICTRC rcStrict;
1738 Assert(uCpl < 4);
1739
1740 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1741 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1742 {
1743 /*
1744 * 16-bit TSS (X86TSS16).
1745 */
1746 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1747 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1748 {
1749 uint32_t off = uCpl * 4 + 2;
1750 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1751 {
1752 /** @todo check actual access pattern here. */
1753 uint32_t u32Tmp = 0; /* gcc maybe... */
1754 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1755 if (rcStrict == VINF_SUCCESS)
1756 {
1757 *puEsp = RT_LOWORD(u32Tmp);
1758 *pSelSS = RT_HIWORD(u32Tmp);
1759 return VINF_SUCCESS;
1760 }
1761 }
1762 else
1763 {
1764 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1765 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1766 }
1767 break;
1768 }
1769
1770 /*
1771 * 32-bit TSS (X86TSS32).
1772 */
1773 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1774 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1775 {
1776 uint32_t off = uCpl * 8 + 4;
1777 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1778 {
1779/** @todo check actual access pattern here. */
1780 uint64_t u64Tmp;
1781 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1782 if (rcStrict == VINF_SUCCESS)
1783 {
1784 *puEsp = u64Tmp & UINT32_MAX;
1785 *pSelSS = (RTSEL)(u64Tmp >> 32);
1786 return VINF_SUCCESS;
1787 }
1788 }
1789 else
1790 {
1791 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1792 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1793 }
1794 break;
1795 }
1796
1797 default:
1798 AssertFailed();
1799 rcStrict = VERR_IEM_IPE_4;
1800 break;
1801 }
1802
1803 *puEsp = 0; /* make gcc happy */
1804 *pSelSS = 0; /* make gcc happy */
1805 return rcStrict;
1806}
1807
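/*
 * Worked example (illustrative only): for a 32-bit TSS and uCpl=1 the code
 * above computes off = 1 * 8 + 4 = 12, i.e. the X86TSS32 esp1 field, and the
 * 64-bit fetch picks up ESP1 in the low dword and SS1 in the low word of the
 * high dword.  For a 16-bit TSS and uCpl=1 it computes off = 1 * 4 + 2 = 6,
 * i.e. the X86TSS16 sp1 field, with SS1 in the high word of the 32-bit fetch.
 */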
1808
1809/**
1810 * Loads the specified stack pointer from the 64-bit TSS.
1811 *
1812 * @returns VBox strict status code.
1813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1814 * @param uCpl The CPL to load the stack for.
1815 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1816 * @param puRsp Where to return the new stack pointer.
1817 */
1818static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1819{
1820 Assert(uCpl < 4);
1821 Assert(uIst < 8);
1822 *puRsp = 0; /* make gcc happy */
1823
1824 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1825 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1826
1827 uint32_t off;
1828 if (uIst)
1829 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1830 else
1831 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1832 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1833 {
1834 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1835 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1836 }
1837
1838 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1839}
1840
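/*
 * Worked example (illustrative only): with uIst=0 and uCpl=2 the code above
 * reads RSP2 at offset 2 * 8 + RT_UOFFSETOF(X86TSS64, rsp0) = 20, whereas
 * with uIst=3 it reads IST3 at offset (3 - 1) * 8 + RT_UOFFSETOF(X86TSS64, ist1)
 * regardless of uCpl.  Either way the limit check guards the full 8-byte read.
 */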
1841
1842/**
1843 * Adjust the CPU state according to the exception being raised.
1844 *
1845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1846 * @param u8Vector The exception that has been raised.
1847 */
1848DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1849{
1850 switch (u8Vector)
1851 {
1852 case X86_XCPT_DB:
1853 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1854 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1855 break;
1856 /** @todo Read the AMD and Intel exception reference... */
1857 }
1858}
1859
1860
1861/**
1862 * Implements exceptions and interrupts for real mode.
1863 *
1864 * @returns VBox strict status code.
1865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1866 * @param cbInstr The number of bytes to offset rIP by in the return
1867 * address.
1868 * @param u8Vector The interrupt / exception vector number.
1869 * @param fFlags The flags.
1870 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1871 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1872 */
1873static VBOXSTRICTRC
1874iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1875 uint8_t cbInstr,
1876 uint8_t u8Vector,
1877 uint32_t fFlags,
1878 uint16_t uErr,
1879 uint64_t uCr2) RT_NOEXCEPT
1880{
1881 NOREF(uErr); NOREF(uCr2);
1882 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1883
1884 /*
1885 * Read the IDT entry.
1886 */
1887 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1888 {
1889 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1890 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1891 }
1892 RTFAR16 Idte;
1893 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1894 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1895 {
1896 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1897 return rcStrict;
1898 }
1899
1900 /*
1901 * Push the stack frame.
1902 */
1903 uint16_t *pu16Frame;
1904 uint64_t uNewRsp;
1905 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
1906 if (rcStrict != VINF_SUCCESS)
1907 return rcStrict;
1908
1909 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1910#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1911 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1912 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1913 fEfl |= UINT16_C(0xf000);
1914#endif
1915 pu16Frame[2] = (uint16_t)fEfl;
1916 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1917 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1918 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1919 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1920 return rcStrict;
1921
1922 /*
1923 * Load the vector address into cs:ip and make exception specific state
1924 * adjustments.
1925 */
1926 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1927 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1928 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1929 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1930 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1931 pVCpu->cpum.GstCtx.rip = Idte.off;
1932 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1933 IEMMISC_SET_EFL(pVCpu, fEfl);
1934
1935 /** @todo do we actually do this in real mode? */
1936 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1937 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1938
1939 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1940}
1941
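/*
 * Worked example (illustrative only): for INT 21h in real mode the code above
 * checks that the IVT covers byte 4 * 0x21 + 3 = 0x87 (it does with the usual
 * IDTR.limit of 0x3FF), fetches the 4-byte segment:offset pair at linear
 * address IDTR.base + 0x84, pushes FLAGS, CS and the post-instruction IP
 * (IP + cbInstr, since INT is a software interrupt), clears IF/TF/AC and
 * continues at the fetched segment:offset.
 */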
1942
1943/**
1944 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1945 *
1946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1947 * @param pSReg Pointer to the segment register.
1948 */
1949DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1950{
1951 pSReg->Sel = 0;
1952 pSReg->ValidSel = 0;
1953 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1954 {
1955 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
1956 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1957 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1958 }
1959 else
1960 {
1961 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1962 /** @todo check this on AMD-V */
1963 pSReg->u64Base = 0;
1964 pSReg->u32Limit = 0;
1965 }
1966}
1967
1968
1969/**
1970 * Loads a segment selector during a task switch in V8086 mode.
1971 *
1972 * @param pSReg Pointer to the segment register.
1973 * @param uSel The selector value to load.
1974 */
1975DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1976{
1977 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1978 pSReg->Sel = uSel;
1979 pSReg->ValidSel = uSel;
1980 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1981 pSReg->u64Base = uSel << 4;
1982 pSReg->u32Limit = 0xffff;
1983 pSReg->Attr.u = 0xf3;
1984}
1985
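/*
 * Worked example (illustrative only): loading uSel=0x1234 above yields a
 * real-mode-style segment with base 0x12340 (uSel << 4), a 64 KiB limit and
 * attributes 0xf3, i.e. a present, DPL=3, accessed read/write data segment,
 * which is what the referenced VT-x guest segment register checks expect for
 * virtual-8086 mode.
 */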
1986
1987/**
1988 * Loads a segment selector during a task switch in protected mode.
1989 *
1990 * In this task switch scenario, we would throw \#TS exceptions rather than
1991 * \#GPs.
1992 *
1993 * @returns VBox strict status code.
1994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1995 * @param pSReg Pointer to the segment register.
1996 * @param uSel The new selector value.
1997 *
1998 * @remarks This does _not_ handle CS or SS.
1999 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2000 */
2001static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2002{
2003 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2004
2005 /* Null data selector. */
2006 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2007 {
2008 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2009 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2010 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2011 return VINF_SUCCESS;
2012 }
2013
2014 /* Fetch the descriptor. */
2015 IEMSELDESC Desc;
2016 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2017 if (rcStrict != VINF_SUCCESS)
2018 {
2019 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2020 VBOXSTRICTRC_VAL(rcStrict)));
2021 return rcStrict;
2022 }
2023
2024 /* Must be a data segment or readable code segment. */
2025 if ( !Desc.Legacy.Gen.u1DescType
2026 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2027 {
2028 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2029 Desc.Legacy.Gen.u4Type));
2030 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2031 }
2032
2033 /* Check privileges for data segments and non-conforming code segments. */
2034 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2035 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2036 {
2037 /* The RPL and the new CPL must be less than or equal to the DPL. */
2038 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2039 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2040 {
2041 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2042 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2043 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2044 }
2045 }
2046
2047 /* Is it there? */
2048 if (!Desc.Legacy.Gen.u1Present)
2049 {
2050 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2051 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2052 }
2053
2054 /* The base and limit. */
2055 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2056 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2057
2058 /*
2059 * Ok, everything checked out fine. Now set the accessed bit before
2060 * committing the result into the registers.
2061 */
2062 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2063 {
2064 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2065 if (rcStrict != VINF_SUCCESS)
2066 return rcStrict;
2067 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2068 }
2069
2070 /* Commit */
2071 pSReg->Sel = uSel;
2072 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2073 pSReg->u32Limit = cbLimit;
2074 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2075 pSReg->ValidSel = uSel;
2076 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2077 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2078 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2079
2080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2081 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2082 return VINF_SUCCESS;
2083}
2084
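/*
 * Worked example (illustrative only): loading a data selector with RPL=3 into
 * DS while the new task runs at CPL=0 and the descriptor DPL is 2 fails the
 * privilege check above (RPL 3 > DPL 2) and raises #TS with the RPL-stripped
 * selector as the error code; a conforming code selector skips that privilege
 * check entirely.
 */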
2085
2086/**
2087 * Performs a task switch.
2088 *
2089 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2090 * caller is responsible for performing the necessary checks (like DPL, TSS
2091 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2092 * reference for JMP, CALL, IRET.
2093 *
2094 * If the task switch is due to a software interrupt or hardware exception,
2095 * the caller is responsible for validating the TSS selector and descriptor. See
2096 * Intel Instruction reference for INT n.
2097 *
2098 * @returns VBox strict status code.
2099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2100 * @param enmTaskSwitch The cause of the task switch.
2101 * @param uNextEip The EIP effective after the task switch.
2102 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2103 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2104 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2105 * @param SelTSS The TSS selector of the new task.
2106 * @param pNewDescTSS Pointer to the new TSS descriptor.
2107 */
2108VBOXSTRICTRC
2109iemTaskSwitch(PVMCPUCC pVCpu,
2110 IEMTASKSWITCH enmTaskSwitch,
2111 uint32_t uNextEip,
2112 uint32_t fFlags,
2113 uint16_t uErr,
2114 uint64_t uCr2,
2115 RTSEL SelTSS,
2116 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2117{
2118 Assert(!IEM_IS_REAL_MODE(pVCpu));
2119 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2120 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2121
2122 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2123 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2124 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2125 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2126 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2127
2128 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2129 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2130
2131 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2132 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2133
2134 /* Update CR2 in case it's a page-fault. */
2135 /** @todo This should probably be done much earlier in IEM/PGM. See
2136 * @bugref{5653#c49}. */
2137 if (fFlags & IEM_XCPT_FLAGS_CR2)
2138 pVCpu->cpum.GstCtx.cr2 = uCr2;
2139
2140 /*
2141 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2142 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2143 */
2144 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2145 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2146 if (uNewTSSLimit < uNewTSSLimitMin)
2147 {
2148 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2149 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2150 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2151 }
2152
2153 /*
2154 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2155 * The new TSS must have been read and validated (DPL, limits etc.) before a
2156 * task-switch VM-exit commences.
2157 *
2158 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2159 */
2160 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2161 {
2162 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2163 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2164 }
2165
2166 /*
2167 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2168 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2169 */
2170 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2171 {
2172 uint32_t const uExitInfo1 = SelTSS;
2173 uint32_t uExitInfo2 = uErr;
2174 switch (enmTaskSwitch)
2175 {
2176 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2177 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2178 default: break;
2179 }
2180 if (fFlags & IEM_XCPT_FLAGS_ERR)
2181 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2182 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2183 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2184
2185 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2186 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2187 RT_NOREF2(uExitInfo1, uExitInfo2);
2188 }
2189
2190 /*
2191 * Check the current TSS limit. The last data written to the current TSS during the
2192 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2193 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2194 *
2195 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2196 * end up with smaller than "legal" TSS limits.
2197 */
2198 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2199 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2200 if (uCurTSSLimit < uCurTSSLimitMin)
2201 {
2202 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2203 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2204 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2205 }
2206
2207 /*
2208 * Verify that the new TSS can be accessed and map it. Map only the required contents
2209 * and not the entire TSS.
2210 */
2211 void *pvNewTSS;
2212 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2213 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2214 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2215 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2216 * not perform correct translation if this happens. See Intel spec. 7.2.1
2217 * "Task-State Segment". */
2218 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2219 if (rcStrict != VINF_SUCCESS)
2220 {
2221 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2222 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2223 return rcStrict;
2224 }
2225
2226 /*
2227 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2228 */
2229 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2230 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2231 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2232 {
2233 PX86DESC pDescCurTSS;
2234 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2235 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2236 if (rcStrict != VINF_SUCCESS)
2237 {
2238 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2239 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2240 return rcStrict;
2241 }
2242
2243 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2244 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2245 if (rcStrict != VINF_SUCCESS)
2246 {
2247 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2248 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2249 return rcStrict;
2250 }
2251
2252 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2253 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2254 {
2255 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2256 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2257 u32EFlags &= ~X86_EFL_NT;
2258 }
2259 }
2260
2261 /*
2262 * Save the CPU state into the current TSS.
2263 */
2264 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2265 if (GCPtrNewTSS == GCPtrCurTSS)
2266 {
2267 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2268 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2269 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2270 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2271 pVCpu->cpum.GstCtx.ldtr.Sel));
2272 }
2273 if (fIsNewTSS386)
2274 {
2275 /*
2276 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2277 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2278 */
2279 void *pvCurTSS32;
2280 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2281 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2282 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2283 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2284 if (rcStrict != VINF_SUCCESS)
2285 {
2286 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2287 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2288 return rcStrict;
2289 }
2290
2291 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2292 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2293 pCurTSS32->eip = uNextEip;
2294 pCurTSS32->eflags = u32EFlags;
2295 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2296 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2297 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2298 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2299 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2300 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2301 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2302 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2303 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2304 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2305 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2306 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2307 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2308 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2309
2310 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2311 if (rcStrict != VINF_SUCCESS)
2312 {
2313 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2314 VBOXSTRICTRC_VAL(rcStrict)));
2315 return rcStrict;
2316 }
2317 }
2318 else
2319 {
2320 /*
2321 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2322 */
2323 void *pvCurTSS16;
2324 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2325 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2326 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2327 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2328 if (rcStrict != VINF_SUCCESS)
2329 {
2330 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2331 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2332 return rcStrict;
2333 }
2334
2335 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2336 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2337 pCurTSS16->ip = uNextEip;
2338 pCurTSS16->flags = u32EFlags;
2339 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2340 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2341 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2342 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2343 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2344 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2345 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2346 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2347 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2348 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2349 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2350 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2351
2352 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2353 if (rcStrict != VINF_SUCCESS)
2354 {
2355 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2356 VBOXSTRICTRC_VAL(rcStrict)));
2357 return rcStrict;
2358 }
2359 }
2360
2361 /*
2362 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2363 */
2364 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2365 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2366 {
2367 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2368 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2369 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2370 }
2371
2372 /*
2373 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2374 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2375 */
2376 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2377 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2378 bool fNewDebugTrap;
2379 if (fIsNewTSS386)
2380 {
2381 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2382 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2383 uNewEip = pNewTSS32->eip;
2384 uNewEflags = pNewTSS32->eflags;
2385 uNewEax = pNewTSS32->eax;
2386 uNewEcx = pNewTSS32->ecx;
2387 uNewEdx = pNewTSS32->edx;
2388 uNewEbx = pNewTSS32->ebx;
2389 uNewEsp = pNewTSS32->esp;
2390 uNewEbp = pNewTSS32->ebp;
2391 uNewEsi = pNewTSS32->esi;
2392 uNewEdi = pNewTSS32->edi;
2393 uNewES = pNewTSS32->es;
2394 uNewCS = pNewTSS32->cs;
2395 uNewSS = pNewTSS32->ss;
2396 uNewDS = pNewTSS32->ds;
2397 uNewFS = pNewTSS32->fs;
2398 uNewGS = pNewTSS32->gs;
2399 uNewLdt = pNewTSS32->selLdt;
2400 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2401 }
2402 else
2403 {
2404 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2405 uNewCr3 = 0;
2406 uNewEip = pNewTSS16->ip;
2407 uNewEflags = pNewTSS16->flags;
2408 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2409 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2410 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2411 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2412 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2413 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2414 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2415 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2416 uNewES = pNewTSS16->es;
2417 uNewCS = pNewTSS16->cs;
2418 uNewSS = pNewTSS16->ss;
2419 uNewDS = pNewTSS16->ds;
2420 uNewFS = 0;
2421 uNewGS = 0;
2422 uNewLdt = pNewTSS16->selLdt;
2423 fNewDebugTrap = false;
2424 }
2425
2426 if (GCPtrNewTSS == GCPtrCurTSS)
2427 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2428 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2429
2430 /*
2431 * We're done accessing the new TSS.
2432 */
2433 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2434 if (rcStrict != VINF_SUCCESS)
2435 {
2436 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2437 return rcStrict;
2438 }
2439
2440 /*
2441 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2442 */
2443 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2444 {
2445 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2446 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2447 if (rcStrict != VINF_SUCCESS)
2448 {
2449 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2450 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2451 return rcStrict;
2452 }
2453
2454 /* Check that the descriptor indicates the new TSS is available (not busy). */
2455 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2456 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2457 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2458
2459 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2460 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2461 if (rcStrict != VINF_SUCCESS)
2462 {
2463 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2464 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2465 return rcStrict;
2466 }
2467 }
2468
2469 /*
2470 * From this point on, we're technically in the new task. We will defer exceptions
2471 * until the completion of the task switch but before executing any instructions in the new task.
2472 */
2473 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2474 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2475 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2476 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2477 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2478 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2479 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2480
2481 /* Set the busy bit in TR. */
2482 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2483
2484 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2485 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2486 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2487 {
2488 uNewEflags |= X86_EFL_NT;
2489 }
2490
2491 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2492 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2493 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2494
2495 pVCpu->cpum.GstCtx.eip = uNewEip;
2496 pVCpu->cpum.GstCtx.eax = uNewEax;
2497 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2498 pVCpu->cpum.GstCtx.edx = uNewEdx;
2499 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2500 pVCpu->cpum.GstCtx.esp = uNewEsp;
2501 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2502 pVCpu->cpum.GstCtx.esi = uNewEsi;
2503 pVCpu->cpum.GstCtx.edi = uNewEdi;
2504
2505 uNewEflags &= X86_EFL_LIVE_MASK;
2506 uNewEflags |= X86_EFL_RA1_MASK;
2507 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2508
2509 /*
2510 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2511 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2512 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2513 */
2514 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2515 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2516
2517 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2518 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2519
2520 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2521 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2522
2523 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2524 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2525
2526 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2527 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2528
2529 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2530 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2531 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2532
2533 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2534 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2535 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2536 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2537
2538 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2539 {
2540 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2541 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2542 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2543 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2544 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2545 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2546 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2547 }
2548
2549 /*
2550 * Switch CR3 for the new task.
2551 */
2552 if ( fIsNewTSS386
2553 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2554 {
2555 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2556 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2557 AssertRCSuccessReturn(rc, rc);
2558
2559 /* Inform PGM. */
2560 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2561 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2562 AssertRCReturn(rc, rc);
2563 /* ignore informational status codes */
2564
2565 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2566 }
2567
2568 /*
2569 * Switch LDTR for the new task.
2570 */
2571 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2572 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2573 else
2574 {
2575 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2576
2577 IEMSELDESC DescNewLdt;
2578 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2579 if (rcStrict != VINF_SUCCESS)
2580 {
2581 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2582 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2583 return rcStrict;
2584 }
2585 if ( !DescNewLdt.Legacy.Gen.u1Present
2586 || DescNewLdt.Legacy.Gen.u1DescType
2587 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2588 {
2589 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2590 uNewLdt, DescNewLdt.Legacy.u));
2591 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2592 }
2593
2594 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2595 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2596 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2597 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2598 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2599 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2600 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2601 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2602 }
2603
2604 IEMSELDESC DescSS;
2605 if (IEM_IS_V86_MODE(pVCpu))
2606 {
2607 pVCpu->iem.s.uCpl = 3;
2608 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2609 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2610 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2611 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2612 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2613 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2614
2615 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2616 DescSS.Legacy.u = 0;
2617 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2618 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2619 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2620 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2621 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2622 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2623 DescSS.Legacy.Gen.u2Dpl = 3;
2624 }
2625 else
2626 {
2627 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2628
2629 /*
2630 * Load the stack segment for the new task.
2631 */
2632 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2633 {
2634 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2635 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2636 }
2637
2638 /* Fetch the descriptor. */
2639 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2640 if (rcStrict != VINF_SUCCESS)
2641 {
2642 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2643 VBOXSTRICTRC_VAL(rcStrict)));
2644 return rcStrict;
2645 }
2646
2647 /* SS must be a data segment and writable. */
2648 if ( !DescSS.Legacy.Gen.u1DescType
2649 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2650 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2651 {
2652 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2653 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2654 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2655 }
2656
2657 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2658 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2659 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2660 {
2661 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2662 uNewCpl));
2663 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2664 }
2665
2666 /* Is it there? */
2667 if (!DescSS.Legacy.Gen.u1Present)
2668 {
2669 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2670 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2671 }
2672
2673 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2674 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2675
2676 /* Set the accessed bit before committing the result into SS. */
2677 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2678 {
2679 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2680 if (rcStrict != VINF_SUCCESS)
2681 return rcStrict;
2682 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2683 }
2684
2685 /* Commit SS. */
2686 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2687 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2688 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2689 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2690 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2691 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2692 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2693
2694 /* CPL has changed, update IEM before loading rest of segments. */
2695 pVCpu->iem.s.uCpl = uNewCpl;
2696
2697 /*
2698 * Load the data segments for the new task.
2699 */
2700 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2701 if (rcStrict != VINF_SUCCESS)
2702 return rcStrict;
2703 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2704 if (rcStrict != VINF_SUCCESS)
2705 return rcStrict;
2706 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2707 if (rcStrict != VINF_SUCCESS)
2708 return rcStrict;
2709 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2710 if (rcStrict != VINF_SUCCESS)
2711 return rcStrict;
2712
2713 /*
2714 * Load the code segment for the new task.
2715 */
2716 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2717 {
2718 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2719 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2720 }
2721
2722 /* Fetch the descriptor. */
2723 IEMSELDESC DescCS;
2724 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2725 if (rcStrict != VINF_SUCCESS)
2726 {
2727 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2728 return rcStrict;
2729 }
2730
2731 /* CS must be a code segment. */
2732 if ( !DescCS.Legacy.Gen.u1DescType
2733 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2734 {
2735 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2736 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2737 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2738 }
2739
2740 /* For conforming CS, DPL must be less than or equal to the RPL. */
2741 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2742 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2743 {
2744 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2745 DescCS.Legacy.Gen.u2Dpl));
2746 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2747 }
2748
2749 /* For non-conforming CS, DPL must match RPL. */
2750 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2751 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2752 {
2753 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2754 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2755 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2756 }
2757
2758 /* Is it there? */
2759 if (!DescCS.Legacy.Gen.u1Present)
2760 {
2761 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2762 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2763 }
2764
2765 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2766 u64Base = X86DESC_BASE(&DescCS.Legacy);
2767
2768 /* Set the accessed bit before committing the result into CS. */
2769 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2770 {
2771 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2772 if (rcStrict != VINF_SUCCESS)
2773 return rcStrict;
2774 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2775 }
2776
2777 /* Commit CS. */
2778 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2779 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2780 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2781 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2782 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2783 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2784 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2785 }
2786
2787 /** @todo Debug trap. */
2788 if (fIsNewTSS386 && fNewDebugTrap)
2789 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2790
2791 /*
2792 * Construct the error code masks based on what caused this task switch.
2793 * See Intel Instruction reference for INT.
2794 */
2795 uint16_t uExt;
2796 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2797 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2798 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2799 {
2800 uExt = 1;
2801 }
2802 else
2803 uExt = 0;
2804
2805 /*
2806 * Push any error code on to the new stack.
2807 */
2808 if (fFlags & IEM_XCPT_FLAGS_ERR)
2809 {
2810 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2811 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2812 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2813
2814 /* Check that there is sufficient space on the stack. */
2815 /** @todo Factor out segment limit checking for normal/expand down segments
2816 * into a separate function. */
2817 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2818 {
2819 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2820 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2821 {
2822 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2823 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2824 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2825 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2826 }
2827 }
2828 else
2829 {
2830 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2831 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2832 {
2833 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2834 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2835 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2836 }
2837 }
2838
2839
2840 if (fIsNewTSS386)
2841 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2842 else
2843 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2844 if (rcStrict != VINF_SUCCESS)
2845 {
2846 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2847 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2848 return rcStrict;
2849 }
2850 }
2851
2852 /* Check the new EIP against the new CS limit. */
2853 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2854 {
2855 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2856 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2857 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2858 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2859 }
2860
2861 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2862 pVCpu->cpum.GstCtx.ss.Sel));
2863 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2864}
2865
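/*
 * Illustrative sketch only (not part of the build): the caller contract from
 * the function comment above, roughly as the task-gate path in
 * iemRaiseXcptOrIntInProtMode below exercises it.  SelTSS is a hypothetical
 * selector and fFlags/uErr/uCr2 are assumed to come from the surrounding
 * exception-delivery context.
 *
 *    IEMSELDESC   DescTSS;
 *    RTSEL const  SelTSS   = 0x28;    // hypothetical TSS selector taken from a task gate
 *    VBOXSTRICTRC rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP,
 *                                                      SelTSS & X86_SEL_MASK_OFF_RPL);
 *    if (   rcStrict == VINF_SUCCESS
 *        && !DescTSS.Legacy.Gen.u1DescType                                       // system segment...
 *        && (   DescTSS.Legacy.Gen.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
 *            || DescTSS.Legacy.Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL)     // ...that is an available TSS
 *        && DescTSS.Legacy.Gen.u1Present)
 *        rcStrict = iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT, pVCpu->cpum.GstCtx.eip,
 *                                 fFlags, uErr, uCr2, SelTSS, &DescTSS);
 */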
2866
2867/**
2868 * Implements exceptions and interrupts for protected mode.
2869 *
2870 * @returns VBox strict status code.
2871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2872 * @param cbInstr The number of bytes to offset rIP by in the return
2873 * address.
2874 * @param u8Vector The interrupt / exception vector number.
2875 * @param fFlags The flags.
2876 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2877 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2878 */
2879static VBOXSTRICTRC
2880iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2881 uint8_t cbInstr,
2882 uint8_t u8Vector,
2883 uint32_t fFlags,
2884 uint16_t uErr,
2885 uint64_t uCr2) RT_NOEXCEPT
2886{
2887 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2888
2889 /*
2890 * Read the IDT entry.
2891 */
2892 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2893 {
2894 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2895 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2896 }
2897 X86DESC Idte;
2898 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2899 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2900 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2901 {
2902 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2903 return rcStrict;
2904 }
2905 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2906 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2907 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2908
2909 /*
2910 * Check the descriptor type, DPL and such.
2911 * ASSUMES this is done in the same order as described for call-gate calls.
2912 */
2913 if (Idte.Gate.u1DescType)
2914 {
2915 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2916 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2917 }
2918 bool fTaskGate = false;
2919 uint8_t f32BitGate = true;
2920 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2921 switch (Idte.Gate.u4Type)
2922 {
2923 case X86_SEL_TYPE_SYS_UNDEFINED:
2924 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2925 case X86_SEL_TYPE_SYS_LDT:
2926 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2927 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2928 case X86_SEL_TYPE_SYS_UNDEFINED2:
2929 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2930 case X86_SEL_TYPE_SYS_UNDEFINED3:
2931 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2932 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2933 case X86_SEL_TYPE_SYS_UNDEFINED4:
2934 {
2935 /** @todo check what actually happens when the type is wrong...
2936 * esp. call gates. */
2937 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2938 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2939 }
2940
2941 case X86_SEL_TYPE_SYS_286_INT_GATE:
2942 f32BitGate = false;
2943 RT_FALL_THRU();
2944 case X86_SEL_TYPE_SYS_386_INT_GATE:
2945 fEflToClear |= X86_EFL_IF;
2946 break;
2947
2948 case X86_SEL_TYPE_SYS_TASK_GATE:
2949 fTaskGate = true;
2950#ifndef IEM_IMPLEMENTS_TASKSWITCH
2951 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2952#endif
2953 break;
2954
2955 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2956 f32BitGate = false; RT_FALL_THRU();
2957 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2958 break;
2959
2960 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2961 }
2962
2963 /* Check DPL against CPL if applicable. */
2964 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2965 {
2966 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2967 {
2968 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2969 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2970 }
2971 }
2972
2973 /* Is it there? */
2974 if (!Idte.Gate.u1Present)
2975 {
2976 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2977 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2978 }
2979
2980 /* Is it a task-gate? */
2981 if (fTaskGate)
2982 {
2983 /*
2984 * Construct the error code masks based on what caused this task switch.
2985 * See Intel Instruction reference for INT.
2986 */
2987 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2988 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
2989 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
2990 RTSEL SelTSS = Idte.Gate.u16Sel;
2991
2992 /*
2993 * Fetch the TSS descriptor in the GDT.
2994 */
2995 IEMSELDESC DescTSS;
2996 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
2997 if (rcStrict != VINF_SUCCESS)
2998 {
2999 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3000 VBOXSTRICTRC_VAL(rcStrict)));
3001 return rcStrict;
3002 }
3003
3004 /* The TSS descriptor must be a system segment and be available (not busy). */
3005 if ( DescTSS.Legacy.Gen.u1DescType
3006 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3007 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3008 {
3009 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3010 u8Vector, SelTSS, DescTSS.Legacy.au64));
3011 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3012 }
3013
3014 /* The TSS must be present. */
3015 if (!DescTSS.Legacy.Gen.u1Present)
3016 {
3017 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3018 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3019 }
3020
3021 /* Do the actual task switch. */
3022 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3023 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3024 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3025 }
3026
3027 /* A null CS is bad. */
3028 RTSEL NewCS = Idte.Gate.u16Sel;
3029 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3030 {
3031 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3032 return iemRaiseGeneralProtectionFault0(pVCpu);
3033 }
3034
3035 /* Fetch the descriptor for the new CS. */
3036 IEMSELDESC DescCS;
3037 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3038 if (rcStrict != VINF_SUCCESS)
3039 {
3040 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3041 return rcStrict;
3042 }
3043
3044 /* Must be a code segment. */
3045 if (!DescCS.Legacy.Gen.u1DescType)
3046 {
3047 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3048 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3049 }
3050 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3051 {
3052 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3053 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3054 }
3055
3056 /* Don't allow lowering the privilege level. */
3057 /** @todo Does the lowering of privileges apply to software interrupts
3058 * only? This has bearings on the more-privileged or
3059 * same-privilege stack behavior further down. A testcase would
3060 * be nice. */
3061 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3062 {
3063 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3064 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3065 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3066 }
3067
3068 /* Make sure the selector is present. */
3069 if (!DescCS.Legacy.Gen.u1Present)
3070 {
3071 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3072 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3073 }
3074
3075 /* Check the new EIP against the new CS limit. */
3076 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3077 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3078 ? Idte.Gate.u16OffsetLow
3079 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3080 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3081 if (uNewEip > cbLimitCS)
3082 {
3083 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3084 u8Vector, uNewEip, cbLimitCS, NewCS));
3085 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3086 }
3087 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3088
3089 /* Calc the flag image to push. */
3090 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3091 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3092 fEfl &= ~X86_EFL_RF;
3093 else
3094 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3095
3096 /* From V8086 mode only go to CPL 0. */
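     /* (A conforming CS keeps the current CPL; a non-conforming CS runs the handler at CS.DPL.) */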
3097 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3098 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3099 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3100 {
3101 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3102 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3103 }
3104
3105 /*
3106 * If the privilege level changes, we need to get a new stack from the TSS.
3107     * This in turn means validating the new SS and ESP...
3108 */
3109 if (uNewCpl != pVCpu->iem.s.uCpl)
3110 {
3111 RTSEL NewSS;
3112 uint32_t uNewEsp;
3113 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3114 if (rcStrict != VINF_SUCCESS)
3115 return rcStrict;
3116
3117 IEMSELDESC DescSS;
3118 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3119 if (rcStrict != VINF_SUCCESS)
3120 return rcStrict;
3121 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3122 if (!DescSS.Legacy.Gen.u1DefBig)
3123 {
3124 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3125 uNewEsp = (uint16_t)uNewEsp;
3126 }
3127
3128 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3129
3130 /* Check that there is sufficient space for the stack frame. */
3131 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
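         /* Frame layout: [err] eip cs eflags esp ss, plus es ds fs gs when coming from V86 mode;
            each entry is 2 bytes for a 16-bit gate and 4 bytes for a 32-bit gate (the f32BitGate shift). */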
3132 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3133 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3134 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3135
3136 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3137 {
3138 if ( uNewEsp - 1 > cbLimitSS
3139 || uNewEsp < cbStackFrame)
3140 {
3141 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3142 u8Vector, NewSS, uNewEsp, cbStackFrame));
3143 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3144 }
3145 }
3146 else
3147 {
3148 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3149 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3150 {
3151 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3152 u8Vector, NewSS, uNewEsp, cbStackFrame));
3153 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3154 }
3155 }
3156
3157 /*
3158 * Start making changes.
3159 */
3160
3161 /* Set the new CPL so that stack accesses use it. */
3162 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3163 pVCpu->iem.s.uCpl = uNewCpl;
3164
3165 /* Create the stack frame. */
3166 RTPTRUNION uStackFrame;
3167 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3168 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3169 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3170 if (rcStrict != VINF_SUCCESS)
3171 return rcStrict;
3172 void * const pvStackFrame = uStackFrame.pv;
3173 if (f32BitGate)
3174 {
3175 if (fFlags & IEM_XCPT_FLAGS_ERR)
3176 *uStackFrame.pu32++ = uErr;
3177 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3178 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3179 uStackFrame.pu32[2] = fEfl;
3180 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3181 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3182 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3183 if (fEfl & X86_EFL_VM)
3184 {
3185 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3186 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3187 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3188 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3189 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3190 }
3191 }
3192 else
3193 {
3194 if (fFlags & IEM_XCPT_FLAGS_ERR)
3195 *uStackFrame.pu16++ = uErr;
3196 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3197 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3198 uStackFrame.pu16[2] = fEfl;
3199 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3200 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3201 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3202 if (fEfl & X86_EFL_VM)
3203 {
3204 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3205 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3206 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3207 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3208 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3209 }
3210 }
3211 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3212 if (rcStrict != VINF_SUCCESS)
3213 return rcStrict;
3214
3215 /* Mark the selectors 'accessed' (hope this is the correct time). */
3216         /** @todo testcase: exactly _when_ are the accessed bits set - before or
3217 * after pushing the stack frame? (Write protect the gdt + stack to
3218 * find out.) */
3219 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3220 {
3221 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3222 if (rcStrict != VINF_SUCCESS)
3223 return rcStrict;
3224 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3225 }
3226
3227 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3228 {
3229 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3230 if (rcStrict != VINF_SUCCESS)
3231 return rcStrict;
3232 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3233 }
3234
3235 /*
3236         * Start committing the register changes (joins with the DPL=CPL branch).
3237 */
3238 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3239 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3240 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3241 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3242 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3243 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3244 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3245 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3246 * SP is loaded).
3247 * Need to check the other combinations too:
3248 * - 16-bit TSS, 32-bit handler
3249 * - 32-bit TSS, 16-bit handler */
3250 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3251 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3252 else
3253 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3254
3255 if (fEfl & X86_EFL_VM)
3256 {
3257 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3258 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3259 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3260 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3261 }
3262 }
3263 /*
3264 * Same privilege, no stack change and smaller stack frame.
3265 */
3266 else
3267 {
3268 uint64_t uNewRsp;
3269 RTPTRUNION uStackFrame;
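         /* Frame layout: [err] eip cs eflags; 2 bytes per entry for a 16-bit gate, 4 bytes for a 32-bit gate. */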
3270 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3271 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3272 if (rcStrict != VINF_SUCCESS)
3273 return rcStrict;
3274 void * const pvStackFrame = uStackFrame.pv;
3275
3276 if (f32BitGate)
3277 {
3278 if (fFlags & IEM_XCPT_FLAGS_ERR)
3279 *uStackFrame.pu32++ = uErr;
3280 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3281 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3282 uStackFrame.pu32[2] = fEfl;
3283 }
3284 else
3285 {
3286 if (fFlags & IEM_XCPT_FLAGS_ERR)
3287 *uStackFrame.pu16++ = uErr;
3288 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3289 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3290 uStackFrame.pu16[2] = fEfl;
3291 }
3292 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3293 if (rcStrict != VINF_SUCCESS)
3294 return rcStrict;
3295
3296 /* Mark the CS selector as 'accessed'. */
3297 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3298 {
3299 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3300 if (rcStrict != VINF_SUCCESS)
3301 return rcStrict;
3302 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3303 }
3304
3305 /*
3306 * Start committing the register changes (joins with the other branch).
3307 */
3308 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3309 }
3310
3311 /* ... register committing continues. */
3312 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3313 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3314 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3315 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3316 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3317 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3318
3319 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3320 fEfl &= ~fEflToClear;
3321 IEMMISC_SET_EFL(pVCpu, fEfl);
3322
3323 if (fFlags & IEM_XCPT_FLAGS_CR2)
3324 pVCpu->cpum.GstCtx.cr2 = uCr2;
3325
3326 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3327 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3328
3329 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3330}
3331
3332
3333/**
3334 * Implements exceptions and interrupts for long mode.
3335 *
3336 * @returns VBox strict status code.
3337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3338 * @param cbInstr The number of bytes to offset rIP by in the return
3339 * address.
3340 * @param u8Vector The interrupt / exception vector number.
3341 * @param fFlags The flags.
3342 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3343 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3344 */
3345static VBOXSTRICTRC
3346iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3347 uint8_t cbInstr,
3348 uint8_t u8Vector,
3349 uint32_t fFlags,
3350 uint16_t uErr,
3351 uint64_t uCr2) RT_NOEXCEPT
3352{
3353 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3354
3355 /*
3356 * Read the IDT entry.
3357 */
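     /* Each long mode IDT entry is 16 bytes, hence the shift by 4. */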
3358 uint16_t offIdt = (uint16_t)u8Vector << 4;
3359 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3360 {
3361 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3362 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3363 }
3364 X86DESC64 Idte;
3365#ifdef _MSC_VER /* Shut up silly compiler warning. */
3366 Idte.au64[0] = 0;
3367 Idte.au64[1] = 0;
3368#endif
3369 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3370 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3371 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3372 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3373 {
3374 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3375 return rcStrict;
3376 }
3377 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3378 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3379 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3380
3381 /*
3382 * Check the descriptor type, DPL and such.
3383 * ASSUMES this is done in the same order as described for call-gate calls.
3384 */
3385 if (Idte.Gate.u1DescType)
3386 {
3387 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3388 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3389 }
3390 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3391 switch (Idte.Gate.u4Type)
3392 {
3393 case AMD64_SEL_TYPE_SYS_INT_GATE:
3394 fEflToClear |= X86_EFL_IF;
3395 break;
3396 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3397 break;
3398
3399 default:
3400 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3401 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3402 }
3403
3404 /* Check DPL against CPL if applicable. */
3405 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3406 {
3407 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3408 {
3409 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3410 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3411 }
3412 }
3413
3414 /* Is it there? */
3415 if (!Idte.Gate.u1Present)
3416 {
3417 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3418 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3419 }
3420
3421 /* A null CS is bad. */
3422 RTSEL NewCS = Idte.Gate.u16Sel;
3423 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3424 {
3425 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3426 return iemRaiseGeneralProtectionFault0(pVCpu);
3427 }
3428
3429 /* Fetch the descriptor for the new CS. */
3430 IEMSELDESC DescCS;
3431 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3432 if (rcStrict != VINF_SUCCESS)
3433 {
3434 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3435 return rcStrict;
3436 }
3437
3438 /* Must be a 64-bit code segment. */
3439 if (!DescCS.Long.Gen.u1DescType)
3440 {
3441 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3442 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3443 }
3444 if ( !DescCS.Long.Gen.u1Long
3445 || DescCS.Long.Gen.u1DefBig
3446 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3447 {
3448 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3449 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3450 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3451 }
3452
3453 /* Don't allow lowering the privilege level. For non-conforming CS
3454 selectors, the CS.DPL sets the privilege level the trap/interrupt
3455 handler runs at. For conforming CS selectors, the CPL remains
3456 unchanged, but the CS.DPL must be <= CPL. */
3457 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3458 * when CPU in Ring-0. Result \#GP? */
3459 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3460 {
3461 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3462 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3463 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3464 }
3465
3466
3467 /* Make sure the selector is present. */
3468 if (!DescCS.Legacy.Gen.u1Present)
3469 {
3470 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3471 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3472 }
3473
3474 /* Check that the new RIP is canonical. */
3475 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3476 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3477 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3478 if (!IEM_IS_CANONICAL(uNewRip))
3479 {
3480 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3481 return iemRaiseGeneralProtectionFault0(pVCpu);
3482 }
3483
3484 /*
3485 * If the privilege level changes or if the IST isn't zero, we need to get
3486 * a new stack from the TSS.
3487 */
3488 uint64_t uNewRsp;
3489 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3490 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3491 if ( uNewCpl != pVCpu->iem.s.uCpl
3492 || Idte.Gate.u3IST != 0)
3493 {
3494 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3495 if (rcStrict != VINF_SUCCESS)
3496 return rcStrict;
3497 }
3498 else
3499 uNewRsp = pVCpu->cpum.GstCtx.rsp;
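     /* The CPU aligns RSP to a 16-byte boundary before pushing the 64-bit interrupt frame
        (see Intel SDM vol 3, "64-Bit Mode Stack Frame"). */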
3500 uNewRsp &= ~(uint64_t)0xf;
3501
3502 /*
3503 * Calc the flag image to push.
3504 */
3505 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3506 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3507 fEfl &= ~X86_EFL_RF;
3508 else
3509 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3510
3511 /*
3512 * Start making changes.
3513 */
3514 /* Set the new CPL so that stack accesses use it. */
3515 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3516 pVCpu->iem.s.uCpl = uNewCpl;
3517
3518 /* Create the stack frame. */
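     /* Five qwords: return RIP, CS, RFLAGS, old RSP and SS, plus an optional error code qword. */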
3519 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3520 RTPTRUNION uStackFrame;
3521 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3522 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3523 if (rcStrict != VINF_SUCCESS)
3524 return rcStrict;
3525 void * const pvStackFrame = uStackFrame.pv;
3526
3527 if (fFlags & IEM_XCPT_FLAGS_ERR)
3528 *uStackFrame.pu64++ = uErr;
3529 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3530 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3531 uStackFrame.pu64[2] = fEfl;
3532 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3533 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3534 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3535 if (rcStrict != VINF_SUCCESS)
3536 return rcStrict;
3537
3538     /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3539     /** @todo testcase: exactly _when_ are the accessed bits set - before or
3540 * after pushing the stack frame? (Write protect the gdt + stack to
3541 * find out.) */
3542 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3543 {
3544 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3545 if (rcStrict != VINF_SUCCESS)
3546 return rcStrict;
3547 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3548 }
3549
3550 /*
3551     * Start committing the register changes.
3552 */
3553     /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3554 * hidden registers when interrupting 32-bit or 16-bit code! */
3555 if (uNewCpl != uOldCpl)
3556 {
3557 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3558 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3559 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3560 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3561 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3562 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3563 }
3564 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3565 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3566 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3567 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3568 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3569 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3570 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3571 pVCpu->cpum.GstCtx.rip = uNewRip;
3572
3573 fEfl &= ~fEflToClear;
3574 IEMMISC_SET_EFL(pVCpu, fEfl);
3575
3576 if (fFlags & IEM_XCPT_FLAGS_CR2)
3577 pVCpu->cpum.GstCtx.cr2 = uCr2;
3578
3579 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3580 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3581
3582 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3583}
3584
3585
3586/**
3587 * Implements exceptions and interrupts.
3588 *
3589 * All exceptions and interrupts go through this function!
3590 *
3591 * @returns VBox strict status code.
3592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3593 * @param cbInstr The number of bytes to offset rIP by in the return
3594 * address.
3595 * @param u8Vector The interrupt / exception vector number.
3596 * @param fFlags The flags.
3597 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3598 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3599 */
3600VBOXSTRICTRC
3601iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3602 uint8_t cbInstr,
3603 uint8_t u8Vector,
3604 uint32_t fFlags,
3605 uint16_t uErr,
3606 uint64_t uCr2) RT_NOEXCEPT
3607{
3608 /*
3609 * Get all the state that we might need here.
3610 */
3611 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3612 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3613
3614#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3615 /*
3616 * Flush prefetch buffer
3617 */
3618 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3619#endif
3620
3621 /*
3622 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3623 */
3624 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3625 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3626 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3627 | IEM_XCPT_FLAGS_BP_INSTR
3628 | IEM_XCPT_FLAGS_ICEBP_INSTR
3629 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3630 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3631 {
3632 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3633 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3634 u8Vector = X86_XCPT_GP;
3635 uErr = 0;
3636 }
3637#ifdef DBGFTRACE_ENABLED
3638 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3639 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3640 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3641#endif
3642
3643 /*
3644 * Evaluate whether NMI blocking should be in effect.
3645 * Normally, NMI blocking is in effect whenever we inject an NMI.
3646 */
3647 bool fBlockNmi;
3648 if ( u8Vector == X86_XCPT_NMI
3649 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3650 fBlockNmi = true;
3651 else
3652 fBlockNmi = false;
3653
3654#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3655 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3656 {
3657 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3658 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3659 return rcStrict0;
3660
3661 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3662 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3663 {
3664 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3665 fBlockNmi = false;
3666 }
3667 }
3668#endif
3669
3670#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3671 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3672 {
3673 /*
3674 * If the event is being injected as part of VMRUN, it isn't subject to event
3675 * intercepts in the nested-guest. However, secondary exceptions that occur
3676 * during injection of any event -are- subject to exception intercepts.
3677 *
3678 * See AMD spec. 15.20 "Event Injection".
3679 */
3680 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3681 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3682 else
3683 {
3684 /*
3685 * Check and handle if the event being raised is intercepted.
3686 */
3687 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3688 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3689 return rcStrict0;
3690 }
3691 }
3692#endif
3693
3694 /*
3695 * Set NMI blocking if necessary.
3696 */
3697 if ( fBlockNmi
3698 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3699 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3700
3701 /*
3702 * Do recursion accounting.
3703 */
3704 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3705 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3706 if (pVCpu->iem.s.cXcptRecursions == 0)
3707 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3708 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3709 else
3710 {
3711 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3712 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3713 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3714
3715 if (pVCpu->iem.s.cXcptRecursions >= 4)
3716 {
3717#ifdef DEBUG_bird
3718 AssertFailed();
3719#endif
3720 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3721 }
3722
3723 /*
3724 * Evaluate the sequence of recurring events.
3725 */
3726 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3727 NULL /* pXcptRaiseInfo */);
3728 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3729 { /* likely */ }
3730 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3731 {
3732 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3733 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3734 u8Vector = X86_XCPT_DF;
3735 uErr = 0;
3736#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3737 /* VMX nested-guest #DF intercept needs to be checked here. */
3738 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3739 {
3740 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3741 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3742 return rcStrict0;
3743 }
3744#endif
3745 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3746 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3747 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3748 }
3749 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3750 {
3751 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3752 return iemInitiateCpuShutdown(pVCpu);
3753 }
3754 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3755 {
3756 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3757 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3758 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3759 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3760 return VERR_EM_GUEST_CPU_HANG;
3761 }
3762 else
3763 {
3764 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3765 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3766 return VERR_IEM_IPE_9;
3767 }
3768
3769 /*
3770         * The 'EXT' bit is set when an exception occurs during delivery of an external
3771         * event (such as an interrupt or an earlier exception)[1]. The privileged software
3772         * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
3773         * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3774 *
3775 * [1] - Intel spec. 6.13 "Error Code"
3776 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3777 * [3] - Intel Instruction reference for INT n.
3778 */
3779 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3780 && (fFlags & IEM_XCPT_FLAGS_ERR)
3781 && u8Vector != X86_XCPT_PF
3782 && u8Vector != X86_XCPT_DF)
3783 {
3784 uErr |= X86_TRAP_ERR_EXTERNAL;
3785 }
3786 }
3787
3788 pVCpu->iem.s.cXcptRecursions++;
3789 pVCpu->iem.s.uCurXcpt = u8Vector;
3790 pVCpu->iem.s.fCurXcpt = fFlags;
3791 pVCpu->iem.s.uCurXcptErr = uErr;
3792 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3793
3794 /*
3795 * Extensive logging.
3796 */
3797#if defined(LOG_ENABLED) && defined(IN_RING3)
3798 if (LogIs3Enabled())
3799 {
3800 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3801 PVM pVM = pVCpu->CTX_SUFF(pVM);
3802 char szRegs[4096];
3803 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3804 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3805 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3806 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3807 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3808 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3809 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3810 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3811 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3812 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3813 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3814 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3815 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3816 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3817 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3818 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3819 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3820 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3821 " efer=%016VR{efer}\n"
3822 " pat=%016VR{pat}\n"
3823 " sf_mask=%016VR{sf_mask}\n"
3824 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3825 " lstar=%016VR{lstar}\n"
3826 " star=%016VR{star} cstar=%016VR{cstar}\n"
3827 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3828 );
3829
3830 char szInstr[256];
3831 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3832 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3833 szInstr, sizeof(szInstr), NULL);
3834 Log3(("%s%s\n", szRegs, szInstr));
3835 }
3836#endif /* LOG_ENABLED */
3837
3838 /*
3839 * Stats.
3840 */
3841 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3842 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
3843 else if (u8Vector <= X86_XCPT_LAST)
3844 {
3845 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
3846 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
3847 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
3848 }
3849
3850 /*
3851     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
3852 * to ensure that a stale TLB or paging cache entry will only cause one
3853 * spurious #PF.
3854 */
3855 if ( u8Vector == X86_XCPT_PF
3856 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
3857 IEMTlbInvalidatePage(pVCpu, uCr2);
3858
3859 /*
3860 * Call the mode specific worker function.
3861 */
3862 VBOXSTRICTRC rcStrict;
3863 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3864 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3865 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3866 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3867 else
3868 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3869
3870 /* Flush the prefetch buffer. */
3871#ifdef IEM_WITH_CODE_TLB
3872 pVCpu->iem.s.pbInstrBuf = NULL;
3873#else
3874 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3875#endif
3876
3877 /*
3878 * Unwind.
3879 */
3880 pVCpu->iem.s.cXcptRecursions--;
3881 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3882 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3883 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3884 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3885 pVCpu->iem.s.cXcptRecursions + 1));
3886 return rcStrict;
3887}
3888
3889#ifdef IEM_WITH_SETJMP
3890/**
3891 * See iemRaiseXcptOrInt. Will not return.
3892 */
3893DECL_NO_RETURN(void)
3894iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3895 uint8_t cbInstr,
3896 uint8_t u8Vector,
3897 uint32_t fFlags,
3898 uint16_t uErr,
3899 uint64_t uCr2) RT_NOEXCEPT
3900{
3901 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3902 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3903}
3904#endif
3905
3906
3907/** \#DE - 00. */
3908VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3909{
3910 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3911}
3912
3913
3914/** \#DB - 01.
3915 * @note This automatically clears DR7.GD. */
3916VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3917{
3918 /** @todo set/clear RF. */
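     /* The CPU clears DR7.GD when entering the #DB handler so the handler itself can access the debug registers. */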
3919 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3920 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3921}
3922
3923
3924/** \#BR - 05. */
3925VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3926{
3927 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3928}
3929
3930
3931/** \#UD - 06. */
3932VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3933{
3934 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3935}
3936
3937
3938/** \#NM - 07. */
3939VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3940{
3941 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3942}
3943
3944
3945/** \#TS(err) - 0a. */
3946VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3947{
3948 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3949}
3950
3951
3952/** \#TS(tr) - 0a. */
3953VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3954{
3955 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3956 pVCpu->cpum.GstCtx.tr.Sel, 0);
3957}
3958
3959
3960/** \#TS(0) - 0a. */
3961VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3962{
3963 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3964 0, 0);
3965}
3966
3967
3968/** \#TS(err) - 0a. */
3969VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3970{
3971 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3972 uSel & X86_SEL_MASK_OFF_RPL, 0);
3973}
3974
3975
3976/** \#NP(err) - 0b. */
3977VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3978{
3979 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3980}
3981
3982
3983/** \#NP(sel) - 0b. */
3984VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3985{
3986 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3987 uSel & ~X86_SEL_RPL, 0);
3988}
3989
3990
3991/** \#SS(seg) - 0c. */
3992VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3993{
3994 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3995 uSel & ~X86_SEL_RPL, 0);
3996}
3997
3998
3999/** \#SS(err) - 0c. */
4000VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4001{
4002 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4003}
4004
4005
4006/** \#GP(n) - 0d. */
4007VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4008{
4009 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4010}
4011
4012
4013/** \#GP(0) - 0d. */
4014VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4015{
4016 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4017}
4018
4019#ifdef IEM_WITH_SETJMP
4020/** \#GP(0) - 0d. */
4021DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4022{
4023 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4024}
4025#endif
4026
4027
4028/** \#GP(sel) - 0d. */
4029VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4030{
4031 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4032 Sel & ~X86_SEL_RPL, 0);
4033}
4034
4035
4036/** \#GP(0) - 0d. */
4037VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4038{
4039 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4040}
4041
4042
4043/** \#GP(sel) - 0d. */
4044VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4045{
4046 NOREF(iSegReg); NOREF(fAccess);
4047 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4048 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4049}
4050
4051#ifdef IEM_WITH_SETJMP
4052/** \#GP(sel) - 0d, longjmp. */
4053DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4054{
4055 NOREF(iSegReg); NOREF(fAccess);
4056 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4057 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4058}
4059#endif
4060
4061/** \#GP(sel) - 0d. */
4062VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4063{
4064 NOREF(Sel);
4065 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4066}
4067
4068#ifdef IEM_WITH_SETJMP
4069/** \#GP(sel) - 0d, longjmp. */
4070DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4071{
4072 NOREF(Sel);
4073 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4074}
4075#endif
4076
4077
4078/** \#GP(sel) - 0d. */
4079VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4080{
4081 NOREF(iSegReg); NOREF(fAccess);
4082 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4083}
4084
4085#ifdef IEM_WITH_SETJMP
4086/** \#GP(sel) - 0d, longjmp. */
4087DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4088{
4089 NOREF(iSegReg); NOREF(fAccess);
4090 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4091}
4092#endif
4093
4094
4095/** \#PF(n) - 0e. */
4096VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4097{
4098 uint16_t uErr;
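     /* Error code bits assembled below: P (bit 0) = protection violation vs. not-present,
        W/R (bit 1) = write access, U/S (bit 2) = user-mode access, I/D (bit 4) = instruction fetch. */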
4099 switch (rc)
4100 {
4101 case VERR_PAGE_NOT_PRESENT:
4102 case VERR_PAGE_TABLE_NOT_PRESENT:
4103 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4104 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4105 uErr = 0;
4106 break;
4107
4108 default:
4109 AssertMsgFailed(("%Rrc\n", rc));
4110 RT_FALL_THRU();
4111 case VERR_ACCESS_DENIED:
4112 uErr = X86_TRAP_PF_P;
4113 break;
4114
4115 /** @todo reserved */
4116 }
4117
4118 if (pVCpu->iem.s.uCpl == 3)
4119 uErr |= X86_TRAP_PF_US;
4120
4121 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4122 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4123 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4124 uErr |= X86_TRAP_PF_ID;
4125
4126#if 0 /* This is so much non-sense, really. Why was it done like that? */
4127 /* Note! RW access callers reporting a WRITE protection fault, will clear
4128 the READ flag before calling. So, read-modify-write accesses (RW)
4129 can safely be reported as READ faults. */
4130 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4131 uErr |= X86_TRAP_PF_RW;
4132#else
4133 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4134 {
4135 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4136 /// (regardless of outcome of the comparison in the latter case).
4137 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4138 uErr |= X86_TRAP_PF_RW;
4139 }
4140#endif
4141
4142 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4143 uErr, GCPtrWhere);
4144}
4145
4146#ifdef IEM_WITH_SETJMP
4147/** \#PF(n) - 0e, longjmp. */
4148DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4149{
4150 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4151}
4152#endif
4153
4154
4155/** \#MF(0) - 10. */
4156VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu)
4157{
4158 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4159}
4160
4161
4162/** \#AC(0) - 11. */
4163VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
4164{
4165 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4166}
4167
4168#ifdef IEM_WITH_SETJMP
4169/** \#AC(0) - 11, longjmp. */
4170DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4171{
4172 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4173}
4174#endif
4175
4176
4177/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4178IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4179{
4180 NOREF(cbInstr);
4181 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4182}
4183
4184
4185/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4186IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4187{
4188 NOREF(cbInstr);
4189 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4190}
4191
4192
4193/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4194IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4195{
4196 NOREF(cbInstr);
4197 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4198}
4199
4200
4201/** @} */
4202
4203/** @name Common opcode decoders.
4204 * @{
4205 */
4206//#include <iprt/mem.h>
4207
4208/**
4209 * Used to add extra details about a stub case.
4210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4211 */
4212void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4213{
4214#if defined(LOG_ENABLED) && defined(IN_RING3)
4215 PVM pVM = pVCpu->CTX_SUFF(pVM);
4216 char szRegs[4096];
4217 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4218 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4219 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4220 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4221 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4222 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4223 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4224 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4225 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4226 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4227 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4228 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4229 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4230 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4231 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4232 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4233 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4234 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4235 " efer=%016VR{efer}\n"
4236 " pat=%016VR{pat}\n"
4237 " sf_mask=%016VR{sf_mask}\n"
4238 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4239 " lstar=%016VR{lstar}\n"
4240 " star=%016VR{star} cstar=%016VR{cstar}\n"
4241 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4242 );
4243
4244 char szInstr[256];
4245 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4246 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4247 szInstr, sizeof(szInstr), NULL);
4248
4249 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4250#else
4251     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4252#endif
4253}
4254
4255/** @} */
4256
4257
4258
4259/** @name Register Access.
4260 * @{
4261 */
4262
4263/**
4264 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4265 *
4266 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4267 * segment limit.
4268 *
4269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4270 * @param offNextInstr The offset of the next instruction.
4271 */
4272VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4273{
4274 switch (pVCpu->iem.s.enmEffOpSize)
4275 {
4276 case IEMMODE_16BIT:
4277 {
4278 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4279 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4280 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4281 return iemRaiseGeneralProtectionFault0(pVCpu);
4282 pVCpu->cpum.GstCtx.rip = uNewIp;
4283 break;
4284 }
4285
4286 case IEMMODE_32BIT:
4287 {
4288 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4289 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4290
4291 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4292 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4293 return iemRaiseGeneralProtectionFault0(pVCpu);
4294 pVCpu->cpum.GstCtx.rip = uNewEip;
4295 break;
4296 }
4297
4298 case IEMMODE_64BIT:
4299 {
4300 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4301
4302 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4303 if (!IEM_IS_CANONICAL(uNewRip))
4304 return iemRaiseGeneralProtectionFault0(pVCpu);
4305 pVCpu->cpum.GstCtx.rip = uNewRip;
4306 break;
4307 }
4308
4309 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4310 }
4311
4312 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4313
4314#ifndef IEM_WITH_CODE_TLB
4315 /* Flush the prefetch buffer. */
4316 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4317#endif
4318
4319 return VINF_SUCCESS;
4320}
4321
4322
4323/**
4324 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4325 *
4326 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4327 * segment limit.
4328 *
4329 * @returns Strict VBox status code.
4330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4331 * @param offNextInstr The offset of the next instruction.
4332 */
4333VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4334{
4335 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4336
4337 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4338 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4339 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4340 return iemRaiseGeneralProtectionFault0(pVCpu);
4341 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4342 pVCpu->cpum.GstCtx.rip = uNewIp;
4343 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4344
4345#ifndef IEM_WITH_CODE_TLB
4346 /* Flush the prefetch buffer. */
4347 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4348#endif
4349
4350 return VINF_SUCCESS;
4351}
4352
4353
4354/**
4355 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4356 *
4357 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4358 * segment limit.
4359 *
4360 * @returns Strict VBox status code.
4361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4362 * @param offNextInstr The offset of the next instruction.
4363 */
4364VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4365{
4366 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4367
4368 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4369 {
4370 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4371
4372 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4373 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4374 return iemRaiseGeneralProtectionFault0(pVCpu);
4375 pVCpu->cpum.GstCtx.rip = uNewEip;
4376 }
4377 else
4378 {
4379 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4380
4381 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4382 if (!IEM_IS_CANONICAL(uNewRip))
4383 return iemRaiseGeneralProtectionFault0(pVCpu);
4384 pVCpu->cpum.GstCtx.rip = uNewRip;
4385 }
4386 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4387
4388#ifndef IEM_WITH_CODE_TLB
4389 /* Flush the prefetch buffer. */
4390 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4391#endif
4392
4393 return VINF_SUCCESS;
4394}
4395
4396
4397/**
4398 * Performs a near jump to the specified address.
4399 *
4400 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4401 * segment limit.
4402 *
4403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4404 * @param uNewRip The new RIP value.
4405 */
4406VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4407{
4408 switch (pVCpu->iem.s.enmEffOpSize)
4409 {
4410 case IEMMODE_16BIT:
4411 {
4412 Assert(uNewRip <= UINT16_MAX);
4413 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4414 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4415 return iemRaiseGeneralProtectionFault0(pVCpu);
4416 /** @todo Test 16-bit jump in 64-bit mode. */
4417 pVCpu->cpum.GstCtx.rip = uNewRip;
4418 break;
4419 }
4420
4421 case IEMMODE_32BIT:
4422 {
4423 Assert(uNewRip <= UINT32_MAX);
4424 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4425 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4426
4427 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4428 return iemRaiseGeneralProtectionFault0(pVCpu);
4429 pVCpu->cpum.GstCtx.rip = uNewRip;
4430 break;
4431 }
4432
4433 case IEMMODE_64BIT:
4434 {
4435 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4436
4437 if (!IEM_IS_CANONICAL(uNewRip))
4438 return iemRaiseGeneralProtectionFault0(pVCpu);
4439 pVCpu->cpum.GstCtx.rip = uNewRip;
4440 break;
4441 }
4442
4443 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4444 }
4445
4446 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4447
4448#ifndef IEM_WITH_CODE_TLB
4449 /* Flush the prefetch buffer. */
4450 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4451#endif
4452
4453 return VINF_SUCCESS;
4454}
4455
4456/** @} */
4457
4458
4459/** @name FPU access and helpers.
4460 *
4461 * @{
4462 */
4463
4464/**
4465 * Updates the x87.DS and FPUDP registers.
4466 *
4467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4468 * @param pFpuCtx The FPU context.
4469 * @param iEffSeg The effective segment register.
4470 * @param GCPtrEff The effective address relative to @a iEffSeg.
4471 */
4472DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4473{
4474 RTSEL sel;
4475 switch (iEffSeg)
4476 {
4477 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4478 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4479 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4480 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4481 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4482 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4483 default:
4484 AssertMsgFailed(("%d\n", iEffSeg));
4485 sel = pVCpu->cpum.GstCtx.ds.Sel;
4486 }
4487 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
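    /* Three cases below: real/V86 mode stores the linear address (selector * 16 + offset),
       protected mode stores the selector and offset, and long mode stores the full 64-bit offset. */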
4488 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4489 {
4490 pFpuCtx->DS = 0;
4491 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4492 }
4493 else if (!IEM_IS_LONG_MODE(pVCpu))
4494 {
4495 pFpuCtx->DS = sel;
4496 pFpuCtx->FPUDP = GCPtrEff;
4497 }
4498 else
4499 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4500}
4501
4502
4503/**
4504 * Rotates the stack registers in the push direction.
4505 *
4506 * @param pFpuCtx The FPU context.
4507 * @remarks This is a complete waste of time, but fxsave stores the registers in
4508 * stack order.
4509 */
4510DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4511{
4512 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4513 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4514 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4515 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4516 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4517 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4518 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4519 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4520 pFpuCtx->aRegs[0].r80 = r80Tmp;
4521}
4522
4523
4524/**
4525 * Rotates the stack registers in the pop direction.
4526 *
4527 * @param pFpuCtx The FPU context.
4528 * @remarks This is a complete waste of time, but fxsave stores the registers in
4529 * stack order.
4530 */
4531DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4532{
4533 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4534 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4535 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4536 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4537 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4538 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4539 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4540 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4541 pFpuCtx->aRegs[7].r80 = r80Tmp;
4542}
4543
4544
4545/**
4546 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4547 * exception prevents it.
4548 *
4549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4550 * @param pResult The FPU operation result to push.
4551 * @param pFpuCtx The FPU context.
4552 */
4553static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4554{
4555 /* Update FSW and bail if there are pending exceptions afterwards. */
4556 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4557 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4558 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4559 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4560 {
4561 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4562 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4563 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4564 pFpuCtx->FSW = fFsw;
4565 return;
4566 }
4567
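    /* Pushing decrements TOP; adding 7 and masking is the same as subtracting 1 modulo 8. */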
4568 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4569 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4570 {
4571 /* All is fine, push the actual value. */
4572 pFpuCtx->FTW |= RT_BIT(iNewTop);
4573 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4574 }
4575 else if (pFpuCtx->FCW & X86_FCW_IM)
4576 {
4577 /* Masked stack overflow, push QNaN. */
4578 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4579 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4580 }
4581 else
4582 {
4583 /* Raise stack overflow, don't push anything. */
4584 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4585 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4586 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4587 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4588 return;
4589 }
4590
4591 fFsw &= ~X86_FSW_TOP_MASK;
4592 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4593 pFpuCtx->FSW = fFsw;
4594
4595 iemFpuRotateStackPush(pFpuCtx);
4596 RT_NOREF(pVCpu);
4597}
4598
4599
4600/**
4601 * Stores a result in a FPU register and updates the FSW and FTW.
4602 *
4603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4604 * @param pFpuCtx The FPU context.
4605 * @param pResult The result to store.
4606 * @param iStReg Which FPU register to store it in.
4607 */
4608static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4609{
4610 Assert(iStReg < 8);
4611 uint16_t fNewFsw = pFpuCtx->FSW;
4612 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4613 fNewFsw &= ~X86_FSW_C_MASK;
4614 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4615 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4616 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4617 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4618 pFpuCtx->FSW = fNewFsw;
4619 pFpuCtx->FTW |= RT_BIT(iReg);
4620 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4621 RT_NOREF(pVCpu);
4622}
4623
4624
4625/**
4626 * Only updates the FPU status word (FSW) with the result of the current
4627 * instruction.
4628 *
4629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4630 * @param pFpuCtx The FPU context.
4631 * @param u16FSW The FSW output of the current instruction.
4632 */
4633static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4634{
4635 uint16_t fNewFsw = pFpuCtx->FSW;
4636 fNewFsw &= ~X86_FSW_C_MASK;
4637 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4638 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4639 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4640 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4641 pFpuCtx->FSW = fNewFsw;
4642 RT_NOREF(pVCpu);
4643}
4644
4645
4646/**
4647 * Pops one item off the FPU stack if no pending exception prevents it.
4648 *
4649 * @param pFpuCtx The FPU context.
4650 */
4651static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4652{
4653 /* Check pending exceptions. */
4654 uint16_t uFSW = pFpuCtx->FSW;
4655 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4656 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4657 return;
4658
4659 /* TOP++ (the x87 stack grows downwards, so popping one entry increments TOP). */
4660 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4661 uFSW &= ~X86_FSW_TOP_MASK;
4662 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4663 pFpuCtx->FSW = uFSW;
4664
4665 /* Mark the previous ST0 as empty. */
4666 iOldTop >>= X86_FSW_TOP_SHIFT;
4667 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4668
4669 /* Rotate the registers. */
4670 iemFpuRotateStackPop(pFpuCtx);
4671}
4672
4673
4674/**
4675 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4676 *
4677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4678 * @param pResult The FPU operation result to push.
4679 */
4680void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4681{
4682 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4683 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4684 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4685}
4686
4687
4688/**
4689 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4690 * and sets FPUDP and FPUDS.
4691 *
4692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4693 * @param pResult The FPU operation result to push.
4694 * @param iEffSeg The effective segment register.
4695 * @param GCPtrEff The effective address relative to @a iEffSeg.
4696 */
4697void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4698{
4699 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4700 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4701 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4702 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4703}
4704
4705
4706/**
4707 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4708 * unless a pending exception prevents it.
4709 *
4710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4711 * @param pResult The FPU operation result to store and push.
4712 */
4713void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4714{
4715 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4716 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4717
4718 /* Update FSW and bail if there are pending exceptions afterwards. */
4719 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4720 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4721 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4722 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4723 {
4724 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4725 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4726 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4727 pFpuCtx->FSW = fFsw;
4728 return;
4729 }
4730
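    /* New TOP = current TOP - 1 (modulo 8), the same trick as in iemFpuMaybePushResult. */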
4731 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4732 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4733 {
4734 /* All is fine, push the actual value. */
4735 pFpuCtx->FTW |= RT_BIT(iNewTop);
4736 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4737 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4738 }
4739 else if (pFpuCtx->FCW & X86_FCW_IM)
4740 {
4741 /* Masked stack overflow, push QNaN. */
4742 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4743 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4744 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4745 }
4746 else
4747 {
4748 /* Raise stack overflow, don't push anything. */
4749 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4750 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4751 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4752 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4753 return;
4754 }
4755
4756 fFsw &= ~X86_FSW_TOP_MASK;
4757 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4758 pFpuCtx->FSW = fFsw;
4759
4760 iemFpuRotateStackPush(pFpuCtx);
4761}
4762
4763
4764/**
4765 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4766 * FOP.
4767 *
4768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4769 * @param pResult The result to store.
4770 * @param iStReg Which FPU register to store it in.
4771 */
4772void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4773{
4774 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4775 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4776 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4777}
4778
4779
4780/**
4781 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4782 * FOP, and then pops the stack.
4783 *
4784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4785 * @param pResult The result to store.
4786 * @param iStReg Which FPU register to store it in.
4787 */
4788void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4789{
4790 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4791 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4792 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4793 iemFpuMaybePopOne(pFpuCtx);
4794}
4795
4796
4797/**
4798 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4799 * FPUDP, and FPUDS.
4800 *
4801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4802 * @param pResult The result to store.
4803 * @param iStReg Which FPU register to store it in.
4804 * @param iEffSeg The effective memory operand selector register.
4805 * @param GCPtrEff The effective memory operand offset.
4806 */
4807void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4808 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4809{
4810 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4811 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4812 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4813 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4814}
4815
4816
4817/**
4818 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4819 * FPUDP, and FPUDS, and then pops the stack.
4820 *
4821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4822 * @param pResult The result to store.
4823 * @param iStReg Which FPU register to store it in.
4824 * @param iEffSeg The effective memory operand selector register.
4825 * @param GCPtrEff The effective memory operand offset.
4826 */
4827void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4828 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4829{
4830 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4831 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4832 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4833 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4834 iemFpuMaybePopOne(pFpuCtx);
4835}
4836
4837
4838/**
4839 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4840 *
4841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4842 */
4843void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4844{
4845 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4846 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4847}
4848
4849
4850/**
4851 * Updates the FSW, FOP, FPUIP, and FPUCS.
4852 *
4853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4854 * @param u16FSW The FSW from the current instruction.
4855 */
4856void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4857{
4858 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4859 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4860 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4861}
4862
4863
4864/**
4865 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4866 *
4867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4868 * @param u16FSW The FSW from the current instruction.
4869 */
4870void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4871{
4872 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4873 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4874 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4875 iemFpuMaybePopOne(pFpuCtx);
4876}
4877
4878
4879/**
4880 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4881 *
4882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4883 * @param u16FSW The FSW from the current instruction.
4884 * @param iEffSeg The effective memory operand selector register.
4885 * @param GCPtrEff The effective memory operand offset.
4886 */
4887void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4888{
4889 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4890 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4891 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4892 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4893}
4894
4895
4896/**
4897 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4898 *
4899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4900 * @param u16FSW The FSW from the current instruction.
4901 */
4902void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4903{
4904 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4905 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4906 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4907 iemFpuMaybePopOne(pFpuCtx);
4908 iemFpuMaybePopOne(pFpuCtx);
4909}
4910
4911
4912/**
4913 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4914 *
4915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4916 * @param u16FSW The FSW from the current instruction.
4917 * @param iEffSeg The effective memory operand selector register.
4918 * @param GCPtrEff The effective memory operand offset.
4919 */
4920void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4921{
4922 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4923 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4924 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4925 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4926 iemFpuMaybePopOne(pFpuCtx);
4927}
4928
4929
4930/**
4931 * Worker routine for raising an FPU stack underflow exception.
4932 *
4933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4934 * @param pFpuCtx The FPU context.
4935 * @param iStReg The stack register being accessed.
4936 */
4937static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
4938{
4939 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4940 if (pFpuCtx->FCW & X86_FCW_IM)
4941 {
4942 /* Masked underflow. */
4943 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4944 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4945 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4946 if (iStReg != UINT8_MAX)
4947 {
4948 pFpuCtx->FTW |= RT_BIT(iReg);
4949 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4950 }
4951 }
4952 else
4953 {
4954 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4955 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4956 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
4957 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4958 }
4959 RT_NOREF(pVCpu);
4960}
4961
4962
4963/**
4964 * Raises a FPU stack underflow exception.
4965 *
4966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4967 * @param iStReg The destination register that should be loaded
4968 * with QNaN if \#IS is not masked. Specify
4969 * UINT8_MAX if none (like for fcom).
4970 */
4971void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4972{
4973 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4974 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4975 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
4976}
4977
4978
4979void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4980{
4981 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4982 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4983 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4984 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
4985}
4986
4987
4988void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4989{
4990 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4991 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4992 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
4993 iemFpuMaybePopOne(pFpuCtx);
4994}
4995
4996
4997void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4998{
4999 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5000 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5001 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5002 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5003 iemFpuMaybePopOne(pFpuCtx);
5004}
5005
5006
5007void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5008{
5009 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5010 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5011 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5012 iemFpuMaybePopOne(pFpuCtx);
5013 iemFpuMaybePopOne(pFpuCtx);
5014}
5015
5016
5017void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5018{
5019 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5020 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5021
5022 if (pFpuCtx->FCW & X86_FCW_IM)
5023 {
5024 /* Masked stack underflow - Push QNaN. */
5025 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5026 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5027 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5028 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5029 pFpuCtx->FTW |= RT_BIT(iNewTop);
5030 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5031 iemFpuRotateStackPush(pFpuCtx);
5032 }
5033 else
5034 {
5035 /* Exception pending - don't change TOP or the register stack. */
5036 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5037 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5038 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5039 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5040 }
5041}
5042
5043
5044void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5045{
5046 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5047 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5048
5049 if (pFpuCtx->FCW & X86_FCW_IM)
5050 {
5051 /* Masked stack underflow - Push QNaN. */
5052 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5053 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5054 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5055 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5056 pFpuCtx->FTW |= RT_BIT(iNewTop);
5057 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5058 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5059 iemFpuRotateStackPush(pFpuCtx);
5060 }
5061 else
5062 {
5063 /* Exception pending - don't change TOP or the register stack. */
5064 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5065 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5066 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5067 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5068 }
5069}
5070
5071
5072/**
5073 * Worker routine for raising an FPU stack overflow exception on a push.
5074 *
5075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5076 * @param pFpuCtx The FPU context.
5077 */
5078static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5079{
5080 if (pFpuCtx->FCW & X86_FCW_IM)
5081 {
5082 /* Masked overflow. */
5083 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5084 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5085 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5086 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5087 pFpuCtx->FTW |= RT_BIT(iNewTop);
5088 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5089 iemFpuRotateStackPush(pFpuCtx);
5090 }
5091 else
5092 {
5093 /* Exception pending - don't change TOP or the register stack. */
5094 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5095 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5096 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5097 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5098 }
5099 RT_NOREF(pVCpu);
5100}
5101
5102
5103/**
5104 * Raises a FPU stack overflow exception on a push.
5105 *
5106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5107 */
5108void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5109{
5110 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5111 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5112 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5113}
5114
5115
5116/**
5117 * Raises a FPU stack overflow exception on a push with a memory operand.
5118 *
5119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5120 * @param iEffSeg The effective memory operand selector register.
5121 * @param GCPtrEff The effective memory operand offset.
5122 */
5123void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5124{
5125 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5126 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5127 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5128 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5129}
5130
5131/** @} */
5132
5133
5134/** @name Memory access.
5135 *
5136 * @{
5137 */
5138
5139
5140/**
5141 * Updates the IEMCPU::cbWritten counter if applicable.
5142 *
5143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5144 * @param fAccess The access being accounted for.
5145 * @param cbMem The access size.
5146 */
5147DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5148{
5149 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5150 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5151 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5152}
5153
5154
5155/**
5156 * Applies the segment limit, base and attributes.
5157 *
5158 * This may raise a \#GP or \#SS.
5159 *
5160 * @returns VBox strict status code.
5161 *
5162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5163 * @param fAccess The kind of access which is being performed.
5164 * @param iSegReg The index of the segment register to apply.
5165 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5166 * TSS, ++).
5167 * @param cbMem The access size.
5168 * @param pGCPtrMem Pointer to the guest memory address to apply
5169 * segmentation to. Input and output parameter.
5170 */
5171VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5172{
5173 if (iSegReg == UINT8_MAX)
5174 return VINF_SUCCESS;
5175
5176 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5177 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5178 switch (pVCpu->iem.s.enmCpuMode)
5179 {
5180 case IEMMODE_16BIT:
5181 case IEMMODE_32BIT:
5182 {
5183 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5184 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5185
5186 if ( pSel->Attr.n.u1Present
5187 && !pSel->Attr.n.u1Unusable)
5188 {
5189 Assert(pSel->Attr.n.u1DescType);
5190 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5191 {
5192 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5193 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5194 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5195
5196 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5197 {
5198 /** @todo CPL check. */
5199 }
5200
5201 /*
5202 * There are two kinds of data selectors, normal and expand down.
5203 */
5204 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5205 {
5206 if ( GCPtrFirst32 > pSel->u32Limit
5207 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5208 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5209 }
5210 else
5211 {
5212 /*
5213 * The upper boundary is defined by the B bit, not the G bit!
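 * Valid offsets for an expand-down segment lie above the limit, i.e. in (u32Limit, upper boundary].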
5214 */
5215 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5216 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5217 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5218 }
5219 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5220 }
5221 else
5222 {
5223 /*
5224 * A code selector can usually be used for reading; writing is
5225 * only permitted in real and V8086 mode.
5226 */
5227 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5228 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5229 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5230 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5231 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5232
5233 if ( GCPtrFirst32 > pSel->u32Limit
5234 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5235 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5236
5237 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5238 {
5239 /** @todo CPL check. */
5240 }
5241
5242 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5243 }
5244 }
5245 else
5246 return iemRaiseGeneralProtectionFault0(pVCpu);
5247 return VINF_SUCCESS;
5248 }
5249
5250 case IEMMODE_64BIT:
5251 {
5252 RTGCPTR GCPtrMem = *pGCPtrMem;
5253 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5254 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5255
5256 Assert(cbMem >= 1);
5257 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5258 return VINF_SUCCESS;
5259 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5260 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5261 return iemRaiseGeneralProtectionFault0(pVCpu);
5262 }
5263
5264 default:
5265 AssertFailedReturn(VERR_IEM_IPE_7);
5266 }
5267}
5268
5269
5270/**
5271 * Translates a virtual address to a physical address and checks if we
5272 * can access the page as specified.
5273 *
5274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5275 * @param GCPtrMem The virtual address.
5276 * @param fAccess The intended access.
5277 * @param pGCPhysMem Where to return the physical address.
5278 */
5279VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5280{
5281 /** @todo Need a different PGM interface here. We're currently using
5282 * generic / REM interfaces. This won't cut it for R0. */
5283 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5284 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5285 * here. */
5286 PGMPTWALK Walk;
5287 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5288 if (RT_FAILURE(rc))
5289 {
5290 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5291 /** @todo Check unassigned memory in unpaged mode. */
5292 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5293#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5294 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5295 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5296#endif
5297 *pGCPhysMem = NIL_RTGCPHYS;
5298 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5299 }
5300
5301 /* If the page is writable and does not have the no-exec bit set, all
5302 access is allowed. Otherwise we'll have to check more carefully... */
5303 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5304 {
5305 /* Write to read only memory? */
5306 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5307 && !(Walk.fEffective & X86_PTE_RW)
5308 && ( ( pVCpu->iem.s.uCpl == 3
5309 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5310 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5311 {
5312 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5313 *pGCPhysMem = NIL_RTGCPHYS;
5314#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5315 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5316 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5317#endif
5318 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5319 }
5320
5321 /* Kernel memory accessed by userland? */
5322 if ( !(Walk.fEffective & X86_PTE_US)
5323 && pVCpu->iem.s.uCpl == 3
5324 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5325 {
5326 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5327 *pGCPhysMem = NIL_RTGCPHYS;
5328#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5329 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5330 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5331#endif
5332 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5333 }
5334
5335 /* Executing non-executable memory? */
5336 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5337 && (Walk.fEffective & X86_PTE_PAE_NX)
5338 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5339 {
5340 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5341 *pGCPhysMem = NIL_RTGCPHYS;
5342#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5343 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5344 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5345#endif
5346 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5347 VERR_ACCESS_DENIED);
5348 }
5349 }
5350
5351 /*
5352 * Set the dirty / access flags.
5353 * ASSUMES this is set when the address is translated rather than on commit...
5354 */
5355 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5356 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5357 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5358 {
5359 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5360 AssertRC(rc2);
5361 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5362 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5363 }
5364
5365 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5366 *pGCPhysMem = GCPhys;
5367 return VINF_SUCCESS;
5368}
5369
5370
5371/**
5372 * Looks up a memory mapping entry.
5373 *
5374 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5376 * @param pvMem The memory address.
5377 * @param fAccess The access to.
5378 */
5379DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5380{
5381 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5382 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5383 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5384 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5385 return 0;
5386 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5387 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5388 return 1;
5389 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5390 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5391 return 2;
5392 return VERR_NOT_FOUND;
5393}
5394
5395
5396/**
5397 * Finds a free memmap entry when using iNextMapping doesn't work.
5398 *
5399 * @returns Memory mapping index, 1024 on failure.
5400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5401 */
5402static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5403{
5404 /*
5405 * The easy case.
5406 */
5407 if (pVCpu->iem.s.cActiveMappings == 0)
5408 {
5409 pVCpu->iem.s.iNextMapping = 1;
5410 return 0;
5411 }
5412
5413 /* There should be enough mappings for all instructions. */
5414 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5415
5416 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5417 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5418 return i;
5419
5420 AssertFailedReturn(1024);
5421}
5422
5423
5424/**
5425 * Commits a bounce buffer that needs writing back and unmaps it.
5426 *
5427 * @returns Strict VBox status code.
5428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5429 * @param iMemMap The index of the buffer to commit.
5430 * @param fPostponeFail Whether we can postpone write failures to ring-3.
5431 * Always false in ring-3, obviously.
5432 */
5433static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5434{
5435 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5436 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5437#ifdef IN_RING3
5438 Assert(!fPostponeFail);
5439 RT_NOREF_PV(fPostponeFail);
5440#endif
5441
5442 /*
5443 * Do the writing.
5444 */
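    /* A bounce buffer may span two physical ranges (GCPhysFirst/GCPhysSecond), so up to two writes are needed. */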
5445 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5446 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5447 {
5448 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5449 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5450 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5451 if (!pVCpu->iem.s.fBypassHandlers)
5452 {
5453 /*
5454 * Carefully and efficiently dealing with access handler return
5455 * codes makes this a little bloated.
5456 */
5457 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5459 pbBuf,
5460 cbFirst,
5461 PGMACCESSORIGIN_IEM);
5462 if (rcStrict == VINF_SUCCESS)
5463 {
5464 if (cbSecond)
5465 {
5466 rcStrict = PGMPhysWrite(pVM,
5467 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5468 pbBuf + cbFirst,
5469 cbSecond,
5470 PGMACCESSORIGIN_IEM);
5471 if (rcStrict == VINF_SUCCESS)
5472 { /* nothing */ }
5473 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5474 {
5475 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5476 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5477 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5478 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5479 }
5480#ifndef IN_RING3
5481 else if (fPostponeFail)
5482 {
5483 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5485 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5486 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5487 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5488 return iemSetPassUpStatus(pVCpu, rcStrict);
5489 }
5490#endif
5491 else
5492 {
5493 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5495 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5496 return rcStrict;
5497 }
5498 }
5499 }
5500 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5501 {
5502 if (!cbSecond)
5503 {
5504 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5505 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5506 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5507 }
5508 else
5509 {
5510 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5511 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5512 pbBuf + cbFirst,
5513 cbSecond,
5514 PGMACCESSORIGIN_IEM);
5515 if (rcStrict2 == VINF_SUCCESS)
5516 {
5517 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5518 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5519 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5520 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5521 }
5522 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5523 {
5524 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5525 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5526 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5527 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5528 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5529 }
5530#ifndef IN_RING3
5531 else if (fPostponeFail)
5532 {
5533 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5534 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5535 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5536 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5537 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5538 return iemSetPassUpStatus(pVCpu, rcStrict);
5539 }
5540#endif
5541 else
5542 {
5543 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5544 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5545 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5546 return rcStrict2;
5547 }
5548 }
5549 }
5550#ifndef IN_RING3
5551 else if (fPostponeFail)
5552 {
5553 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5554 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5555 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5556 if (!cbSecond)
5557 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5558 else
5559 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5560 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5561 return iemSetPassUpStatus(pVCpu, rcStrict);
5562 }
5563#endif
5564 else
5565 {
5566 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5567 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5568 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5569 return rcStrict;
5570 }
5571 }
5572 else
5573 {
5574 /*
5575 * No access handlers, much simpler.
5576 */
5577 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5578 if (RT_SUCCESS(rc))
5579 {
5580 if (cbSecond)
5581 {
5582 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5583 if (RT_SUCCESS(rc))
5584 { /* likely */ }
5585 else
5586 {
5587 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5588 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5589 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5590 return rc;
5591 }
5592 }
5593 }
5594 else
5595 {
5596 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5597 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5598 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5599 return rc;
5600 }
5601 }
5602 }
5603
5604#if defined(IEM_LOG_MEMORY_WRITES)
5605 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5606 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5607 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5608 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5609 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5610 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5611
5612 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5613 g_cbIemWrote = cbWrote;
5614 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5615#endif
5616
5617 /*
5618 * Free the mapping entry.
5619 */
5620 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5621 Assert(pVCpu->iem.s.cActiveMappings != 0);
5622 pVCpu->iem.s.cActiveMappings--;
5623 return VINF_SUCCESS;
5624}
5625
5626
5627/**
5628 * iemMemMap worker that deals with a request crossing pages.
5629 */
5630static VBOXSTRICTRC
5631iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5632{
5633 /*
5634 * Do the address translations.
5635 */
5636 RTGCPHYS GCPhysFirst;
5637 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5638 if (rcStrict != VINF_SUCCESS)
5639 return rcStrict;
5640
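    /* Translate the page containing the last byte of the access; the address is masked down to the start of that second page. */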
5641 RTGCPHYS GCPhysSecond;
5642 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5643 fAccess, &GCPhysSecond);
5644 if (rcStrict != VINF_SUCCESS)
5645 return rcStrict;
5646 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5647
5648 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5649
5650 /*
5651 * Read in the current memory content if it's a read, execute or partial
5652 * write access.
5653 */
5654 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5655 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5656 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5657
5658 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5659 {
5660 if (!pVCpu->iem.s.fBypassHandlers)
5661 {
5662 /*
5663 * Must carefully deal with access handler status codes here,
5664 * makes the code a bit bloated.
5665 */
5666 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5667 if (rcStrict == VINF_SUCCESS)
5668 {
5669 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5670 if (rcStrict == VINF_SUCCESS)
5671 { /*likely */ }
5672 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5673 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5674 else
5675 {
5676 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5677 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5678 return rcStrict;
5679 }
5680 }
5681 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5682 {
5683 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5684 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5685 {
5686 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5687 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5688 }
5689 else
5690 {
5691 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5692 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5693 return rcStrict2;
5694 }
5695 }
5696 else
5697 {
5698 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5699 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5700 return rcStrict;
5701 }
5702 }
5703 else
5704 {
5705 /*
5706 * No informational status codes here, much more straightforward.
5707 */
5708 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5709 if (RT_SUCCESS(rc))
5710 {
5711 Assert(rc == VINF_SUCCESS);
5712 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5713 if (RT_SUCCESS(rc))
5714 Assert(rc == VINF_SUCCESS);
5715 else
5716 {
5717 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5718 return rc;
5719 }
5720 }
5721 else
5722 {
5723 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5724 return rc;
5725 }
5726 }
5727 }
5728#ifdef VBOX_STRICT
5729 else
5730 memset(pbBuf, 0xcc, cbMem);
5731 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5732 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5733#endif
5734
5735 /*
5736 * Commit the bounce buffer entry.
5737 */
5738 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5739 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5740 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5741 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5742 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5743 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5744 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5745 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5746 pVCpu->iem.s.cActiveMappings++;
5747
5748 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5749 *ppvMem = pbBuf;
5750 return VINF_SUCCESS;
5751}
5752
5753
5754/**
5755 * iemMemMap worker that deals with iemMemPageMap failures.
5756 */
5757static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5758 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5759{
5760 /*
5761 * Filter out conditions we can handle and the ones which shouldn't happen.
5762 */
5763 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5764 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5765 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5766 {
5767 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5768 return rcMap;
5769 }
5770 pVCpu->iem.s.cPotentialExits++;
5771
5772 /*
5773 * Read in the current memory content if it's a read, execute or partial
5774 * write access.
5775 */
5776 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5777 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5778 {
5779 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5780 memset(pbBuf, 0xff, cbMem);
5781 else
5782 {
5783 int rc;
5784 if (!pVCpu->iem.s.fBypassHandlers)
5785 {
5786 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5787 if (rcStrict == VINF_SUCCESS)
5788 { /* nothing */ }
5789 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5790 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5791 else
5792 {
5793 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5794 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5795 return rcStrict;
5796 }
5797 }
5798 else
5799 {
5800 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5801 if (RT_SUCCESS(rc))
5802 { /* likely */ }
5803 else
5804 {
5805 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5806 GCPhysFirst, rc));
5807 return rc;
5808 }
5809 }
5810 }
5811 }
5812#ifdef VBOX_STRICT
5813 else
5814 memset(pbBuf, 0xcc, cbMem);
5815#endif
5816#ifdef VBOX_STRICT
5817 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5818 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5819#endif
5820
5821 /*
5822 * Commit the bounce buffer entry.
5823 */
5824 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5825 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5826 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5827 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5828 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5829 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5830 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5831 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5832 pVCpu->iem.s.cActiveMappings++;
5833
5834 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5835 *ppvMem = pbBuf;
5836 return VINF_SUCCESS;
5837}
5838
5839
5840
5841/**
5842 * Maps the specified guest memory for the given kind of access.
5843 *
5844 * This may be using bounce buffering of the memory if it's crossing a page
5845 * boundary or if there is an access handler installed for any of it. Because
5846 * of lock prefix guarantees, we're in for some extra clutter when this
5847 * happens.
5848 *
5849 * This may raise a \#GP, \#SS, \#PF or \#AC.
5850 *
5851 * @returns VBox strict status code.
5852 *
5853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5854 * @param ppvMem Where to return the pointer to the mapped memory.
5855 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
5856 * 8, 12, 16, 32 or 512. When used by string operations
5857 * it can be up to a page.
5858 * @param iSegReg The index of the segment register to use for this
5859 * access. The base and limits are checked. Use UINT8_MAX
5860 * to indicate that no segmentation is required (for IDT,
5861 * GDT and LDT accesses).
5862 * @param GCPtrMem The address of the guest memory.
5863 * @param fAccess How the memory is being accessed. The
5864 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
5865 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
5866 * when raising exceptions.
5867 * @param uAlignCtl Alignment control:
5868 * - Bits 15:0 is the alignment mask.
5869 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
5870 * IEM_MEMMAP_F_ALIGN_SSE, and
5871 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
5872 * Pass zero to skip alignment.
5873 */
5874VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5875 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
5876{
5877 /*
5878 * Check the input and figure out which mapping entry to use.
5879 */
5880 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
5881 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5882 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5883
5884 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5885 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5886 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5887 {
5888 iMemMap = iemMemMapFindFree(pVCpu);
5889 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5890 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5891 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5892 pVCpu->iem.s.aMemMappings[2].fAccess),
5893 VERR_IEM_IPE_9);
5894 }
5895
5896 /*
5897 * Map the memory, checking that we can actually access it. If something
5898 * slightly complicated happens, fall back on bounce buffering.
5899 */
5900 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5901 if (rcStrict == VINF_SUCCESS)
5902 { /* likely */ }
5903 else
5904 return rcStrict;
5905
5906 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5907 { /* likely */ }
5908 else
5909 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5910
5911 /*
5912 * Alignment check.
5913 */
5914 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
5915 { /* likelyish */ }
5916 else
5917 {
5918 /* Misaligned access. */
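    /* System accesses (GDT, IDT, TSS, ...) are never subject to alignment faults here; for other
       accesses the IEM_MEMMAP_F_ALIGN_xxx flags decide whether a misalignment yields \#AC (when
       alignment checking is enabled) or \#GP(0). */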
5919 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
5920 {
5921 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
5922 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
5923 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
5924 {
5925 AssertCompile(X86_CR0_AM == X86_EFL_AC);
5926
5927 if (iemMemAreAlignmentChecksEnabled(pVCpu))
5928 return iemRaiseAlignmentCheckException(pVCpu);
5929 }
5930 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
5931 && iemMemAreAlignmentChecksEnabled(pVCpu)
5932/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
5933 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
5934 )
5935 return iemRaiseAlignmentCheckException(pVCpu);
5936 else
5937 return iemRaiseGeneralProtectionFault0(pVCpu);
5938 }
5939 }
5940
5941#ifdef IEM_WITH_DATA_TLB
5942 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5943
5944 /*
5945 * Get the TLB entry for this page.
5946 */
5947 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
5948 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
5949 if (pTlbe->uTag == uTag)
5950 {
5951# ifdef VBOX_WITH_STATISTICS
5952 pVCpu->iem.s.DataTlb.cTlbHits++;
5953# endif
5954 }
5955 else
5956 {
5957 pVCpu->iem.s.DataTlb.cTlbMisses++;
5958 PGMPTWALK Walk;
5959 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5960 if (RT_FAILURE(rc))
5961 {
5962 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5963# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5964 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5965 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5966# endif
5967 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
5968 }
5969
5970 Assert(Walk.fSucceeded);
5971 pTlbe->uTag = uTag;
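    /* The page-table bits are stored inverted (as NO_xxx flags), so a zero value means full access. */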
5972 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
5973 pTlbe->GCPhys = Walk.GCPhys;
5974 pTlbe->pbMappingR3 = NULL;
5975 }
5976
5977 /*
5978 * Check TLB page table level access flags.
5979 */
5980 /* If the page is either supervisor only or non-writable, we need to do
5981 more careful access checks. */
5982 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
5983 {
5984 /* Write to read only memory? */
5985 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
5986 && (fAccess & IEM_ACCESS_TYPE_WRITE)
5987 && ( ( pVCpu->iem.s.uCpl == 3
5988 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5989 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5990 {
5991 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5992# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5993 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5994 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5995# endif
5996 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5997 }
5998
5999 /* Kernel memory accessed by userland? */
6000 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6001 && pVCpu->iem.s.uCpl == 3
6002 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6003 {
6004 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6005# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6006 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6007 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6008# endif
6009 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6010 }
6011 }
6012
6013 /*
6014 * Set the dirty / access flags.
6015 * ASSUMES this is set when the address is translated rather than on commit...
6016 */
6017 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6018 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6019 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6020 {
6021 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6022 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6023 AssertRC(rc2);
6024 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6025 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6026 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6027 }
6028
6029 /*
6030 * Look up the physical page info if necessary.
6031 */
6032 uint8_t *pbMem = NULL;
6033 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6034# ifdef IN_RING3
6035 pbMem = pTlbe->pbMappingR3;
6036# else
6037 pbMem = NULL;
6038# endif
6039 else
6040 {
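        /* The cached physical page info is stale or was never filled in, so requery PGM for
           the current ring-3 mapping pointer (if any) and the physical-level access flags. */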
6041 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6042 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6043 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6044 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6045 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6046 { /* likely */ }
6047 else
6048 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6049 pTlbe->pbMappingR3 = NULL;
6050 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6051 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6052 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6053 &pbMem, &pTlbe->fFlagsAndPhysRev);
6054 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6055# ifdef IN_RING3
6056 pTlbe->pbMappingR3 = pbMem;
6057# endif
6058 }
6059
6060 /*
6061 * Check the physical page level access and mapping.
6062 */
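    /* Pages flagged by PGM with the IEMTLBE_F_PG_NO_READ / IEMTLBE_F_PG_NO_WRITE bits relevant
       to this access (e.g. MMIO, pages with access handlers, unassigned memory) cannot be mapped
       directly and are routed through the bounce buffer below, with a VERR_PGM_PHYS_TLB_* status
       telling iemMemBounceBufferMapPhys why. */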
6063 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6064 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6065 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6066 { /* probably likely */ }
6067 else
6068 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6069 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6070 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6071 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6072 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6073 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6074
6075 if (pbMem)
6076 {
6077 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6078 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6079 fAccess |= IEM_ACCESS_NOT_LOCKED;
6080 }
6081 else
6082 {
6083 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6084 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6085 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6086 if (rcStrict != VINF_SUCCESS)
6087 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6088 }
6089
6090 void * const pvMem = pbMem;
6091
6092 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6093 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6094 if (fAccess & IEM_ACCESS_TYPE_READ)
6095 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6096
6097#else /* !IEM_WITH_DATA_TLB */
6098
6099 RTGCPHYS GCPhysFirst;
6100 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6101 if (rcStrict != VINF_SUCCESS)
6102 return rcStrict;
6103
6104 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6105 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6106 if (fAccess & IEM_ACCESS_TYPE_READ)
6107 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6108
6109 void *pvMem;
6110 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6111 if (rcStrict != VINF_SUCCESS)
6112 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6113
6114#endif /* !IEM_WITH_DATA_TLB */
6115
6116 /*
6117 * Fill in the mapping table entry.
6118 */
6119 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6120 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6121 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6122 pVCpu->iem.s.cActiveMappings += 1;
6123
6124 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6125 *ppvMem = pvMem;
6126
6127 return VINF_SUCCESS;
6128}
6129
6130
6131/**
6132 * Commits the guest memory if bounce buffered and unmaps it.
6133 *
6134 * @returns Strict VBox status code.
6135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6136 * @param pvMem The mapping.
6137 * @param fAccess The kind of access.
6138 */
6139VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6140{
6141 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6142 AssertReturn(iMemMap >= 0, iMemMap);
6143
6144 /* If it's bounce buffered, we may need to write back the buffer. */
6145 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6146 {
6147 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6148 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6149 }
6150 /* Otherwise unlock it. */
6151 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6152 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6153
6154 /* Free the entry. */
6155 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6156 Assert(pVCpu->iem.s.cActiveMappings != 0);
6157 pVCpu->iem.s.cActiveMappings--;
6158 return VINF_SUCCESS;
6159}
6160
6161#ifdef IEM_WITH_SETJMP
6162
6163/**
6164 * Maps the specified guest memory for the given kind of access, longjmp on
6165 * error.
6166 *
6167 * This may be using bounce buffering of the memory if it's crossing a page
6168 * boundary or if there is an access handler installed for any of it. Because
6169 * of lock prefix guarantees, we're in for some extra clutter when this
6170 * happens.
6171 *
6172 * This may raise a \#GP, \#SS, \#PF or \#AC.
6173 *
6174 * @returns Pointer to the mapped memory.
6175 *
6176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6177 * @param cbMem The number of bytes to map. This is usually 1,
6178 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6179 * string operations it can be up to a page.
6180 * @param iSegReg The index of the segment register to use for
6181 * this access. The base and limits are checked.
6182 * Use UINT8_MAX to indicate that no segmentation
6183 * is required (for IDT, GDT and LDT accesses).
6184 * @param GCPtrMem The address of the guest memory.
6185 * @param fAccess How the memory is being accessed. The
6186 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6187 * how to map the memory, while the
6188 * IEM_ACCESS_WHAT_XXX bit is used when raising
6189 * exceptions.
6190 * @param uAlignCtl Alignment control:
6191 * - Bits 15:0 are the alignment mask.
6192 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6193 * IEM_MEMMAP_F_ALIGN_SSE, and
6194 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6195 * Pass zero to skip alignment.
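 *
 * A minimal usage sketch (illustrative only; it mirrors the pattern used by the
 * store helpers further down in this file):
 * @code
 *      uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
 *                                                   IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
 *      *pu16Dst = u16Value;
 *      iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 * @endcode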
6196 */
6197void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6198 uint32_t uAlignCtl) RT_NOEXCEPT
6199{
6200 /*
6201 * Check the input, check segment access and adjust address
6202 * with segment base.
6203 */
6204 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6205    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6206 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6207
6208 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6209 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6210 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6211
6212 /*
6213 * Alignment check.
6214 */
6215 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6216 { /* likelyish */ }
6217 else
6218 {
6219 /* Misaligned access. */
6220 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6221 {
6222 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6223 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6224 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6225 {
6226 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6227
6228 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6229 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6230 }
6231 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6232 && iemMemAreAlignmentChecksEnabled(pVCpu)
6233/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6234 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6235 )
6236 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6237 else
6238 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6239 }
6240 }
6241
6242 /*
6243 * Figure out which mapping entry to use.
6244 */
6245 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6246 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6247 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6248 {
6249 iMemMap = iemMemMapFindFree(pVCpu);
6250 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6251 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6252 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6253 pVCpu->iem.s.aMemMappings[2].fAccess),
6254 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6255 }
6256
6257 /*
6258 * Crossing a page boundary?
6259 */
6260 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6261 { /* No (likely). */ }
6262 else
6263 {
6264 void *pvMem;
6265 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6266 if (rcStrict == VINF_SUCCESS)
6267 return pvMem;
6268 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6269 }
6270
6271#ifdef IEM_WITH_DATA_TLB
6272 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6273
6274 /*
6275 * Get the TLB entry for this page.
6276 */
6277 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6278 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6279 if (pTlbe->uTag == uTag)
6280 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6281 else
6282 {
6283 pVCpu->iem.s.DataTlb.cTlbMisses++;
6284 PGMPTWALK Walk;
6285 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6286 if (RT_FAILURE(rc))
6287 {
6288 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6289# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6290 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6291 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6292# endif
6293 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6294 }
6295
6296 Assert(Walk.fSucceeded);
6297 pTlbe->uTag = uTag;
6298 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6299 pTlbe->GCPhys = Walk.GCPhys;
6300 pTlbe->pbMappingR3 = NULL;
6301 }
6302
6303 /*
6304 * Check the flags and physical revision.
6305 */
6306 /** @todo make the caller pass these in with fAccess. */
6307 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6308 ? IEMTLBE_F_PT_NO_USER : 0;
6309 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6310 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6311 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6312 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6313 ? IEMTLBE_F_PT_NO_WRITE : 0)
6314 : 0;
6315 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6316 uint8_t *pbMem = NULL;
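    /* A data TLB entry can only be used directly when its physical revision matches
       uTlbPhysRev and none of the NO_USER / NO_WRITE / NO_DIRTY / NO_ACCESSED / NO_READ
       bits relevant to this access are set; all those bits are zero in that case, so the
       single masked compare below covers access rights, A/D state and the physical
       revision in one go. */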
6317 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6318 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6319# ifdef IN_RING3
6320 pbMem = pTlbe->pbMappingR3;
6321# else
6322 pbMem = NULL;
6323# endif
6324 else
6325 {
6326 /*
6327 * Okay, something isn't quite right or needs refreshing.
6328 */
6329 /* Write to read only memory? */
6330 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6331 {
6332 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6333# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6334 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6335 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6336# endif
6337 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6338 }
6339
6340 /* Kernel memory accessed by userland? */
6341 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6342 {
6343 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6344# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6345 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6346 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6347# endif
6348 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6349 }
6350
6351 /* Set the dirty / access flags.
6352 ASSUMES this is set when the address is translated rather than on commit... */
6353 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6354 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6355 {
6356 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6357 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6358 AssertRC(rc2);
6359 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6360 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6361 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6362 }
6363
6364 /*
6365 * Check if the physical page info needs updating.
6366 */
6367 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6368# ifdef IN_RING3
6369 pbMem = pTlbe->pbMappingR3;
6370# else
6371 pbMem = NULL;
6372# endif
6373 else
6374 {
6375 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6376 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6377 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6378 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6379 pTlbe->pbMappingR3 = NULL;
6380 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6381 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6382 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6383 &pbMem, &pTlbe->fFlagsAndPhysRev);
6384 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6385# ifdef IN_RING3
6386 pTlbe->pbMappingR3 = pbMem;
6387# endif
6388 }
6389
6390 /*
6391 * Check the physical page level access and mapping.
6392 */
6393 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6394 { /* probably likely */ }
6395 else
6396 {
6397 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6398 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6399 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6400 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6401 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6402 if (rcStrict == VINF_SUCCESS)
6403 return pbMem;
6404 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6405 }
6406 }
6407 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6408
6409 if (pbMem)
6410 {
6411 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6412 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6413 fAccess |= IEM_ACCESS_NOT_LOCKED;
6414 }
6415 else
6416 {
6417 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6418 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6419 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6420 if (rcStrict == VINF_SUCCESS)
6421 return pbMem;
6422 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6423 }
6424
6425 void * const pvMem = pbMem;
6426
6427 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6428 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6429 if (fAccess & IEM_ACCESS_TYPE_READ)
6430 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6431
6432#else /* !IEM_WITH_DATA_TLB */
6433
6434
6435 RTGCPHYS GCPhysFirst;
6436 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6437 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6438 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6439
6440 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6441 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6442 if (fAccess & IEM_ACCESS_TYPE_READ)
6443 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6444
6445 void *pvMem;
6446 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6447 if (rcStrict == VINF_SUCCESS)
6448 { /* likely */ }
6449 else
6450 {
6451 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6452 if (rcStrict == VINF_SUCCESS)
6453 return pvMem;
6454 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6455 }
6456
6457#endif /* !IEM_WITH_DATA_TLB */
6458
6459 /*
6460 * Fill in the mapping table entry.
6461 */
6462 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6463 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6464 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6465 pVCpu->iem.s.cActiveMappings++;
6466
6467 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6468 return pvMem;
6469}
6470
6471
6472/**
6473 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6474 *
6475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6476 * @param pvMem The mapping.
6477 * @param fAccess The kind of access.
6478 */
6479void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6480{
6481 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6482 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6483
6484 /* If it's bounce buffered, we may need to write back the buffer. */
6485 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6486 {
6487 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6488 {
6489 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6490 if (rcStrict == VINF_SUCCESS)
6491 return;
6492 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6493 }
6494 }
6495 /* Otherwise unlock it. */
6496 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6497 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6498
6499 /* Free the entry. */
6500 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6501 Assert(pVCpu->iem.s.cActiveMappings != 0);
6502 pVCpu->iem.s.cActiveMappings--;
6503}
6504
6505#endif /* IEM_WITH_SETJMP */
6506
6507#ifndef IN_RING3
6508/**
6509 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6510 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
6511 *
6512 * Allows the instruction to be completed and retired, while the IEM user will
6513 * return to ring-3 immediately afterwards and do the postponed writes there.
6514 *
6515 * @returns VBox status code (no strict statuses). Caller must check
6516 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6518 * @param pvMem The mapping.
6519 * @param fAccess The kind of access.
6520 */
6521VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6522{
6523 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6524 AssertReturn(iMemMap >= 0, iMemMap);
6525
6526 /* If it's bounce buffered, we may need to write back the buffer. */
6527 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6528 {
6529 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6530 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6531 }
6532 /* Otherwise unlock it. */
6533 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6534 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6535
6536 /* Free the entry. */
6537 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6538 Assert(pVCpu->iem.s.cActiveMappings != 0);
6539 pVCpu->iem.s.cActiveMappings--;
6540 return VINF_SUCCESS;
6541}
6542#endif
6543
6544
6545/**
6546 * Rolls back mappings, releasing page locks and such.
6547 *
6548 * The caller shall only call this after checking cActiveMappings.
6549 *
6550 * @returns Strict VBox status code to pass up.
6551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6552 */
6553void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6554{
6555 Assert(pVCpu->iem.s.cActiveMappings > 0);
6556
6557 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6558 while (iMemMap-- > 0)
6559 {
6560 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6561 if (fAccess != IEM_ACCESS_INVALID)
6562 {
6563 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6564 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6565 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6566 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6567 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6568 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6569 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6570 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6571 pVCpu->iem.s.cActiveMappings--;
6572 }
6573 }
6574}
6575
6576
6577/**
6578 * Fetches a data byte.
6579 *
6580 * @returns Strict VBox status code.
6581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6582 * @param pu8Dst Where to return the byte.
6583 * @param iSegReg The index of the segment register to use for
6584 * this access. The base and limits are checked.
6585 * @param GCPtrMem The address of the guest memory.
6586 */
6587VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6588{
6589 /* The lazy approach for now... */
6590 uint8_t const *pu8Src;
6591 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6592 if (rc == VINF_SUCCESS)
6593 {
6594 *pu8Dst = *pu8Src;
6595 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6596 }
6597 return rc;
6598}
6599
6600
6601#ifdef IEM_WITH_SETJMP
6602/**
6603 * Fetches a data byte, longjmp on error.
6604 *
6605 * @returns The byte.
6606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6607 * @param iSegReg The index of the segment register to use for
6608 * this access. The base and limits are checked.
6609 * @param GCPtrMem The address of the guest memory.
6610 */
6611uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6612{
6613 /* The lazy approach for now... */
6614 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6615 uint8_t const bRet = *pu8Src;
6616 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6617 return bRet;
6618}
6619#endif /* IEM_WITH_SETJMP */
6620
6621
6622/**
6623 * Fetches a data word.
6624 *
6625 * @returns Strict VBox status code.
6626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6627 * @param pu16Dst Where to return the word.
6628 * @param iSegReg The index of the segment register to use for
6629 * this access. The base and limits are checked.
6630 * @param GCPtrMem The address of the guest memory.
6631 */
6632VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6633{
6634 /* The lazy approach for now... */
6635 uint16_t const *pu16Src;
6636 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6637 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6638 if (rc == VINF_SUCCESS)
6639 {
6640 *pu16Dst = *pu16Src;
6641 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6642 }
6643 return rc;
6644}
6645
6646
6647#ifdef IEM_WITH_SETJMP
6648/**
6649 * Fetches a data word, longjmp on error.
6650 *
6651 * @returns The word.
6652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6653 * @param iSegReg The index of the segment register to use for
6654 * this access. The base and limits are checked.
6655 * @param GCPtrMem The address of the guest memory.
6656 */
6657uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6658{
6659 /* The lazy approach for now... */
6660 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6661 sizeof(*pu16Src) - 1);
6662 uint16_t const u16Ret = *pu16Src;
6663 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6664 return u16Ret;
6665}
6666#endif
6667
6668
6669/**
6670 * Fetches a data dword.
6671 *
6672 * @returns Strict VBox status code.
6673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6674 * @param pu32Dst Where to return the dword.
6675 * @param iSegReg The index of the segment register to use for
6676 * this access. The base and limits are checked.
6677 * @param GCPtrMem The address of the guest memory.
6678 */
6679VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6680{
6681 /* The lazy approach for now... */
6682 uint32_t const *pu32Src;
6683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6684 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6685 if (rc == VINF_SUCCESS)
6686 {
6687 *pu32Dst = *pu32Src;
6688 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6689 }
6690 return rc;
6691}
6692
6693
6694/**
6695 * Fetches a data dword and zero extends it to a qword.
6696 *
6697 * @returns Strict VBox status code.
6698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6699 * @param pu64Dst Where to return the qword.
6700 * @param iSegReg The index of the segment register to use for
6701 * this access. The base and limits are checked.
6702 * @param GCPtrMem The address of the guest memory.
6703 */
6704VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6705{
6706 /* The lazy approach for now... */
6707 uint32_t const *pu32Src;
6708 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6709 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6710 if (rc == VINF_SUCCESS)
6711 {
6712 *pu64Dst = *pu32Src;
6713 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6714 }
6715 return rc;
6716}
6717
6718
6719#ifdef IEM_WITH_SETJMP
6720
6721/**
6722 * Fetches a data dword, longjmp on error, fallback/safe version.
6723 *
6724 * @returns The dword.
6725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6726 * @param iSegReg The index of the segment register to use for
6727 * this access. The base and limits are checked.
6728 * @param GCPtrMem The address of the guest memory.
6729 */
6730uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6731{
6732 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6733 sizeof(*pu32Src) - 1);
6734 uint32_t const u32Ret = *pu32Src;
6735 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6736 return u32Ret;
6737}
6738
6739
6740/**
6741 * Fetches a data dword, longjmp on error.
6742 *
6743 * @returns The dword.
6744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6745 * @param iSegReg The index of the segment register to use for
6746 * this access. The base and limits are checked.
6747 * @param GCPtrMem The address of the guest memory.
6748 */
6749uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6750{
6751# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6752 /*
6753 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6754 */
6755 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6756 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6757 {
6758 /*
6759 * TLB lookup.
6760 */
6761 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6762 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6763 if (pTlbe->uTag == uTag)
6764 {
6765 /*
6766 * Check TLB page table level access flags.
6767 */
6768 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6769 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6770 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6771 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6772 {
6773 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6774
6775 /*
6776 * Alignment check:
6777 */
6778 /** @todo check priority \#AC vs \#PF */
6779 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6780 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6781 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6782 || pVCpu->iem.s.uCpl != 3)
6783 {
6784 /*
6785 * Fetch and return the dword
6786 */
6787 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6788 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6789 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6790 }
6791 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6792 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6793 }
6794 }
6795 }
6796
6797 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exceptions,
6798 an outdated page pointer, or other troubles. */
6799 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6800 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6801
6802# else
6803 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
6804 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6805 uint32_t const u32Ret = *pu32Src;
6806 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6807 return u32Ret;
6808# endif
6809}
6810#endif
6811
6812
6813#ifdef SOME_UNUSED_FUNCTION
6814/**
6815 * Fetches a data dword and sign extends it to a qword.
6816 *
6817 * @returns Strict VBox status code.
6818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6819 * @param pu64Dst Where to return the sign extended value.
6820 * @param iSegReg The index of the segment register to use for
6821 * this access. The base and limits are checked.
6822 * @param GCPtrMem The address of the guest memory.
6823 */
6824VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6825{
6826 /* The lazy approach for now... */
6827 int32_t const *pi32Src;
6828 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
6829 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
6830 if (rc == VINF_SUCCESS)
6831 {
6832 *pu64Dst = *pi32Src;
6833 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6834 }
6835#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6836 else
6837 *pu64Dst = 0;
6838#endif
6839 return rc;
6840}
6841#endif
6842
6843
6844/**
6845 * Fetches a data qword.
6846 *
6847 * @returns Strict VBox status code.
6848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6849 * @param pu64Dst Where to return the qword.
6850 * @param iSegReg The index of the segment register to use for
6851 * this access. The base and limits are checked.
6852 * @param GCPtrMem The address of the guest memory.
6853 */
6854VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6855{
6856 /* The lazy approach for now... */
6857 uint64_t const *pu64Src;
6858 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6859 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6860 if (rc == VINF_SUCCESS)
6861 {
6862 *pu64Dst = *pu64Src;
6863 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6864 }
6865 return rc;
6866}
6867
6868
6869#ifdef IEM_WITH_SETJMP
6870/**
6871 * Fetches a data qword, longjmp on error.
6872 *
6873 * @returns The qword.
6874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6875 * @param iSegReg The index of the segment register to use for
6876 * this access. The base and limits are checked.
6877 * @param GCPtrMem The address of the guest memory.
6878 */
6879uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6880{
6881 /* The lazy approach for now... */
6882 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
6883 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6884 uint64_t const u64Ret = *pu64Src;
6885 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6886 return u64Ret;
6887}
6888#endif
6889
6890
6891/**
6892 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6893 *
6894 * @returns Strict VBox status code.
6895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6896 * @param pu64Dst Where to return the qword.
6897 * @param iSegReg The index of the segment register to use for
6898 * this access. The base and limits are checked.
6899 * @param GCPtrMem The address of the guest memory.
6900 */
6901VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6902{
6903 /* The lazy approach for now... */
6904 uint64_t const *pu64Src;
6905 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6906 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6907 if (rc == VINF_SUCCESS)
6908 {
6909 *pu64Dst = *pu64Src;
6910 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6911 }
6912 return rc;
6913}
6914
6915
6916#ifdef IEM_WITH_SETJMP
6917/**
6918 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6919 *
6920 * @returns The qword.
6921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6922 * @param iSegReg The index of the segment register to use for
6923 * this access. The base and limits are checked.
6924 * @param GCPtrMem The address of the guest memory.
6925 */
6926uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6927{
6928 /* The lazy approach for now... */
6929 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6930 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6931 uint64_t const u64Ret = *pu64Src;
6932 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6933 return u64Ret;
6934}
6935#endif
6936
6937
6938/**
6939 * Fetches a data tword.
6940 *
6941 * @returns Strict VBox status code.
6942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6943 * @param pr80Dst Where to return the tword.
6944 * @param iSegReg The index of the segment register to use for
6945 * this access. The base and limits are checked.
6946 * @param GCPtrMem The address of the guest memory.
6947 */
6948VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6949{
6950 /* The lazy approach for now... */
6951 PCRTFLOAT80U pr80Src;
6952 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
6953 if (rc == VINF_SUCCESS)
6954 {
6955 *pr80Dst = *pr80Src;
6956 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6957 }
6958 return rc;
6959}
6960
6961
6962#ifdef IEM_WITH_SETJMP
6963/**
6964 * Fetches a data tword, longjmp on error.
6965 *
6966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6967 * @param pr80Dst Where to return the tword.
6968 * @param iSegReg The index of the segment register to use for
6969 * this access. The base and limits are checked.
6970 * @param GCPtrMem The address of the guest memory.
6971 */
6972void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6973{
6974 /* The lazy approach for now... */
6975 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
6976 *pr80Dst = *pr80Src;
6977 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6978}
6979#endif
6980
6981
6982/**
6983 * Fetches a data decimal tword.
6984 *
6985 * @returns Strict VBox status code.
6986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6987 * @param pd80Dst Where to return the tword.
6988 * @param iSegReg The index of the segment register to use for
6989 * this access. The base and limits are checked.
6990 * @param GCPtrMem The address of the guest memory.
6991 */
6992VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6993{
6994 /* The lazy approach for now... */
6995 PCRTPBCD80U pd80Src;
6996 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
6997 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
6998 if (rc == VINF_SUCCESS)
6999 {
7000 *pd80Dst = *pd80Src;
7001 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7002 }
7003 return rc;
7004}
7005
7006
7007#ifdef IEM_WITH_SETJMP
7008/**
7009 * Fetches a data decimal tword, longjmp on error.
7010 *
7011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7012 * @param pd80Dst Where to return the tword.
7013 * @param iSegReg The index of the segment register to use for
7014 * this access. The base and limits are checked.
7015 * @param GCPtrMem The address of the guest memory.
7016 */
7017void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7018{
7019 /* The lazy approach for now... */
7020 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7021 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7022 *pd80Dst = *pd80Src;
7023 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7024}
7025#endif
7026
7027
7028/**
7029 * Fetches a data dqword (double qword), generally SSE related.
7030 *
7031 * @returns Strict VBox status code.
7032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7033 * @param pu128Dst Where to return the dqword.
7034 * @param iSegReg The index of the segment register to use for
7035 * this access. The base and limits are checked.
7036 * @param GCPtrMem The address of the guest memory.
7037 */
7038VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7039{
7040 /* The lazy approach for now... */
7041 PCRTUINT128U pu128Src;
7042 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7043 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7044 if (rc == VINF_SUCCESS)
7045 {
7046 pu128Dst->au64[0] = pu128Src->au64[0];
7047 pu128Dst->au64[1] = pu128Src->au64[1];
7048 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7049 }
7050 return rc;
7051}
7052
7053
7054#ifdef IEM_WITH_SETJMP
7055/**
7056 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
7057 *
7058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7059 * @param pu128Dst Where to return the dqword.
7060 * @param iSegReg The index of the segment register to use for
7061 * this access. The base and limits are checked.
7062 * @param GCPtrMem The address of the guest memory.
7063 */
7064void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7065{
7066 /* The lazy approach for now... */
7067 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7068 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7069 pu128Dst->au64[0] = pu128Src->au64[0];
7070 pu128Dst->au64[1] = pu128Src->au64[1];
7071 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7072}
7073#endif
7074
7075
7076/**
7077 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7078 * related.
7079 *
7080 * Raises \#GP(0) if not aligned.
7081 *
7082 * @returns Strict VBox status code.
7083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7084 * @param pu128Dst Where to return the dqword.
7085 * @param iSegReg The index of the segment register to use for
7086 * this access. The base and limits are checked.
7087 * @param GCPtrMem The address of the guest memory.
7088 */
7089VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7090{
7091 /* The lazy approach for now... */
7092 PCRTUINT128U pu128Src;
7093 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7094 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7095 if (rc == VINF_SUCCESS)
7096 {
7097 pu128Dst->au64[0] = pu128Src->au64[0];
7098 pu128Dst->au64[1] = pu128Src->au64[1];
7099 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7100 }
7101 return rc;
7102}
7103
7104
7105#ifdef IEM_WITH_SETJMP
7106/**
7107 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7108 * related, longjmp on error.
7109 *
7110 * Raises \#GP(0) if not aligned.
7111 *
7112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7113 * @param pu128Dst Where to return the dqword.
7114 * @param iSegReg The index of the segment register to use for
7115 * this access. The base and limits are checked.
7116 * @param GCPtrMem The address of the guest memory.
7117 */
7118void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7119{
7120 /* The lazy approach for now... */
7121 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7122 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7123 pu128Dst->au64[0] = pu128Src->au64[0];
7124 pu128Dst->au64[1] = pu128Src->au64[1];
7125 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7126}
7127#endif
7128
7129
7130/**
7131 * Fetches a data oword (octo word), generally AVX related.
7132 *
7133 * @returns Strict VBox status code.
7134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7135 * @param pu256Dst Where to return the oword.
7136 * @param iSegReg The index of the segment register to use for
7137 * this access. The base and limits are checked.
7138 * @param GCPtrMem The address of the guest memory.
7139 */
7140VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7141{
7142 /* The lazy approach for now... */
7143 PCRTUINT256U pu256Src;
7144 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7145 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7146 if (rc == VINF_SUCCESS)
7147 {
7148 pu256Dst->au64[0] = pu256Src->au64[0];
7149 pu256Dst->au64[1] = pu256Src->au64[1];
7150 pu256Dst->au64[2] = pu256Src->au64[2];
7151 pu256Dst->au64[3] = pu256Src->au64[3];
7152 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7153 }
7154 return rc;
7155}
7156
7157
7158#ifdef IEM_WITH_SETJMP
7159/**
7160 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7161 *
7162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7163 * @param pu256Dst Where to return the oword.
7164 * @param iSegReg The index of the segment register to use for
7165 * this access. The base and limits are checked.
7166 * @param GCPtrMem The address of the guest memory.
7167 */
7168void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7169{
7170 /* The lazy approach for now... */
7171 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7172 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7173 pu256Dst->au64[0] = pu256Src->au64[0];
7174 pu256Dst->au64[1] = pu256Src->au64[1];
7175 pu256Dst->au64[2] = pu256Src->au64[2];
7176 pu256Dst->au64[3] = pu256Src->au64[3];
7177 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7178}
7179#endif
7180
7181
7182/**
7183 * Fetches a data oword (octo word) at an aligned address, generally AVX
7184 * related.
7185 *
7186 * Raises \#GP(0) if not aligned.
7187 *
7188 * @returns Strict VBox status code.
7189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7190 * @param pu256Dst Where to return the oword.
7191 * @param iSegReg The index of the segment register to use for
7192 * this access. The base and limits are checked.
7193 * @param GCPtrMem The address of the guest memory.
7194 */
7195VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7196{
7197 /* The lazy approach for now... */
7198 PCRTUINT256U pu256Src;
7199 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7200 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7201 if (rc == VINF_SUCCESS)
7202 {
7203 pu256Dst->au64[0] = pu256Src->au64[0];
7204 pu256Dst->au64[1] = pu256Src->au64[1];
7205 pu256Dst->au64[2] = pu256Src->au64[2];
7206 pu256Dst->au64[3] = pu256Src->au64[3];
7207 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7208 }
7209 return rc;
7210}
7211
7212
7213#ifdef IEM_WITH_SETJMP
7214/**
7215 * Fetches a data oword (octo word) at an aligned address, generally AVX
7216 * related, longjmp on error.
7217 *
7218 * Raises \#GP(0) if not aligned.
7219 *
7220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7221 * @param pu256Dst Where to return the oword.
7222 * @param iSegReg The index of the segment register to use for
7223 * this access. The base and limits are checked.
7224 * @param GCPtrMem The address of the guest memory.
7225 */
7226void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7227{
7228 /* The lazy approach for now... */
7229 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7230 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7231 pu256Dst->au64[0] = pu256Src->au64[0];
7232 pu256Dst->au64[1] = pu256Src->au64[1];
7233 pu256Dst->au64[2] = pu256Src->au64[2];
7234 pu256Dst->au64[3] = pu256Src->au64[3];
7235 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7236}
7237#endif
7238
7239
7240
7241/**
7242 * Fetches a descriptor register (lgdt, lidt).
7243 *
7244 * @returns Strict VBox status code.
7245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7246 * @param pcbLimit Where to return the limit.
7247 * @param pGCPtrBase Where to return the base.
7248 * @param iSegReg The index of the segment register to use for
7249 * this access. The base and limits are checked.
7250 * @param GCPtrMem The address of the guest memory.
7251 * @param enmOpSize The effective operand size.
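 *
 * Hypothetical call from an LGDT/LIDT style implementation (illustration only;
 * the local variable names are made up):
 * @code
 *      uint16_t     cbLimit;
 *      RTGCPTR      GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
 * @endcode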
7252 */
7253VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7254 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7255{
7256 /*
7257 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7258 * little special:
7259 * - The two reads are done separately.
7260 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7261 * - We suspect the 386 to actually commit the limit before the base in
7262 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7263 * don't try to emulate this eccentric behavior, because it's not well
7264 * enough understood and rather hard to trigger.
7265 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
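     * The operand itself is a pseudo-descriptor: a 16-bit limit at offset 0 followed
     * by the base at offset 2 (of which only 24 bits are used for 16-bit operand size,
     * see the 0x00ffffff mask below, all 32 bits for 32-bit operand size, and the full
     * 64 bits in long mode), hence the separate fetches at GCPtrMem and GCPtrMem + 2.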
7266 */
7267 VBOXSTRICTRC rcStrict;
7268 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7269 {
7270 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7271 if (rcStrict == VINF_SUCCESS)
7272 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7273 }
7274 else
7275 {
7276    uint32_t uTmp = 0; /* (Silences the Visual C++ 'maybe used uninitialized' warning.) */
7277 if (enmOpSize == IEMMODE_32BIT)
7278 {
7279 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7280 {
7281 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7282 if (rcStrict == VINF_SUCCESS)
7283 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7284 }
7285 else
7286 {
7287 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7288 if (rcStrict == VINF_SUCCESS)
7289 {
7290 *pcbLimit = (uint16_t)uTmp;
7291 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7292 }
7293 }
7294 if (rcStrict == VINF_SUCCESS)
7295 *pGCPtrBase = uTmp;
7296 }
7297 else
7298 {
7299 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7300 if (rcStrict == VINF_SUCCESS)
7301 {
7302 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7303 if (rcStrict == VINF_SUCCESS)
7304 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7305 }
7306 }
7307 }
7308 return rcStrict;
7309}
7310
7311
7312
7313/**
7314 * Stores a data byte.
7315 *
7316 * @returns Strict VBox status code.
7317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7318 * @param iSegReg The index of the segment register to use for
7319 * this access. The base and limits are checked.
7320 * @param GCPtrMem The address of the guest memory.
7321 * @param u8Value The value to store.
7322 */
7323VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7324{
7325 /* The lazy approach for now... */
7326 uint8_t *pu8Dst;
7327 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7328 if (rc == VINF_SUCCESS)
7329 {
7330 *pu8Dst = u8Value;
7331 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7332 }
7333 return rc;
7334}
7335
7336
7337#ifdef IEM_WITH_SETJMP
7338/**
7339 * Stores a data byte, longjmp on error.
7340 *
7341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7342 * @param iSegReg The index of the segment register to use for
7343 * this access. The base and limits are checked.
7344 * @param GCPtrMem The address of the guest memory.
7345 * @param u8Value The value to store.
7346 */
7347void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7348{
7349 /* The lazy approach for now... */
7350 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7351 *pu8Dst = u8Value;
7352 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7353}
7354#endif
7355
7356
7357/**
7358 * Stores a data word.
7359 *
7360 * @returns Strict VBox status code.
7361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7362 * @param iSegReg The index of the segment register to use for
7363 * this access. The base and limits are checked.
7364 * @param GCPtrMem The address of the guest memory.
7365 * @param u16Value The value to store.
7366 */
7367VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7368{
7369 /* The lazy approach for now... */
7370 uint16_t *pu16Dst;
7371 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7372 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7373 if (rc == VINF_SUCCESS)
7374 {
7375 *pu16Dst = u16Value;
7376 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7377 }
7378 return rc;
7379}
7380
7381
7382#ifdef IEM_WITH_SETJMP
7383/**
7384 * Stores a data word, longjmp on error.
7385 *
7386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7387 * @param iSegReg The index of the segment register to use for
7388 * this access. The base and limits are checked.
7389 * @param GCPtrMem The address of the guest memory.
7390 * @param u16Value The value to store.
7391 */
7392void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7393{
7394 /* The lazy approach for now... */
7395 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7396 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7397 *pu16Dst = u16Value;
7398 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7399}
7400#endif
7401
7402
7403/**
7404 * Stores a data dword.
7405 *
7406 * @returns Strict VBox status code.
7407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7408 * @param iSegReg The index of the segment register to use for
7409 * this access. The base and limits are checked.
7410 * @param GCPtrMem The address of the guest memory.
7411 * @param u32Value The value to store.
7412 */
7413VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7414{
7415 /* The lazy approach for now... */
7416 uint32_t *pu32Dst;
7417 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7418 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7419 if (rc == VINF_SUCCESS)
7420 {
7421 *pu32Dst = u32Value;
7422 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7423 }
7424 return rc;
7425}
7426
7427
7428#ifdef IEM_WITH_SETJMP
7429/**
7430 * Stores a data dword, longjmp on error.
7431 *
7433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7434 * @param iSegReg The index of the segment register to use for
7435 * this access. The base and limits are checked.
7436 * @param GCPtrMem The address of the guest memory.
7437 * @param u32Value The value to store.
7438 */
7439void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7440{
7441 /* The lazy approach for now... */
7442 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7443 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7444 *pu32Dst = u32Value;
7445 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7446}
7447#endif
7448
7449
7450/**
7451 * Stores a data qword.
7452 *
7453 * @returns Strict VBox status code.
7454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7455 * @param iSegReg The index of the segment register to use for
7456 * this access. The base and limits are checked.
7457 * @param GCPtrMem The address of the guest memory.
7458 * @param u64Value The value to store.
7459 */
7460VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7461{
7462 /* The lazy approach for now... */
7463 uint64_t *pu64Dst;
7464 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7465 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7466 if (rc == VINF_SUCCESS)
7467 {
7468 *pu64Dst = u64Value;
7469 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7470 }
7471 return rc;
7472}
7473
7474
7475#ifdef IEM_WITH_SETJMP
7476/**
7477 * Stores a data qword, longjmp on error.
7478 *
7479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7480 * @param iSegReg The index of the segment register to use for
7481 * this access. The base and limits are checked.
7482 * @param GCPtrMem The address of the guest memory.
7483 * @param u64Value The value to store.
7484 */
7485void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7486{
7487 /* The lazy approach for now... */
7488 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7489 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7490 *pu64Dst = u64Value;
7491 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7492}
7493#endif
7494
7495
7496/**
7497 * Stores a data dqword.
7498 *
7499 * @returns Strict VBox status code.
7500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7501 * @param iSegReg The index of the segment register to use for
7502 * this access. The base and limits are checked.
7503 * @param GCPtrMem The address of the guest memory.
7504 * @param u128Value The value to store.
7505 */
7506VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7507{
7508 /* The lazy approach for now... */
7509 PRTUINT128U pu128Dst;
7510 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7511 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7512 if (rc == VINF_SUCCESS)
7513 {
7514 pu128Dst->au64[0] = u128Value.au64[0];
7515 pu128Dst->au64[1] = u128Value.au64[1];
7516 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7517 }
7518 return rc;
7519}
7520
7521
7522#ifdef IEM_WITH_SETJMP
7523/**
7524 * Stores a data dqword, longjmp on error.
7525 *
7526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7527 * @param iSegReg The index of the segment register to use for
7528 * this access. The base and limits are checked.
7529 * @param GCPtrMem The address of the guest memory.
7530 * @param u128Value The value to store.
7531 */
7532void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7533{
7534 /* The lazy approach for now... */
7535 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7536 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7537 pu128Dst->au64[0] = u128Value.au64[0];
7538 pu128Dst->au64[1] = u128Value.au64[1];
7539 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7540}
7541#endif
7542
7543
7544/**
7545 * Stores a data dqword, SSE aligned.
7546 *
7547 * @returns Strict VBox status code.
7548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7549 * @param iSegReg The index of the segment register to use for
7550 * this access. The base and limits are checked.
7551 * @param GCPtrMem The address of the guest memory.
7552 * @param u128Value The value to store.
7553 */
7554VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7555{
7556 /* The lazy approach for now... */
7557 PRTUINT128U pu128Dst;
7558 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7559 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7560 if (rc == VINF_SUCCESS)
7561 {
7562 pu128Dst->au64[0] = u128Value.au64[0];
7563 pu128Dst->au64[1] = u128Value.au64[1];
7564 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7565 }
7566 return rc;
7567}
7568
7569
7570#ifdef IEM_WITH_SETJMP
7571/**
7572 * Stores a data dqword, SSE aligned, longjmp on error.
7573 *
7575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7576 * @param iSegReg The index of the segment register to use for
7577 * this access. The base and limits are checked.
7578 * @param GCPtrMem The address of the guest memory.
7579 * @param u128Value The value to store.
7580 */
7581void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7582{
7583 /* The lazy approach for now... */
7584 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7585 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7586 pu128Dst->au64[0] = u128Value.au64[0];
7587 pu128Dst->au64[1] = u128Value.au64[1];
7588 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7589}
7590#endif
7591
7592
7593/**
7594 * Stores a data qqword (256 bits).
7595 *
7596 * @returns Strict VBox status code.
7597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7598 * @param iSegReg The index of the segment register to use for
7599 * this access. The base and limits are checked.
7600 * @param GCPtrMem The address of the guest memory.
7601 * @param pu256Value Pointer to the value to store.
7602 */
7603VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7604{
7605 /* The lazy approach for now... */
7606 PRTUINT256U pu256Dst;
7607 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7608 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7609 if (rc == VINF_SUCCESS)
7610 {
7611 pu256Dst->au64[0] = pu256Value->au64[0];
7612 pu256Dst->au64[1] = pu256Value->au64[1];
7613 pu256Dst->au64[2] = pu256Value->au64[2];
7614 pu256Dst->au64[3] = pu256Value->au64[3];
7615 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7616 }
7617 return rc;
7618}
7619
7620
7621#ifdef IEM_WITH_SETJMP
7622/**
7623 * Stores a data qqword (256 bits), longjmp on error.
7624 *
7625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7626 * @param iSegReg The index of the segment register to use for
7627 * this access. The base and limits are checked.
7628 * @param GCPtrMem The address of the guest memory.
7629 * @param pu256Value Pointer to the value to store.
7630 */
7631void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7632{
7633 /* The lazy approach for now... */
7634 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7635 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7636 pu256Dst->au64[0] = pu256Value->au64[0];
7637 pu256Dst->au64[1] = pu256Value->au64[1];
7638 pu256Dst->au64[2] = pu256Value->au64[2];
7639 pu256Dst->au64[3] = pu256Value->au64[3];
7640 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7641}
7642#endif
7643
7644
7645/**
7646 * Stores a data qqword (256 bits), AVX \#GP(0) aligned.
7647 *
7648 * @returns Strict VBox status code.
7649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7650 * @param iSegReg The index of the segment register to use for
7651 * this access. The base and limits are checked.
7652 * @param GCPtrMem The address of the guest memory.
7653 * @param pu256Value Pointer to the value to store.
7654 */
7655VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7656{
7657 /* The lazy approach for now... */
7658 PRTUINT256U pu256Dst;
7659 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7660 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7661 if (rc == VINF_SUCCESS)
7662 {
7663 pu256Dst->au64[0] = pu256Value->au64[0];
7664 pu256Dst->au64[1] = pu256Value->au64[1];
7665 pu256Dst->au64[2] = pu256Value->au64[2];
7666 pu256Dst->au64[3] = pu256Value->au64[3];
7667 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7668 }
7669 return rc;
7670}
7671
7672
7673#ifdef IEM_WITH_SETJMP
7674/**
7675 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7676 *
7678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7679 * @param iSegReg The index of the segment register to use for
7680 * this access. The base and limits are checked.
7681 * @param GCPtrMem The address of the guest memory.
7682 * @param pu256Value Pointer to the value to store.
7683 */
7684void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7685{
7686 /* The lazy approach for now... */
7687 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7688 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7689 pu256Dst->au64[0] = pu256Value->au64[0];
7690 pu256Dst->au64[1] = pu256Value->au64[1];
7691 pu256Dst->au64[2] = pu256Value->au64[2];
7692 pu256Dst->au64[3] = pu256Value->au64[3];
7693 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7694}
7695#endif
7696
7697
7698/**
7699 * Stores a descriptor register (sgdt, sidt).
7700 *
7701 * @returns Strict VBox status code.
7702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7703 * @param cbLimit The limit.
7704 * @param GCPtrBase The base address.
7705 * @param iSegReg The index of the segment register to use for
7706 * this access. The base and limits are checked.
7707 * @param GCPtrMem The address of the guest memory.
7708 */
7709VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7710{
7711 /*
7712 * The SIDT and SGDT instructions actually store the data using two
7713 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7714 * do not respond to operand-size prefixes.
7715 */
7716 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7717 if (rcStrict == VINF_SUCCESS)
7718 {
7719 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7720 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7721 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7722 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7723 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7724 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7725 else
7726 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7727 }
7728 return rcStrict;
7729}
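/*
 * Layout sketch (illustrative assumption derived from the code above): for a
 * 16-bit guest on a 286-class target CPU the two independent stores end up
 * writing 6 bytes at GCPtrMem:
 *
 *      offset 0..1 : cbLimit                           (16-bit table limit)
 *      offset 2..5 : (uint32_t)GCPtrBase | 0xff000000  (24-bit base, top byte forced to 0xFF)
 *
 * 32-bit guests get the full 32-bit base and 64-bit guests the full 64-bit base.
 */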
7730
7731
7732/**
7733 * Pushes a word onto the stack.
7734 *
7735 * @returns Strict VBox status code.
7736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7737 * @param u16Value The value to push.
7738 */
7739VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7740{
7741 /* Decrement the stack pointer. */
7742 uint64_t uNewRsp;
7743 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7744
7745 /* Write the word the lazy way. */
7746 uint16_t *pu16Dst;
7747 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7748 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7749 if (rc == VINF_SUCCESS)
7750 {
7751 *pu16Dst = u16Value;
7752 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7753 }
7754
7755 /* Commit the new RSP value unless an access handler made trouble. */
7756 if (rc == VINF_SUCCESS)
7757 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7758
7759 return rc;
7760}
7761
7762
7763/**
7764 * Pushes a dword onto the stack.
7765 *
7766 * @returns Strict VBox status code.
7767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7768 * @param u32Value The value to push.
7769 */
7770VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7771{
7772 /* Decrement the stack pointer. */
7773 uint64_t uNewRsp;
7774 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7775
7776 /* Write the dword the lazy way. */
7777 uint32_t *pu32Dst;
7778 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7779 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7780 if (rc == VINF_SUCCESS)
7781 {
7782 *pu32Dst = u32Value;
7783 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7784 }
7785
7786 /* Commit the new RSP value unless an access handler made trouble. */
7787 if (rc == VINF_SUCCESS)
7788 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7789
7790 return rc;
7791}
7792
7793
7794/**
7795 * Pushes a dword segment register value onto the stack.
7796 *
7797 * @returns Strict VBox status code.
7798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7799 * @param u32Value The value to push.
7800 */
7801VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7802{
7803 /* Decrement the stack pointer. */
7804 uint64_t uNewRsp;
7805 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7806
7807 /* The Intel docs talk about zero extending the selector register
7808 value. My actual Intel CPU here might be zero extending the value
7809 but it still only writes the lower word... */
7810 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7811 * happens when crossing an electric page boundary: is the high word checked
7812 * for write accessibility or not? Probably it is. What about segment limits?
7813 * It appears this behavior is also shared with trap error codes.
7814 *
7815 * The docs indicate the behavior may have changed with the Pentium or the
7816 * Pentium Pro. Check on ancient hardware to see when it actually changed. */
7817 uint16_t *pu16Dst;
7818 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
7819 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
7820 if (rc == VINF_SUCCESS)
7821 {
7822 *pu16Dst = (uint16_t)u32Value;
7823 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7824 }
7825
7826 /* Commit the new RSP value unless an access handler made trouble. */
7827 if (rc == VINF_SUCCESS)
7828 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7829
7830 return rc;
7831}
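/*
 * Behaviour sketch (illustrative, as implemented above; real hardware details
 * are still open per the todo): with a 32-bit operand size a segment register
 * push reserves a 4-byte stack slot but only writes the low word:
 *
 *      [new ESP + 0..1] = (uint16_t)u32Value   // the selector value
 *      [new ESP + 2..3] = unchanged            // previous stack contents survive
 */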
7832
7833
7834/**
7835 * Pushes a qword onto the stack.
7836 *
7837 * @returns Strict VBox status code.
7838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7839 * @param u64Value The value to push.
7840 */
7841VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7842{
7843 /* Decrement the stack pointer. */
7844 uint64_t uNewRsp;
7845 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7846
7847 /* Write the qword the lazy way. */
7848 uint64_t *pu64Dst;
7849 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7850 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7851 if (rc == VINF_SUCCESS)
7852 {
7853 *pu64Dst = u64Value;
7854 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7855 }
7856
7857 /* Commit the new RSP value unless an access handler made trouble. */
7858 if (rc == VINF_SUCCESS)
7859 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7860
7861 return rc;
7862}
7863
7864
7865/**
7866 * Pops a word from the stack.
7867 *
7868 * @returns Strict VBox status code.
7869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7870 * @param pu16Value Where to store the popped value.
7871 */
7872VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7873{
7874 /* Increment the stack pointer. */
7875 uint64_t uNewRsp;
7876 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7877
7878 /* Fetch the word the lazy way. */
7879 uint16_t const *pu16Src;
7880 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
7881 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
7882 if (rc == VINF_SUCCESS)
7883 {
7884 *pu16Value = *pu16Src;
7885 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7886
7887 /* Commit the new RSP value. */
7888 if (rc == VINF_SUCCESS)
7889 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7890 }
7891
7892 return rc;
7893}
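/*
 * Note in sketch form (illustrative; u16Val is a hypothetical local): the pop
 * helpers only commit the new RSP after both the read and the unmap succeeded,
 * so a failing access leaves the guest RSP unchanged:
 *
 *      uint16_t u16Val;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU16(pVCpu, &u16Val);
 *      // on any status other than VINF_SUCCESS, pVCpu->cpum.GstCtx.rsp is untouched here
 */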
7894
7895
7896/**
7897 * Pops a dword from the stack.
7898 *
7899 * @returns Strict VBox status code.
7900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7901 * @param pu32Value Where to store the popped value.
7902 */
7903VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7904{
7905 /* Increment the stack pointer. */
7906 uint64_t uNewRsp;
7907 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7908
7909 /* Fetch the dword the lazy way. */
7910 uint32_t const *pu32Src;
7911 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
7912 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
7913 if (rc == VINF_SUCCESS)
7914 {
7915 *pu32Value = *pu32Src;
7916 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7917
7918 /* Commit the new RSP value. */
7919 if (rc == VINF_SUCCESS)
7920 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7921 }
7922
7923 return rc;
7924}
7925
7926
7927/**
7928 * Pops a qword from the stack.
7929 *
7930 * @returns Strict VBox status code.
7931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7932 * @param pu64Value Where to store the popped value.
7933 */
7934VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7935{
7936 /* Increment the stack pointer. */
7937 uint64_t uNewRsp;
7938 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
7939
7940 /* Fetch the qword the lazy way. */
7941 uint64_t const *pu64Src;
7942 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
7943 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
7944 if (rc == VINF_SUCCESS)
7945 {
7946 *pu64Value = *pu64Src;
7947 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7948
7949 /* Commit the new RSP value. */
7950 if (rc == VINF_SUCCESS)
7951 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7952 }
7953
7954 return rc;
7955}
7956
7957
7958/**
7959 * Pushes a word onto the stack, using a temporary stack pointer.
7960 *
7961 * @returns Strict VBox status code.
7962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7963 * @param u16Value The value to push.
7964 * @param pTmpRsp Pointer to the temporary stack pointer.
7965 */
7966VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7967{
7968 /* Decrement the stack pointer. */
7969 RTUINT64U NewRsp = *pTmpRsp;
7970 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
7971
7972 /* Write the word the lazy way. */
7973 uint16_t *pu16Dst;
7974 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7975 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7976 if (rc == VINF_SUCCESS)
7977 {
7978 *pu16Dst = u16Value;
7979 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7980 }
7981
7982 /* Commit the new RSP value unless an access handler made trouble. */
7983 if (rc == VINF_SUCCESS)
7984 *pTmpRsp = NewRsp;
7985
7986 return rc;
7987}
7988
7989
7990/**
7991 * Pushes a dword onto the stack, using a temporary stack pointer.
7992 *
7993 * @returns Strict VBox status code.
7994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7995 * @param u32Value The value to push.
7996 * @param pTmpRsp Pointer to the temporary stack pointer.
7997 */
7998VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7999{
8000 /* Decrement the stack pointer. */
8001 RTUINT64U NewRsp = *pTmpRsp;
8002 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8003
8004 /* Write the dword the lazy way. */
8005 uint32_t *pu32Dst;
8006 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8007 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8008 if (rc == VINF_SUCCESS)
8009 {
8010 *pu32Dst = u32Value;
8011 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8012 }
8013
8014 /* Commit the new RSP value unless an access handler made trouble. */
8015 if (rc == VINF_SUCCESS)
8016 *pTmpRsp = NewRsp;
8017
8018 return rc;
8019}
8020
8021
8022/**
8023 * Pushes a qword onto the stack, using a temporary stack pointer.
8024 *
8025 * @returns Strict VBox status code.
8026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8027 * @param u64Value The value to push.
8028 * @param pTmpRsp Pointer to the temporary stack pointer.
8029 */
8030VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8031{
8032 /* Decrement the stack pointer. */
8033 RTUINT64U NewRsp = *pTmpRsp;
8034 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8035
8036 /* Write the qword the lazy way. */
8037 uint64_t *pu64Dst;
8038 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8039 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8040 if (rc == VINF_SUCCESS)
8041 {
8042 *pu64Dst = u64Value;
8043 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8044 }
8045
8046 /* Commit the new RSP value unless an access handler made trouble. */
8047 if (rc == VINF_SUCCESS)
8048 *pTmpRsp = NewRsp;
8049
8050 return rc;
8051}
8052
8053
8054/**
8055 * Pops a word from the stack, using a temporary stack pointer.
8056 *
8057 * @returns Strict VBox status code.
8058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8059 * @param pu16Value Where to store the popped value.
8060 * @param pTmpRsp Pointer to the temporary stack pointer.
8061 */
8062VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8063{
8064 /* Increment the stack pointer. */
8065 RTUINT64U NewRsp = *pTmpRsp;
8066 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8067
8068 /* Fetch the word the lazy way. */
8069 uint16_t const *pu16Src;
8070 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8071 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8072 if (rc == VINF_SUCCESS)
8073 {
8074 *pu16Value = *pu16Src;
8075 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8076
8077 /* Commit the new RSP value. */
8078 if (rc == VINF_SUCCESS)
8079 *pTmpRsp = NewRsp;
8080 }
8081
8082 return rc;
8083}
8084
8085
8086/**
8087 * Pops a dword from the stack, using a temporary stack pointer.
8088 *
8089 * @returns Strict VBox status code.
8090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8091 * @param pu32Value Where to store the popped value.
8092 * @param pTmpRsp Pointer to the temporary stack pointer.
8093 */
8094VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8095{
8096 /* Increment the stack pointer. */
8097 RTUINT64U NewRsp = *pTmpRsp;
8098 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8099
8100 /* Fetch the dword the lazy way. */
8101 uint32_t const *pu32Src;
8102 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8103 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8104 if (rc == VINF_SUCCESS)
8105 {
8106 *pu32Value = *pu32Src;
8107 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8108
8109 /* Commit the new RSP value. */
8110 if (rc == VINF_SUCCESS)
8111 *pTmpRsp = NewRsp;
8112 }
8113
8114 return rc;
8115}
8116
8117
8118/**
8119 * Pops a qword from the stack, using a temporary stack pointer.
8120 *
8121 * @returns Strict VBox status code.
8122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8123 * @param pu64Value Where to store the popped value.
8124 * @param pTmpRsp Pointer to the temporary stack pointer.
8125 */
8126VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8127{
8128 /* Increment the stack pointer. */
8129 RTUINT64U NewRsp = *pTmpRsp;
8130 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8131
8132 /* Fetch the qword the lazy way. */
8133 uint64_t const *pu64Src;
8134 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8135 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8136 if (rcStrict == VINF_SUCCESS)
8137 {
8138 *pu64Value = *pu64Src;
8139 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8140
8141 /* Commit the new RSP value. */
8142 if (rcStrict == VINF_SUCCESS)
8143 *pTmpRsp = NewRsp;
8144 }
8145
8146 return rcStrict;
8147}
8148
8149
8150/**
8151 * Begin a special stack push (used by interrupts, exceptions and such).
8152 *
8153 * This will raise \#SS or \#PF if appropriate.
8154 *
8155 * @returns Strict VBox status code.
8156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8157 * @param cbMem The number of bytes to push onto the stack.
8158 * @param cbAlign The alignment mask (7, 3, 1).
8159 * @param ppvMem Where to return the pointer to the stack memory.
8160 * As with the other memory functions this could be
8161 * direct access or bounce buffered access, so
8162 * don't commit register state until the commit call
8163 * succeeds.
8164 * @param puNewRsp Where to return the new RSP value. This must be
8165 * passed unchanged to
8166 * iemMemStackPushCommitSpecial().
8167 */
8168VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8169 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8170{
8171 Assert(cbMem < UINT8_MAX);
8172 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8173 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8174 IEM_ACCESS_STACK_W, cbAlign);
8175}
8176
8177
8178/**
8179 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8180 *
8181 * This will update the rSP.
8182 *
8183 * @returns Strict VBox status code.
8184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8185 * @param pvMem The pointer returned by
8186 * iemMemStackPushBeginSpecial().
8187 * @param uNewRsp The new RSP value returned by
8188 * iemMemStackPushBeginSpecial().
8189 */
8190VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8191{
8192 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8193 if (rcStrict == VINF_SUCCESS)
8194 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8195 return rcStrict;
8196}
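/*
 * Usage sketch (illustrative only; the local names and the error code value
 * are hypothetical): exception/interrupt delivery code is expected to pair the
 * two helpers above, filling in the mapped frame before committing:
 *
 *      uint64_t    *pu64Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, (void **)&pu64Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      *pu64Frame = uErrCode;                                                // write the frame contents
 *      rcStrict   = iemMemStackPushCommitSpecial(pVCpu, pu64Frame, uNewRsp); // unmaps and updates RSP on success
 */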
8197
8198
8199/**
8200 * Begin a special stack pop (used by iret, retf and such).
8201 *
8202 * This will raise \#SS or \#PF if appropriate.
8203 *
8204 * @returns Strict VBox status code.
8205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8206 * @param cbMem The number of bytes to pop from the stack.
8207 * @param cbAlign The alignment mask (7, 3, 1).
8208 * @param ppvMem Where to return the pointer to the stack memory.
8209 * @param puNewRsp Where to return the new RSP value. This must be
8210 * assigned to CPUMCTX::rsp manually some time
8211 * after iemMemStackPopDoneSpecial() has been
8212 * called.
8213 */
8214VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8215 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8216{
8217 Assert(cbMem < UINT8_MAX);
8218 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8219 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8220}
8221
8222
8223/**
8224 * Continue a special stack pop (used by iret and retf).
8225 *
8226 * This will raise \#SS or \#PF if appropriate.
8227 *
8228 * @returns Strict VBox status code.
8229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8230 * @param cbMem The number of bytes to pop from the stack.
8231 * @param ppvMem Where to return the pointer to the stack memory.
8232 * @param puNewRsp Where to return the new RSP value. This must be
8233 * assigned to CPUMCTX::rsp manually some time
8234 * after iemMemStackPopDoneSpecial() has been
8235 * called.
8236 */
8237VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8238{
8239 Assert(cbMem < UINT8_MAX);
8240 RTUINT64U NewRsp;
8241 NewRsp.u = *puNewRsp;
8242 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8243 *puNewRsp = NewRsp.u;
8244 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R,
8245 0 /* checked in iemMemStackPopBeginSpecial */);
8246}
8247
8248
8249/**
8250 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8251 * iemMemStackPopContinueSpecial).
8252 *
8253 * The caller will manually commit the rSP.
8254 *
8255 * @returns Strict VBox status code.
8256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8257 * @param pvMem The pointer returned by
8258 * iemMemStackPopBeginSpecial() or
8259 * iemMemStackPopContinueSpecial().
8260 */
8261VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8262{
8263 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8264}
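/*
 * Usage sketch (illustrative only; local names are hypothetical): an iret/retf
 * style caller reads the mapped stack bytes, releases the mapping via the done
 * helper, and only then commits RSP itself:
 *
 *      uint64_t const *pu64Frame;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, (void const **)&pu64Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint64_t const uFrameValue = *pu64Frame;
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;   // committing RSP is the caller's job
 */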
8265
8266
8267/**
8268 * Fetches a system table byte.
8269 *
8270 * @returns Strict VBox status code.
8271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8272 * @param pbDst Where to return the byte.
8273 * @param iSegReg The index of the segment register to use for
8274 * this access. The base and limits are checked.
8275 * @param GCPtrMem The address of the guest memory.
8276 */
8277VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8278{
8279 /* The lazy approach for now... */
8280 uint8_t const *pbSrc;
8281 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8282 if (rc == VINF_SUCCESS)
8283 {
8284 *pbDst = *pbSrc;
8285 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8286 }
8287 return rc;
8288}
8289
8290
8291/**
8292 * Fetches a system table word.
8293 *
8294 * @returns Strict VBox status code.
8295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8296 * @param pu16Dst Where to return the word.
8297 * @param iSegReg The index of the segment register to use for
8298 * this access. The base and limits are checked.
8299 * @param GCPtrMem The address of the guest memory.
8300 */
8301VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8302{
8303 /* The lazy approach for now... */
8304 uint16_t const *pu16Src;
8305 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8306 if (rc == VINF_SUCCESS)
8307 {
8308 *pu16Dst = *pu16Src;
8309 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8310 }
8311 return rc;
8312}
8313
8314
8315/**
8316 * Fetches a system table dword.
8317 *
8318 * @returns Strict VBox status code.
8319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8320 * @param pu32Dst Where to return the dword.
8321 * @param iSegReg The index of the segment register to use for
8322 * this access. The base and limits are checked.
8323 * @param GCPtrMem The address of the guest memory.
8324 */
8325VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8326{
8327 /* The lazy approach for now... */
8328 uint32_t const *pu32Src;
8329 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8330 if (rc == VINF_SUCCESS)
8331 {
8332 *pu32Dst = *pu32Src;
8333 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8334 }
8335 return rc;
8336}
8337
8338
8339/**
8340 * Fetches a system table qword.
8341 *
8342 * @returns Strict VBox status code.
8343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8344 * @param pu64Dst Where to return the qword.
8345 * @param iSegReg The index of the segment register to use for
8346 * this access. The base and limits are checked.
8347 * @param GCPtrMem The address of the guest memory.
8348 */
8349VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8350{
8351 /* The lazy approach for now... */
8352 uint64_t const *pu64Src;
8353 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8354 if (rc == VINF_SUCCESS)
8355 {
8356 *pu64Dst = *pu64Src;
8357 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8358 }
8359 return rc;
8360}
8361
8362
8363/**
8364 * Fetches a descriptor table entry with caller specified error code.
8365 *
8366 * @returns Strict VBox status code.
8367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8368 * @param pDesc Where to return the descriptor table entry.
8369 * @param uSel The selector which table entry to fetch.
8370 * @param uXcpt The exception to raise on table lookup error.
8371 * @param uErrorCode The error code associated with the exception.
8372 */
8373static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8374 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8375{
8376 AssertPtr(pDesc);
8377 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8378
8379 /** @todo did the 286 require all 8 bytes to be accessible? */
8380 /*
8381 * Get the selector table base and check bounds.
8382 */
8383 RTGCPTR GCPtrBase;
8384 if (uSel & X86_SEL_LDT)
8385 {
8386 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8387 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8388 {
8389 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8390 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8391 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8392 uErrorCode, 0);
8393 }
8394
8395 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8396 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8397 }
8398 else
8399 {
8400 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8401 {
8402 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8403 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8404 uErrorCode, 0);
8405 }
8406 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8407 }
8408
8409 /*
8410 * Read the legacy descriptor and maybe the long mode extensions if
8411 * required.
8412 */
8413 VBOXSTRICTRC rcStrict;
8414 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8415 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8416 else
8417 {
8418 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8419 if (rcStrict == VINF_SUCCESS)
8420 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8421 if (rcStrict == VINF_SUCCESS)
8422 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8423 if (rcStrict == VINF_SUCCESS)
8424 pDesc->Legacy.au16[3] = 0;
8425 else
8426 return rcStrict;
8427 }
8428
8429 if (rcStrict == VINF_SUCCESS)
8430 {
8431 if ( !IEM_IS_LONG_MODE(pVCpu)
8432 || pDesc->Legacy.Gen.u1DescType)
8433 pDesc->Long.au64[1] = 0;
8434 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8435 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8436 else
8437 {
8438 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8439 /** @todo is this the right exception? */
8440 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8441 }
8442 }
8443 return rcStrict;
8444}
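/*
 * Illustrative note (assumption, matching the checks above): the selector is
 * decomposed as follows when locating the descriptor entry:
 *
 *      uSel & X86_SEL_MASK     ->  byte offset into the GDT/LDT (descriptor index * 8)
 *      uSel & X86_SEL_LDT      ->  table indicator: set selects the LDT, clear the GDT
 *      uSel & X86_SEL_RPL      ->  requested privilege level (not used for the lookup itself)
 */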
8445
8446
8447/**
8448 * Fetches a descriptor table entry.
8449 *
8450 * @returns Strict VBox status code.
8451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8452 * @param pDesc Where to return the descriptor table entry.
8453 * @param uSel The selector which table entry to fetch.
8454 * @param uXcpt The exception to raise on table lookup error.
8455 */
8456VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8457{
8458 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8459}
8460
8461
8462/**
8463 * Marks the selector descriptor as accessed (only non-system descriptors).
8464 *
8465 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8466 * will therefore skip the limit checks.
8467 *
8468 * @returns Strict VBox status code.
8469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8470 * @param uSel The selector.
8471 */
8472VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8473{
8474 /*
8475 * Get the selector table base and calculate the entry address.
8476 */
8477 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8478 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8479 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8480 GCPtr += uSel & X86_SEL_MASK;
8481
8482 /*
8483 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8484 * ugly stuff to avoid this. This will make sure it's an atomic access
8485 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8486 */
8487 VBOXSTRICTRC rcStrict;
8488 uint32_t volatile *pu32;
8489 if ((GCPtr & 3) == 0)
8490 {
8491 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8492 GCPtr += 2 + 2;
8493 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8494 if (rcStrict != VINF_SUCCESS)
8495 return rcStrict;
8496 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8497 }
8498 else
8499 {
8500 /* The misaligned GDT/LDT case, map the whole thing. */
8501 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8502 if (rcStrict != VINF_SUCCESS)
8503 return rcStrict;
8504 switch ((uintptr_t)pu32 & 3)
8505 {
8506 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8507 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8508 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8509 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8510 }
8511 }
8512
8513 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8514}
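/*
 * Layout note (illustrative, consistent with the bit arithmetic above): the
 * accessed bit is bit 40 of the 64-bit legacy descriptor, i.e. bit 0 of the
 * type field in byte 5.  The aligned path maps bytes 4..7 and sets bit 8 of
 * that dword (8 = 40 - 32); the misaligned path maps the whole 8 bytes and
 * picks a byte pointer so the atomic bit-set operates on an in-range offset.
 */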
8515
8516/** @} */
8517
8518/** @name Opcode Helpers.
8519 * @{
8520 */
8521
8522/**
8523 * Calculates the effective address of a ModR/M memory operand.
8524 *
8525 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8526 *
8527 * @return Strict VBox status code.
8528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8529 * @param bRm The ModRM byte.
8530 * @param cbImm The size of any immediate following the
8531 * effective address opcode bytes. Important for
8532 * RIP relative addressing.
8533 * @param pGCPtrEff Where to return the effective address.
8534 */
8535VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8536{
8537 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8538# define SET_SS_DEF() \
8539 do \
8540 { \
8541 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8542 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8543 } while (0)
8544
8545 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8546 {
8547/** @todo Check the effective address size crap! */
8548 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8549 {
8550 uint16_t u16EffAddr;
8551
8552 /* Handle the disp16 form with no registers first. */
8553 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8554 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8555 else
8556 {
8557 /* Get the displacement. */
8558 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8559 {
8560 case 0: u16EffAddr = 0; break;
8561 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8562 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8563 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8564 }
8565
8566 /* Add the base and index registers to the disp. */
8567 switch (bRm & X86_MODRM_RM_MASK)
8568 {
8569 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8570 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8571 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8572 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8573 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8574 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8575 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8576 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8577 }
8578 }
8579
8580 *pGCPtrEff = u16EffAddr;
8581 }
8582 else
8583 {
8584 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8585 uint32_t u32EffAddr;
8586
8587 /* Handle the disp32 form with no registers first. */
8588 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8589 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8590 else
8591 {
8592 /* Get the register (or SIB) value. */
8593 switch ((bRm & X86_MODRM_RM_MASK))
8594 {
8595 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8596 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8597 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8598 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8599 case 4: /* SIB */
8600 {
8601 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8602
8603 /* Get the index and scale it. */
8604 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8605 {
8606 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8607 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8608 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8609 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8610 case 4: u32EffAddr = 0; /*none */ break;
8611 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8612 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8613 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8614 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8615 }
8616 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8617
8618 /* add base */
8619 switch (bSib & X86_SIB_BASE_MASK)
8620 {
8621 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8622 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8623 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8624 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8625 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8626 case 5:
8627 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8628 {
8629 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8630 SET_SS_DEF();
8631 }
8632 else
8633 {
8634 uint32_t u32Disp;
8635 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8636 u32EffAddr += u32Disp;
8637 }
8638 break;
8639 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8640 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8641 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8642 }
8643 break;
8644 }
8645 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8646 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8647 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8648 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8649 }
8650
8651 /* Get and add the displacement. */
8652 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8653 {
8654 case 0:
8655 break;
8656 case 1:
8657 {
8658 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8659 u32EffAddr += i8Disp;
8660 break;
8661 }
8662 case 2:
8663 {
8664 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8665 u32EffAddr += u32Disp;
8666 break;
8667 }
8668 default:
8669 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8670 }
8671
8672 }
8673 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8674 *pGCPtrEff = u32EffAddr;
8675 else
8676 {
8677 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8678 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8679 }
8680 }
8681 }
8682 else
8683 {
8684 uint64_t u64EffAddr;
8685
8686 /* Handle the rip+disp32 form with no registers first. */
8687 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8688 {
8689 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8690 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8691 }
8692 else
8693 {
8694 /* Get the register (or SIB) value. */
8695 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8696 {
8697 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8698 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8699 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8700 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8701 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8702 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8703 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8704 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8705 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8706 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8707 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8708 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8709 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8710 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8711 /* SIB */
8712 case 4:
8713 case 12:
8714 {
8715 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8716
8717 /* Get the index and scale it. */
8718 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8719 {
8720 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8721 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8722 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8723 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8724 case 4: u64EffAddr = 0; /*none */ break;
8725 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8726 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8727 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8728 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8729 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8730 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8731 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8732 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8733 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8734 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8735 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8736 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8737 }
8738 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8739
8740 /* add base */
8741 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8742 {
8743 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8744 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8745 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8746 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8747 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8748 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8749 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8750 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8751 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8752 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8753 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8754 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8755 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8756 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8757 /* complicated encodings */
8758 case 5:
8759 case 13:
8760 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8761 {
8762 if (!pVCpu->iem.s.uRexB)
8763 {
8764 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8765 SET_SS_DEF();
8766 }
8767 else
8768 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8769 }
8770 else
8771 {
8772 uint32_t u32Disp;
8773 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8774 u64EffAddr += (int32_t)u32Disp;
8775 }
8776 break;
8777 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8778 }
8779 break;
8780 }
8781 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8782 }
8783
8784 /* Get and add the displacement. */
8785 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8786 {
8787 case 0:
8788 break;
8789 case 1:
8790 {
8791 int8_t i8Disp;
8792 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8793 u64EffAddr += i8Disp;
8794 break;
8795 }
8796 case 2:
8797 {
8798 uint32_t u32Disp;
8799 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8800 u64EffAddr += (int32_t)u32Disp;
8801 break;
8802 }
8803 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8804 }
8805
8806 }
8807
8808 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8809 *pGCPtrEff = u64EffAddr;
8810 else
8811 {
8812 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8813 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8814 }
8815 }
8816
8817 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8818 return VINF_SUCCESS;
8819}
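/*
 * Worked example (illustrative, not taken from a specific caller): in 32-bit
 * mode with bRm = 0x44 (mod=01, reg=000, rm=100) the helper fetches a SIB byte
 * and a disp8.  For bSib = 0x24 (index=100 i.e. none, base=100 i.e. ESP) and a
 * disp8 of 0x10, the code above produces:
 *
 *      *pGCPtrEff = pVCpu->cpum.GstCtx.esp + 0x10;   // SS is the default segment here
 */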
8820
8821
8822/**
8823 * Calculates the effective address of a ModR/M memory operand.
8824 *
8825 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8826 *
8827 * @return Strict VBox status code.
8828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8829 * @param bRm The ModRM byte.
8830 * @param cbImm The size of any immediate following the
8831 * effective address opcode bytes. Important for
8832 * RIP relative addressing.
8833 * @param pGCPtrEff Where to return the effective address.
8834 * @param offRsp RSP displacement.
8835 */
8836VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8837{
8838 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8839# define SET_SS_DEF() \
8840 do \
8841 { \
8842 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8843 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8844 } while (0)
8845
8846 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8847 {
8848/** @todo Check the effective address size crap! */
8849 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8850 {
8851 uint16_t u16EffAddr;
8852
8853 /* Handle the disp16 form with no registers first. */
8854 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8855 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8856 else
8857 {
8858 /* Get the displacement. */
8859 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8860 {
8861 case 0: u16EffAddr = 0; break;
8862 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8863 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8864 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8865 }
8866
8867 /* Add the base and index registers to the disp. */
8868 switch (bRm & X86_MODRM_RM_MASK)
8869 {
8870 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8871 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8872 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8873 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8874 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8875 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8876 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8877 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8878 }
8879 }
8880
8881 *pGCPtrEff = u16EffAddr;
8882 }
8883 else
8884 {
8885 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8886 uint32_t u32EffAddr;
8887
8888 /* Handle the disp32 form with no registers first. */
8889 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8890 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8891 else
8892 {
8893 /* Get the register (or SIB) value. */
8894 switch ((bRm & X86_MODRM_RM_MASK))
8895 {
8896 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8897 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8898 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8899 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8900 case 4: /* SIB */
8901 {
8902 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8903
8904 /* Get the index and scale it. */
8905 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8906 {
8907 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8908 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8909 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8910 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8911 case 4: u32EffAddr = 0; /*none */ break;
8912 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8913 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8914 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8915 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8916 }
8917 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8918
8919 /* add base */
8920 switch (bSib & X86_SIB_BASE_MASK)
8921 {
8922 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8923 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8924 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8925 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8926 case 4:
8927 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
8928 SET_SS_DEF();
8929 break;
8930 case 5:
8931 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8932 {
8933 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8934 SET_SS_DEF();
8935 }
8936 else
8937 {
8938 uint32_t u32Disp;
8939 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8940 u32EffAddr += u32Disp;
8941 }
8942 break;
8943 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8944 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8945 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8946 }
8947 break;
8948 }
8949 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8950 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8951 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8952 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8953 }
8954
8955 /* Get and add the displacement. */
8956 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8957 {
8958 case 0:
8959 break;
8960 case 1:
8961 {
8962 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8963 u32EffAddr += i8Disp;
8964 break;
8965 }
8966 case 2:
8967 {
8968 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8969 u32EffAddr += u32Disp;
8970 break;
8971 }
8972 default:
8973 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8974 }
8975
8976 }
8977 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8978 *pGCPtrEff = u32EffAddr;
8979 else
8980 {
8981 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8982 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8983 }
8984 }
8985 }
8986 else
8987 {
8988 uint64_t u64EffAddr;
8989
8990 /* Handle the rip+disp32 form with no registers first. */
8991 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8992 {
8993 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8994 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
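            /* Note: RIP-relative displacements are relative to the end of the
               instruction, so the full instruction length is added here, with
               cbImm covering any immediate bytes that follow the addressing
               bytes (see the cbImm parameter description). */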
8995 }
8996 else
8997 {
8998 /* Get the register (or SIB) value. */
8999 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9000 {
9001 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9002 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9003 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9004 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9005 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9006 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9007 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9008 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9009 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9010 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9011 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9012 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9013 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9014 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9015 /* SIB */
9016 case 4:
9017 case 12:
9018 {
9019 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9020
9021 /* Get the index and scale it. */
9022 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9023 {
9024 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9025 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9026 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9027 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9028 case 4: u64EffAddr = 0; /*none */ break;
9029 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9030 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9031 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9032 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9033 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9034 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9035 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9036 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9037 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9038 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9039 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9040 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9041 }
9042 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9043
9044 /* add base */
9045 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9046 {
9047 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9048 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9049 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9050 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9051 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
9052 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9053 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9054 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9055 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9056 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9057 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9058 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9059 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9060 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9061 /* complicated encodings */
9062 case 5:
9063 case 13:
9064 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9065 {
9066 if (!pVCpu->iem.s.uRexB)
9067 {
9068 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9069 SET_SS_DEF();
9070 }
9071 else
9072 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9073 }
9074 else
9075 {
9076 uint32_t u32Disp;
9077 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9078 u64EffAddr += (int32_t)u32Disp;
9079 }
9080 break;
9081 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9082 }
9083 break;
9084 }
9085 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9086 }
9087
9088 /* Get and add the displacement. */
9089 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9090 {
9091 case 0:
9092 break;
9093 case 1:
9094 {
9095 int8_t i8Disp;
9096 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9097 u64EffAddr += i8Disp;
9098 break;
9099 }
9100 case 2:
9101 {
9102 uint32_t u32Disp;
9103 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9104 u64EffAddr += (int32_t)u32Disp;
9105 break;
9106 }
9107 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9108 }
9109
9110 }
9111
9112 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9113 *pGCPtrEff = u64EffAddr;
9114 else
9115 {
9116 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9117 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9118 }
9119 }
9120
9121 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9122 return VINF_SUCCESS;
9123}
9124
9125
9126#ifdef IEM_WITH_SETJMP
9127/**
9128 * Calculates the effective address of a ModR/M memory operand.
9129 *
9130 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9131 *
9132 * May longjmp on internal error.
9133 *
9134 * @return The effective address.
9135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9136 * @param bRm The ModRM byte.
9137 * @param cbImm The size of any immediate following the
9138 * effective address opcode bytes. Important for
9139 * RIP relative addressing.
9140 */
9141RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
9142{
9143 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9144# define SET_SS_DEF() \
9145 do \
9146 { \
9147 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9148 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9149 } while (0)
9150
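    /* Illustrative example (editorial addition, not from the original source):
       in 32-bit addressing, bRm=0x84 has mod=10b and rm=100b, i.e. the SIB form
       with a disp32; with bSib=0x48 (scale=1 -> x2, index=ECX, base=EAX) the
       code below yields EffAddr = EAX + ECX*2 + disp32. */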
9151 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9152 {
9153/** @todo Check the effective address size crap! */
9154 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9155 {
9156 uint16_t u16EffAddr;
9157
9158 /* Handle the disp16 form with no registers first. */
9159 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9160 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9161 else
9162 {
9163                /* Get the displacement. */
9164 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9165 {
9166 case 0: u16EffAddr = 0; break;
9167 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9168 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9169 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9170 }
9171
9172 /* Add the base and index registers to the disp. */
9173 switch (bRm & X86_MODRM_RM_MASK)
9174 {
9175 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9176 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9177 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9178 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9179 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9180 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9181 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9182 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9183 }
9184 }
9185
9186 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9187 return u16EffAddr;
9188 }
9189
9190 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9191 uint32_t u32EffAddr;
9192
9193 /* Handle the disp32 form with no registers first. */
9194 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9195 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9196 else
9197 {
9198 /* Get the register (or SIB) value. */
9199 switch ((bRm & X86_MODRM_RM_MASK))
9200 {
9201 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9202 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9203 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9204 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9205 case 4: /* SIB */
9206 {
9207 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9208
9209 /* Get the index and scale it. */
9210 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9211 {
9212 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9213 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9214 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9215 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9216 case 4: u32EffAddr = 0; /*none */ break;
9217 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9218 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9219 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9220 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9221 }
9222 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9223
9224 /* add base */
9225 switch (bSib & X86_SIB_BASE_MASK)
9226 {
9227 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9228 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9229 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9230 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9231 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9232 case 5:
9233 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9234 {
9235 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9236 SET_SS_DEF();
9237 }
9238 else
9239 {
9240 uint32_t u32Disp;
9241 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9242 u32EffAddr += u32Disp;
9243 }
9244 break;
9245 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9246 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9247 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9248 }
9249 break;
9250 }
9251 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9252 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9253 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9254 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9255 }
9256
9257 /* Get and add the displacement. */
9258 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9259 {
9260 case 0:
9261 break;
9262 case 1:
9263 {
9264 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9265 u32EffAddr += i8Disp;
9266 break;
9267 }
9268 case 2:
9269 {
9270 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9271 u32EffAddr += u32Disp;
9272 break;
9273 }
9274 default:
9275 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9276 }
9277 }
9278
9279 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9280 {
9281 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9282 return u32EffAddr;
9283 }
9284 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9285 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9286 return u32EffAddr & UINT16_MAX;
9287 }
9288
9289 uint64_t u64EffAddr;
9290
9291 /* Handle the rip+disp32 form with no registers first. */
9292 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9293 {
9294 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9295 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9296 }
9297 else
9298 {
9299 /* Get the register (or SIB) value. */
9300 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9301 {
9302 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9303 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9304 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9305 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9306 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9307 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9308 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9309 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9310 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9311 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9312 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9313 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9314 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9315 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9316 /* SIB */
9317 case 4:
9318 case 12:
9319 {
9320 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9321
9322 /* Get the index and scale it. */
9323 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9324 {
9325 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9326 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9327 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9328 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9329 case 4: u64EffAddr = 0; /*none */ break;
9330 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9331 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9332 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9333 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9334 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9335 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9336 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9337 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9338 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9339 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9340 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9341 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9342 }
9343 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9344
9345 /* add base */
9346 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9347 {
9348 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9349 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9350 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9351 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9352 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9353 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9354 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9355 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9356 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9357 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9358 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9359 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9360 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9361 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9362 /* complicated encodings */
9363 case 5:
9364 case 13:
9365 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9366 {
9367 if (!pVCpu->iem.s.uRexB)
9368 {
9369 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9370 SET_SS_DEF();
9371 }
9372 else
9373 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9374 }
9375 else
9376 {
9377 uint32_t u32Disp;
9378 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9379 u64EffAddr += (int32_t)u32Disp;
9380 }
9381 break;
9382 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9383 }
9384 break;
9385 }
9386 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9387 }
9388
9389 /* Get and add the displacement. */
9390 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9391 {
9392 case 0:
9393 break;
9394 case 1:
9395 {
9396 int8_t i8Disp;
9397 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9398 u64EffAddr += i8Disp;
9399 break;
9400 }
9401 case 2:
9402 {
9403 uint32_t u32Disp;
9404 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9405 u64EffAddr += (int32_t)u32Disp;
9406 break;
9407 }
9408 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9409 }
9410
9411 }
9412
9413 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9414 {
9415 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9416 return u64EffAddr;
9417 }
9418 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9419 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9420 return u64EffAddr & UINT32_MAX;
9421}
9422#endif /* IEM_WITH_SETJMP */
9423
9424/** @} */
9425
9426
9427#ifdef LOG_ENABLED
9428/**
9429 * Logs the current instruction.
9430 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9431 * @param fSameCtx Set if we have the same context information as the VMM,
9432 * clear if we may have already executed an instruction in
9433 * our debug context. When clear, we assume IEMCPU holds
9434 * valid CPU mode info.
9435 *
9436 * The @a fSameCtx parameter is now misleading and obsolete.
9437 * @param pszFunction The IEM function doing the execution.
9438 */
9439static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9440{
9441# ifdef IN_RING3
9442 if (LogIs2Enabled())
9443 {
9444 char szInstr[256];
9445 uint32_t cbInstr = 0;
9446 if (fSameCtx)
9447 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9448 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9449 szInstr, sizeof(szInstr), &cbInstr);
9450 else
9451 {
9452 uint32_t fFlags = 0;
9453 switch (pVCpu->iem.s.enmCpuMode)
9454 {
9455 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9456 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9457 case IEMMODE_16BIT:
9458 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9459 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9460 else
9461 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9462 break;
9463 }
9464 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9465 szInstr, sizeof(szInstr), &cbInstr);
9466 }
9467
9468 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9469 Log2(("**** %s\n"
9470 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9471 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9472 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9473 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9474 " %s\n"
9475 , pszFunction,
9476 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9477 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9478 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9479 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9480 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9481 szInstr));
9482
9483 if (LogIs3Enabled())
9484 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9485 }
9486 else
9487# endif
9488 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9489 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9490 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9491}
9492#endif /* LOG_ENABLED */
9493
9494
9495#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9496/**
9497 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9498 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9499 *
9500 * @returns Modified rcStrict.
9501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9502 * @param rcStrict The instruction execution status.
9503 */
9504static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9505{
9506 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
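    /* Note: the branches below order the pending causes as APIC-write
       emulation first, then MTF, then the VMX-preemption timer, then
       NMI-window and finally interrupt-window VM-exits. */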
9507 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9508 {
9509 /* VMX preemption timer takes priority over NMI-window exits. */
9510 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9511 {
9512 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9513 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9514 }
9515 /*
9516 * Check remaining intercepts.
9517 *
9518 * NMI-window and Interrupt-window VM-exits.
9519 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9520 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9521 *
9522 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9523 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9524 */
9525 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9526 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9527 && !TRPMHasTrap(pVCpu))
9528 {
9529 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9530 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9531 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9532 {
9533 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9534 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9535 }
9536 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9537 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9538 {
9539 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9540 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9541 }
9542 }
9543 }
9544 /* TPR-below threshold/APIC write has the highest priority. */
9545 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9546 {
9547 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9548 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9549 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9550 }
9551 /* MTF takes priority over VMX-preemption timer. */
9552 else
9553 {
9554 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9555 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9556 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9557 }
9558 return rcStrict;
9559}
9560#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9561
9562
9563/**
9564 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9565 * IEMExecOneWithPrefetchedByPC.
9566 *
9567 * Similar code is found in IEMExecLots.
9568 *
9569 * @return Strict VBox status code.
9570 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9571 * @param fExecuteInhibit If set, execute the instruction following CLI,
9572 * POP SS and MOV SS,GR.
9573 * @param pszFunction The calling function name.
9574 */
9575DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9576{
9577 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9578 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9579 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9580 RT_NOREF_PV(pszFunction);
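    /* Note: the code below decodes and runs exactly one instruction via
       g_apfnOneByteMap, rolls back any still-active memory mappings on failure
       and, when fExecuteInhibit is set, runs one more instruction if an
       interrupt inhibiting instruction (cli, pop ss, mov ss) just completed. */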
9581
9582#ifdef IEM_WITH_SETJMP
9583 VBOXSTRICTRC rcStrict;
9584 jmp_buf JmpBuf;
9585 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9586 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9587 if ((rcStrict = setjmp(JmpBuf)) == 0)
9588 {
9589 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9590 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9591 }
9592 else
9593 pVCpu->iem.s.cLongJumps++;
9594 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9595#else
9596 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9597 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9598#endif
9599 if (rcStrict == VINF_SUCCESS)
9600 pVCpu->iem.s.cInstructions++;
9601 if (pVCpu->iem.s.cActiveMappings > 0)
9602 {
9603 Assert(rcStrict != VINF_SUCCESS);
9604 iemMemRollback(pVCpu);
9605 }
9606 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9607 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9608 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9609
9610//#ifdef DEBUG
9611// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9612//#endif
9613
9614#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9615 /*
9616 * Perform any VMX nested-guest instruction boundary actions.
9617 *
9618 * If any of these causes a VM-exit, we must skip executing the next
9619 * instruction (would run into stale page tables). A VM-exit makes sure
9620 * there is no interrupt-inhibition, so that should ensure we don't go
9621 * on to try executing the next instruction.  Clearing fExecuteInhibit is
9622 * problematic because of the setjmp/longjmp clobbering above.
9623 */
9624 if ( rcStrict == VINF_SUCCESS
9625 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9626 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9627 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
9628#endif
9629
9630 /* Execute the next instruction as well if a cli, pop ss or
9631 mov ss, Gr has just completed successfully. */
9632 if ( fExecuteInhibit
9633 && rcStrict == VINF_SUCCESS
9634 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9635 && EMIsInhibitInterruptsActive(pVCpu))
9636 {
9637 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9638 if (rcStrict == VINF_SUCCESS)
9639 {
9640#ifdef LOG_ENABLED
9641 iemLogCurInstr(pVCpu, false, pszFunction);
9642#endif
9643#ifdef IEM_WITH_SETJMP
9644 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9645 if ((rcStrict = setjmp(JmpBuf)) == 0)
9646 {
9647 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9648 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9649 }
9650 else
9651 pVCpu->iem.s.cLongJumps++;
9652 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9653#else
9654 IEM_OPCODE_GET_NEXT_U8(&b);
9655 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9656#endif
9657 if (rcStrict == VINF_SUCCESS)
9658 pVCpu->iem.s.cInstructions++;
9659 if (pVCpu->iem.s.cActiveMappings > 0)
9660 {
9661 Assert(rcStrict != VINF_SUCCESS);
9662 iemMemRollback(pVCpu);
9663 }
9664 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9665 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9666 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9667 }
9668 else if (pVCpu->iem.s.cActiveMappings > 0)
9669 iemMemRollback(pVCpu);
9670 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9671 }
9672
9673 /*
9674 * Return value fiddling, statistics and sanity assertions.
9675 */
9676 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9677
9678 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9679 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9680 return rcStrict;
9681}
9682
9683
9684/**
9685 * Execute one instruction.
9686 *
9687 * @return Strict VBox status code.
9688 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9689 */
9690VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9691{
9692    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9693#ifdef LOG_ENABLED
9694 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9695#endif
9696
9697 /*
9698 * Do the decoding and emulation.
9699 */
9700 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9701 if (rcStrict == VINF_SUCCESS)
9702 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9703 else if (pVCpu->iem.s.cActiveMappings > 0)
9704 iemMemRollback(pVCpu);
9705
9706 if (rcStrict != VINF_SUCCESS)
9707 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9708 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9709 return rcStrict;
9710}
9711
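/* Illustrative usage sketch (editorial addition, not from the original source):
   a caller running on the EMT would typically just do
       VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
   and feed any non-VINF_SUCCESS strict status code back into its own dispatcher;
   interpreting that status is entirely the caller's business. */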
9712
9713VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9714{
9715 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9716
9717 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9718 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9719 if (rcStrict == VINF_SUCCESS)
9720 {
9721 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9722 if (pcbWritten)
9723 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9724 }
9725 else if (pVCpu->iem.s.cActiveMappings > 0)
9726 iemMemRollback(pVCpu);
9727
9728 return rcStrict;
9729}
9730
9731
9732VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9733 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9734{
9735 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9736
9737 VBOXSTRICTRC rcStrict;
9738 if ( cbOpcodeBytes
9739 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9740 {
9741 iemInitDecoder(pVCpu, false, false);
9742#ifdef IEM_WITH_CODE_TLB
9743 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9744 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9745 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9746 pVCpu->iem.s.offCurInstrStart = 0;
9747 pVCpu->iem.s.offInstrNextByte = 0;
9748#else
9749 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9750 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9751#endif
9752 rcStrict = VINF_SUCCESS;
9753 }
9754 else
9755 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9756 if (rcStrict == VINF_SUCCESS)
9757 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9758 else if (pVCpu->iem.s.cActiveMappings > 0)
9759 iemMemRollback(pVCpu);
9760
9761 return rcStrict;
9762}
9763
9764
9765VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9766{
9767 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9768
9769 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9770 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9771 if (rcStrict == VINF_SUCCESS)
9772 {
9773 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9774 if (pcbWritten)
9775 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9776 }
9777 else if (pVCpu->iem.s.cActiveMappings > 0)
9778 iemMemRollback(pVCpu);
9779
9780 return rcStrict;
9781}
9782
9783
9784VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9785 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9786{
9787 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9788
9789 VBOXSTRICTRC rcStrict;
9790 if ( cbOpcodeBytes
9791 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9792 {
9793 iemInitDecoder(pVCpu, true, false);
9794#ifdef IEM_WITH_CODE_TLB
9795 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9796 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9797 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9798 pVCpu->iem.s.offCurInstrStart = 0;
9799 pVCpu->iem.s.offInstrNextByte = 0;
9800#else
9801 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9802 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9803#endif
9804 rcStrict = VINF_SUCCESS;
9805 }
9806 else
9807 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9808 if (rcStrict == VINF_SUCCESS)
9809 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9810 else if (pVCpu->iem.s.cActiveMappings > 0)
9811 iemMemRollback(pVCpu);
9812
9813 return rcStrict;
9814}
9815
9816
9817/**
9818 * For debugging DISGetParamSize; may come in handy.
9819 *
9820 * @returns Strict VBox status code.
9821 * @param pVCpu The cross context virtual CPU structure of the
9822 * calling EMT.
9823 * @param pCtxCore The context core structure.
9824 * @param OpcodeBytesPC The PC of the opcode bytes.
9825 * @param   pvOpcodeBytes   Prefetched opcode bytes.
9826 * @param cbOpcodeBytes Number of prefetched bytes.
9827 * @param pcbWritten Where to return the number of bytes written.
9828 * Optional.
9829 */
9830VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9831 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9832 uint32_t *pcbWritten)
9833{
9834 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9835
9836 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9837 VBOXSTRICTRC rcStrict;
9838 if ( cbOpcodeBytes
9839 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9840 {
9841 iemInitDecoder(pVCpu, true, false);
9842#ifdef IEM_WITH_CODE_TLB
9843 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9844 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9845 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9846 pVCpu->iem.s.offCurInstrStart = 0;
9847 pVCpu->iem.s.offInstrNextByte = 0;
9848#else
9849 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9850 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9851#endif
9852 rcStrict = VINF_SUCCESS;
9853 }
9854 else
9855 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9856 if (rcStrict == VINF_SUCCESS)
9857 {
9858 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9859 if (pcbWritten)
9860 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9861 }
9862 else if (pVCpu->iem.s.cActiveMappings > 0)
9863 iemMemRollback(pVCpu);
9864
9865 return rcStrict;
9866}
9867
9868
9869/**
9870 * For handling split cacheline lock operations when the host has split-lock
9871 * detection enabled.
9872 *
9873 * This will cause the interpreter to disregard the lock prefix and implicit
9874 * locking (xchg).
9875 *
9876 * @returns Strict VBox status code.
9877 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9878 */
9879VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9880{
9881 /*
9882 * Do the decoding and emulation.
9883 */
9884 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9885 if (rcStrict == VINF_SUCCESS)
9886 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9887 else if (pVCpu->iem.s.cActiveMappings > 0)
9888 iemMemRollback(pVCpu);
9889
9890 if (rcStrict != VINF_SUCCESS)
9891 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9892 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9893 return rcStrict;
9894}
9895
9896
9897VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9898{
9899 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9900 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
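    /* Note: cPollRate must be of the form 2^n - 1 since it is used further down
       as a mask ((cMaxInstructionsGccStupidity & cPollRate) != 0) to throttle
       the TMTimerPollBool calls. */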
9901
9902 /*
9903 * See if there is an interrupt pending in TRPM, inject it if we can.
9904 */
9905 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9906#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9907 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9908 if (fIntrEnabled)
9909 {
9910 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9911 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9912 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9913 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9914 else
9915 {
9916 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9917 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9918 }
9919 }
9920#else
9921 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9922#endif
9923
9924 /** @todo What if we are injecting an exception and not an interrupt? Is that
9925 * possible here? For now we assert it is indeed only an interrupt. */
9926 if ( fIntrEnabled
9927 && TRPMHasTrap(pVCpu)
9928 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
9929 {
9930 uint8_t u8TrapNo;
9931 TRPMEVENT enmType;
9932 uint32_t uErrCode;
9933 RTGCPTR uCr2;
9934 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
9935 AssertRC(rc2);
9936 Assert(enmType == TRPM_HARDWARE_INT);
9937 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9938 TRPMResetTrap(pVCpu);
9939#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9940 /* Injecting an event may cause a VM-exit. */
9941 if ( rcStrict != VINF_SUCCESS
9942 && rcStrict != VINF_IEM_RAISED_XCPT)
9943 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9944#else
9945 NOREF(rcStrict);
9946#endif
9947 }
9948
9949 /*
9950 * Initial decoder init w/ prefetch, then setup setjmp.
9951 */
9952 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9953 if (rcStrict == VINF_SUCCESS)
9954 {
9955#ifdef IEM_WITH_SETJMP
9956 jmp_buf JmpBuf;
9957 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9958 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9959 pVCpu->iem.s.cActiveMappings = 0;
9960 if ((rcStrict = setjmp(JmpBuf)) == 0)
9961#endif
9962 {
9963 /*
9964             * The run loop.  We limit ourselves to the caller-specified maximum (cMaxInstructions).
9965 */
9966 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9967 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9968 for (;;)
9969 {
9970 /*
9971 * Log the state.
9972 */
9973#ifdef LOG_ENABLED
9974 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9975#endif
9976
9977 /*
9978 * Do the decoding and emulation.
9979 */
9980 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9981 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9982 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9983 {
9984 Assert(pVCpu->iem.s.cActiveMappings == 0);
9985 pVCpu->iem.s.cInstructions++;
9986 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9987 {
9988 uint64_t fCpu = pVCpu->fLocalForcedActions
9989 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9990 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9991 | VMCPU_FF_TLB_FLUSH
9992 | VMCPU_FF_INHIBIT_INTERRUPTS
9993 | VMCPU_FF_BLOCK_NMIS
9994 | VMCPU_FF_UNHALT ));
9995
9996 if (RT_LIKELY( ( !fCpu
9997 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9998 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9999 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10000 {
10001 if (cMaxInstructionsGccStupidity-- > 0)
10002 {
10003                            /* Poll timers every now and then according to the caller's specs. */
10004 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10005 || !TMTimerPollBool(pVM, pVCpu))
10006 {
10007 Assert(pVCpu->iem.s.cActiveMappings == 0);
10008 iemReInitDecoder(pVCpu);
10009 continue;
10010 }
10011 }
10012 }
10013 }
10014 Assert(pVCpu->iem.s.cActiveMappings == 0);
10015 }
10016 else if (pVCpu->iem.s.cActiveMappings > 0)
10017 iemMemRollback(pVCpu);
10018 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10019 break;
10020 }
10021 }
10022#ifdef IEM_WITH_SETJMP
10023 else
10024 {
10025 if (pVCpu->iem.s.cActiveMappings > 0)
10026 iemMemRollback(pVCpu);
10027# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10028 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10029# endif
10030 pVCpu->iem.s.cLongJumps++;
10031 }
10032 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10033#endif
10034
10035 /*
10036 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10037 */
10038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10040 }
10041 else
10042 {
10043 if (pVCpu->iem.s.cActiveMappings > 0)
10044 iemMemRollback(pVCpu);
10045
10046#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10047 /*
10048 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10049 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10050 */
10051 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10052#endif
10053 }
10054
10055 /*
10056 * Maybe re-enter raw-mode and log.
10057 */
10058 if (rcStrict != VINF_SUCCESS)
10059 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10060 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10061 if (pcInstructions)
10062 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10063 return rcStrict;
10064}
10065
10066
10067/**
10068 * Interface used by EMExecuteExec, does exit statistics and limits.
10069 *
10070 * @returns Strict VBox status code.
10071 * @param pVCpu The cross context virtual CPU structure.
10072 * @param fWillExit To be defined.
10073 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10074 * @param cMaxInstructions Maximum number of instructions to execute.
10075 * @param cMaxInstructionsWithoutExits
10076 * The max number of instructions without exits.
10077 * @param pStats Where to return statistics.
10078 */
10079VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10080 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10081{
10082 NOREF(fWillExit); /** @todo define flexible exit crits */
10083
10084 /*
10085 * Initialize return stats.
10086 */
10087 pStats->cInstructions = 0;
10088 pStats->cExits = 0;
10089 pStats->cMaxExitDistance = 0;
10090 pStats->cReserved = 0;
10091
10092 /*
10093 * Initial decoder init w/ prefetch, then setup setjmp.
10094 */
10095 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10096 if (rcStrict == VINF_SUCCESS)
10097 {
10098#ifdef IEM_WITH_SETJMP
10099 jmp_buf JmpBuf;
10100 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10101 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10102 pVCpu->iem.s.cActiveMappings = 0;
10103 if ((rcStrict = setjmp(JmpBuf)) == 0)
10104#endif
10105 {
10106#ifdef IN_RING0
10107 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10108#endif
10109 uint32_t cInstructionSinceLastExit = 0;
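            /* Note: an "exit" is counted below whenever pVCpu->iem.s.cPotentialExits
               changes across an instruction; the longest run of instructions between
               two exits is reported as cMaxExitDistance. */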
10110
10111 /*
10112             * The run loop.  We limit ourselves to the caller-specified instruction limits.
10113 */
10114 PVM pVM = pVCpu->CTX_SUFF(pVM);
10115 for (;;)
10116 {
10117 /*
10118 * Log the state.
10119 */
10120#ifdef LOG_ENABLED
10121 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10122#endif
10123
10124 /*
10125 * Do the decoding and emulation.
10126 */
10127 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10128
10129 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10130 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10131
10132 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10133 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10134 {
10135 pStats->cExits += 1;
10136 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10137 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10138 cInstructionSinceLastExit = 0;
10139 }
10140
10141 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10142 {
10143 Assert(pVCpu->iem.s.cActiveMappings == 0);
10144 pVCpu->iem.s.cInstructions++;
10145 pStats->cInstructions++;
10146 cInstructionSinceLastExit++;
10147 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10148 {
10149 uint64_t fCpu = pVCpu->fLocalForcedActions
10150 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10151 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10152 | VMCPU_FF_TLB_FLUSH
10153 | VMCPU_FF_INHIBIT_INTERRUPTS
10154 | VMCPU_FF_BLOCK_NMIS
10155 | VMCPU_FF_UNHALT ));
10156
10157 if (RT_LIKELY( ( ( !fCpu
10158 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10159 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10160 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10161 || pStats->cInstructions < cMinInstructions))
10162 {
10163 if (pStats->cInstructions < cMaxInstructions)
10164 {
10165 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10166 {
10167#ifdef IN_RING0
10168 if ( !fCheckPreemptionPending
10169 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10170#endif
10171 {
10172 Assert(pVCpu->iem.s.cActiveMappings == 0);
10173 iemReInitDecoder(pVCpu);
10174 continue;
10175 }
10176#ifdef IN_RING0
10177 rcStrict = VINF_EM_RAW_INTERRUPT;
10178 break;
10179#endif
10180 }
10181 }
10182 }
10183 Assert(!(fCpu & VMCPU_FF_IEM));
10184 }
10185 Assert(pVCpu->iem.s.cActiveMappings == 0);
10186 }
10187 else if (pVCpu->iem.s.cActiveMappings > 0)
10188 iemMemRollback(pVCpu);
10189 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10190 break;
10191 }
10192 }
10193#ifdef IEM_WITH_SETJMP
10194 else
10195 {
10196 if (pVCpu->iem.s.cActiveMappings > 0)
10197 iemMemRollback(pVCpu);
10198 pVCpu->iem.s.cLongJumps++;
10199 }
10200 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10201#endif
10202
10203 /*
10204 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10205 */
10206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10208 }
10209 else
10210 {
10211 if (pVCpu->iem.s.cActiveMappings > 0)
10212 iemMemRollback(pVCpu);
10213
10214#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10215 /*
10216 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10217 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10218 */
10219 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10220#endif
10221 }
10222
10223 /*
10224 * Maybe re-enter raw-mode and log.
10225 */
10226 if (rcStrict != VINF_SUCCESS)
10227 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10228 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10229 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10230 return rcStrict;
10231}
10232
10233
10234/**
10235 * Injects a trap, fault, abort, software interrupt or external interrupt.
10236 *
10237 * The parameter list matches TRPMQueryTrapAll pretty closely.
10238 *
10239 * @returns Strict VBox status code.
10240 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10241 * @param u8TrapNo The trap number.
10242 * @param enmType What type is it (trap/fault/abort), software
10243 * interrupt or hardware interrupt.
10244 * @param uErrCode The error code if applicable.
10245 * @param uCr2 The CR2 value if applicable.
10246 * @param cbInstr The instruction length (only relevant for
10247 * software interrupts).
10248 */
10249VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10250 uint8_t cbInstr)
10251{
10252 iemInitDecoder(pVCpu, false, false);
10253#ifdef DBGFTRACE_ENABLED
10254 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10255 u8TrapNo, enmType, uErrCode, uCr2);
10256#endif
10257
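    /* Illustrative example (editorial addition, not from the original source):
       injecting external interrupt vector 0x41 would look like
           IEMInjectTrap(pVCpu, 0x41, TRPM_HARDWARE_INT, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
       for TRPM_HARDWARE_INT the error code and CR2 are ignored (forced to zero
       below) and cbInstr only matters for software interrupts. */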
10258 uint32_t fFlags;
10259 switch (enmType)
10260 {
10261 case TRPM_HARDWARE_INT:
10262 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10263 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10264 uErrCode = uCr2 = 0;
10265 break;
10266
10267 case TRPM_SOFTWARE_INT:
10268 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10269 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10270 uErrCode = uCr2 = 0;
10271 break;
10272
10273 case TRPM_TRAP:
10274 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10275 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10276 if (u8TrapNo == X86_XCPT_PF)
10277 fFlags |= IEM_XCPT_FLAGS_CR2;
10278 switch (u8TrapNo)
10279 {
10280 case X86_XCPT_DF:
10281 case X86_XCPT_TS:
10282 case X86_XCPT_NP:
10283 case X86_XCPT_SS:
10284 case X86_XCPT_PF:
10285 case X86_XCPT_AC:
10286 case X86_XCPT_GP:
10287 fFlags |= IEM_XCPT_FLAGS_ERR;
10288 break;
10289 }
10290 break;
10291
10292 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10293 }
10294
10295 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10296
10297 if (pVCpu->iem.s.cActiveMappings > 0)
10298 iemMemRollback(pVCpu);
10299
10300 return rcStrict;
10301}
10302
10303
10304/**
10305 * Injects the active TRPM event.
10306 *
10307 * @returns Strict VBox status code.
10308 * @param pVCpu The cross context virtual CPU structure.
10309 */
10310VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10311{
10312#ifndef IEM_IMPLEMENTS_TASKSWITCH
10313 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10314#else
10315 uint8_t u8TrapNo;
10316 TRPMEVENT enmType;
10317 uint32_t uErrCode;
10318 RTGCUINTPTR uCr2;
10319 uint8_t cbInstr;
10320 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10321 if (RT_FAILURE(rc))
10322 return rc;
10323
10324 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10325 * ICEBP \#DB injection as a special case. */
10326 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10327#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10328 if (rcStrict == VINF_SVM_VMEXIT)
10329 rcStrict = VINF_SUCCESS;
10330#endif
10331#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10332 if (rcStrict == VINF_VMX_VMEXIT)
10333 rcStrict = VINF_SUCCESS;
10334#endif
10335 /** @todo Are there any other codes that imply the event was successfully
10336 * delivered to the guest? See @bugref{6607}. */
10337 if ( rcStrict == VINF_SUCCESS
10338 || rcStrict == VINF_IEM_RAISED_XCPT)
10339 TRPMResetTrap(pVCpu);
10340
10341 return rcStrict;
10342#endif
10343}
10344
10345
10346VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10347{
10348 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10349 return VERR_NOT_IMPLEMENTED;
10350}
10351
10352
10353VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10354{
10355 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10356 return VERR_NOT_IMPLEMENTED;
10357}
10358
10359
10360#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10361/**
10362 * Executes a IRET instruction with default operand size.
10363 *
10364 * This is for PATM.
10365 *
10366 * @returns VBox status code.
10367 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10368 * @param pCtxCore The register frame.
10369 */
10370VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10371{
10372 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10373
10374 iemCtxCoreToCtx(pCtx, pCtxCore);
10375 iemInitDecoder(pVCpu);
10376 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10377 if (rcStrict == VINF_SUCCESS)
10378 iemCtxToCtxCore(pCtxCore, pCtx);
10379 else
10380 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10381 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10382 return rcStrict;
10383}
10384#endif
10385
10386
10387/**
10388 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10389 *
10390 * This API ASSUMES that the caller has already verified that the guest code is
10391 * allowed to access the I/O port. (The I/O port is in the DX register in the
10392 * guest state.)
10393 *
10394 * @returns Strict VBox status code.
10395 * @param pVCpu The cross context virtual CPU structure.
10396 * @param cbValue The size of the I/O port access (1, 2, or 4).
10397 * @param enmAddrMode The addressing mode.
10398 * @param fRepPrefix Indicates whether a repeat prefix is used
10399 * (doesn't matter which for this instruction).
10400 * @param cbInstr The instruction length in bytes.
10401 * @param iEffSeg The effective segment address.
10402 * @param fIoChecked Whether the access to the I/O port has been
10403 * checked or not. It's typically checked in the
10404 * HM scenario.
10405 */
10406VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10407 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10408{
10409 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10410 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10411
10412 /*
10413 * State init.
10414 */
10415 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10416
10417 /*
10418 * Switch orgy for getting to the right handler.
10419 */
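    /* Note: the selection below is keyed on the repeat prefix, the address size
       (16/32/64-bit) and the operand size (1, 2 or 4 bytes); any other
       combination is rejected with an invalid operand-size or address-mode
       status. */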
10420 VBOXSTRICTRC rcStrict;
10421 if (fRepPrefix)
10422 {
10423 switch (enmAddrMode)
10424 {
10425 case IEMMODE_16BIT:
10426 switch (cbValue)
10427 {
10428 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10429 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10430 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10431 default:
10432 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10433 }
10434 break;
10435
10436 case IEMMODE_32BIT:
10437 switch (cbValue)
10438 {
10439 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10440 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10441 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10442 default:
10443 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10444 }
10445 break;
10446
10447 case IEMMODE_64BIT:
10448 switch (cbValue)
10449 {
10450 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10451 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10452 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10453 default:
10454 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10455 }
10456 break;
10457
10458 default:
10459 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10460 }
10461 }
10462 else
10463 {
10464 switch (enmAddrMode)
10465 {
10466 case IEMMODE_16BIT:
10467 switch (cbValue)
10468 {
10469 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10470 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10471 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10472 default:
10473 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10474 }
10475 break;
10476
10477 case IEMMODE_32BIT:
10478 switch (cbValue)
10479 {
10480 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10481 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10482 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10483 default:
10484 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10485 }
10486 break;
10487
10488 case IEMMODE_64BIT:
10489 switch (cbValue)
10490 {
10491 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10492 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10493 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10494 default:
10495 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10496 }
10497 break;
10498
10499 default:
10500 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10501 }
10502 }
10503
10504 if (pVCpu->iem.s.cActiveMappings)
10505 iemMemRollback(pVCpu);
10506
10507 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10508}
10509
10510
10511/**
10512 * Interface for HM and EM for executing string I/O IN (read) instructions.
10513 *
10514 * This API ASSUMES that the caller has already verified that the guest code is
10515 * allowed to access the I/O port. (The I/O port is in the DX register in the
10516 * guest state.)
10517 *
10518 * @returns Strict VBox status code.
10519 * @param pVCpu The cross context virtual CPU structure.
10520 * @param cbValue The size of the I/O port access (1, 2, or 4).
10521 * @param enmAddrMode The addressing mode.
10522 * @param fRepPrefix Indicates whether a repeat prefix is used
10523 * (doesn't matter which for this instruction).
10524 * @param cbInstr The instruction length in bytes.
10525 * @param fIoChecked Whether the access to the I/O port has been
10526 * checked or not. It's typically checked in the
10527 * HM scenario.
10528 */
10529VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10530 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10531{
10532 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10533
10534 /*
10535 * State init.
10536 */
10537 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10538
10539 /*
10540 * Switch orgy for getting to the right handler.
10541 */
10542 VBOXSTRICTRC rcStrict;
10543 if (fRepPrefix)
10544 {
10545 switch (enmAddrMode)
10546 {
10547 case IEMMODE_16BIT:
10548 switch (cbValue)
10549 {
10550 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10551 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10552 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10553 default:
10554 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10555 }
10556 break;
10557
10558 case IEMMODE_32BIT:
10559 switch (cbValue)
10560 {
10561 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10562 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10563 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10564 default:
10565 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10566 }
10567 break;
10568
10569 case IEMMODE_64BIT:
10570 switch (cbValue)
10571 {
10572 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10573 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10574 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10575 default:
10576 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10577 }
10578 break;
10579
10580 default:
10581 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10582 }
10583 }
10584 else
10585 {
10586 switch (enmAddrMode)
10587 {
10588 case IEMMODE_16BIT:
10589 switch (cbValue)
10590 {
10591 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10592 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10593 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10594 default:
10595 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10596 }
10597 break;
10598
10599 case IEMMODE_32BIT:
10600 switch (cbValue)
10601 {
10602 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10603 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10604 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10605 default:
10606 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10607 }
10608 break;
10609
10610 case IEMMODE_64BIT:
10611 switch (cbValue)
10612 {
10613 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10614 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10615 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10616 default:
10617 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10618 }
10619 break;
10620
10621 default:
10622 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10623 }
10624 }
10625
10626 if ( pVCpu->iem.s.cActiveMappings == 0
10627 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10628 { /* likely */ }
10629 else
10630 {
10631 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10632 iemMemRollback(pVCpu);
10633 }
10634 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10635}
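
/*
 * Editor's note: a minimal, hypothetical caller sketch (not part of IEM or the
 * real HM code).  It shows IEMExecStringIoRead being used for a REP INSB with
 * 32-bit addressing after the caller has already checked I/O port permissions;
 * the function name is illustrative only.
 */
static VBOXSTRICTRC exampleHandleRepInsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* REP INSB, 32-bit address size: 1 byte per iteration, port taken from DX by the worker. */
    return IEMExecStringIoRead(pVCpu,
                               1 /*cbValue*/,
                               IEMMODE_32BIT,
                               true /*fRepPrefix*/,
                               cbInstr,
                               true /*fIoChecked: the port access was verified by the caller*/);
}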
10636
10637
10638/**
10639 * Interface for rawmode to execute an OUT instruction.
10640 *
10641 * @returns Strict VBox status code.
10642 * @param pVCpu The cross context virtual CPU structure.
10643 * @param cbInstr The instruction length in bytes.
10644 * @param u16Port The port to write to.
10645 * @param fImm Whether the port is specified using an immediate operand or
10646 * using the implicit DX register.
10647 * @param cbReg The register size.
10648 *
10649 * @remarks In ring-0 not all of the state needs to be synced in.
10650 */
10651VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10652{
10653 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10654 Assert(cbReg <= 4 && cbReg != 3);
10655
10656 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10657 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10658 Assert(!pVCpu->iem.s.cActiveMappings);
10659 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10660}
10661
10662
10663/**
10664 * Interface for rawmode to execute an IN instruction.
10665 *
10666 * @returns Strict VBox status code.
10667 * @param pVCpu The cross context virtual CPU structure.
10668 * @param cbInstr The instruction length in bytes.
10669 * @param u16Port The port to read from.
10670 * @param fImm Whether the port is specified using an immediate operand or
10671 * using the implicit DX register.
10672 * @param cbReg The register size.
10673 */
10674VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10675{
10676 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10677 Assert(cbReg <= 4 && cbReg != 3);
10678
10679 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10680 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10681 Assert(!pVCpu->iem.s.cActiveMappings);
10682 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10683}
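
/*
 * Editor's note: a minimal caller sketch for the two decoded port I/O
 * interfaces above; the function name and the port values are illustrative
 * only (a real caller takes the port from the decoded instruction or the
 * guest DX value).
 */
static VBOXSTRICTRC examplePortIo(PVMCPUCC pVCpu)
{
    /* OUT 80h, AL - 2 byte instruction (E6 ib), immediate port, 1 byte access. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x80 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* IN EAX, DX - 1 byte instruction (ED), port from DX, 4 byte access. */
    uint16_t const u16Port = 0x64; /* placeholder; a real caller would read the guest DX value */
    return IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 4 /*cbReg*/);
}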
10684
10685
10686/**
10687 * Interface for HM and EM to write to a CRx register.
10688 *
10689 * @returns Strict VBox status code.
10690 * @param pVCpu The cross context virtual CPU structure.
10691 * @param cbInstr The instruction length in bytes.
10692 * @param iCrReg The control register number (destination).
10693 * @param iGReg The general purpose register number (source).
10694 *
10695 * @remarks In ring-0 not all of the state needs to be synced in.
10696 */
10697VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10698{
10699 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10700 Assert(iCrReg < 16);
10701 Assert(iGReg < 16);
10702
10703 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10704 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10705 Assert(!pVCpu->iem.s.cActiveMappings);
10706 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10707}
10708
10709
10710/**
10711 * Interface for HM and EM to read from a CRx register.
10712 *
10713 * @returns Strict VBox status code.
10714 * @param pVCpu The cross context virtual CPU structure.
10715 * @param cbInstr The instruction length in bytes.
10716 * @param iGReg The general purpose register number (destination).
10717 * @param iCrReg The control register number (source).
10718 *
10719 * @remarks In ring-0 not all of the state needs to be synced in.
10720 */
10721VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10722{
10723 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10724 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10725 | CPUMCTX_EXTRN_APIC_TPR);
10726 Assert(iCrReg < 16);
10727 Assert(iGReg < 16);
10728
10729 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10730 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10731 Assert(!pVCpu->iem.s.cActiveMappings);
10732 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10733}
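
/*
 * Editor's note: a hedged sketch of emulating "mov cr3, rax" followed by
 * "mov rcx, cr0" through the two interfaces above.  The 3-byte instruction
 * lengths and the register indices are illustrative values only.
 */
static VBOXSTRICTRC exampleMovCrx(PVMCPUCC pVCpu)
{
    /* mov cr3, rax: destination CR3 (iCrReg=3), source RAX (iGReg=0), 0F 22 D8 = 3 bytes. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* mov rcx, cr0: destination RCX (iGReg=1), source CR0 (iCrReg=0), 0F 20 C1 = 3 bytes. */
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, 1 /*iGReg*/, 0 /*iCrReg*/);
}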
10734
10735
10736/**
10737 * Interface for HM and EM to clear the CR0[TS] bit.
10738 *
10739 * @returns Strict VBox status code.
10740 * @param pVCpu The cross context virtual CPU structure.
10741 * @param cbInstr The instruction length in bytes.
10742 *
10743 * @remarks In ring-0 not all of the state needs to be synced in.
10744 */
10745VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10746{
10747 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10748
10749 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10750 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10751 Assert(!pVCpu->iem.s.cActiveMappings);
10752 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10753}
10754
10755
10756/**
10757 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10758 *
10759 * @returns Strict VBox status code.
10760 * @param pVCpu The cross context virtual CPU structure.
10761 * @param cbInstr The instruction length in bytes.
10762 * @param uValue The value to load into CR0.
10763 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10764 * memory operand. Otherwise pass NIL_RTGCPTR.
10765 *
10766 * @remarks In ring-0 not all of the state needs to be synced in.
10767 */
10768VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10769{
10770 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10771
10772 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10773 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10774 Assert(!pVCpu->iem.s.cActiveMappings);
10775 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10776}
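
/*
 * Editor's note: illustrative only.  For the register form of LMSW there is no
 * memory operand, so NIL_RTGCPTR is passed for GCPtrEffDst; the 3-byte length
 * corresponds to e.g. "lmsw ax" (0F 01 F0).
 */
static VBOXSTRICTRC exampleLmswFromAx(PVMCPUCC pVCpu, uint16_t uValue)
{
    return IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, uValue, NIL_RTGCPTR);
}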
10777
10778
10779/**
10780 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10781 *
10782 * Takes the input values from ecx and edx:eax in the CPU context of the calling EMT.
10783 *
10784 * @returns Strict VBox status code.
10785 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10786 * @param cbInstr The instruction length in bytes.
10787 * @remarks In ring-0 not all of the state needs to be synced in.
10788 * @thread EMT(pVCpu)
10789 */
10790VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10791{
10792 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10793
10794 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10795 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10796 Assert(!pVCpu->iem.s.cActiveMappings);
10797 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10798}
10799
10800
10801/**
10802 * Interface for HM and EM to emulate the WBINVD instruction.
10803 *
10804 * @returns Strict VBox status code.
10805 * @param pVCpu The cross context virtual CPU structure.
10806 * @param cbInstr The instruction length in bytes.
10807 *
10808 * @remarks In ring-0 not all of the state needs to be synced in.
10809 */
10810VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10811{
10812 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10813
10814 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10815 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10816 Assert(!pVCpu->iem.s.cActiveMappings);
10817 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10818}
10819
10820
10821/**
10822 * Interface for HM and EM to emulate the INVD instruction.
10823 *
10824 * @returns Strict VBox status code.
10825 * @param pVCpu The cross context virtual CPU structure.
10826 * @param cbInstr The instruction length in bytes.
10827 *
10828 * @remarks In ring-0 not all of the state needs to be synced in.
10829 */
10830VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10831{
10832 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10833
10834 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10835 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10836 Assert(!pVCpu->iem.s.cActiveMappings);
10837 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10838}
10839
10840
10841/**
10842 * Interface for HM and EM to emulate the INVLPG instruction.
10843 *
10844 * @returns Strict VBox status code.
10845 * @retval VINF_PGM_SYNC_CR3
10846 *
10847 * @param pVCpu The cross context virtual CPU structure.
10848 * @param cbInstr The instruction length in bytes.
10849 * @param GCPtrPage The effective address of the page to invalidate.
10850 *
10851 * @remarks In ring-0 not all of the state needs to be synced in.
10852 */
10853VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10854{
10855 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10856
10857 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10858 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10859 Assert(!pVCpu->iem.s.cActiveMappings);
10860 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10861}
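
/*
 * Editor's note: sketch of using the INVLPG wrapper above.  The instruction
 * length is illustrative (0F 01 /7 plus a ModRM byte is 3 bytes minimum), and
 * the VINF_PGM_SYNC_CR3 return documented above is an informational status
 * that the caller must pass on rather than treat as an error.
 */
static VBOXSTRICTRC exampleInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, 3 /*cbInstr*/, GCPtrPage);
    /* Informational statuses such as VINF_PGM_SYNC_CR3 are propagated to the caller. */
    return rcStrict;
}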
10862
10863
10864/**
10865 * Interface for HM and EM to emulate the INVPCID instruction.
10866 *
10867 * @returns Strict VBox status code.
10868 * @retval VINF_PGM_SYNC_CR3
10869 *
10870 * @param pVCpu The cross context virtual CPU structure.
10871 * @param cbInstr The instruction length in bytes.
10872 * @param iEffSeg The effective segment register.
10873 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10874 * @param uType The invalidation type.
10875 *
10876 * @remarks In ring-0 not all of the state needs to be synced in.
10877 */
10878VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10879 uint64_t uType)
10880{
10881 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10882
10883 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10884 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10885 Assert(!pVCpu->iem.s.cActiveMappings);
10886 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10887}
10888
10889
10890/**
10891 * Interface for HM and EM to emulate the CPUID instruction.
10892 *
10893 * @returns Strict VBox status code.
10894 *
10895 * @param pVCpu The cross context virtual CPU structure.
10896 * @param cbInstr The instruction length in bytes.
10897 *
10898 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
10899 */
10900VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10901{
10902 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10903 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10904
10905 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10906 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10907 Assert(!pVCpu->iem.s.cActiveMappings);
10908 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10909}
10910
10911
10912/**
10913 * Interface for HM and EM to emulate the RDPMC instruction.
10914 *
10915 * @returns Strict VBox status code.
10916 *
10917 * @param pVCpu The cross context virtual CPU structure.
10918 * @param cbInstr The instruction length in bytes.
10919 *
10920 * @remarks Not all of the state needs to be synced in.
10921 */
10922VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10923{
10924 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10925 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10926
10927 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10928 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10929 Assert(!pVCpu->iem.s.cActiveMappings);
10930 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10931}
10932
10933
10934/**
10935 * Interface for HM and EM to emulate the RDTSC instruction.
10936 *
10937 * @returns Strict VBox status code.
10938 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10939 *
10940 * @param pVCpu The cross context virtual CPU structure.
10941 * @param cbInstr The instruction length in bytes.
10942 *
10943 * @remarks Not all of the state needs to be synced in.
10944 */
10945VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10946{
10947 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10948 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10949
10950 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10951 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10952 Assert(!pVCpu->iem.s.cActiveMappings);
10953 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10954}
10955
10956
10957/**
10958 * Interface for HM and EM to emulate the RDTSCP instruction.
10959 *
10960 * @returns Strict VBox status code.
10961 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10962 *
10963 * @param pVCpu The cross context virtual CPU structure.
10964 * @param cbInstr The instruction length in bytes.
10965 *
10966 * @remarks Not all of the state needs to be synced in. It is recommended
10967 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10968 */
10969VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10970{
10971 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10972 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10973
10974 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10975 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10976 Assert(!pVCpu->iem.s.cActiveMappings);
10977 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10978}
10979
10980
10981/**
10982 * Interface for HM and EM to emulate the RDMSR instruction.
10983 *
10984 * @returns Strict VBox status code.
10985 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10986 *
10987 * @param pVCpu The cross context virtual CPU structure.
10988 * @param cbInstr The instruction length in bytes.
10989 *
10990 * @remarks Not all of the state needs to be synced in. Requires RCX and
10991 * (currently) all MSRs.
10992 */
10993VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10994{
10995 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10996 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10997
10998 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10999 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11000 Assert(!pVCpu->iem.s.cActiveMappings);
11001 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11002}
11003
11004
11005/**
11006 * Interface for HM and EM to emulate the WRMSR instruction.
11007 *
11008 * @returns Strict VBox status code.
11009 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11010 *
11011 * @param pVCpu The cross context virtual CPU structure.
11012 * @param cbInstr The instruction length in bytes.
11013 *
11014 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11015 * and (currently) all MSRs.
11016 */
11017VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11018{
11019 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11020 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11021 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11022
11023 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11024 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11025 Assert(!pVCpu->iem.s.cActiveMappings);
11026 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11027}
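
/*
 * Editor's note: illustrative sketch only.  It shows the RDMSR and WRMSR
 * wrappers above being used back to back; per the remarks, the caller must
 * have RCX (and for WRMSR also RAX and RDX) plus the MSR state imported
 * before making the calls.
 */
static VBOXSTRICTRC exampleMsrRoundTrip(PVMCPUCC pVCpu)
{
    /* RDMSR (0F 32) and WRMSR (0F 30) are both 2-byte instructions. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, 2 /*cbInstr*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    return IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/);
}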
11028
11029
11030/**
11031 * Interface for HM and EM to emulate the MONITOR instruction.
11032 *
11033 * @returns Strict VBox status code.
11034 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11035 *
11036 * @param pVCpu The cross context virtual CPU structure.
11037 * @param cbInstr The instruction length in bytes.
11038 *
11039 * @remarks Not all of the state needs to be synced in.
11040 * @remarks ASSUMES the default DS segment and that no segment override
11041 * prefixes are used.
11042 */
11043VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11044{
11045 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11046 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11047
11048 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11049 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11050 Assert(!pVCpu->iem.s.cActiveMappings);
11051 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11052}
11053
11054
11055/**
11056 * Interface for HM and EM to emulate the MWAIT instruction.
11057 *
11058 * @returns Strict VBox status code.
11059 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11060 *
11061 * @param pVCpu The cross context virtual CPU structure.
11062 * @param cbInstr The instruction length in bytes.
11063 *
11064 * @remarks Not all of the state needs to be synced in.
11065 */
11066VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11067{
11068 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11069 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11070
11071 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11072 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11073 Assert(!pVCpu->iem.s.cActiveMappings);
11074 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11075}
11076
11077
11078/**
11079 * Interface for HM and EM to emulate the HLT instruction.
11080 *
11081 * @returns Strict VBox status code.
11082 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11083 *
11084 * @param pVCpu The cross context virtual CPU structure.
11085 * @param cbInstr The instruction length in bytes.
11086 *
11087 * @remarks Not all of the state needs to be synced in.
11088 */
11089VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11090{
11091 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11092
11093 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11094 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11095 Assert(!pVCpu->iem.s.cActiveMappings);
11096 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11097}
11098
11099
11100/**
11101 * Checks if IEM is in the process of delivering an event (interrupt or
11102 * exception).
11103 *
11104 * @returns true if we're in the process of raising an interrupt or exception,
11105 * false otherwise.
11106 * @param pVCpu The cross context virtual CPU structure.
11107 * @param puVector Where to store the vector associated with the
11108 * currently delivered event, optional.
11109 * @param pfFlags Where to store the event delivery flags (see
11110 * IEM_XCPT_FLAGS_XXX), optional.
11111 * @param puErr Where to store the error code associated with the
11112 * event, optional.
11113 * @param puCr2 Where to store the CR2 associated with the event,
11114 * optional.
11115 * @remarks The caller should check the flags to determine if the error code and
11116 * CR2 are valid for the event.
11117 */
11118VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11119{
11120 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11121 if (fRaisingXcpt)
11122 {
11123 if (puVector)
11124 *puVector = pVCpu->iem.s.uCurXcpt;
11125 if (pfFlags)
11126 *pfFlags = pVCpu->iem.s.fCurXcpt;
11127 if (puErr)
11128 *puErr = pVCpu->iem.s.uCurXcptErr;
11129 if (puCr2)
11130 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11131 }
11132 return fRaisingXcpt;
11133}
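
/*
 * Editor's note: sketch of a caller inspecting the event currently being
 * delivered.  It deliberately avoids naming individual IEM_XCPT_FLAGS_XXX
 * bits; the caller must consult the returned flags to know whether the error
 * code and CR2 values are meaningful, as stated in the remarks above.
 */
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x fFlags=%#x uErr=%#x uCr2=%RX64 (uErr/uCr2 validity depends on fFlags)\n",
             uVector, fFlags, uErr, uCr2));
}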
11134
11135#ifdef IN_RING3
11136
11137/**
11138 * Handles the unlikely and probably fatal merge cases.
11139 *
11140 * @returns Merged status code.
11141 * @param rcStrict Current EM status code.
11142 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11143 * with @a rcStrict.
11144 * @param iMemMap The memory mapping index. For error reporting only.
11145 * @param pVCpu The cross context virtual CPU structure of the calling
11146 * thread, for error reporting only.
11147 */
11148DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11149 unsigned iMemMap, PVMCPUCC pVCpu)
11150{
11151 if (RT_FAILURE_NP(rcStrict))
11152 return rcStrict;
11153
11154 if (RT_FAILURE_NP(rcStrictCommit))
11155 return rcStrictCommit;
11156
11157 if (rcStrict == rcStrictCommit)
11158 return rcStrictCommit;
11159
11160 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11161 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11162 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11163 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11164 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11165 return VERR_IOM_FF_STATUS_IPE;
11166}
11167
11168
11169/**
11170 * Helper for IOMR3ProcessForceFlag.
11171 *
11172 * @returns Merged status code.
11173 * @param rcStrict Current EM status code.
11174 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11175 * with @a rcStrict.
11176 * @param iMemMap The memory mapping index. For error reporting only.
11177 * @param pVCpu The cross context virtual CPU structure of the calling
11178 * thread, for error reporting only.
11179 */
11180DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11181{
11182 /* Simple. */
11183 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11184 return rcStrictCommit;
11185
11186 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11187 return rcStrict;
11188
11189 /* EM scheduling status codes. */
11190 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11191 && rcStrict <= VINF_EM_LAST))
11192 {
11193 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11194 && rcStrictCommit <= VINF_EM_LAST))
11195 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11196 }
11197
11198 /* Unlikely */
11199 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11200}
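
/*
 * Editor's note - worked examples of the merge logic above, derived directly
 * from the code (not from separate documentation):
 *   - rcStrict is VINF_SUCCESS or VINF_EM_RAW_TO_R3          -> the commit status wins.
 *   - otherwise, rcStrictCommit is VINF_SUCCESS              -> rcStrict wins.
 *   - both are EM statuses in [VINF_EM_FIRST, VINF_EM_LAST]  -> the numerically lower
 *     one wins (lower values are treated as higher priority).
 *   - anything else                                          -> iemR3MergeStatusSlow().
 */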
11201
11202
11203/**
11204 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11205 *
11206 * @returns Merge between @a rcStrict and what the commit operation returned.
11207 * @param pVM The cross context VM structure.
11208 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11209 * @param rcStrict The status code returned by ring-0 or raw-mode.
11210 */
11211VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11212{
11213 /*
11214 * Reset the pending commit.
11215 */
11216 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11217 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11218 ("%#x %#x %#x\n",
11219 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11220 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11221
11222 /*
11223 * Commit the pending bounce buffers (usually just one).
11224 */
11225 unsigned cBufs = 0;
11226 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11227 while (iMemMap-- > 0)
11228 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11229 {
11230 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11231 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11232 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11233
11234 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11235 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11236 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11237
11238 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11239 {
11240 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11241 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11242 pbBuf,
11243 cbFirst,
11244 PGMACCESSORIGIN_IEM);
11245 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11246 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11247 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11248 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11249 }
11250
11251 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11252 {
11253 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11254 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11255 pbBuf + cbFirst,
11256 cbSecond,
11257 PGMACCESSORIGIN_IEM);
11258 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11259 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11260 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11261 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11262 }
11263 cBufs++;
11264 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11265 }
11266
11267 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11268 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11269 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11270 pVCpu->iem.s.cActiveMappings = 0;
11271 return rcStrict;
11272}
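
/*
 * Editor's note: a minimal sketch of the ring-3 force-flag processing site
 * described above (the real EM loop differs); it only illustrates that the
 * function is invoked when VMCPU_FF_IEM is pending and that its result
 * replaces the status being processed.
 */
static VBOXSTRICTRC exampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}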
11273
11274#endif /* IN_RING3 */
11275