VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@99220

Last change on this file was r99220, checked in by vboxsync, 14 months ago

Disassembler,*: Start separating the disassembler into an architecture-specific and a common part, bugref:10394

1/* $Id: IEMAll.cpp 99220 2023-03-30 12:40:46Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
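 *
 * An illustrative example (not quoted from this file; uCs, uRip, pszMnemonic,
 * GCPhysDst and cbWritten are made-up locals) of how the levels above are
 * typically exercised under the LOG_GROUP_IEM group selected further down:
 * @code
 *      LogFlow(("IEMExecOne: cs:rip=%04x:%RX64\n", uCs, uRip));
 *      Log4(("decode: %04x:%RX64 %s\n", uCs, uRip, pszMnemonic));
 *      Log8(("write: %RGp LB %#x\n", GCPhysDst, cbWritten));
 * @endcode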
90 */
91
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <iprt/asm-math.h>
130#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
131# include <iprt/asm-amd64-x86.h>
132#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
133# include <iprt/asm-arm.h>
134#endif
135#include <iprt/assert.h>
136#include <iprt/string.h>
137#include <iprt/x86.h>
138
139#include "IEMInline.h"
140
141
142/*********************************************************************************************************************************
143* Structures and Typedefs *
144*********************************************************************************************************************************/
145/**
146 * CPU exception classes.
147 */
148typedef enum IEMXCPTCLASS
149{
150 IEMXCPTCLASS_BENIGN,
151 IEMXCPTCLASS_CONTRIBUTORY,
152 IEMXCPTCLASS_PAGE_FAULT,
153 IEMXCPTCLASS_DOUBLE_FAULT
154} IEMXCPTCLASS;
155
156
157/*********************************************************************************************************************************
158* Global Variables *
159*********************************************************************************************************************************/
160#if defined(IEM_LOG_MEMORY_WRITES)
161/** What IEM just wrote. */
162uint8_t g_abIemWrote[256];
163/** How much IEM just wrote. */
164size_t g_cbIemWrote;
165#endif
166
167
168/*********************************************************************************************************************************
169* Internal Functions *
170*********************************************************************************************************************************/
171static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
172 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
173
174
175/**
176 * Slow path of iemInitDecoder() and iemInitExec() that checks what kind of
177 * breakpoints are enabled.
178 *
179 * @param pVCpu The cross context virtual CPU structure of the
180 * calling thread.
181 */
182void iemInitPendingBreakpointsSlow(PVMCPUCC pVCpu)
183{
184 /*
185 * Process guest breakpoints.
186 */
187#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
188 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
189 { \
190 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
191 { \
192 case X86_DR7_RW_EO: \
193 pVCpu->iem.s.fPendingInstructionBreakpoints = true; \
194 break; \
195 case X86_DR7_RW_WO: \
196 case X86_DR7_RW_RW: \
197 pVCpu->iem.s.fPendingDataBreakpoints = true; \
198 break; \
199 case X86_DR7_RW_IO: \
200 pVCpu->iem.s.fPendingIoBreakpoints = true; \
201 break; \
202 } \
203 } \
204 } while (0)
205 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
206 if (fGstDr7 & X86_DR7_ENABLED_MASK)
207 {
208 PROCESS_ONE_BP(fGstDr7, 0);
209 PROCESS_ONE_BP(fGstDr7, 1);
210 PROCESS_ONE_BP(fGstDr7, 2);
211 PROCESS_ONE_BP(fGstDr7, 3);
212 }
213
214 /*
215 * Process hypervisor breakpoints.
216 */
217 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
218 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
219 {
220 PROCESS_ONE_BP(fHyperDr7, 0);
221 PROCESS_ONE_BP(fHyperDr7, 1);
222 PROCESS_ONE_BP(fHyperDr7, 2);
223 PROCESS_ONE_BP(fHyperDr7, 3);
224 }
225}
226
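/* Illustrative example only; the DR7 value below is made up rather than taken
 * from any guest, and the call is shown purely to make the macro above concrete:
 *      pVCpu->cpum.GstCtx.dr[7] = UINT32_C(0x00D00004); // L1 set, R/W1=01b (write), LEN1=11b (4 bytes)
 *      iemInitPendingBreakpointsSlow(pVCpu);            // ends up setting fPendingDataBreakpoints
 * The X86_DR7_ENABLED_MASK check is satisfied by the L1 bit (bit 2), and
 * PROCESS_ONE_BP(fGstDr7, 1) decodes R/W1 as X86_DR7_RW_WO. */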
227
228/**
229 * Initializes the decoder state.
230 *
231 * iemReInitDecoder is mostly a copy of this function.
232 *
233 * @param pVCpu The cross context virtual CPU structure of the
234 * calling thread.
235 * @param fBypassHandlers Whether to bypass access handlers.
236 * @param fDisregardLock Whether to disregard the LOCK prefix.
237 */
238DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
239{
240 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
241 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
242 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
243 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
244 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
245 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
246 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
247 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
250
251 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
252 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
253 pVCpu->iem.s.enmCpuMode = enmMode;
254 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
255 pVCpu->iem.s.enmEffAddrMode = enmMode;
256 if (enmMode != IEMMODE_64BIT)
257 {
258 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
259 pVCpu->iem.s.enmEffOpSize = enmMode;
260 }
261 else
262 {
263 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
264 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
265 }
266 pVCpu->iem.s.fPrefixes = 0;
267 pVCpu->iem.s.uRexReg = 0;
268 pVCpu->iem.s.uRexB = 0;
269 pVCpu->iem.s.uRexIndex = 0;
270 pVCpu->iem.s.idxPrefix = 0;
271 pVCpu->iem.s.uVex3rdReg = 0;
272 pVCpu->iem.s.uVexLength = 0;
273 pVCpu->iem.s.fEvexStuff = 0;
274 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
275#ifdef IEM_WITH_CODE_TLB
276 pVCpu->iem.s.pbInstrBuf = NULL;
277 pVCpu->iem.s.offInstrNextByte = 0;
278 pVCpu->iem.s.offCurInstrStart = 0;
279# ifdef VBOX_STRICT
280 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
281 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
282 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
283# endif
284#else
285 pVCpu->iem.s.offOpcode = 0;
286 pVCpu->iem.s.cbOpcode = 0;
287#endif
288 pVCpu->iem.s.offModRm = 0;
289 pVCpu->iem.s.cActiveMappings = 0;
290 pVCpu->iem.s.iNextMapping = 0;
291 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
292 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
293 pVCpu->iem.s.fDisregardLock = fDisregardLock;
294 pVCpu->iem.s.fPendingInstructionBreakpoints = false;
295 pVCpu->iem.s.fPendingDataBreakpoints = false;
296 pVCpu->iem.s.fPendingIoBreakpoints = false;
297 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
298 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
299 { /* likely */ }
300 else
301 iemInitPendingBreakpointsSlow(pVCpu);
302
303#ifdef DBGFTRACE_ENABLED
304 switch (enmMode)
305 {
306 case IEMMODE_64BIT:
307 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
308 break;
309 case IEMMODE_32BIT:
310 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
311 break;
312 case IEMMODE_16BIT:
313 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
314 break;
315 }
316#endif
317}
318
319
320/**
321 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
322 *
323 * This is mostly a copy of iemInitDecoder.
324 *
325 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
326 */
327DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
328{
329 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
330 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
331 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
332 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
333 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
334 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
336 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
337 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
338
339 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
340 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
341 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
342 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
343 pVCpu->iem.s.enmEffAddrMode = enmMode;
344 if (enmMode != IEMMODE_64BIT)
345 {
346 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
347 pVCpu->iem.s.enmEffOpSize = enmMode;
348 }
349 else
350 {
351 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
352 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
353 }
354 pVCpu->iem.s.fPrefixes = 0;
355 pVCpu->iem.s.uRexReg = 0;
356 pVCpu->iem.s.uRexB = 0;
357 pVCpu->iem.s.uRexIndex = 0;
358 pVCpu->iem.s.idxPrefix = 0;
359 pVCpu->iem.s.uVex3rdReg = 0;
360 pVCpu->iem.s.uVexLength = 0;
361 pVCpu->iem.s.fEvexStuff = 0;
362 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
363#ifdef IEM_WITH_CODE_TLB
364 if (pVCpu->iem.s.pbInstrBuf)
365 {
366 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
367 ? pVCpu->cpum.GstCtx.rip
368 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
369 - pVCpu->iem.s.uInstrBufPc;
370 if (off < pVCpu->iem.s.cbInstrBufTotal)
371 {
372 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
373 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
374 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
375 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
376 else
377 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
378 }
379 else
380 {
381 pVCpu->iem.s.pbInstrBuf = NULL;
382 pVCpu->iem.s.offInstrNextByte = 0;
383 pVCpu->iem.s.offCurInstrStart = 0;
384 pVCpu->iem.s.cbInstrBuf = 0;
385 pVCpu->iem.s.cbInstrBufTotal = 0;
386 }
387 }
388 else
389 {
390 pVCpu->iem.s.offInstrNextByte = 0;
391 pVCpu->iem.s.offCurInstrStart = 0;
392 pVCpu->iem.s.cbInstrBuf = 0;
393 pVCpu->iem.s.cbInstrBufTotal = 0;
394 }
395#else
396 pVCpu->iem.s.cbOpcode = 0;
397 pVCpu->iem.s.offOpcode = 0;
398#endif
399 pVCpu->iem.s.offModRm = 0;
400 Assert(pVCpu->iem.s.cActiveMappings == 0);
401 pVCpu->iem.s.iNextMapping = 0;
402 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
403 Assert(pVCpu->iem.s.fBypassHandlers == false);
404
405#ifdef DBGFTRACE_ENABLED
406 switch (enmMode)
407 {
408 case IEMMODE_64BIT:
409 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
410 break;
411 case IEMMODE_32BIT:
412 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
413 break;
414 case IEMMODE_16BIT:
415 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
416 break;
417 }
418#endif
419}
420
421
422
423/**
424 * Prefetches opcodes the first time execution is started.
425 *
426 * @returns Strict VBox status code.
427 * @param pVCpu The cross context virtual CPU structure of the
428 * calling thread.
429 * @param fBypassHandlers Whether to bypass access handlers.
430 * @param fDisregardLock Whether to disregard LOCK prefixes.
431 *
432 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
433 * store them as such.
434 */
435static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
436{
437 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
438
439#ifndef IEM_WITH_CODE_TLB
440 /*
441 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
442 *
443 * First translate CS:rIP to a physical address.
444 *
445 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
446 * all relevant bytes from the first page, as it ASSUMES it's only ever
447 * called for dealing with CS.LIM, page crossing and instructions that
448 * are too long.
449 */
450 uint32_t cbToTryRead;
451 RTGCPTR GCPtrPC;
452 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
453 {
454 cbToTryRead = GUEST_PAGE_SIZE;
455 GCPtrPC = pVCpu->cpum.GstCtx.rip;
456 if (IEM_IS_CANONICAL(GCPtrPC))
457 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
458 else
459 return iemRaiseGeneralProtectionFault0(pVCpu);
460 }
461 else
462 {
463 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
464 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
465 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
466 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
467 else
468 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
469 if (cbToTryRead) { /* likely */ }
470 else /* overflowed */
471 {
472 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
473 cbToTryRead = UINT32_MAX;
474 }
475 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
476 Assert(GCPtrPC <= UINT32_MAX);
477 }
478
479 PGMPTWALK Walk;
480 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
481 if (RT_SUCCESS(rc))
482 Assert(Walk.fSucceeded); /* probable. */
483 else
484 {
485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
486# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
487 if (Walk.fFailed & PGM_WALKFAIL_EPT)
488 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
489# endif
490 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
491 }
492 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
493 else
494 {
495 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
496# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
497 if (Walk.fFailed & PGM_WALKFAIL_EPT)
498 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
499# endif
500 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
501 }
502 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
503 else
504 {
505 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
506# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
507 if (Walk.fFailed & PGM_WALKFAIL_EPT)
508 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
509# endif
510 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
511 }
512 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
513 /** @todo Check reserved bits and such stuff. PGM is better at doing
514 * that, so do it when implementing the guest virtual address
515 * TLB... */
516
517 /*
518 * Read the bytes at this address.
519 */
520 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
521 if (cbToTryRead > cbLeftOnPage)
522 cbToTryRead = cbLeftOnPage;
523 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
524 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
525
526 if (!pVCpu->iem.s.fBypassHandlers)
527 {
528 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
529 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
530 { /* likely */ }
531 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
532 {
533 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
534 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
535 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
536 }
537 else
538 {
539 Log((RT_SUCCESS(rcStrict)
540 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
541 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
542 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
543 return rcStrict;
544 }
545 }
546 else
547 {
548 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
549 if (RT_SUCCESS(rc))
550 { /* likely */ }
551 else
552 {
553 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
554 GCPtrPC, GCPhys, cbToTryRead, rc));
555 return rc;
556 }
557 }
558 pVCpu->iem.s.cbOpcode = cbToTryRead;
559#endif /* !IEM_WITH_CODE_TLB */
560 return VINF_SUCCESS;
561}
562
563
564/**
565 * Invalidates the IEM TLBs.
566 *
567 * This is called internally as well as by PGM when moving GC mappings.
568 *
569 *
570 * @param pVCpu The cross context virtual CPU structure of the calling
571 * thread.
572 */
573VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
574{
575#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
576 Log10(("IEMTlbInvalidateAll\n"));
577# ifdef IEM_WITH_CODE_TLB
578 pVCpu->iem.s.cbInstrBufTotal = 0;
579 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
580 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
581 { /* very likely */ }
582 else
583 {
584 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
585 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
586 while (i-- > 0)
587 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
588 }
589# endif
590
591# ifdef IEM_WITH_DATA_TLB
592 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
593 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
594 { /* very likely */ }
595 else
596 {
597 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
598 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
599 while (i-- > 0)
600 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
601 }
602# endif
603#else
604 RT_NOREF(pVCpu);
605#endif
606}
607
608
609/**
610 * Invalidates a page in the TLBs.
611 *
612 * @param pVCpu The cross context virtual CPU structure of the calling
613 * thread.
614 * @param GCPtr The address of the page to invalidate
615 * @thread EMT(pVCpu)
616 */
617VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
618{
619#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
620 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
621 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
622 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
623 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
624
625# ifdef IEM_WITH_CODE_TLB
626 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
627 {
628 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
629 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
630 pVCpu->iem.s.cbInstrBufTotal = 0;
631 }
632# endif
633
634# ifdef IEM_WITH_DATA_TLB
635 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
636 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
637# endif
638#else
639 NOREF(pVCpu); NOREF(GCPtr);
640#endif
641}
642
643
644#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
645/**
646 * Invalidates both TLBs in slow fashion following a rollover.
647 *
648 * Worker for IEMTlbInvalidateAllPhysical,
649 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
650 * iemMemMapJmp and others.
651 *
652 * @thread EMT(pVCpu)
653 */
654static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
655{
656 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
657 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
658 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
659
660 unsigned i;
661# ifdef IEM_WITH_CODE_TLB
662 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
663 while (i-- > 0)
664 {
665 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
666 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
667 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
668 }
669# endif
670# ifdef IEM_WITH_DATA_TLB
671 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
672 while (i-- > 0)
673 {
674 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
675 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
676 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
677 }
678# endif
679
680}
681#endif
682
683
684/**
685 * Invalidates the host physical aspects of the IEM TLBs.
686 *
687 * This is called internally as well as by PGM when moving GC mappings.
688 *
689 * @param pVCpu The cross context virtual CPU structure of the calling
690 * thread.
691 * @note Currently not used.
692 */
693VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
694{
695#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
696 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
697 Log10(("IEMTlbInvalidateAllPhysical\n"));
698
699# ifdef IEM_WITH_CODE_TLB
700 pVCpu->iem.s.cbInstrBufTotal = 0;
701# endif
702 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
703 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
704 {
705 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
706 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
707 }
708 else
709 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
710#else
711 NOREF(pVCpu);
712#endif
713}
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVM The cross context VM structure.
722 * @param idCpuCaller The ID of the calling EMT if available to the caller,
723 * otherwise NIL_VMCPUID.
724 *
725 * @remarks Caller holds the PGM lock.
726 */
727VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
728{
729#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
730 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
731 if (pVCpuCaller)
732 VMCPU_ASSERT_EMT(pVCpuCaller);
733 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
734
735 VMCC_FOR_EACH_VMCPU(pVM)
736 {
737# ifdef IEM_WITH_CODE_TLB
738 if (pVCpuCaller == pVCpu)
739 pVCpu->iem.s.cbInstrBufTotal = 0;
740# endif
741
742 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
743 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
744 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
745 { /* likely */}
746 else if (pVCpuCaller == pVCpu)
747 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
748 else
749 {
750 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
751 continue;
752 }
753 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
754 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
755 }
756 VMCC_FOR_EACH_VMCPU_END(pVM);
757
758#else
759 RT_NOREF(pVM, idCpuCaller);
760#endif
761}
762
763#ifdef IEM_WITH_CODE_TLB
764
765/**
766 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
767 * failure and jumps.
768 *
769 * We end up here for a number of reasons:
770 * - pbInstrBuf isn't yet initialized.
771 * - Advancing beyond the buffer boundary (e.g. cross page).
772 * - Advancing beyond the CS segment limit.
773 * - Fetching from non-mappable page (e.g. MMIO).
774 *
775 * @param pVCpu The cross context virtual CPU structure of the
776 * calling thread.
777 * @param pvDst Where to return the bytes.
778 * @param cbDst Number of bytes to read.
779 *
780 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
781 */
782void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
783{
784# ifdef IN_RING3
785 for (;;)
786 {
787 Assert(cbDst <= 8);
788 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
789
790 /*
791 * We might have a partial buffer match, deal with that first to make the
792 * rest simpler. This is the first part of the cross page/buffer case.
793 */
794 if (pVCpu->iem.s.pbInstrBuf != NULL)
795 {
796 if (offBuf < pVCpu->iem.s.cbInstrBuf)
797 {
798 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
799 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
800 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
801
802 cbDst -= cbCopy;
803 pvDst = (uint8_t *)pvDst + cbCopy;
804 offBuf += cbCopy;
805 pVCpu->iem.s.offInstrNextByte += cbCopy;
806 }
807 }
808
809 /*
810 * Check segment limit, figuring how much we're allowed to access at this point.
811 *
812 * We will fault immediately if RIP is past the segment limit / in non-canonical
813 * territory. If we do continue, there are one or more bytes to read before we
814 * end up in trouble and we need to do that first before faulting.
815 */
816 RTGCPTR GCPtrFirst;
817 uint32_t cbMaxRead;
818 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
819 {
820 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
821 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
822 { /* likely */ }
823 else
824 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
825 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
826 }
827 else
828 {
829 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
830 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
831 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
832 { /* likely */ }
833 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
834 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
835 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
836 if (cbMaxRead != 0)
837 { /* likely */ }
838 else
839 {
840 /* Overflowed because address is 0 and limit is max. */
841 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
842 cbMaxRead = X86_PAGE_SIZE;
843 }
844 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
845 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
846 if (cbMaxRead2 < cbMaxRead)
847 cbMaxRead = cbMaxRead2;
848 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
849 }
850
851 /*
852 * Get the TLB entry for this piece of code.
853 */
854 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
855 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
856 if (pTlbe->uTag == uTag)
857 {
858 /* likely when executing lots of code, otherwise unlikely */
859# ifdef VBOX_WITH_STATISTICS
860 pVCpu->iem.s.CodeTlb.cTlbHits++;
861# endif
862 }
863 else
864 {
865 pVCpu->iem.s.CodeTlb.cTlbMisses++;
866 PGMPTWALK Walk;
867 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
868 if (RT_FAILURE(rc))
869 {
870#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
871 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
872 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
873#endif
874 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
875 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
876 }
877
878 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
879 Assert(Walk.fSucceeded);
880 pTlbe->uTag = uTag;
881 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
882 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
883 pTlbe->GCPhys = Walk.GCPhys;
884 pTlbe->pbMappingR3 = NULL;
885 }
886
887 /*
888 * Check TLB page table level access flags.
889 */
890 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
891 {
892 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
893 {
894 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
895 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
896 }
897 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
898 {
899 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
900 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
901 }
902 }
903
904 /*
905 * Look up the physical page info if necessary.
906 */
907 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
908 { /* not necessary */ }
909 else
910 {
911 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
912 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
913 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
914 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
915 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
916 { /* likely */ }
917 else
918 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
919 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
920 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
921 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
922 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
923 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
924 }
925
926# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
927 /*
928 * Try to do a direct read using the pbMappingR3 pointer.
929 */
930 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
931 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
932 {
933 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
934 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
935 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
936 {
937 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
938 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
939 }
940 else
941 {
942 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
943 if (cbInstr + (uint32_t)cbDst <= 15)
944 {
945 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
946 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
947 }
948 else
949 {
950 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
951 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
952 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
953 }
954 }
955 if (cbDst <= cbMaxRead)
956 {
957 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
958 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
959 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
960 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
961 return;
962 }
963 pVCpu->iem.s.pbInstrBuf = NULL;
964
965 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
966 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
967 }
968# else
969# error "refactor as needed"
970 /*
971 * If there is no special read handling, we can read a bit more and
972 * put it in the prefetch buffer.
973 */
974 if ( cbDst < cbMaxRead
975 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
976 {
977 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
978 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
979 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
980 { /* likely */ }
981 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
982 {
983 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
984 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
985 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
986 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
987 }
988 else
989 {
990 Log((RT_SUCCESS(rcStrict)
991 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
992 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
993 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
994 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
995 }
996 }
997# endif
998 /*
999 * Special read handling, so only read exactly what's needed.
1000 * This is a highly unlikely scenario.
1001 */
1002 else
1003 {
1004 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1005
1006 /* Check instruction length. */
1007 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1008 if (RT_LIKELY(cbInstr + cbDst <= 15))
1009 { /* likely */ }
1010 else
1011 {
1012 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1013 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1014 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1015 }
1016
1017 /* Do the reading. */
1018 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1019 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1020 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1021 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1022 { /* likely */ }
1023 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1024 {
1025 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1026 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1027 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1028 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1029 }
1030 else
1031 {
1032 Log((RT_SUCCESS(rcStrict)
1033 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1034 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1035 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1036 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1037 }
1038 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1039 if (cbToRead == cbDst)
1040 return;
1041 }
1042
1043 /*
1044 * More to read, loop.
1045 */
1046 cbDst -= cbMaxRead;
1047 pvDst = (uint8_t *)pvDst + cbMaxRead;
1048 }
1049# else /* !IN_RING3 */
1050 RT_NOREF(pvDst, cbDst);
1051 if (pvDst || cbDst)
1052 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1053# endif /* !IN_RING3 */
1054}
1055
1056#else
1057
1058/**
1059 * Try to fetch at least @a cbMin more opcode bytes, raising the appropriate
1060 * exception if it fails.
1061 *
1062 * @returns Strict VBox status code.
1063 * @param pVCpu The cross context virtual CPU structure of the
1064 * calling thread.
1065 * @param cbMin The minimum number of bytes relative to offOpcode
1066 * that must be read.
1067 */
1068VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1069{
1070 /*
1071 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1072 *
1073 * First translate CS:rIP to a physical address.
1074 */
1075 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1076 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1077 uint8_t const cbLeft = cbOpcode - offOpcode;
1078 Assert(cbLeft < cbMin);
1079 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1080
1081 uint32_t cbToTryRead;
1082 RTGCPTR GCPtrNext;
1083 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1084 {
1085 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1086 if (!IEM_IS_CANONICAL(GCPtrNext))
1087 return iemRaiseGeneralProtectionFault0(pVCpu);
1088 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1089 }
1090 else
1091 {
1092 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1093 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
1094 GCPtrNext32 += cbOpcode;
1095 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1096 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1097 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1098 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1099 if (!cbToTryRead) /* overflowed */
1100 {
1101 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1102 cbToTryRead = UINT32_MAX;
1103 /** @todo check out wrapping around the code segment. */
1104 }
1105 if (cbToTryRead < cbMin - cbLeft)
1106 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1107 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1108
1109 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1110 if (cbToTryRead > cbLeftOnPage)
1111 cbToTryRead = cbLeftOnPage;
1112 }
1113
1114 /* Restrict to opcode buffer space.
1115
1116 We're making ASSUMPTIONS here based on work done previously in
1117 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1118 be fetched in case of an instruction crossing two pages. */
1119 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1120 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1121 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1122 { /* likely */ }
1123 else
1124 {
1125 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1126 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1127 return iemRaiseGeneralProtectionFault0(pVCpu);
1128 }
1129
1130 PGMPTWALK Walk;
1131 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1132 if (RT_FAILURE(rc))
1133 {
1134 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1135#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1136 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1137 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1138#endif
1139 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1140 }
1141 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1142 {
1143 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1144#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1145 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1146 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1147#endif
1148 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1149 }
1150 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1151 {
1152 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1153#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1154 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1155 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1156#endif
1157 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1158 }
1159 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1160 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1161 /** @todo Check reserved bits and such stuff. PGM is better at doing
1162 * that, so do it when implementing the guest virtual address
1163 * TLB... */
1164
1165 /*
1166 * Read the bytes at this address.
1167 *
1168 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1169 * and since PATM should only patch the start of an instruction there
1170 * should be no need to check again here.
1171 */
1172 if (!pVCpu->iem.s.fBypassHandlers)
1173 {
1174 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1175 cbToTryRead, PGMACCESSORIGIN_IEM);
1176 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1177 { /* likely */ }
1178 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1179 {
1180 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1181 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1182 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1183 }
1184 else
1185 {
1186 Log((RT_SUCCESS(rcStrict)
1187 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1188 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1189 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1190 return rcStrict;
1191 }
1192 }
1193 else
1194 {
1195 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1196 if (RT_SUCCESS(rc))
1197 { /* likely */ }
1198 else
1199 {
1200 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1201 return rc;
1202 }
1203 }
1204 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1205 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1206
1207 return VINF_SUCCESS;
1208}
1209
1210#endif /* !IEM_WITH_CODE_TLB */
1211#ifndef IEM_WITH_SETJMP
1212
1213/**
1214 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1215 *
1216 * @returns Strict VBox status code.
1217 * @param pVCpu The cross context virtual CPU structure of the
1218 * calling thread.
1219 * @param pb Where to return the opcode byte.
1220 */
1221VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1222{
1223 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1224 if (rcStrict == VINF_SUCCESS)
1225 {
1226 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1227 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1228 pVCpu->iem.s.offOpcode = offOpcode + 1;
1229 }
1230 else
1231 *pb = 0;
1232 return rcStrict;
1233}
1234
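/* For context, a rough sketch of the fast path in IEMInline.h that normally
 * avoids this slow helper (a simplified approximation, not quoted verbatim):
 *      DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
 *      {
 *          uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
 *          if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
 *          {
 *              *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
 *              pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
 *              return VINF_SUCCESS;
 *          }
 *          return iemOpcodeGetNextU8Slow(pVCpu, pu8);
 *      }
 */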
1235#else /* IEM_WITH_SETJMP */
1236
1237/**
1238 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1239 *
1240 * @returns The opcode byte.
1241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1242 */
1243uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1244{
1245# ifdef IEM_WITH_CODE_TLB
1246 uint8_t u8;
1247 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1248 return u8;
1249# else
1250 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1251 if (rcStrict == VINF_SUCCESS)
1252 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1253 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1254# endif
1255}
1256
1257#endif /* IEM_WITH_SETJMP */
1258
1259#ifndef IEM_WITH_SETJMP
1260
1261/**
1262 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1263 *
1264 * @returns Strict VBox status code.
1265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1266 * @param pu16 Where to return the opcode word.
1267 */
1268VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1269{
1270 uint8_t u8;
1271 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1272 if (rcStrict == VINF_SUCCESS)
1273 *pu16 = (int8_t)u8;
1274 return rcStrict;
1275}
1276
1277
1278/**
1279 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1280 *
1281 * @returns Strict VBox status code.
1282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1283 * @param pu32 Where to return the opcode dword.
1284 */
1285VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1286{
1287 uint8_t u8;
1288 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1289 if (rcStrict == VINF_SUCCESS)
1290 *pu32 = (int8_t)u8;
1291 return rcStrict;
1292}
1293
1294
1295/**
1296 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1297 *
1298 * @returns Strict VBox status code.
1299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1300 * @param pu64 Where to return the opcode qword.
1301 */
1302VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1303{
1304 uint8_t u8;
1305 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1306 if (rcStrict == VINF_SUCCESS)
1307 *pu64 = (int8_t)u8;
1308 return rcStrict;
1309}
1310
1311#endif /* !IEM_WITH_SETJMP */
1312
1313
1314#ifndef IEM_WITH_SETJMP
1315
1316/**
1317 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1318 *
1319 * @returns Strict VBox status code.
1320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1321 * @param pu16 Where to return the opcode word.
1322 */
1323VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1324{
1325 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1326 if (rcStrict == VINF_SUCCESS)
1327 {
1328 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1329# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1330 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1331# else
1332 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1333# endif
1334 pVCpu->iem.s.offOpcode = offOpcode + 2;
1335 }
1336 else
1337 *pu16 = 0;
1338 return rcStrict;
1339}
1340
1341#else /* IEM_WITH_SETJMP */
1342
1343/**
1344 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1345 *
1346 * @returns The opcode word.
1347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1348 */
1349uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1350{
1351# ifdef IEM_WITH_CODE_TLB
1352 uint16_t u16;
1353 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1354 return u16;
1355# else
1356 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1357 if (rcStrict == VINF_SUCCESS)
1358 {
1359 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1360 pVCpu->iem.s.offOpcode += 2;
1361# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1362 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1363# else
1364 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1365# endif
1366 }
1367 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1368# endif
1369}
1370
1371#endif /* IEM_WITH_SETJMP */
1372
1373#ifndef IEM_WITH_SETJMP
1374
1375/**
1376 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1377 *
1378 * @returns Strict VBox status code.
1379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1380 * @param pu32 Where to return the opcode double word.
1381 */
1382VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1383{
1384 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1385 if (rcStrict == VINF_SUCCESS)
1386 {
1387 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1388 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1389 pVCpu->iem.s.offOpcode = offOpcode + 2;
1390 }
1391 else
1392 *pu32 = 0;
1393 return rcStrict;
1394}
1395
1396
1397/**
1398 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1399 *
1400 * @returns Strict VBox status code.
1401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1402 * @param pu64 Where to return the opcode quad word.
1403 */
1404VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1405{
1406 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1407 if (rcStrict == VINF_SUCCESS)
1408 {
1409 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1410 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1411 pVCpu->iem.s.offOpcode = offOpcode + 2;
1412 }
1413 else
1414 *pu64 = 0;
1415 return rcStrict;
1416}
1417
1418#endif /* !IEM_WITH_SETJMP */
1419
1420#ifndef IEM_WITH_SETJMP
1421
1422/**
1423 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1424 *
1425 * @returns Strict VBox status code.
1426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1427 * @param pu32 Where to return the opcode dword.
1428 */
1429VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1430{
1431 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1432 if (rcStrict == VINF_SUCCESS)
1433 {
1434 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1435# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1436 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1437# else
1438 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1439 pVCpu->iem.s.abOpcode[offOpcode + 1],
1440 pVCpu->iem.s.abOpcode[offOpcode + 2],
1441 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1442# endif
1443 pVCpu->iem.s.offOpcode = offOpcode + 4;
1444 }
1445 else
1446 *pu32 = 0;
1447 return rcStrict;
1448}
1449
1450#else /* IEM_WITH_SETJMP */
1451
1452/**
1453 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1454 *
1455 * @returns The opcode dword.
1456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1457 */
1458uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1459{
1460# ifdef IEM_WITH_CODE_TLB
1461 uint32_t u32;
1462 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1463 return u32;
1464# else
1465 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1466 if (rcStrict == VINF_SUCCESS)
1467 {
1468 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1469 pVCpu->iem.s.offOpcode = offOpcode + 4;
1470# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1471 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1472# else
1473 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1474 pVCpu->iem.s.abOpcode[offOpcode + 1],
1475 pVCpu->iem.s.abOpcode[offOpcode + 2],
1476 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1477# endif
1478 }
1479 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1480# endif
1481}
1482
1483#endif /* IEM_WITH_SETJMP */
1484
1485#ifndef IEM_WITH_SETJMP
1486
1487/**
1488 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1489 *
1490 * @returns Strict VBox status code.
1491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1492 * @param pu64 Where to return the opcode qword.
1493 */
1494VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1495{
1496 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1497 if (rcStrict == VINF_SUCCESS)
1498 {
1499 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1500 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1501 pVCpu->iem.s.abOpcode[offOpcode + 1],
1502 pVCpu->iem.s.abOpcode[offOpcode + 2],
1503 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1504 pVCpu->iem.s.offOpcode = offOpcode + 4;
1505 }
1506 else
1507 *pu64 = 0;
1508 return rcStrict;
1509}
1510
1511
1512/**
1513 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1514 *
1515 * @returns Strict VBox status code.
1516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1517 * @param pu64 Where to return the opcode qword.
1518 */
1519VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1520{
1521 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1522 if (rcStrict == VINF_SUCCESS)
1523 {
1524 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1525 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1526 pVCpu->iem.s.abOpcode[offOpcode + 1],
1527 pVCpu->iem.s.abOpcode[offOpcode + 2],
1528 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1529 pVCpu->iem.s.offOpcode = offOpcode + 4;
1530 }
1531 else
1532 *pu64 = 0;
1533 return rcStrict;
1534}
1535
1536#endif /* !IEM_WITH_SETJMP */
1537
1538#ifndef IEM_WITH_SETJMP
1539
1540/**
1541 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1542 *
1543 * @returns Strict VBox status code.
1544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1545 * @param pu64 Where to return the opcode qword.
1546 */
1547VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1548{
1549 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1550 if (rcStrict == VINF_SUCCESS)
1551 {
1552 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1553# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1554 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1555# else
1556 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1557 pVCpu->iem.s.abOpcode[offOpcode + 1],
1558 pVCpu->iem.s.abOpcode[offOpcode + 2],
1559 pVCpu->iem.s.abOpcode[offOpcode + 3],
1560 pVCpu->iem.s.abOpcode[offOpcode + 4],
1561 pVCpu->iem.s.abOpcode[offOpcode + 5],
1562 pVCpu->iem.s.abOpcode[offOpcode + 6],
1563 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1564# endif
1565 pVCpu->iem.s.offOpcode = offOpcode + 8;
1566 }
1567 else
1568 *pu64 = 0;
1569 return rcStrict;
1570}
1571
1572#else /* IEM_WITH_SETJMP */
1573
1574/**
1575 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1576 *
1577 * @returns The opcode qword.
1578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1579 */
1580uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1581{
1582# ifdef IEM_WITH_CODE_TLB
1583 uint64_t u64;
1584 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1585 return u64;
1586# else
1587 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1588 if (rcStrict == VINF_SUCCESS)
1589 {
1590 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1591 pVCpu->iem.s.offOpcode = offOpcode + 8;
1592# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1593 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1594# else
1595 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1596 pVCpu->iem.s.abOpcode[offOpcode + 1],
1597 pVCpu->iem.s.abOpcode[offOpcode + 2],
1598 pVCpu->iem.s.abOpcode[offOpcode + 3],
1599 pVCpu->iem.s.abOpcode[offOpcode + 4],
1600 pVCpu->iem.s.abOpcode[offOpcode + 5],
1601 pVCpu->iem.s.abOpcode[offOpcode + 6],
1602 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1603# endif
1604 }
1605 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1606# endif
1607}
1608
1609#endif /* IEM_WITH_SETJMP */
1610
1611
1612
1613/** @name Misc Worker Functions.
1614 * @{
1615 */
1616
1617/**
1618 * Gets the exception class for the specified exception vector.
1619 *
1620 * @returns The class of the specified exception.
1621 * @param uVector The exception vector.
1622 */
1623static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1624{
1625 Assert(uVector <= X86_XCPT_LAST);
1626 switch (uVector)
1627 {
1628 case X86_XCPT_DE:
1629 case X86_XCPT_TS:
1630 case X86_XCPT_NP:
1631 case X86_XCPT_SS:
1632 case X86_XCPT_GP:
1633 case X86_XCPT_SX: /* AMD only */
1634 return IEMXCPTCLASS_CONTRIBUTORY;
1635
1636 case X86_XCPT_PF:
1637 case X86_XCPT_VE: /* Intel only */
1638 return IEMXCPTCLASS_PAGE_FAULT;
1639
1640 case X86_XCPT_DF:
1641 return IEMXCPTCLASS_DOUBLE_FAULT;
1642 }
1643 return IEMXCPTCLASS_BENIGN;
1644}
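/* These classes combine roughly as in the Intel SDM double fault table: a
   contributory or page-fault class exception raised while delivering a page
   fault, or a contributory exception raised while delivering a contributory
   one, escalates to #DF; anything raised while delivering #DF means triple
   fault; all remaining combinations are handled serially. This is what
   IEMEvaluateRecursiveXcpt below implements. */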
1645
1646
1647/**
1648 * Evaluates how to handle an exception caused during delivery of another event
1649 * (exception / interrupt).
1650 *
1651 * @returns How to handle the recursive exception.
1652 * @param pVCpu The cross context virtual CPU structure of the
1653 * calling thread.
1654 * @param fPrevFlags The flags of the previous event.
1655 * @param uPrevVector The vector of the previous event.
1656 * @param fCurFlags The flags of the current exception.
1657 * @param uCurVector The vector of the current exception.
1658 * @param pfXcptRaiseInfo Where to store additional information about the
1659 * exception condition. Optional.
1660 */
1661VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1662 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1663{
1664 /*
1665 * Only CPU exceptions can be raised while delivering other events; software interrupt
1666 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1667 */
1668 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1669 Assert(pVCpu); RT_NOREF(pVCpu);
1670 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1671
1672 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1673 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1674 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1675 {
1676 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1677 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1678 {
1679 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1680 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1681 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1682 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1683 {
1684 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1685 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1686 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1687 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1688 uCurVector, pVCpu->cpum.GstCtx.cr2));
1689 }
1690 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1691 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1692 {
1693 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1694 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1695 }
1696 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1697 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1698 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1699 {
1700 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1701 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1702 }
1703 }
1704 else
1705 {
1706 if (uPrevVector == X86_XCPT_NMI)
1707 {
1708 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1709 if (uCurVector == X86_XCPT_PF)
1710 {
1711 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1712 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1713 }
1714 }
1715 else if ( uPrevVector == X86_XCPT_AC
1716 && uCurVector == X86_XCPT_AC)
1717 {
1718 enmRaise = IEMXCPTRAISE_CPU_HANG;
1719 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1720 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1721 }
1722 }
1723 }
1724 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1725 {
1726 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1727 if (uCurVector == X86_XCPT_PF)
1728 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1729 }
1730 else
1731 {
1732 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1733 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1734 }
1735
1736 if (pfXcptRaiseInfo)
1737 *pfXcptRaiseInfo = fRaiseInfo;
1738 return enmRaise;
1739}
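/* Illustrative sketch of a call (values per the logic above): a #GP raised
   while delivering a #PF escalates to a double fault:
       IEMXCPTRAISEINFO fInfo;
       IEMXCPTRAISE     enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                            IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
                                                            IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
                                                            &fInfo);
   returning IEMXCPTRAISE_DOUBLE_FAULT with fInfo = IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT. */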
1740
1741
1742/**
1743 * Enters the CPU shutdown state initiated by a triple fault or other
1744 * unrecoverable conditions.
1745 *
1746 * @returns Strict VBox status code.
1747 * @param pVCpu The cross context virtual CPU structure of the
1748 * calling thread.
1749 */
1750static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1751{
1752 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1753 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1754
1755 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1756 {
1757 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1758 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1759 }
1760
1761 RT_NOREF(pVCpu);
1762 return VINF_EM_TRIPLE_FAULT;
1763}
1764
1765
1766/**
1767 * Validates a new SS segment.
1768 *
1769 * @returns VBox strict status code.
1770 * @param pVCpu The cross context virtual CPU structure of the
1771 * calling thread.
1772 * @param NewSS The new SS selector.
1773 * @param uCpl The CPL to load the stack for.
1774 * @param pDesc Where to return the descriptor.
1775 */
1776static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1777{
1778 /* Null selectors are not allowed (we're not called for dispatching
1779 interrupts with SS=0 in long mode). */
1780 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1781 {
1782 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1783 return iemRaiseTaskSwitchFault0(pVCpu);
1784 }
1785
1786 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1787 if ((NewSS & X86_SEL_RPL) != uCpl)
1788 {
1789 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1790 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1791 }
1792
1793 /*
1794 * Read the descriptor.
1795 */
1796 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1797 if (rcStrict != VINF_SUCCESS)
1798 return rcStrict;
1799
1800 /*
1801 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1802 */
1803 if (!pDesc->Legacy.Gen.u1DescType)
1804 {
1805 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1806 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1807 }
1808
1809 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1810 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1811 {
1812 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1813 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1814 }
1815 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1816 {
1817 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1818 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1819 }
1820
1821 /* Is it there? */
1822 /** @todo testcase: Is this checked before the canonical / limit check below? */
1823 if (!pDesc->Legacy.Gen.u1Present)
1824 {
1825 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1826 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1827 }
1828
1829 return VINF_SUCCESS;
1830}
1831
1832/** @} */
1833
1834
1835/** @name Raising Exceptions.
1836 *
1837 * @{
1838 */
1839
1840
1841/**
1842 * Loads the specified stack far pointer from the TSS.
1843 *
1844 * @returns VBox strict status code.
1845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1846 * @param uCpl The CPL to load the stack for.
1847 * @param pSelSS Where to return the new stack segment.
1848 * @param puEsp Where to return the new stack pointer.
1849 */
1850static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1851{
1852 VBOXSTRICTRC rcStrict;
1853 Assert(uCpl < 4);
1854
1855 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1856 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1857 {
1858 /*
1859 * 16-bit TSS (X86TSS16).
1860 */
1861 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1862 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1863 {
1864 uint32_t off = uCpl * 4 + 2;
1865 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1866 {
1867 /** @todo check actual access pattern here. */
1868 uint32_t u32Tmp = 0; /* gcc maybe... */
1869 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1870 if (rcStrict == VINF_SUCCESS)
1871 {
1872 *puEsp = RT_LOWORD(u32Tmp);
1873 *pSelSS = RT_HIWORD(u32Tmp);
1874 return VINF_SUCCESS;
1875 }
1876 }
1877 else
1878 {
1879 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1880 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1881 }
1882 break;
1883 }
1884
1885 /*
1886 * 32-bit TSS (X86TSS32).
1887 */
1888 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1889 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1890 {
1891 uint32_t off = uCpl * 8 + 4;
1892 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1893 {
1894 /** @todo check actual access pattern here. */
1895 uint64_t u64Tmp;
1896 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1897 if (rcStrict == VINF_SUCCESS)
1898 {
1899 *puEsp = u64Tmp & UINT32_MAX;
1900 *pSelSS = (RTSEL)(u64Tmp >> 32);
1901 return VINF_SUCCESS;
1902 }
1903 }
1904 else
1905 {
1906 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1907 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1908 }
1909 break;
1910 }
1911
1912 default:
1913 AssertFailed();
1914 rcStrict = VERR_IEM_IPE_4;
1915 break;
1916 }
1917
1918 *puEsp = 0; /* make gcc happy */
1919 *pSelSS = 0; /* make gcc happy */
1920 return rcStrict;
1921}
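/* Worked example for the offsets above: in a 32-bit TSS the ring stacks start
   at esp0 (offset 4) and each esp:ss pair occupies 8 bytes, so uCpl=1 fetches
   the qword at 1*8+4 = 12 and takes esp1 from the low dword and ss1 from the
   low word of the high dword. A 16-bit TSS packs sp:ss pairs of 4 bytes
   starting at offset 2, hence uCpl*4+2. */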
1922
1923
1924/**
1925 * Loads the specified stack pointer from the 64-bit TSS.
1926 *
1927 * @returns VBox strict status code.
1928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1929 * @param uCpl The CPL to load the stack for.
1930 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1931 * @param puRsp Where to return the new stack pointer.
1932 */
1933static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1934{
1935 Assert(uCpl < 4);
1936 Assert(uIst < 8);
1937 *puRsp = 0; /* make gcc happy */
1938
1939 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1940 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1941
1942 uint32_t off;
1943 if (uIst)
1944 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1945 else
1946 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1947 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1948 {
1949 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1950 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1951 }
1952
1953 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1954}
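/* The offsets resolve to the architectural 64-bit TSS layout: rsp0 lives at
   offset 4, so uCpl=2 reads offset 20, while ist1 sits at offset 0x24, so
   uIst=3 reads offset 0x34. */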
1955
1956
1957/**
1958 * Adjust the CPU state according to the exception being raised.
1959 *
1960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1961 * @param u8Vector The exception that has been raised.
1962 */
1963DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1964{
1965 switch (u8Vector)
1966 {
1967 case X86_XCPT_DB:
1968 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1969 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1970 break;
1971 /** @todo Read the AMD and Intel exception reference... */
1972 }
1973}
1974
1975
1976/**
1977 * Implements exceptions and interrupts for real mode.
1978 *
1979 * @returns VBox strict status code.
1980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1981 * @param cbInstr The number of bytes to offset rIP by in the return
1982 * address.
1983 * @param u8Vector The interrupt / exception vector number.
1984 * @param fFlags The flags.
1985 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1986 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1987 */
1988static VBOXSTRICTRC
1989iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1990 uint8_t cbInstr,
1991 uint8_t u8Vector,
1992 uint32_t fFlags,
1993 uint16_t uErr,
1994 uint64_t uCr2) RT_NOEXCEPT
1995{
1996 NOREF(uErr); NOREF(uCr2);
1997 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1998
1999 /*
2000 * Read the IDT entry.
2001 */
2002 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2003 {
2004 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2005 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2006 }
2007 RTFAR16 Idte;
2008 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2009 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2010 {
2011 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2012 return rcStrict;
2013 }
2014
2015 /*
2016 * Push the stack frame.
2017 */
2018 uint16_t *pu16Frame;
2019 uint64_t uNewRsp;
2020 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2021 if (rcStrict != VINF_SUCCESS)
2022 return rcStrict;
2023
2024 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2025#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2026 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2027 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2028 fEfl |= UINT16_C(0xf000);
2029#endif
2030 pu16Frame[2] = (uint16_t)fEfl;
2031 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2032 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2033 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2034 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2035 return rcStrict;
2036
2037 /*
2038 * Load the vector address into cs:ip and make exception specific state
2039 * adjustments.
2040 */
2041 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2042 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2043 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2044 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2045 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2046 pVCpu->cpum.GstCtx.rip = Idte.off;
2047 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2048 IEMMISC_SET_EFL(pVCpu, fEfl);
2049
2050 /** @todo do we actually do this in real mode? */
2051 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2052 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2053
2054 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2055}
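/* Real-mode dispatch in short: the IVT entry for vector N is the 4-byte far
   pointer at idtr.pIdt + 4*N (offset in the low word, segment in the high
   word), and the 6-byte frame built above holds IP, CS and FLAGS from lower
   to higher stack addresses. E.g. with the IVT at linear 0, INT 21h fetches
   its handler from linear address 0x84. */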
2056
2057
2058/**
2059 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2060 *
2061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2062 * @param pSReg Pointer to the segment register.
2063 */
2064DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2065{
2066 pSReg->Sel = 0;
2067 pSReg->ValidSel = 0;
2068 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2069 {
2070 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes: */
2071 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2072 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2073 }
2074 else
2075 {
2076 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2077 /** @todo check this on AMD-V */
2078 pSReg->u64Base = 0;
2079 pSReg->u32Limit = 0;
2080 }
2081}
2082
2083
2084/**
2085 * Loads a segment selector during a task switch in V8086 mode.
2086 *
2087 * @param pSReg Pointer to the segment register.
2088 * @param uSel The selector value to load.
2089 */
2090DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2091{
2092 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2093 pSReg->Sel = uSel;
2094 pSReg->ValidSel = uSel;
2095 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2096 pSReg->u64Base = uSel << 4;
2097 pSReg->u32Limit = 0xffff;
2098 pSReg->Attr.u = 0xf3;
2099}
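/* E.g. loading 0x1234 here yields base 0x12340, limit 0xffff and attributes
   0xf3 (present, DPL=3, read/write data, accessed), i.e. the fixed shape of a
   V8086 segment. */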
2100
2101
2102/**
2103 * Loads a segment selector during a task switch in protected mode.
2104 *
2105 * In this task switch scenario, we would throw \#TS exceptions rather than
2106 * \#GPs.
2107 *
2108 * @returns VBox strict status code.
2109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2110 * @param pSReg Pointer to the segment register.
2111 * @param uSel The new selector value.
2112 *
2113 * @remarks This does _not_ handle CS or SS.
2114 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2115 */
2116static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2117{
2118 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2119
2120 /* Null data selector. */
2121 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2122 {
2123 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2125 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2126 return VINF_SUCCESS;
2127 }
2128
2129 /* Fetch the descriptor. */
2130 IEMSELDESC Desc;
2131 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2132 if (rcStrict != VINF_SUCCESS)
2133 {
2134 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2135 VBOXSTRICTRC_VAL(rcStrict)));
2136 return rcStrict;
2137 }
2138
2139 /* Must be a data segment or readable code segment. */
2140 if ( !Desc.Legacy.Gen.u1DescType
2141 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2142 {
2143 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2144 Desc.Legacy.Gen.u4Type));
2145 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2146 }
2147
2148 /* Check privileges for data segments and non-conforming code segments. */
2149 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2150 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2151 {
2152 /* The RPL and the new CPL must be less than or equal to the DPL. */
2153 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2154 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2155 {
2156 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2157 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2158 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2159 }
2160 }
2161
2162 /* Is it there? */
2163 if (!Desc.Legacy.Gen.u1Present)
2164 {
2165 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2166 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2167 }
2168
2169 /* The base and limit. */
2170 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2171 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2172
2173 /*
2174 * Ok, everything checked out fine. Now set the accessed bit before
2175 * committing the result into the registers.
2176 */
2177 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2178 {
2179 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2180 if (rcStrict != VINF_SUCCESS)
2181 return rcStrict;
2182 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2183 }
2184
2185 /* Commit */
2186 pSReg->Sel = uSel;
2187 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2188 pSReg->u32Limit = cbLimit;
2189 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2190 pSReg->ValidSel = uSel;
2191 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2192 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2193 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2194
2195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2196 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2197 return VINF_SUCCESS;
2198}
2199
2200
2201/**
2202 * Performs a task switch.
2203 *
2204 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2205 * caller is responsible for performing the necessary checks (like DPL, TSS
2206 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2207 * reference for JMP, CALL, IRET.
2208 *
2209 * If the task switch is due to a software interrupt or hardware exception,
2210 * the caller is responsible for validating the TSS selector and descriptor. See
2211 * Intel Instruction reference for INT n.
2212 *
2213 * @returns VBox strict status code.
2214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2215 * @param enmTaskSwitch The cause of the task switch.
2216 * @param uNextEip The EIP effective after the task switch.
2217 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2218 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2219 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2220 * @param SelTSS The TSS selector of the new task.
2221 * @param pNewDescTSS Pointer to the new TSS descriptor.
2222 */
2223VBOXSTRICTRC
2224iemTaskSwitch(PVMCPUCC pVCpu,
2225 IEMTASKSWITCH enmTaskSwitch,
2226 uint32_t uNextEip,
2227 uint32_t fFlags,
2228 uint16_t uErr,
2229 uint64_t uCr2,
2230 RTSEL SelTSS,
2231 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2232{
2233 Assert(!IEM_IS_REAL_MODE(pVCpu));
2234 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2235 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2236
2237 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2238 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2239 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2240 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2241 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2242
2243 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2244 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2245
2246 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2247 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2248
2249 /* Update CR2 in case it's a page-fault. */
2250 /** @todo This should probably be done much earlier in IEM/PGM. See
2251 * @bugref{5653#c49}. */
2252 if (fFlags & IEM_XCPT_FLAGS_CR2)
2253 pVCpu->cpum.GstCtx.cr2 = uCr2;
2254
2255 /*
2256 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2257 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2258 */
2259 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2260 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2261 if (uNewTSSLimit < uNewTSSLimitMin)
2262 {
2263 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2264 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2265 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2266 }
2267
2268 /*
2269 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2270 * The new TSS must have been read and validated (DPL, limits etc.) before a
2271 * task-switch VM-exit commences.
2272 *
2273 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2274 */
2275 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2276 {
2277 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2278 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2279 }
2280
2281 /*
2282 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2283 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2284 */
2285 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2286 {
2287 uint64_t const uExitInfo1 = SelTSS;
2288 uint64_t uExitInfo2 = uErr;
2289 switch (enmTaskSwitch)
2290 {
2291 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2292 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2293 default: break;
2294 }
2295 if (fFlags & IEM_XCPT_FLAGS_ERR)
2296 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2297 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2298 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2299
2300 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2301 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2302 RT_NOREF2(uExitInfo1, uExitInfo2);
2303 }
2304
2305 /*
2306 * Check the current TSS limit. The last written byte to the current TSS during the
2307 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2308 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2309 *
2310 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2311 * end up with smaller than "legal" TSS limits.
2312 */
2313 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2314 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2315 if (uCurTSSLimit < uCurTSSLimitMin)
2316 {
2317 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2318 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2319 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2320 }
2321
2322 /*
2323 * Verify that the new TSS can be accessed and map it. Map only the required contents
2324 * and not the entire TSS.
2325 */
2326 void *pvNewTSS;
2327 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2328 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2329 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
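    /* With the limits above, cbNewTSS comes to 0x68 bytes (sizeof(X86TSS32))
       for a 32-bit TSS and 0x2c bytes for a 16-bit one, i.e. only the
       architecturally required minimum is mapped rather than the whole
       segment up to its limit. */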
2330 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2331 * not perform correct translation if this happens. See Intel spec. 7.2.1
2332 * "Task-State Segment". */
2333 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2334 if (rcStrict != VINF_SUCCESS)
2335 {
2336 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2337 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2338 return rcStrict;
2339 }
2340
2341 /*
2342 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2343 */
2344 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2345 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2346 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2347 {
2348 PX86DESC pDescCurTSS;
2349 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2350 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2351 if (rcStrict != VINF_SUCCESS)
2352 {
2353 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2354 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2355 return rcStrict;
2356 }
2357
2358 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2359 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2360 if (rcStrict != VINF_SUCCESS)
2361 {
2362 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2363 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2364 return rcStrict;
2365 }
2366
2367 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2368 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2369 {
2370 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2371 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2372 fEFlags &= ~X86_EFL_NT;
2373 }
2374 }
2375
2376 /*
2377 * Save the CPU state into the current TSS.
2378 */
2379 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2380 if (GCPtrNewTSS == GCPtrCurTSS)
2381 {
2382 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2383 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2384 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2385 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2386 pVCpu->cpum.GstCtx.ldtr.Sel));
2387 }
2388 if (fIsNewTSS386)
2389 {
2390 /*
2391 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2392 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2393 */
2394 void *pvCurTSS32;
2395 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2396 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2397 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2398 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2399 if (rcStrict != VINF_SUCCESS)
2400 {
2401 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2402 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2403 return rcStrict;
2404 }
2405
2406 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2407 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2408 pCurTSS32->eip = uNextEip;
2409 pCurTSS32->eflags = fEFlags;
2410 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2411 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2412 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2413 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2414 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2415 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2416 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2417 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2418 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2419 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2420 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2421 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2422 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2423 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2424
2425 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2426 if (rcStrict != VINF_SUCCESS)
2427 {
2428 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2429 VBOXSTRICTRC_VAL(rcStrict)));
2430 return rcStrict;
2431 }
2432 }
2433 else
2434 {
2435 /*
2436 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2437 */
2438 void *pvCurTSS16;
2439 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2440 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2441 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2442 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2443 if (rcStrict != VINF_SUCCESS)
2444 {
2445 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2446 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2447 return rcStrict;
2448 }
2449
2450 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2451 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2452 pCurTSS16->ip = uNextEip;
2453 pCurTSS16->flags = (uint16_t)fEFlags;
2454 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2455 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2456 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2457 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2458 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2459 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2460 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2461 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2462 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2463 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2464 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2465 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2466
2467 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2468 if (rcStrict != VINF_SUCCESS)
2469 {
2470 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2471 VBOXSTRICTRC_VAL(rcStrict)));
2472 return rcStrict;
2473 }
2474 }
2475
2476 /*
2477 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2478 */
2479 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2480 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2481 {
2482 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2483 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2484 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2485 }
2486
2487 /*
2488 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2489 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2490 */
2491 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2492 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2493 bool fNewDebugTrap;
2494 if (fIsNewTSS386)
2495 {
2496 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2497 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2498 uNewEip = pNewTSS32->eip;
2499 uNewEflags = pNewTSS32->eflags;
2500 uNewEax = pNewTSS32->eax;
2501 uNewEcx = pNewTSS32->ecx;
2502 uNewEdx = pNewTSS32->edx;
2503 uNewEbx = pNewTSS32->ebx;
2504 uNewEsp = pNewTSS32->esp;
2505 uNewEbp = pNewTSS32->ebp;
2506 uNewEsi = pNewTSS32->esi;
2507 uNewEdi = pNewTSS32->edi;
2508 uNewES = pNewTSS32->es;
2509 uNewCS = pNewTSS32->cs;
2510 uNewSS = pNewTSS32->ss;
2511 uNewDS = pNewTSS32->ds;
2512 uNewFS = pNewTSS32->fs;
2513 uNewGS = pNewTSS32->gs;
2514 uNewLdt = pNewTSS32->selLdt;
2515 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2516 }
2517 else
2518 {
2519 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2520 uNewCr3 = 0;
2521 uNewEip = pNewTSS16->ip;
2522 uNewEflags = pNewTSS16->flags;
2523 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2524 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2525 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2526 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2527 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2528 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2529 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2530 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2531 uNewES = pNewTSS16->es;
2532 uNewCS = pNewTSS16->cs;
2533 uNewSS = pNewTSS16->ss;
2534 uNewDS = pNewTSS16->ds;
2535 uNewFS = 0;
2536 uNewGS = 0;
2537 uNewLdt = pNewTSS16->selLdt;
2538 fNewDebugTrap = false;
2539 }
2540
2541 if (GCPtrNewTSS == GCPtrCurTSS)
2542 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2543 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2544
2545 /*
2546 * We're done accessing the new TSS.
2547 */
2548 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2549 if (rcStrict != VINF_SUCCESS)
2550 {
2551 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2552 return rcStrict;
2553 }
2554
2555 /*
2556 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2557 */
2558 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2559 {
2560 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2561 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2562 if (rcStrict != VINF_SUCCESS)
2563 {
2564 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2565 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2566 return rcStrict;
2567 }
2568
2569 /* Check that the descriptor indicates the new TSS is available (not busy). */
2570 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2571 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2572 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2573
2574 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2575 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2576 if (rcStrict != VINF_SUCCESS)
2577 {
2578 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2579 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2580 return rcStrict;
2581 }
2582 }
2583
2584 /*
2585 * From this point on, we're technically in the new task. We will defer exceptions
2586 * until the completion of the task switch but before executing any instructions in the new task.
2587 */
2588 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2589 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2590 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2591 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2592 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2593 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2594 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2595
2596 /* Set the busy bit in TR. */
2597 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2598
2599 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2600 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2601 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2602 {
2603 uNewEflags |= X86_EFL_NT;
2604 }
2605
2606 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2607 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2608 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2609
2610 pVCpu->cpum.GstCtx.eip = uNewEip;
2611 pVCpu->cpum.GstCtx.eax = uNewEax;
2612 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2613 pVCpu->cpum.GstCtx.edx = uNewEdx;
2614 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2615 pVCpu->cpum.GstCtx.esp = uNewEsp;
2616 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2617 pVCpu->cpum.GstCtx.esi = uNewEsi;
2618 pVCpu->cpum.GstCtx.edi = uNewEdi;
2619
2620 uNewEflags &= X86_EFL_LIVE_MASK;
2621 uNewEflags |= X86_EFL_RA1_MASK;
2622 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2623
2624 /*
2625 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2626 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2627 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2628 */
2629 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2630 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2631
2632 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2633 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2634
2635 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2636 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2637
2638 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2639 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2640
2641 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2642 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2643
2644 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2645 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2646 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2647
2648 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2649 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2650 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2651 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2652
2653 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2654 {
2655 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2656 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2657 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2658 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2659 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2660 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2661 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2662 }
2663
2664 /*
2665 * Switch CR3 for the new task.
2666 */
2667 if ( fIsNewTSS386
2668 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2669 {
2670 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2671 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2672 AssertRCSuccessReturn(rc, rc);
2673
2674 /* Inform PGM. */
2675 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2676 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2677 AssertRCReturn(rc, rc);
2678 /* ignore informational status codes */
2679
2680 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2681 }
2682
2683 /*
2684 * Switch LDTR for the new task.
2685 */
2686 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2687 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2688 else
2689 {
2690 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2691
2692 IEMSELDESC DescNewLdt;
2693 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2694 if (rcStrict != VINF_SUCCESS)
2695 {
2696 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2697 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2698 return rcStrict;
2699 }
2700 if ( !DescNewLdt.Legacy.Gen.u1Present
2701 || DescNewLdt.Legacy.Gen.u1DescType
2702 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2703 {
2704 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2705 uNewLdt, DescNewLdt.Legacy.u));
2706 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2707 }
2708
2709 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2710 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2711 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2712 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2713 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2714 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2715 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2716 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2717 }
2718
2719 IEMSELDESC DescSS;
2720 if (IEM_IS_V86_MODE(pVCpu))
2721 {
2722 pVCpu->iem.s.uCpl = 3;
2723 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2724 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2725 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2726 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2727 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2728 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2729
2730 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2731 DescSS.Legacy.u = 0;
2732 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2733 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2734 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2735 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2736 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2737 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2738 DescSS.Legacy.Gen.u2Dpl = 3;
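        /* The faked DescSS mirrors what iemHlpLoadSelectorInV86Mode just put
           into SS (read/write data, DPL=3, 64KB limit) so that the
           expand-down and limit checks for the error code push further down
           keep working. */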
2739 }
2740 else
2741 {
2742 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2743
2744 /*
2745 * Load the stack segment for the new task.
2746 */
2747 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2748 {
2749 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2750 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2751 }
2752
2753 /* Fetch the descriptor. */
2754 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2755 if (rcStrict != VINF_SUCCESS)
2756 {
2757 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2758 VBOXSTRICTRC_VAL(rcStrict)));
2759 return rcStrict;
2760 }
2761
2762 /* SS must be a data segment and writable. */
2763 if ( !DescSS.Legacy.Gen.u1DescType
2764 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2765 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2766 {
2767 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2768 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2769 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2770 }
2771
2772 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2773 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2774 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2775 {
2776 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2777 uNewCpl));
2778 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2779 }
2780
2781 /* Is it there? */
2782 if (!DescSS.Legacy.Gen.u1Present)
2783 {
2784 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2785 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2786 }
2787
2788 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2789 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2790
2791 /* Set the accessed bit before committing the result into SS. */
2792 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2793 {
2794 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2795 if (rcStrict != VINF_SUCCESS)
2796 return rcStrict;
2797 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2798 }
2799
2800 /* Commit SS. */
2801 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2802 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2803 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2804 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2805 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2806 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2808
2809 /* CPL has changed, update IEM before loading rest of segments. */
2810 pVCpu->iem.s.uCpl = uNewCpl;
2811
2812 /*
2813 * Load the data segments for the new task.
2814 */
2815 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2816 if (rcStrict != VINF_SUCCESS)
2817 return rcStrict;
2818 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2819 if (rcStrict != VINF_SUCCESS)
2820 return rcStrict;
2821 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2822 if (rcStrict != VINF_SUCCESS)
2823 return rcStrict;
2824 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2825 if (rcStrict != VINF_SUCCESS)
2826 return rcStrict;
2827
2828 /*
2829 * Load the code segment for the new task.
2830 */
2831 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2832 {
2833 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2834 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2835 }
2836
2837 /* Fetch the descriptor. */
2838 IEMSELDESC DescCS;
2839 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2840 if (rcStrict != VINF_SUCCESS)
2841 {
2842 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2843 return rcStrict;
2844 }
2845
2846 /* CS must be a code segment. */
2847 if ( !DescCS.Legacy.Gen.u1DescType
2848 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2849 {
2850 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2851 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2852 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2853 }
2854
2855 /* For conforming CS, DPL must be less than or equal to the RPL. */
2856 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2857 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2858 {
2859 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2860 DescCS.Legacy.Gen.u2Dpl));
2861 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2862 }
2863
2864 /* For non-conforming CS, DPL must match RPL. */
2865 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2866 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2867 {
2868 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2869 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2870 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2871 }
2872
2873 /* Is it there? */
2874 if (!DescCS.Legacy.Gen.u1Present)
2875 {
2876 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2877 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2878 }
2879
2880 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2881 u64Base = X86DESC_BASE(&DescCS.Legacy);
2882
2883 /* Set the accessed bit before committing the result into CS. */
2884 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2885 {
2886 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2887 if (rcStrict != VINF_SUCCESS)
2888 return rcStrict;
2889 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2890 }
2891
2892 /* Commit CS. */
2893 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2894 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2895 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2896 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2897 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2898 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2899 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2900 }
2901
2902 /** @todo Debug trap. */
2903 if (fIsNewTSS386 && fNewDebugTrap)
2904 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2905
2906 /*
2907 * Construct the error code masks based on what caused this task switch.
2908 * See Intel Instruction reference for INT.
2909 */
2910 uint16_t uExt;
2911 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2912 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2913 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2914 {
2915 uExt = 1;
2916 }
2917 else
2918 uExt = 0;
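    /* uExt becomes the EXT bit (bit 0) of the #SS/#GP error codes raised
       below: set when the task switch stems from a hardware interrupt,
       exception or ICEBP, clear for software INTn/INT3/INTO. */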
2919
2920 /*
2921 * Push any error code on to the new stack.
2922 */
2923 if (fFlags & IEM_XCPT_FLAGS_ERR)
2924 {
2925 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2926 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2927 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2928
2929 /* Check that there is sufficient space on the stack. */
2930 /** @todo Factor out segment limit checking for normal/expand down segments
2931 * into a separate function. */
2932 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2933 {
2934 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2935 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2936 {
2937 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2938 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2939 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2940 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2941 }
2942 }
2943 else
2944 {
2945 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2946 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2947 {
2948 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2949 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2950 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2951 }
2952 }
2953
2954
2955 if (fIsNewTSS386)
2956 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2957 else
2958 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2959 if (rcStrict != VINF_SUCCESS)
2960 {
2961 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2962 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2963 return rcStrict;
2964 }
2965 }
2966
2967 /* Check the new EIP against the new CS limit. */
2968 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2969 {
2970 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2971 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2972 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2973 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2974 }
2975
2976 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2977 pVCpu->cpum.GstCtx.ss.Sel));
2978 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2979}
2980
2981
2982/**
2983 * Implements exceptions and interrupts for protected mode.
2984 *
2985 * @returns VBox strict status code.
2986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2987 * @param cbInstr The number of bytes to offset rIP by in the return
2988 * address.
2989 * @param u8Vector The interrupt / exception vector number.
2990 * @param fFlags The flags.
2991 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2992 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2993 */
2994static VBOXSTRICTRC
2995iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2996 uint8_t cbInstr,
2997 uint8_t u8Vector,
2998 uint32_t fFlags,
2999 uint16_t uErr,
3000 uint64_t uCr2) RT_NOEXCEPT
3001{
3002 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3003
3004 /*
3005 * Read the IDT entry.
3006 */
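/* Each protected-mode IDT descriptor is 8 bytes, so the last byte of this vector's entry sits at offset u8Vector*8 + 7 and must be within IDTR.limit. */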
3007 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3008 {
3009 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3010 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3011 }
3012 X86DESC Idte;
3013 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3014 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3015 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3016 {
3017 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3018 return rcStrict;
3019 }
3020 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3021 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3022 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3023
3024 /*
3025 * Check the descriptor type, DPL and such.
3026 * ASSUMES this is done in the same order as described for call-gate calls.
3027 */
3028 if (Idte.Gate.u1DescType)
3029 {
3030 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3031 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3032 }
3033 bool fTaskGate = false;
3034 uint8_t f32BitGate = true;
3035 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
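/* TF, NT, RF and VM are always cleared on entry to the handler; interrupt gates additionally clear IF (added in the switch below). */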
3036 switch (Idte.Gate.u4Type)
3037 {
3038 case X86_SEL_TYPE_SYS_UNDEFINED:
3039 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3040 case X86_SEL_TYPE_SYS_LDT:
3041 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3042 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3043 case X86_SEL_TYPE_SYS_UNDEFINED2:
3044 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3045 case X86_SEL_TYPE_SYS_UNDEFINED3:
3046 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3047 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3048 case X86_SEL_TYPE_SYS_UNDEFINED4:
3049 {
3050 /** @todo check what actually happens when the type is wrong...
3051 * esp. call gates. */
3052 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3053 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3054 }
3055
3056 case X86_SEL_TYPE_SYS_286_INT_GATE:
3057 f32BitGate = false;
3058 RT_FALL_THRU();
3059 case X86_SEL_TYPE_SYS_386_INT_GATE:
3060 fEflToClear |= X86_EFL_IF;
3061 break;
3062
3063 case X86_SEL_TYPE_SYS_TASK_GATE:
3064 fTaskGate = true;
3065#ifndef IEM_IMPLEMENTS_TASKSWITCH
3066 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3067#endif
3068 break;
3069
3070 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3071 f32BitGate = false;
RT_FALL_THRU();
3072 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3073 break;
3074
3075 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3076 }
3077
3078 /* Check DPL against CPL if applicable. */
3079 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3080 {
3081 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3082 {
3083 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3084 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3085 }
3086 }
3087
3088 /* Is it there? */
3089 if (!Idte.Gate.u1Present)
3090 {
3091 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3092 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3093 }
3094
3095 /* Is it a task-gate? */
3096 if (fTaskGate)
3097 {
3098 /*
3099 * Construct the error code masks based on what caused this task switch.
3100 * See Intel Instruction reference for INT.
3101 */
3102 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3103 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3104 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3105 RTSEL SelTSS = Idte.Gate.u16Sel;
3106
3107 /*
3108 * Fetch the TSS descriptor in the GDT.
3109 */
3110 IEMSELDESC DescTSS;
3111 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3112 if (rcStrict != VINF_SUCCESS)
3113 {
3114 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3115 VBOXSTRICTRC_VAL(rcStrict)));
3116 return rcStrict;
3117 }
3118
3119 /* The TSS descriptor must be a system segment and be available (not busy). */
3120 if ( DescTSS.Legacy.Gen.u1DescType
3121 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3122 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3123 {
3124 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3125 u8Vector, SelTSS, DescTSS.Legacy.au64));
3126 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3127 }
3128
3129 /* The TSS must be present. */
3130 if (!DescTSS.Legacy.Gen.u1Present)
3131 {
3132 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3133 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3134 }
3135
3136 /* Do the actual task switch. */
3137 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3138 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3139 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3140 }
3141
3142 /* A null CS is bad. */
3143 RTSEL NewCS = Idte.Gate.u16Sel;
3144 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3145 {
3146 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3147 return iemRaiseGeneralProtectionFault0(pVCpu);
3148 }
3149
3150 /* Fetch the descriptor for the new CS. */
3151 IEMSELDESC DescCS;
3152 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3153 if (rcStrict != VINF_SUCCESS)
3154 {
3155 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3156 return rcStrict;
3157 }
3158
3159 /* Must be a code segment. */
3160 if (!DescCS.Legacy.Gen.u1DescType)
3161 {
3162 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3163 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3164 }
3165 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3166 {
3167 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3168 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3169 }
3170
3171 /* Don't allow lowering the privilege level. */
3172 /** @todo Does the lowering of privileges apply to software interrupts
3173 * only? This has bearings on the more-privileged or
3174 * same-privilege stack behavior further down. A testcase would
3175 * be nice. */
3176 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3177 {
3178 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3179 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3180 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3181 }
3182
3183 /* Make sure the selector is present. */
3184 if (!DescCS.Legacy.Gen.u1Present)
3185 {
3186 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3187 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3188 }
3189
3190 /* Check the new EIP against the new CS limit. */
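/* 286 gates carry only a 16-bit offset; 386 gates combine the low and high offset words into a 32-bit EIP. */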
3191 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3192 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3193 ? Idte.Gate.u16OffsetLow
3194 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3195 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3196 if (uNewEip > cbLimitCS)
3197 {
3198 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3199 u8Vector, uNewEip, cbLimitCS, NewCS));
3200 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3201 }
3202 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3203
3204 /* Calc the flag image to push. */
3205 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3206 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3207 fEfl &= ~X86_EFL_RF;
3208 else
3209 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3210
3211 /* From V8086 mode only go to CPL 0. */
3212 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3213 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3214 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3215 {
3216 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3217 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3218 }
3219
3220 /*
3221 * If the privilege level changes, we need to get a new stack from the TSS.
3222 * This in turns means validating the new SS and ESP...
3223 */
3224 if (uNewCpl != pVCpu->iem.s.uCpl)
3225 {
3226 RTSEL NewSS;
3227 uint32_t uNewEsp;
3228 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3229 if (rcStrict != VINF_SUCCESS)
3230 return rcStrict;
3231
3232 IEMSELDESC DescSS;
3233 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3234 if (rcStrict != VINF_SUCCESS)
3235 return rcStrict;
3236 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3237 if (!DescSS.Legacy.Gen.u1DefBig)
3238 {
3239 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3240 uNewEsp = (uint16_t)uNewEsp;
3241 }
3242
3243 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3244
3245 /* Check that there is sufficient space for the stack frame. */
3246 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
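/* Frame size: EIP, CS, EFLAGS, old ESP and old SS (plus the optional error code) give 10 or 12 bytes through a 16-bit gate and twice that through a 32-bit gate; a V86-mode interruption additionally pushes ES, DS, FS and GS. */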
3247 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3248 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3249 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3250
3251 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3252 {
3253 if ( uNewEsp - 1 > cbLimitSS
3254 || uNewEsp < cbStackFrame)
3255 {
3256 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3257 u8Vector, NewSS, uNewEsp, cbStackFrame));
3258 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3259 }
3260 }
3261 else
3262 {
3263 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3264 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3265 {
3266 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3267 u8Vector, NewSS, uNewEsp, cbStackFrame));
3268 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3269 }
3270 }
3271
3272 /*
3273 * Start making changes.
3274 */
3275
3276 /* Set the new CPL so that stack accesses use it. */
3277 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3278 pVCpu->iem.s.uCpl = uNewCpl;
3279
3280 /* Create the stack frame. */
3281 RTPTRUNION uStackFrame;
3282 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3283 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3284 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3285 if (rcStrict != VINF_SUCCESS)
3286 return rcStrict;
3287 void * const pvStackFrame = uStackFrame.pv;
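/* The frame is mapped at its lowest address (the new ESP) and filled from low to high: error code (if any), EIP, CS, EFLAGS, old ESP, old SS, and for V86 mode also ES, DS, FS and GS at the top. */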
3288 if (f32BitGate)
3289 {
3290 if (fFlags & IEM_XCPT_FLAGS_ERR)
3291 *uStackFrame.pu32++ = uErr;
3292 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3293 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3294 uStackFrame.pu32[2] = fEfl;
3295 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3296 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3297 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3298 if (fEfl & X86_EFL_VM)
3299 {
3300 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3301 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3302 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3303 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3304 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3305 }
3306 }
3307 else
3308 {
3309 if (fFlags & IEM_XCPT_FLAGS_ERR)
3310 *uStackFrame.pu16++ = uErr;
3311 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3312 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3313 uStackFrame.pu16[2] = fEfl;
3314 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3315 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3316 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3317 if (fEfl & X86_EFL_VM)
3318 {
3319 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3320 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3321 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3322 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3323 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3324 }
3325 }
3326 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3327 if (rcStrict != VINF_SUCCESS)
3328 return rcStrict;
3329
3330 /* Mark the selectors 'accessed' (hope this is the correct time). */
3331 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3332 * after pushing the stack frame? (Write protect the gdt + stack to
3333 * find out.) */
3334 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3335 {
3336 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3337 if (rcStrict != VINF_SUCCESS)
3338 return rcStrict;
3339 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3340 }
3341
3342 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3343 {
3344 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3345 if (rcStrict != VINF_SUCCESS)
3346 return rcStrict;
3347 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3348 }
3349
3350 /*
3351 * Start committing the register changes (joins with the DPL=CPL branch).
3352 */
3353 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3354 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3355 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3356 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3357 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3358 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3359 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3360 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3361 * SP is loaded).
3362 * Need to check the other combinations too:
3363 * - 16-bit TSS, 32-bit handler
3364 * - 32-bit TSS, 16-bit handler */
3365 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3366 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3367 else
3368 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3369
3370 if (fEfl & X86_EFL_VM)
3371 {
3372 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3373 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3374 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3375 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3376 }
3377 }
3378 /*
3379 * Same privilege, no stack change and smaller stack frame.
3380 */
3381 else
3382 {
3383 uint64_t uNewRsp;
3384 RTPTRUNION uStackFrame;
3385 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
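/* Same-privilege frame: only EIP, CS and EFLAGS (plus the optional error code) are pushed - 6 or 8 bytes through a 16-bit gate, 12 or 16 through a 32-bit one. */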
3386 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3387 if (rcStrict != VINF_SUCCESS)
3388 return rcStrict;
3389 void * const pvStackFrame = uStackFrame.pv;
3390
3391 if (f32BitGate)
3392 {
3393 if (fFlags & IEM_XCPT_FLAGS_ERR)
3394 *uStackFrame.pu32++ = uErr;
3395 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3396 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3397 uStackFrame.pu32[2] = fEfl;
3398 }
3399 else
3400 {
3401 if (fFlags & IEM_XCPT_FLAGS_ERR)
3402 *uStackFrame.pu16++ = uErr;
3403 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3404 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3405 uStackFrame.pu16[2] = fEfl;
3406 }
3407 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3408 if (rcStrict != VINF_SUCCESS)
3409 return rcStrict;
3410
3411 /* Mark the CS selector as 'accessed'. */
3412 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3413 {
3414 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3415 if (rcStrict != VINF_SUCCESS)
3416 return rcStrict;
3417 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3418 }
3419
3420 /*
3421 * Start committing the register changes (joins with the other branch).
3422 */
3423 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3424 }
3425
3426 /* ... register committing continues. */
3427 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3428 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3429 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3430 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3431 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3432 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3433
3434 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3435 fEfl &= ~fEflToClear;
3436 IEMMISC_SET_EFL(pVCpu, fEfl);
3437
3438 if (fFlags & IEM_XCPT_FLAGS_CR2)
3439 pVCpu->cpum.GstCtx.cr2 = uCr2;
3440
3441 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3442 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3443
3444 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3445}
3446
3447
3448/**
3449 * Implements exceptions and interrupts for long mode.
3450 *
3451 * @returns VBox strict status code.
3452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3453 * @param cbInstr The number of bytes to offset rIP by in the return
3454 * address.
3455 * @param u8Vector The interrupt / exception vector number.
3456 * @param fFlags The flags.
3457 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3458 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3459 */
3460static VBOXSTRICTRC
3461iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3462 uint8_t cbInstr,
3463 uint8_t u8Vector,
3464 uint32_t fFlags,
3465 uint16_t uErr,
3466 uint64_t uCr2) RT_NOEXCEPT
3467{
3468 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3469
3470 /*
3471 * Read the IDT entry.
3472 */
3473 uint16_t offIdt = (uint16_t)u8Vector << 4;
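/* Long-mode IDT entries are 16 bytes each, hence the shift by 4. */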
3474 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3475 {
3476 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3477 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3478 }
3479 X86DESC64 Idte;
3480#ifdef _MSC_VER /* Shut up silly compiler warning. */
3481 Idte.au64[0] = 0;
3482 Idte.au64[1] = 0;
3483#endif
3484 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3485 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3486 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3487 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3488 {
3489 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3490 return rcStrict;
3491 }
3492 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3493 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3494 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3495
3496 /*
3497 * Check the descriptor type, DPL and such.
3498 * ASSUMES this is done in the same order as described for call-gate calls.
3499 */
3500 if (Idte.Gate.u1DescType)
3501 {
3502 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3503 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3504 }
3505 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3506 switch (Idte.Gate.u4Type)
3507 {
3508 case AMD64_SEL_TYPE_SYS_INT_GATE:
3509 fEflToClear |= X86_EFL_IF;
3510 break;
3511 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3512 break;
3513
3514 default:
3515 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3516 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3517 }
3518
3519 /* Check DPL against CPL if applicable. */
3520 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3521 {
3522 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3523 {
3524 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3525 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3526 }
3527 }
3528
3529 /* Is it there? */
3530 if (!Idte.Gate.u1Present)
3531 {
3532 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3533 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3534 }
3535
3536 /* A null CS is bad. */
3537 RTSEL NewCS = Idte.Gate.u16Sel;
3538 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3539 {
3540 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3541 return iemRaiseGeneralProtectionFault0(pVCpu);
3542 }
3543
3544 /* Fetch the descriptor for the new CS. */
3545 IEMSELDESC DescCS;
3546 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3547 if (rcStrict != VINF_SUCCESS)
3548 {
3549 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3550 return rcStrict;
3551 }
3552
3553 /* Must be a 64-bit code segment. */
3554 if (!DescCS.Long.Gen.u1DescType)
3555 {
3556 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3557 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3558 }
3559 if ( !DescCS.Long.Gen.u1Long
3560 || DescCS.Long.Gen.u1DefBig
3561 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3562 {
3563 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3564 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3565 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3566 }
3567
3568 /* Don't allow lowering the privilege level. For non-conforming CS
3569 selectors, the CS.DPL sets the privilege level the trap/interrupt
3570 handler runs at. For conforming CS selectors, the CPL remains
3571 unchanged, but the CS.DPL must be <= CPL. */
3572 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3573 * when CPU in Ring-0. Result \#GP? */
3574 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3575 {
3576 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3577 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3578 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3579 }
3580
3581
3582 /* Make sure the selector is present. */
3583 if (!DescCS.Legacy.Gen.u1Present)
3584 {
3585 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3586 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3587 }
3588
3589 /* Check that the new RIP is canonical. */
3590 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3591 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3592 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3593 if (!IEM_IS_CANONICAL(uNewRip))
3594 {
3595 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3596 return iemRaiseGeneralProtectionFault0(pVCpu);
3597 }
3598
3599 /*
3600 * If the privilege level changes or if the IST isn't zero, we need to get
3601 * a new stack from the TSS.
3602 */
3603 uint64_t uNewRsp;
3604 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3605 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3606 if ( uNewCpl != pVCpu->iem.s.uCpl
3607 || Idte.Gate.u3IST != 0)
3608 {
3609 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3610 if (rcStrict != VINF_SUCCESS)
3611 return rcStrict;
3612 }
3613 else
3614 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3615 uNewRsp &= ~(uint64_t)0xf;
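/* In 64-bit mode the CPU aligns the stack pointer on a 16-byte boundary before pushing the interrupt frame. */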
3616
3617 /*
3618 * Calc the flag image to push.
3619 */
3620 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3621 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3622 fEfl &= ~X86_EFL_RF;
3623 else
3624 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3625
3626 /*
3627 * Start making changes.
3628 */
3629 /* Set the new CPL so that stack accesses use it. */
3630 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3631 pVCpu->iem.s.uCpl = uNewCpl;
3632
3633 /* Create the stack frame. */
3634 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
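/* 64-bit frame, from the new (lowest) RSP upwards: [error code if IEM_XCPT_FLAGS_ERR], RIP, CS, RFLAGS, old RSP, old SS - i.e. 5 qwords plus the optional error code. */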
3635 RTPTRUNION uStackFrame;
3636 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3637 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3638 if (rcStrict != VINF_SUCCESS)
3639 return rcStrict;
3640 void * const pvStackFrame = uStackFrame.pv;
3641
3642 if (fFlags & IEM_XCPT_FLAGS_ERR)
3643 *uStackFrame.pu64++ = uErr;
3644 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3645 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3646 uStackFrame.pu64[2] = fEfl;
3647 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3648 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3649 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3650 if (rcStrict != VINF_SUCCESS)
3651 return rcStrict;
3652
3653 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3654 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3655 * after pushing the stack frame? (Write protect the gdt + stack to
3656 * find out.) */
3657 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3658 {
3659 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3660 if (rcStrict != VINF_SUCCESS)
3661 return rcStrict;
3662 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3663 }
3664
3665 /*
3666 * Start committing the register changes.
3667 */
3668 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3669 * hidden registers when interrupting 32-bit or 16-bit code! */
3670 if (uNewCpl != uOldCpl)
3671 {
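/* On a long-mode stack switch SS is loaded with a NULL selector whose RPL equals the new CPL; the hidden parts are set up as a flat (but unusable) segment. */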
3672 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3673 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3674 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3675 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3676 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3677 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3678 }
3679 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3680 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3681 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3682 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3683 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3684 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3685 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3686 pVCpu->cpum.GstCtx.rip = uNewRip;
3687
3688 fEfl &= ~fEflToClear;
3689 IEMMISC_SET_EFL(pVCpu, fEfl);
3690
3691 if (fFlags & IEM_XCPT_FLAGS_CR2)
3692 pVCpu->cpum.GstCtx.cr2 = uCr2;
3693
3694 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3695 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3696
3697 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3698}
3699
3700
3701/**
3702 * Implements exceptions and interrupts.
3703 *
3704 * All exceptions and interrupts go through this function!
3705 *
3706 * @returns VBox strict status code.
3707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3708 * @param cbInstr The number of bytes to offset rIP by in the return
3709 * address.
3710 * @param u8Vector The interrupt / exception vector number.
3711 * @param fFlags The flags.
3712 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3713 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3714 */
3715VBOXSTRICTRC
3716iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3717 uint8_t cbInstr,
3718 uint8_t u8Vector,
3719 uint32_t fFlags,
3720 uint16_t uErr,
3721 uint64_t uCr2) RT_NOEXCEPT
3722{
3723 /*
3724 * Get all the state that we might need here.
3725 */
3726 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3727 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3728
3729#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3730 /*
3731 * Flush prefetch buffer
3732 */
3733 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3734#endif
3735
3736 /*
3737 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3738 */
3739 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3740 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3741 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3742 | IEM_XCPT_FLAGS_BP_INSTR
3743 | IEM_XCPT_FLAGS_ICEBP_INSTR
3744 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3745 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3746 {
3747 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3748 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3749 u8Vector = X86_XCPT_GP;
3750 uErr = 0;
3751 }
3752#ifdef DBGFTRACE_ENABLED
3753 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3754 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3755 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3756#endif
3757
3758 /*
3759 * Evaluate whether NMI blocking should be in effect.
3760 * Normally, NMI blocking is in effect whenever we inject an NMI.
3761 */
3762 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3763 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3764
3765#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3766 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3767 {
3768 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3769 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3770 return rcStrict0;
3771
3772 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3773 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3774 {
3775 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3776 fBlockNmi = false;
3777 }
3778 }
3779#endif
3780
3781#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3782 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3783 {
3784 /*
3785 * If the event is being injected as part of VMRUN, it isn't subject to event
3786 * intercepts in the nested-guest. However, secondary exceptions that occur
3787 * during injection of any event -are- subject to exception intercepts.
3788 *
3789 * See AMD spec. 15.20 "Event Injection".
3790 */
3791 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3792 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3793 else
3794 {
3795 /*
3796 * Check and handle if the event being raised is intercepted.
3797 */
3798 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3799 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3800 return rcStrict0;
3801 }
3802 }
3803#endif
3804
3805 /*
3806 * Set NMI blocking if necessary.
3807 */
3808 if (fBlockNmi)
3809 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3810
3811 /*
3812 * Do recursion accounting.
3813 */
3814 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3815 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3816 if (pVCpu->iem.s.cXcptRecursions == 0)
3817 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3818 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3819 else
3820 {
3821 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3822 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3823 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3824
3825 if (pVCpu->iem.s.cXcptRecursions >= 4)
3826 {
3827#ifdef DEBUG_bird
3828 AssertFailed();
3829#endif
3830 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3831 }
3832
3833 /*
3834 * Evaluate the sequence of recurring events.
3835 */
3836 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3837 NULL /* pXcptRaiseInfo */);
3838 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3839 { /* likely */ }
3840 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3841 {
3842 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3843 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3844 u8Vector = X86_XCPT_DF;
3845 uErr = 0;
3846#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3847 /* VMX nested-guest #DF intercept needs to be checked here. */
3848 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3849 {
3850 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3851 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3852 return rcStrict0;
3853 }
3854#endif
3855 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3856 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3857 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3858 }
3859 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3860 {
3861 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3862 return iemInitiateCpuShutdown(pVCpu);
3863 }
3864 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3865 {
3866 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3867 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3868 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3869 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3870 return VERR_EM_GUEST_CPU_HANG;
3871 }
3872 else
3873 {
3874 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3875 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3876 return VERR_IEM_IPE_9;
3877 }
3878
3879 /*
3880 * The 'EXT' bit is set when an exception occurs during delivery of an external
3881 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3882 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3883 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set.
3884 *
3885 * [1] - Intel spec. 6.13 "Error Code"
3886 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3887 * [3] - Intel Instruction reference for INT n.
3888 */
3889 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3890 && (fFlags & IEM_XCPT_FLAGS_ERR)
3891 && u8Vector != X86_XCPT_PF
3892 && u8Vector != X86_XCPT_DF)
3893 {
3894 uErr |= X86_TRAP_ERR_EXTERNAL;
3895 }
3896 }
3897
3898 pVCpu->iem.s.cXcptRecursions++;
3899 pVCpu->iem.s.uCurXcpt = u8Vector;
3900 pVCpu->iem.s.fCurXcpt = fFlags;
3901 pVCpu->iem.s.uCurXcptErr = uErr;
3902 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3903
3904 /*
3905 * Extensive logging.
3906 */
3907#if defined(LOG_ENABLED) && defined(IN_RING3)
3908 if (LogIs3Enabled())
3909 {
3910 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3911 PVM pVM = pVCpu->CTX_SUFF(pVM);
3912 char szRegs[4096];
3913 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3914 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3915 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3916 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3917 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3918 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3919 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3920 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3921 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3922 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3923 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3924 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3925 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3926 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3927 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3928 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3929 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3930 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3931 " efer=%016VR{efer}\n"
3932 " pat=%016VR{pat}\n"
3933 " sf_mask=%016VR{sf_mask}\n"
3934 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3935 " lstar=%016VR{lstar}\n"
3936 " star=%016VR{star} cstar=%016VR{cstar}\n"
3937 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3938 );
3939
3940 char szInstr[256];
3941 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3942 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3943 szInstr, sizeof(szInstr), NULL);
3944 Log3(("%s%s\n", szRegs, szInstr));
3945 }
3946#endif /* LOG_ENABLED */
3947
3948 /*
3949 * Stats.
3950 */
3951 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3952 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
3953 else if (u8Vector <= X86_XCPT_LAST)
3954 {
3955 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
3956 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
3957 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
3958 }
3959
3960 /*
3961 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
3962 * to ensure that a stale TLB or paging cache entry will only cause one
3963 * spurious #PF.
3964 */
3965 if ( u8Vector == X86_XCPT_PF
3966 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
3967 IEMTlbInvalidatePage(pVCpu, uCr2);
3968
3969 /*
3970 * Call the mode specific worker function.
3971 */
3972 VBOXSTRICTRC rcStrict;
3973 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3974 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3975 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3976 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3977 else
3978 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3979
3980 /* Flush the prefetch buffer. */
3981#ifdef IEM_WITH_CODE_TLB
3982 pVCpu->iem.s.pbInstrBuf = NULL;
3983#else
3984 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3985#endif
3986
3987 /*
3988 * Unwind.
3989 */
3990 pVCpu->iem.s.cXcptRecursions--;
3991 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3992 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3993 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3994 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3995 pVCpu->iem.s.cXcptRecursions + 1));
3996 return rcStrict;
3997}
3998
3999#ifdef IEM_WITH_SETJMP
4000/**
4001 * See iemRaiseXcptOrInt. Will not return.
4002 */
4003DECL_NO_RETURN(void)
4004iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4005 uint8_t cbInstr,
4006 uint8_t u8Vector,
4007 uint32_t fFlags,
4008 uint16_t uErr,
4009 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4010{
4011 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4012 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4013}
4014#endif
4015
4016
4017/** \#DE - 00. */
4018VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4019{
4020 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4021}
4022
4023
4024/** \#DB - 01.
4025 * @note This automatically clears DR7.GD.
4026VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4027{
4028 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4029 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4030 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4031}
4032
4033
4034/** \#BR - 05. */
4035VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4036{
4037 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4038}
4039
4040
4041/** \#UD - 06. */
4042VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4043{
4044 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4045}
4046
4047
4048/** \#NM - 07. */
4049VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4050{
4051 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4052}
4053
4054
4055/** \#TS(err) - 0a. */
4056VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4057{
4058 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4059}
4060
4061
4062/** \#TS(tr) - 0a. */
4063VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4064{
4065 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4066 pVCpu->cpum.GstCtx.tr.Sel, 0);
4067}
4068
4069
4070/** \#TS(0) - 0a. */
4071VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4072{
4073 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4074 0, 0);
4075}
4076
4077
4078/** \#TS(err) - 0a. */
4079VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4080{
4081 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4082 uSel & X86_SEL_MASK_OFF_RPL, 0);
4083}
4084
4085
4086/** \#NP(err) - 0b. */
4087VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4088{
4089 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4090}
4091
4092
4093/** \#NP(sel) - 0b. */
4094VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4095{
4096 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4097 uSel & ~X86_SEL_RPL, 0);
4098}
4099
4100
4101/** \#SS(seg) - 0c. */
4102VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4103{
4104 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4105 uSel & ~X86_SEL_RPL, 0);
4106}
4107
4108
4109/** \#SS(err) - 0c. */
4110VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4111{
4112 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4113}
4114
4115
4116/** \#GP(n) - 0d. */
4117VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4118{
4119 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4120}
4121
4122
4123/** \#GP(0) - 0d. */
4124VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4125{
4126 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4127}
4128
4129#ifdef IEM_WITH_SETJMP
4130/** \#GP(0) - 0d. */
4131DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4132{
4133 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4134}
4135#endif
4136
4137
4138/** \#GP(sel) - 0d. */
4139VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4140{
4141 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4142 Sel & ~X86_SEL_RPL, 0);
4143}
4144
4145
4146/** \#GP(0) - 0d. */
4147VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4148{
4149 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4150}
4151
4152
4153/** \#GP(sel) - 0d. */
4154VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4155{
4156 NOREF(iSegReg); NOREF(fAccess);
4157 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4158 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4159}
4160
4161#ifdef IEM_WITH_SETJMP
4162/** \#GP(sel) - 0d, longjmp. */
4163DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4164{
4165 NOREF(iSegReg); NOREF(fAccess);
4166 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4167 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4168}
4169#endif
4170
4171/** \#GP(sel) - 0d. */
4172VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4173{
4174 NOREF(Sel);
4175 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4176}
4177
4178#ifdef IEM_WITH_SETJMP
4179/** \#GP(sel) - 0d, longjmp. */
4180DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4181{
4182 NOREF(Sel);
4183 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4184}
4185#endif
4186
4187
4188/** \#GP(sel) - 0d. */
4189VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4190{
4191 NOREF(iSegReg); NOREF(fAccess);
4192 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4193}
4194
4195#ifdef IEM_WITH_SETJMP
4196/** \#GP(sel) - 0d, longjmp. */
4197DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4198{
4199 NOREF(iSegReg); NOREF(fAccess);
4200 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4201}
4202#endif
4203
4204
4205/** \#PF(n) - 0e. */
4206VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4207{
4208 uint16_t uErr;
4209 switch (rc)
4210 {
4211 case VERR_PAGE_NOT_PRESENT:
4212 case VERR_PAGE_TABLE_NOT_PRESENT:
4213 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4214 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4215 uErr = 0;
4216 break;
4217
4218 default:
4219 AssertMsgFailed(("%Rrc\n", rc));
4220 RT_FALL_THRU();
4221 case VERR_ACCESS_DENIED:
4222 uErr = X86_TRAP_PF_P;
4223 break;
4224
4225 /** @todo reserved */
4226 }
4227
4228 if (pVCpu->iem.s.uCpl == 3)
4229 uErr |= X86_TRAP_PF_US;
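/* U/S bit: the access originated in user mode (CPL 3). */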
4230
4231 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4232 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4233 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4234 uErr |= X86_TRAP_PF_ID;
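/* The instruction-fetch (I/D) bit is only reported for code accesses when NX paging is in effect (CR4.PAE + EFER.NXE). */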
4235
4236#if 0 /* This is so much non-sense, really. Why was it done like that? */
4237 /* Note! RW access callers reporting a WRITE protection fault, will clear
4238 the READ flag before calling. So, read-modify-write accesses (RW)
4239 can safely be reported as READ faults. */
4240 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4241 uErr |= X86_TRAP_PF_RW;
4242#else
4243 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4244 {
4245 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4246 /// (regardless of outcome of the comparison in the latter case).
4247 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4248 uErr |= X86_TRAP_PF_RW;
4249 }
4250#endif
4251
4252 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4253 of the memory operand rather than at the start of it. (Not sure what
4254 happens if it crosses a page boundary.) The current heuristic for
4255 this is to report the #PF for the last byte if the access is more than
4256 64 bytes. This is probably not correct, but we can work that out later,
4257 main objective now is to get FXSAVE to work like for real hardware and
4258 make bs3-cpu-basic2 work. */
4259 if (cbAccess <= 64)
4260 { /* likely*/ }
4261 else
4262 GCPtrWhere += cbAccess - 1;
4263
4264 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4265 uErr, GCPtrWhere);
4266}
4267
4268#ifdef IEM_WITH_SETJMP
4269/** \#PF(n) - 0e, longjmp. */
4270DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4271 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4272{
4273 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4274}
4275#endif
4276
4277
4278/** \#MF(0) - 10. */
4279VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4280{
4281 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4282 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4283
4284 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4285 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4286 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4287}
4288
4289
4290/** \#AC(0) - 11. */
4291VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4292{
4293 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4294}
4295
4296#ifdef IEM_WITH_SETJMP
4297/** \#AC(0) - 11, longjmp. */
4298DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4299{
4300 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4301}
4302#endif
4303
4304
4305/** \#XF(0)/\#XM(0) - 19. */
4306VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4307{
4308 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4309}
4310
4311
4312/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4313IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4314{
4315 NOREF(cbInstr);
4316 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4317}
4318
4319
4320/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4321IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4322{
4323 NOREF(cbInstr);
4324 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4325}
4326
4327
4328/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4329IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4330{
4331 NOREF(cbInstr);
4332 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4333}
4334
4335
4336/** @} */
4337
4338/** @name Common opcode decoders.
4339 * @{
4340 */
4341//#include <iprt/mem.h>
4342
4343/**
4344 * Used to add extra details about a stub case.
4345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4346 */
4347void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4348{
4349#if defined(LOG_ENABLED) && defined(IN_RING3)
4350 PVM pVM = pVCpu->CTX_SUFF(pVM);
4351 char szRegs[4096];
4352 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4353 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4354 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4355 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4356 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4357 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4358 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4359 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4360 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4361 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4362 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4363 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4364 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4365 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4366 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4367 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4368 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4369 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4370 " efer=%016VR{efer}\n"
4371 " pat=%016VR{pat}\n"
4372 " sf_mask=%016VR{sf_mask}\n"
4373 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4374 " lstar=%016VR{lstar}\n"
4375 " star=%016VR{star} cstar=%016VR{cstar}\n"
4376 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4377 );
4378
4379 char szInstr[256];
4380 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4381 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4382 szInstr, sizeof(szInstr), NULL);
4383
4384 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4385#else
4386 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4387#endif
4388}
4389
4390/** @} */
4391
4392
4393
4394/** @name Register Access.
4395 * @{
4396 */
4397
4398/**
4399 * Adds a 8-bit signed jump offset to RIP/EIP/IP.
4400 *
4401 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4402 * segment limit.
4403 *
4404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4405 * @param cbInstr Instruction size.
4406 * @param offNextInstr The offset of the next instruction.
4407 * @param enmEffOpSize Effective operand size.
4408 */
4409VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4410 IEMMODE enmEffOpSize) RT_NOEXCEPT
4411{
4412 switch (enmEffOpSize)
4413 {
4414 case IEMMODE_16BIT:
4415 {
4416 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4417 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4418 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no CS limit checks in 64-bit mode */))
4419 pVCpu->cpum.GstCtx.rip = uNewIp;
4420 else
4421 return iemRaiseGeneralProtectionFault0(pVCpu);
4422 break;
4423 }
4424
4425 case IEMMODE_32BIT:
4426 {
4427 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4428 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4429
4430 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4431 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4432 pVCpu->cpum.GstCtx.rip = uNewEip;
4433 else
4434 return iemRaiseGeneralProtectionFault0(pVCpu);
4435 break;
4436 }
4437
4438 case IEMMODE_64BIT:
4439 {
4440 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4441
4442 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4443 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4444 pVCpu->cpum.GstCtx.rip = uNewRip;
4445 else
4446 return iemRaiseGeneralProtectionFault0(pVCpu);
4447 break;
4448 }
4449
4450 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4451 }
4452
4453#ifndef IEM_WITH_CODE_TLB
4454 /* Flush the prefetch buffer. */
4455 pVCpu->iem.s.cbOpcode = cbInstr;
4456#endif
4457
4458 /*
4459 * Clear RF and finish the instruction (maybe raise #DB).
4460 */
4461 return iemRegFinishClearingRF(pVCpu);
4462}
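/*
 * Informal worked example for the 16-bit case above: a two-byte short JMP at
 * ip=0xFFFD with offNextInstr=+0x10 gives uNewIp = (0xFFFD + 2 + 0x10) & 0xFFFF
 * = 0x000F, i.e. the addition wraps at 64 KB before the CS limit check is made.
 */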
4463
4464
4465/**
4466 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4467 *
4468 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4469 * segment limit.
4470 *
4471 * @returns Strict VBox status code.
4472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4473 * @param cbInstr Instruction size.
4474 * @param offNextInstr The offset of the next instruction.
4475 */
4476VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4477{
4478 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4479
4480 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4481 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4482 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no limit checking in 64-bit mode */))
4483 pVCpu->cpum.GstCtx.rip = uNewIp;
4484 else
4485 return iemRaiseGeneralProtectionFault0(pVCpu);
4486
4487#ifndef IEM_WITH_CODE_TLB
4488 /* Flush the prefetch buffer. */
4489 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4490#endif
4491
4492 /*
4493 * Clear RF and finish the instruction (maybe raise #DB).
4494 */
4495 return iemRegFinishClearingRF(pVCpu);
4496}
4497
4498
4499/**
4500 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4501 *
4502 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4503 * segment limit.
4504 *
4505 * @returns Strict VBox status code.
4506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4507 * @param cbInstr Instruction size.
4508 * @param offNextInstr The offset of the next instruction.
4509 * @param enmEffOpSize Effective operand size.
4510 */
4511VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4512 IEMMODE enmEffOpSize) RT_NOEXCEPT
4513{
4514 if (enmEffOpSize == IEMMODE_32BIT)
4515 {
4516 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4517
4518 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4519 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4520 pVCpu->cpum.GstCtx.rip = uNewEip;
4521 else
4522 return iemRaiseGeneralProtectionFault0(pVCpu);
4523 }
4524 else
4525 {
4526 Assert(enmEffOpSize == IEMMODE_64BIT);
4527
4528 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4529 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4530 pVCpu->cpum.GstCtx.rip = uNewRip;
4531 else
4532 return iemRaiseGeneralProtectionFault0(pVCpu);
4533 }
4534
4535#ifndef IEM_WITH_CODE_TLB
4536 /* Flush the prefetch buffer. */
4537 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4538#endif
4539
4540 /*
4541 * Clear RF and finish the instruction (maybe raise #DB).
4542 */
4543 return iemRegFinishClearingRF(pVCpu);
4544}
4545
4546
4547/**
4548 * Performs a near jump to the specified address.
4549 *
4550 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4551 *
4552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4553 * @param uNewIp The new IP value.
4554 */
4555VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4556{
4557 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4558 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no limit checks in 64-bit mode */))
4559 pVCpu->cpum.GstCtx.rip = uNewIp;
4560 else
4561 return iemRaiseGeneralProtectionFault0(pVCpu);
4562 /** @todo Test 16-bit jump in 64-bit mode. */
4563
4564#ifndef IEM_WITH_CODE_TLB
4565 /* Flush the prefetch buffer. */
4566 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4567#endif
4568
4569 /*
4570 * Clear RF and finish the instruction (maybe raise #DB).
4571 */
4572 return iemRegFinishClearingRF(pVCpu);
4573}
4574
4575
4576/**
4577 * Performs a near jump to the specified address.
4578 *
4579 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4580 *
4581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4582 * @param uNewEip The new EIP value.
4583 */
4584VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4585{
4586 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4587 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4588
4589 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4590 pVCpu->cpum.GstCtx.rip = uNewEip;
4591 else
4592 return iemRaiseGeneralProtectionFault0(pVCpu);
4593
4594#ifndef IEM_WITH_CODE_TLB
4595 /* Flush the prefetch buffer. */
4596 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4597#endif
4598
4599 /*
4600 * Clear RF and finish the instruction (maybe raise #DB).
4601 */
4602 return iemRegFinishClearingRF(pVCpu);
4603}
4604
4605
4606/**
4607 * Performs a near jump to the specified address.
4608 *
4609 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4610 * segment limit.
4611 *
4612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4613 * @param uNewRip The new RIP value.
4614 */
4615VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4616{
4617 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4618
4619 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4620 pVCpu->cpum.GstCtx.rip = uNewRip;
4621 else
4622 return iemRaiseGeneralProtectionFault0(pVCpu);
4623
4624#ifndef IEM_WITH_CODE_TLB
4625 /* Flush the prefetch buffer. */
4626 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4627#endif
4628
4629 /*
4630 * Clear RF and finish the instruction (maybe raise #DB).
4631 */
4632 return iemRegFinishClearingRF(pVCpu);
4633}
4634
4635/** @} */
4636
4637
4638/** @name FPU access and helpers.
4639 *
4640 * @{
4641 */
4642
4643/**
4644 * Updates the x87.DS and FPUDP registers.
4645 *
4646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4647 * @param pFpuCtx The FPU context.
4648 * @param iEffSeg The effective segment register.
4649 * @param GCPtrEff The effective address relative to @a iEffSeg.
4650 */
4651DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4652{
4653 RTSEL sel;
4654 switch (iEffSeg)
4655 {
4656 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4657 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4658 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4659 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4660 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4661 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4662 default:
4663 AssertMsgFailed(("%d\n", iEffSeg));
4664 sel = pVCpu->cpum.GstCtx.ds.Sel;
4665 }
4666 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4667 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4668 {
4669 pFpuCtx->DS = 0;
4670 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4671 }
4672 else if (!IEM_IS_LONG_MODE(pVCpu))
4673 {
4674 pFpuCtx->DS = sel;
4675 pFpuCtx->FPUDP = GCPtrEff;
4676 }
4677 else
4678 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4679}
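/*
 * Informal example of the real/V86 branch above: with DS=0x1234 and an
 * effective offset of 0x0010, FPUDP ends up holding the linear address
 * 0x0010 + (0x1234 << 4) = 0x12350 while the DS selector field is zeroed.
 */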
4680
4681
4682/**
4683 * Rotates the stack registers in the push direction.
4684 *
4685 * @param pFpuCtx The FPU context.
4686 * @remarks This is a complete waste of time, but fxsave stores the registers in
4687 * stack order.
4688 */
4689DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4690{
4691 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4692 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4693 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4694 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4695 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4696 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4697 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4698 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4699 pFpuCtx->aRegs[0].r80 = r80Tmp;
4700}
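/*
 * Illustration of the rotation above: aRegs[i] mirrors ST(i), so a push must
 * shift every register up by one slot.  Callers (e.g. iemFpuMaybePushResult
 * below) first write the new value into aRegs[7] and then rotate, so it comes
 * out as the new ST(0) in aRegs[0] while the old ST(0)..ST(6) become ST(1)..ST(7).
 */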
4701
4702
4703/**
4704 * Rotates the stack registers in the pop direction.
4705 *
4706 * @param pFpuCtx The FPU context.
4707 * @remarks This is a complete waste of time, but fxsave stores the registers in
4708 * stack order.
4709 */
4710DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4711{
4712 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4713 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4714 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4715 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4716 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4717 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4718 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4719 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4720 pFpuCtx->aRegs[7].r80 = r80Tmp;
4721}
4722
4723
4724/**
4725 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4726 * exception prevents it.
4727 *
4728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4729 * @param pResult The FPU operation result to push.
4730 * @param pFpuCtx The FPU context.
4731 */
4732static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4733{
4734 /* Update FSW and bail if there are pending exceptions afterwards. */
4735 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4736 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4737 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4738 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4739 {
4740 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4741 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4742 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4743 pFpuCtx->FSW = fFsw;
4744 return;
4745 }
4746
4747 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4748 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4749 {
4750 /* All is fine, push the actual value. */
4751 pFpuCtx->FTW |= RT_BIT(iNewTop);
4752 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4753 }
4754 else if (pFpuCtx->FCW & X86_FCW_IM)
4755 {
4756 /* Masked stack overflow, push QNaN. */
4757 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4758 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4759 }
4760 else
4761 {
4762 /* Raise stack overflow, don't push anything. */
4763 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4764 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4765 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4766 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4767 return;
4768 }
4769
4770 fFsw &= ~X86_FSW_TOP_MASK;
4771 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4772 pFpuCtx->FSW = fFsw;
4773
4774 iemFpuRotateStackPush(pFpuCtx);
4775 RT_NOREF(pVCpu);
4776}
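/*
 * Note on the TOP arithmetic above: adding 7 modulo 8 is the same as
 * subtracting 1, so iNewTop = (TOP + 7) & 7 is the decremented top-of-stack
 * index a push needs; e.g. TOP=0 yields iNewTop=7.
 */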
4777
4778
4779/**
4780 * Stores a result in a FPU register and updates the FSW and FTW.
4781 *
4782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4783 * @param pFpuCtx The FPU context.
4784 * @param pResult The result to store.
4785 * @param iStReg Which FPU register to store it in.
4786 */
4787static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4788{
4789 Assert(iStReg < 8);
4790 uint16_t fNewFsw = pFpuCtx->FSW;
4791 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4792 fNewFsw &= ~X86_FSW_C_MASK;
4793 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4794 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4795 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4796 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4797 pFpuCtx->FSW = fNewFsw;
4798 pFpuCtx->FTW |= RT_BIT(iReg);
4799 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4800 RT_NOREF(pVCpu);
4801}
4802
4803
4804/**
4805 * Only updates the FPU status word (FSW) with the result of the current
4806 * instruction.
4807 *
4808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4809 * @param pFpuCtx The FPU context.
4810 * @param u16FSW The FSW output of the current instruction.
4811 */
4812static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4813{
4814 uint16_t fNewFsw = pFpuCtx->FSW;
4815 fNewFsw &= ~X86_FSW_C_MASK;
4816 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4817 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4818 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4819 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4820 pFpuCtx->FSW = fNewFsw;
4821 RT_NOREF(pVCpu);
4822}
4823
4824
4825/**
4826 * Pops one item off the FPU stack if no pending exception prevents it.
4827 *
4828 * @param pFpuCtx The FPU context.
4829 */
4830static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4831{
4832 /* Check pending exceptions. */
4833 uint16_t uFSW = pFpuCtx->FSW;
4834 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4835 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4836 return;
4837
4838 /* TOP++ (popping increments TOP; the +9 below is +1 modulo 8). */
4839 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4840 uFSW &= ~X86_FSW_TOP_MASK;
4841 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4842 pFpuCtx->FSW = uFSW;
4843
4844 /* Mark the previous ST0 as empty. */
4845 iOldTop >>= X86_FSW_TOP_SHIFT;
4846 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4847
4848 /* Rotate the registers. */
4849 iemFpuRotateStackPop(pFpuCtx);
4850}
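/*
 * Mirror image of the push case: adding 9 modulo 8 increments TOP by one, so
 * e.g. TOP=7 wraps to 0.  The addition is done on the in-place (shifted) TOP
 * field, hence the UINT16_C(9) << X86_FSW_TOP_SHIFT above.
 */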
4851
4852
4853/**
4854 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4855 *
4856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4857 * @param pResult The FPU operation result to push.
4858 */
4859void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4860{
4861 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4862 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4863 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4864}
4865
4866
4867/**
4868 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4869 * and sets FPUDP and FPUDS.
4870 *
4871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4872 * @param pResult The FPU operation result to push.
4873 * @param iEffSeg The effective segment register.
4874 * @param GCPtrEff The effective address relative to @a iEffSeg.
4875 */
4876void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4877{
4878 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4879 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4880 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4881 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4882}
4883
4884
4885/**
4886 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4887 * unless a pending exception prevents it.
4888 *
4889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4890 * @param pResult The FPU operation result to store and push.
4891 */
4892void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4893{
4894 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4895 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4896
4897 /* Update FSW and bail if there are pending exceptions afterwards. */
4898 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4899 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4900 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4901 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4902 {
4903 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4904 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4905 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4906 pFpuCtx->FSW = fFsw;
4907 return;
4908 }
4909
4910 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4911 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4912 {
4913 /* All is fine, push the actual value. */
4914 pFpuCtx->FTW |= RT_BIT(iNewTop);
4915 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4916 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4917 }
4918 else if (pFpuCtx->FCW & X86_FCW_IM)
4919 {
4920 /* Masked stack overflow, push QNaN. */
4921 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4922 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4923 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4924 }
4925 else
4926 {
4927 /* Raise stack overflow, don't push anything. */
4928 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4929 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4930 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4931 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4932 return;
4933 }
4934
4935 fFsw &= ~X86_FSW_TOP_MASK;
4936 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4937 pFpuCtx->FSW = fFsw;
4938
4939 iemFpuRotateStackPush(pFpuCtx);
4940}
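/*
 * Ordering note for the two-result push above: r80Result1 is written to
 * aRegs[0] and r80Result2 to aRegs[7] before the rotation, so afterwards
 * r80Result2 is the new ST(0) and r80Result1 sits in ST(1), i.e. in the slot
 * of the ST(0) value that was replaced.
 */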
4941
4942
4943/**
4944 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4945 * FOP.
4946 *
4947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4948 * @param pResult The result to store.
4949 * @param iStReg Which FPU register to store it in.
4950 */
4951void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4952{
4953 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4954 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4955 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4956}
4957
4958
4959/**
4960 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4961 * FOP, and then pops the stack.
4962 *
4963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4964 * @param pResult The result to store.
4965 * @param iStReg Which FPU register to store it in.
4966 */
4967void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4968{
4969 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4970 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4971 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4972 iemFpuMaybePopOne(pFpuCtx);
4973}
4974
4975
4976/**
4977 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4978 * FPUDP, and FPUDS.
4979 *
4980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4981 * @param pResult The result to store.
4982 * @param iStReg Which FPU register to store it in.
4983 * @param iEffSeg The effective memory operand selector register.
4984 * @param GCPtrEff The effective memory operand offset.
4985 */
4986void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4987 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4988{
4989 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4990 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4991 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4992 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4993}
4994
4995
4996/**
4997 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4998 * FPUDP, and FPUDS, and then pops the stack.
4999 *
5000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5001 * @param pResult The result to store.
5002 * @param iStReg Which FPU register to store it in.
5003 * @param iEffSeg The effective memory operand selector register.
5004 * @param GCPtrEff The effective memory operand offset.
5005 */
5006void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5007 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5008{
5009 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5010 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5011 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5012 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5013 iemFpuMaybePopOne(pFpuCtx);
5014}
5015
5016
5017/**
5018 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5019 *
5020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5021 */
5022void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
5023{
5024 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5025 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5026}
5027
5028
5029/**
5030 * Updates the FSW, FOP, FPUIP, and FPUCS.
5031 *
5032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5033 * @param u16FSW The FSW from the current instruction.
5034 */
5035void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5036{
5037 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5038 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5039 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5040}
5041
5042
5043/**
5044 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5045 *
5046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5047 * @param u16FSW The FSW from the current instruction.
5048 */
5049void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5050{
5051 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5052 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5053 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5054 iemFpuMaybePopOne(pFpuCtx);
5055}
5056
5057
5058/**
5059 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5060 *
5061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5062 * @param u16FSW The FSW from the current instruction.
5063 * @param iEffSeg The effective memory operand selector register.
5064 * @param GCPtrEff The effective memory operand offset.
5065 */
5066void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5067{
5068 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5069 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5070 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5071 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5072}
5073
5074
5075/**
5076 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5077 *
5078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5079 * @param u16FSW The FSW from the current instruction.
5080 */
5081void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5082{
5083 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5084 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5085 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5086 iemFpuMaybePopOne(pFpuCtx);
5087 iemFpuMaybePopOne(pFpuCtx);
5088}
5089
5090
5091/**
5092 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5093 *
5094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5095 * @param u16FSW The FSW from the current instruction.
5096 * @param iEffSeg The effective memory operand selector register.
5097 * @param GCPtrEff The effective memory operand offset.
5098 */
5099void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5100{
5101 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5102 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5103 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5104 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5105 iemFpuMaybePopOne(pFpuCtx);
5106}
5107
5108
5109/**
5110 * Worker routine for raising an FPU stack underflow exception.
5111 *
5112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5113 * @param pFpuCtx The FPU context.
5114 * @param iStReg The stack register being accessed.
5115 */
5116static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5117{
5118 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5119 if (pFpuCtx->FCW & X86_FCW_IM)
5120 {
5121 /* Masked underflow. */
5122 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5123 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5124 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5125 if (iStReg != UINT8_MAX)
5126 {
5127 pFpuCtx->FTW |= RT_BIT(iReg);
5128 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5129 }
5130 }
5131 else
5132 {
5133 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5134 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5135 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5136 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5137 }
5138 RT_NOREF(pVCpu);
5139}
5140
5141
5142/**
5143 * Raises a FPU stack underflow exception.
5144 *
5145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5146 * @param iStReg The destination register that should be loaded
5147 * with QNaN if \#IS is not masked. Specify
5148 * UINT8_MAX if none (like for fcom).
5149 */
5150void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5151{
5152 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5153 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5154 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5155}
5156
5157
5158void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5159{
5160 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5161 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5162 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5163 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5164}
5165
5166
5167void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5168{
5169 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5170 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5171 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5172 iemFpuMaybePopOne(pFpuCtx);
5173}
5174
5175
5176void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5177{
5178 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5179 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5180 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5181 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5182 iemFpuMaybePopOne(pFpuCtx);
5183}
5184
5185
5186void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5187{
5188 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5189 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5190 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5191 iemFpuMaybePopOne(pFpuCtx);
5192 iemFpuMaybePopOne(pFpuCtx);
5193}
5194
5195
5196void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5197{
5198 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5199 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5200
5201 if (pFpuCtx->FCW & X86_FCW_IM)
5202 {
5203 /* Masked underflow - Push QNaN. */
5204 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5205 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5206 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5207 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5208 pFpuCtx->FTW |= RT_BIT(iNewTop);
5209 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5210 iemFpuRotateStackPush(pFpuCtx);
5211 }
5212 else
5213 {
5214 /* Exception pending - don't change TOP or the register stack. */
5215 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5216 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5217 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5218 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5219 }
5220}
5221
5222
5223void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5224{
5225 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5226 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5227
5228 if (pFpuCtx->FCW & X86_FCW_IM)
5229 {
5230 /* Masked underflow - Push QNaN. */
5231 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5232 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5233 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5234 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5235 pFpuCtx->FTW |= RT_BIT(iNewTop);
5236 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5237 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5238 iemFpuRotateStackPush(pFpuCtx);
5239 }
5240 else
5241 {
5242 /* Exception pending - don't change TOP or the register stack. */
5243 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5244 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5245 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5246 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5247 }
5248}
5249
5250
5251/**
5252 * Worker routine for raising an FPU stack overflow exception on a push.
5253 *
5254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5255 * @param pFpuCtx The FPU context.
5256 */
5257static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5258{
5259 if (pFpuCtx->FCW & X86_FCW_IM)
5260 {
5261 /* Masked overflow. */
5262 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5263 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5264 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5265 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5266 pFpuCtx->FTW |= RT_BIT(iNewTop);
5267 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5268 iemFpuRotateStackPush(pFpuCtx);
5269 }
5270 else
5271 {
5272 /* Exception pending - don't change TOP or the register stack. */
5273 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5274 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5275 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5276 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5277 }
5278 RT_NOREF(pVCpu);
5279}
5280
5281
5282/**
5283 * Raises a FPU stack overflow exception on a push.
5284 *
5285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5286 */
5287void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5288{
5289 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5290 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5291 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5292}
5293
5294
5295/**
5296 * Raises a FPU stack overflow exception on a push with a memory operand.
5297 *
5298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5299 * @param iEffSeg The effective memory operand selector register.
5300 * @param GCPtrEff The effective memory operand offset.
5301 */
5302void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5303{
5304 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5305 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5306 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5307 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5308}
5309
5310/** @} */
5311
5312
5313/** @name SSE+AVX SIMD access and helpers.
5314 *
5315 * @{
5316 */
5317/**
5318 * Stores a result in a SIMD XMM register, updates the MXCSR.
5319 *
5320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5321 * @param pResult The result to store.
5322 * @param iXmmReg Which SIMD XMM register to store the result in.
5323 */
5324void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5325{
5326 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5327 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5328
5329 /* The result is only updated if there is no unmasked exception pending. */
5330 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5331 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5332 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5333}
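/*
 * Informal example of the commit test above: if the operation only raised the
 * precision flag (X86_MXCSR_PE) and the precision mask (X86_MXCSR_PM) is set,
 * the masked-off term is zero and the XMM register is updated; with PM clear
 * the unmasked #P stays pending and the destination is left untouched.
 */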
5334
5335
5336/**
5337 * Updates the MXCSR.
5338 *
5339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5340 * @param fMxcsr The new MXCSR value.
5341 */
5342void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5343{
5344 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5345 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5346}
5347/** @} */
5348
5349
5350/** @name Memory access.
5351 *
5352 * @{
5353 */
5354
5355
5356/**
5357 * Updates the IEMCPU::cbWritten counter if applicable.
5358 *
5359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5360 * @param fAccess The access being accounted for.
5361 * @param cbMem The access size.
5362 */
5363DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5364{
5365 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5366 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5367 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5368}
5369
5370
5371/**
5372 * Applies the segment limit, base and attributes.
5373 *
5374 * This may raise a \#GP or \#SS.
5375 *
5376 * @returns VBox strict status code.
5377 *
5378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5379 * @param fAccess The kind of access which is being performed.
5380 * @param iSegReg The index of the segment register to apply.
5381 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5382 * TSS, ++).
5383 * @param cbMem The access size.
5384 * @param pGCPtrMem Pointer to the guest memory address to apply
5385 * segmentation to. Input and output parameter.
5386 */
5387VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5388{
5389 if (iSegReg == UINT8_MAX)
5390 return VINF_SUCCESS;
5391
5392 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5393 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5394 switch (pVCpu->iem.s.enmCpuMode)
5395 {
5396 case IEMMODE_16BIT:
5397 case IEMMODE_32BIT:
5398 {
5399 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5400 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5401
5402 if ( pSel->Attr.n.u1Present
5403 && !pSel->Attr.n.u1Unusable)
5404 {
5405 Assert(pSel->Attr.n.u1DescType);
5406 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5407 {
5408 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5409 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5410 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5411
5412 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5413 {
5414 /** @todo CPL check. */
5415 }
5416
5417 /*
5418 * There are two kinds of data selectors, normal and expand down.
5419 */
5420 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5421 {
5422 if ( GCPtrFirst32 > pSel->u32Limit
5423 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5424 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5425 }
5426 else
5427 {
5428 /*
5429 * The upper boundary is defined by the B bit, not the G bit!
5430 */
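/* I.e. for an expand-down segment the valid offsets are strictly above the
   limit and up to 0xffff or 0xffffffff depending on the D/B bit, hence the
   'limit + 1' lower bound and the B-bit dependent upper bound checked below. */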
5431 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5432 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5433 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5434 }
5435 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5436 }
5437 else
5438 {
5439 /*
5440 * A code selector can usually be used to read through it; writing is
5441 * only permitted in real and V8086 mode.
5442 */
5443 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5444 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5445 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5446 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5447 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5448
5449 if ( GCPtrFirst32 > pSel->u32Limit
5450 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5451 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5452
5453 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5454 {
5455 /** @todo CPL check. */
5456 }
5457
5458 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5459 }
5460 }
5461 else
5462 return iemRaiseGeneralProtectionFault0(pVCpu);
5463 return VINF_SUCCESS;
5464 }
5465
5466 case IEMMODE_64BIT:
5467 {
5468 RTGCPTR GCPtrMem = *pGCPtrMem;
5469 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5470 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5471
5472 Assert(cbMem >= 1);
5473 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5474 return VINF_SUCCESS;
5475 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5476 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5477 return iemRaiseGeneralProtectionFault0(pVCpu);
5478 }
5479
5480 default:
5481 AssertFailedReturn(VERR_IEM_IPE_7);
5482 }
5483}
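/*
 * Informal usage sketch (hypothetical caller, for illustration only): a data
 * read would pass its effective address in and out through pGCPtrMem, e.g.
 *
 *     RTGCPTR GCPtrMem = GCPtrEffectiveOffset; // hypothetical effective offset
 *     VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS,
 *                                                sizeof(uint32_t), &GCPtrMem);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // GCPtrMem now holds the segment-adjusted address, ready for
 *     // iemMemPageTranslateAndCheckAccess() below.
 */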
5484
5485
5486/**
5487 * Translates a virtual address to a physical address and checks if we
5488 * can access the page as specified.
5489 *
5490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5491 * @param GCPtrMem The virtual address.
5492 * @param cbAccess The access size, for raising \#PF correctly for
5493 * FXSAVE and such.
5494 * @param fAccess The intended access.
5495 * @param pGCPhysMem Where to return the physical address.
5496 */
5497VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5498 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5499{
5500 /** @todo Need a different PGM interface here. We're currently using
5501 * generic / REM interfaces. This won't cut it for R0. */
5502 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5503 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5504 * here. */
5505 PGMPTWALK Walk;
5506 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5507 if (RT_FAILURE(rc))
5508 {
5509 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5510 /** @todo Check unassigned memory in unpaged mode. */
5511 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5512#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5513 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5514 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5515#endif
5516 *pGCPhysMem = NIL_RTGCPHYS;
5517 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5518 }
5519
5520 /* If the page is writable, user-accessible and does not have the no-exec bit set, all
5521 access is allowed. Otherwise we'll have to check more carefully... */
5522 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5523 {
5524 /* Write to read only memory? */
5525 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5526 && !(Walk.fEffective & X86_PTE_RW)
5527 && ( ( pVCpu->iem.s.uCpl == 3
5528 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5529 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5530 {
5531 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5532 *pGCPhysMem = NIL_RTGCPHYS;
5533#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5534 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5535 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5536#endif
5537 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5538 }
5539
5540 /* Kernel memory accessed by userland? */
5541 if ( !(Walk.fEffective & X86_PTE_US)
5542 && pVCpu->iem.s.uCpl == 3
5543 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5544 {
5545 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5546 *pGCPhysMem = NIL_RTGCPHYS;
5547#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5548 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5549 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5550#endif
5551 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5552 }
5553
5554 /* Executing non-executable memory? */
5555 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5556 && (Walk.fEffective & X86_PTE_PAE_NX)
5557 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5558 {
5559 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5560 *pGCPhysMem = NIL_RTGCPHYS;
5561#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5562 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5563 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5564#endif
5565 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5566 VERR_ACCESS_DENIED);
5567 }
5568 }
5569
5570 /*
5571 * Set the dirty / access flags.
5572 * ASSUMES this is set when the address is translated rather than on commit...
5573 */
5574 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5575 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5576 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5577 {
5578 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5579 AssertRC(rc2);
5580 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5581 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5582 }
5583
5584 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5585 *pGCPhysMem = GCPhys;
5586 return VINF_SUCCESS;
5587}
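/*
 * Informal example of the checks above: a ring-3 write (fAccess containing
 * IEM_ACCESS_TYPE_WRITE, no IEM_ACCESS_WHAT_SYS) to a present page with
 * X86_PTE_RW clear takes the read-only branch and raises #PF; the same write
 * from ring 0 only faults if CR0.WP is set.  On success the A bit (and the D
 * bit for writes) is set via PGMGstModifyPage and the page offset of GCPtrMem
 * is merged into the returned physical address.
 */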
5588
5589
5590/**
5591 * Looks up a memory mapping entry.
5592 *
5593 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5595 * @param pvMem The memory address.
5596 * @param fAccess The access type and origin (IEM_ACCESS_XXX) to match.
5597 */
5598DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5599{
5600 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5601 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5602 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5603 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5604 return 0;
5605 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5606 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5607 return 1;
5608 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5609 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5610 return 2;
5611 return VERR_NOT_FOUND;
5612}
5613
5614
5615/**
5616 * Finds a free memmap entry when using iNextMapping doesn't work.
5617 *
5618 * @returns Memory mapping index, 1024 on failure.
5619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5620 */
5621static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5622{
5623 /*
5624 * The easy case.
5625 */
5626 if (pVCpu->iem.s.cActiveMappings == 0)
5627 {
5628 pVCpu->iem.s.iNextMapping = 1;
5629 return 0;
5630 }
5631
5632 /* There should be enough mappings for all instructions. */
5633 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5634
5635 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5636 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5637 return i;
5638
5639 AssertFailedReturn(1024);
5640}
5641
5642
5643/**
5644 * Commits a bounce buffer that needs writing back and unmaps it.
5645 *
5646 * @returns Strict VBox status code.
5647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5648 * @param iMemMap The index of the buffer to commit.
5649 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5650 * Always false in ring-3, obviously.
5651 */
5652static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5653{
5654 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5655 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5656#ifdef IN_RING3
5657 Assert(!fPostponeFail);
5658 RT_NOREF_PV(fPostponeFail);
5659#endif
5660
5661 /*
5662 * Do the writing.
5663 */
5664 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5665 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5666 {
5667 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5668 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5669 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5670 if (!pVCpu->iem.s.fBypassHandlers)
5671 {
5672 /*
5673 * Carefully and efficiently dealing with access handler return
5674 * codes makes this a little bloated.
5675 */
5676 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5677 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5678 pbBuf,
5679 cbFirst,
5680 PGMACCESSORIGIN_IEM);
5681 if (rcStrict == VINF_SUCCESS)
5682 {
5683 if (cbSecond)
5684 {
5685 rcStrict = PGMPhysWrite(pVM,
5686 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5687 pbBuf + cbFirst,
5688 cbSecond,
5689 PGMACCESSORIGIN_IEM);
5690 if (rcStrict == VINF_SUCCESS)
5691 { /* nothing */ }
5692 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5693 {
5694 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5695 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5696 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5697 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5698 }
5699#ifndef IN_RING3
5700 else if (fPostponeFail)
5701 {
5702 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5703 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5704 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5705 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5706 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5707 return iemSetPassUpStatus(pVCpu, rcStrict);
5708 }
5709#endif
5710 else
5711 {
5712 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5713 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5714 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5715 return rcStrict;
5716 }
5717 }
5718 }
5719 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5720 {
5721 if (!cbSecond)
5722 {
5723 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5724 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5725 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5726 }
5727 else
5728 {
5729 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5730 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5731 pbBuf + cbFirst,
5732 cbSecond,
5733 PGMACCESSORIGIN_IEM);
5734 if (rcStrict2 == VINF_SUCCESS)
5735 {
5736 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5737 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5738 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5739 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5740 }
5741 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5742 {
5743 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5744 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5745 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5746 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5747 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5748 }
5749#ifndef IN_RING3
5750 else if (fPostponeFail)
5751 {
5752 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5753 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5754 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5755 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5756 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5757 return iemSetPassUpStatus(pVCpu, rcStrict);
5758 }
5759#endif
5760 else
5761 {
5762 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5763 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5764 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5765 return rcStrict2;
5766 }
5767 }
5768 }
5769#ifndef IN_RING3
5770 else if (fPostponeFail)
5771 {
5772 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5773 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5774 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5775 if (!cbSecond)
5776 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5777 else
5778 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5779 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5780 return iemSetPassUpStatus(pVCpu, rcStrict);
5781 }
5782#endif
5783 else
5784 {
5785 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5786 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5788 return rcStrict;
5789 }
5790 }
5791 else
5792 {
5793 /*
5794 * No access handlers, much simpler.
5795 */
5796 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5797 if (RT_SUCCESS(rc))
5798 {
5799 if (cbSecond)
5800 {
5801 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5802 if (RT_SUCCESS(rc))
5803 { /* likely */ }
5804 else
5805 {
5806 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5807 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5808 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5809 return rc;
5810 }
5811 }
5812 }
5813 else
5814 {
5815 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5816 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5817 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5818 return rc;
5819 }
5820 }
5821 }
5822
5823#if defined(IEM_LOG_MEMORY_WRITES)
5824 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5825 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5826 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5827 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5828 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5829 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5830
5831 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5832 g_cbIemWrote = cbWrote;
5833 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5834#endif
5835
5836 /*
5837 * Free the mapping entry.
5838 */
5839 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5840 Assert(pVCpu->iem.s.cActiveMappings != 0);
5841 pVCpu->iem.s.cActiveMappings--;
5842 return VINF_SUCCESS;
5843}
5844
5845
5846/**
5847 * iemMemMap worker that deals with a request crossing pages.
5848 */
5849static VBOXSTRICTRC
5850iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5851{
5852 Assert(cbMem <= GUEST_PAGE_SIZE);
5853
5854 /*
5855 * Do the address translations.
5856 */
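/* Example split (for illustration): GCPtrFirst = xxxx0ffeh with cbMem = 4
   gives cbFirstPage = 2 and cbSecondPage = 2; each half is translated and
   access-checked on its own below. */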
5857 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5858 RTGCPHYS GCPhysFirst;
5859 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5860 if (rcStrict != VINF_SUCCESS)
5861 return rcStrict;
5862 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5863
5864 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5865 RTGCPHYS GCPhysSecond;
5866 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5867 cbSecondPage, fAccess, &GCPhysSecond);
5868 if (rcStrict != VINF_SUCCESS)
5869 return rcStrict;
5870 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5871 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5872
5873 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5874
5875 /*
5876 * Read in the current memory content if it's a read, execute or partial
5877 * write access.
5878 */
5879 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5880
5881 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5882 {
5883 if (!pVCpu->iem.s.fBypassHandlers)
5884 {
5885 /*
5886 * Must carefully deal with access handler status codes here,
5887 * which makes the code a bit bloated.
5888 */
5889 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5890 if (rcStrict == VINF_SUCCESS)
5891 {
5892 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5893 if (rcStrict == VINF_SUCCESS)
5894 { /*likely */ }
5895 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5896 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5897 else
5898 {
5899 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5900 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5901 return rcStrict;
5902 }
5903 }
5904 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5905 {
5906 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5907 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5908 {
5909 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5910 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5911 }
5912 else
5913 {
5914 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5915 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5916 return rcStrict2;
5917 }
5918 }
5919 else
5920 {
5921 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5922 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5923 return rcStrict;
5924 }
5925 }
5926 else
5927 {
5928 /*
5929 * No informational status codes here, much more straightforward.
5930 */
5931 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5932 if (RT_SUCCESS(rc))
5933 {
5934 Assert(rc == VINF_SUCCESS);
5935 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5936 if (RT_SUCCESS(rc))
5937 Assert(rc == VINF_SUCCESS);
5938 else
5939 {
5940 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5941 return rc;
5942 }
5943 }
5944 else
5945 {
5946 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5947 return rc;
5948 }
5949 }
5950 }
5951#ifdef VBOX_STRICT
5952 else
5953 memset(pbBuf, 0xcc, cbMem);
5954 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5955 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5956#endif
5957 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
5958
5959 /*
5960 * Commit the bounce buffer entry.
5961 */
5962 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5963 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5964 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5965 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5966 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5967 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5968 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5969 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5970 pVCpu->iem.s.cActiveMappings++;
5971
5972 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5973 *ppvMem = pbBuf;
5974 return VINF_SUCCESS;
5975}
5976
5977
5978/**
5979 * iemMemMap worker that deals with iemMemPageMap failures.
5980 */
5981static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5982 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5983{
5984 /*
5985 * Filter out conditions we can handle and the ones which shouldn't happen.
5986 */
5987 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5988 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5989 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5990 {
5991 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5992 return rcMap;
5993 }
5994 pVCpu->iem.s.cPotentialExits++;
5995
5996 /*
5997 * Read in the current memory content if it's a read, execute or partial
5998 * write access.
5999 */
6000 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6001 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6002 {
6003 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6004 memset(pbBuf, 0xff, cbMem);
6005 else
6006 {
6007 int rc;
6008 if (!pVCpu->iem.s.fBypassHandlers)
6009 {
6010 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6011 if (rcStrict == VINF_SUCCESS)
6012 { /* nothing */ }
6013 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6014 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6015 else
6016 {
6017 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6018 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6019 return rcStrict;
6020 }
6021 }
6022 else
6023 {
6024 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6025 if (RT_SUCCESS(rc))
6026 { /* likely */ }
6027 else
6028 {
6029 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6030 GCPhysFirst, rc));
6031 return rc;
6032 }
6033 }
6034 }
6035 }
6036#ifdef VBOX_STRICT
6037 else
6038 memset(pbBuf, 0xcc, cbMem);
6041 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6042 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6043#endif
6044
6045 /*
6046 * Commit the bounce buffer entry.
6047 */
6048 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6049 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6050 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6051 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6052 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6053 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6054 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6055 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6056 pVCpu->iem.s.cActiveMappings++;
6057
6058 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6059 *ppvMem = pbBuf;
6060 return VINF_SUCCESS;
6061}
6062
6063
6064
6065/**
6066 * Maps the specified guest memory for the given kind of access.
6067 *
6068 * This may be using bounce buffering of the memory if it's crossing a page
6069 * boundary or if there is an access handler installed for any of it. Because
6070 * of lock prefix guarantees, we're in for some extra clutter when this
6071 * happens.
6072 *
6073 * This may raise a \#GP, \#SS, \#PF or \#AC.
6074 *
6075 * @returns VBox strict status code.
6076 *
6077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6078 * @param ppvMem Where to return the pointer to the mapped memory.
6079 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6080 * 8, 12, 16, 32 or 512. When used by string operations
6081 * it can be up to a page.
6082 * @param iSegReg The index of the segment register to use for this
6083 * access. The base and limits are checked. Use UINT8_MAX
6084 * to indicate that no segmentation is required (for IDT,
6085 * GDT and LDT accesses).
6086 * @param GCPtrMem The address of the guest memory.
6087 * @param fAccess How the memory is being accessed. The
6088 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6089 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6090 * when raising exceptions.
6091 * @param uAlignCtl Alignment control:
6092 * - Bits 15:0 is the alignment mask.
6093 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6094 * IEM_MEMMAP_F_ALIGN_SSE, and
6095 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6096 * Pass zero to skip alignment.
6097 */
6098VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6099 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6100{
6101 /*
6102 * Check the input and figure out which mapping entry to use.
6103 */
6104 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6105 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6106 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6107 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6108 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6109
6110 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6111 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6112 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6113 {
6114 iMemMap = iemMemMapFindFree(pVCpu);
6115 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6116 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6117 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6118 pVCpu->iem.s.aMemMappings[2].fAccess),
6119 VERR_IEM_IPE_9);
6120 }
6121
6122 /*
6123 * Map the memory, checking that we can actually access it. If something
6124 * slightly complicated happens, fall back on bounce buffering.
6125 */
6126 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6127 if (rcStrict == VINF_SUCCESS)
6128 { /* likely */ }
6129 else
6130 return rcStrict;
6131
6132 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6133 { /* likely */ }
6134 else
6135 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6136
6137 /*
6138 * Alignment check.
6139 */
6140 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6141 { /* likelyish */ }
6142 else
6143 {
6144 /* Misaligned access. */
6145 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6146 {
6147 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6148 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6149 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6150 {
6151 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6152
6153 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6154 return iemRaiseAlignmentCheckException(pVCpu);
6155 }
6156 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6157 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6158 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6159 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6160 * that's what FXSAVE does on a 10980xe. */
6161 && iemMemAreAlignmentChecksEnabled(pVCpu))
6162 return iemRaiseAlignmentCheckException(pVCpu);
6163 else
6164 return iemRaiseGeneralProtectionFault0(pVCpu);
6165 }
6166 }
6167
6168#ifdef IEM_WITH_DATA_TLB
6169 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6170
6171 /*
6172 * Get the TLB entry for this page.
6173 */
6174 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6175 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6176 if (pTlbe->uTag == uTag)
6177 {
6178# ifdef VBOX_WITH_STATISTICS
6179 pVCpu->iem.s.DataTlb.cTlbHits++;
6180# endif
6181 }
6182 else
6183 {
6184 pVCpu->iem.s.DataTlb.cTlbMisses++;
6185 PGMPTWALK Walk;
6186 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6187 if (RT_FAILURE(rc))
6188 {
6189 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6190# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6191 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6192 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6193# endif
6194 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6195 }
6196
6197 Assert(Walk.fSucceeded);
6198 pTlbe->uTag = uTag;
6199 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6200 pTlbe->GCPhys = Walk.GCPhys;
6201 pTlbe->pbMappingR3 = NULL;
6202 }
6203
6204 /*
6205 * Check TLB page table level access flags.
6206 */
6207 /* If the page is either supervisor only or non-writable, we need to do
6208 more careful access checks. */
6209 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6210 {
6211 /* Write to read only memory? */
6212 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6213 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6214 && ( ( pVCpu->iem.s.uCpl == 3
6215 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6216 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6217 {
6218 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6219# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6220 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6221 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6222# endif
6223 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6224 }
6225
6226 /* Kernel memory accessed by userland? */
6227 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6228 && pVCpu->iem.s.uCpl == 3
6229 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6230 {
6231 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6232# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6233 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6234 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6235# endif
6236 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6237 }
6238 }
6239
6240 /*
6241 * Set the dirty / access flags.
6242 * ASSUMES this is set when the address is translated rather than on commit...
6243 */
6244 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6245 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6246 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6247 {
6248 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6249 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6250 AssertRC(rc2);
6251 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6252 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6253 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6254 }
6255
6256 /*
6257 * Look up the physical page info if necessary.
6258 */
6259 uint8_t *pbMem = NULL;
6260 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6261# ifdef IN_RING3
6262 pbMem = pTlbe->pbMappingR3;
6263# else
6264 pbMem = NULL;
6265# endif
6266 else
6267 {
6268 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6269 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6270 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6271 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6272 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6273 { /* likely */ }
6274 else
6275 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6276 pTlbe->pbMappingR3 = NULL;
6277 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6278 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6279 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6280 &pbMem, &pTlbe->fFlagsAndPhysRev);
6281 AssertRCReturn(rc, rc);
6282# ifdef IN_RING3
6283 pTlbe->pbMappingR3 = pbMem;
6284# endif
6285 }
6286
6287 /*
6288 * Check the physical page level access and mapping.
6289 */
6290 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6291 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6292 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6293 { /* probably likely */ }
6294 else
6295 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6296 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6297 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6298 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6299 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6300 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6301
6302 if (pbMem)
6303 {
6304 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6305 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6306 fAccess |= IEM_ACCESS_NOT_LOCKED;
6307 }
6308 else
6309 {
6310 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6311 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6312 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6313 if (rcStrict != VINF_SUCCESS)
6314 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6315 }
6316
6317 void * const pvMem = pbMem;
6318
6319 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6320 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6321 if (fAccess & IEM_ACCESS_TYPE_READ)
6322 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6323
6324#else /* !IEM_WITH_DATA_TLB */
6325
6326 RTGCPHYS GCPhysFirst;
6327 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6328 if (rcStrict != VINF_SUCCESS)
6329 return rcStrict;
6330
6331 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6332 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6333 if (fAccess & IEM_ACCESS_TYPE_READ)
6334 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6335
6336 void *pvMem;
6337 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6338 if (rcStrict != VINF_SUCCESS)
6339 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6340
6341#endif /* !IEM_WITH_DATA_TLB */
6342
6343 /*
6344 * Fill in the mapping table entry.
6345 */
6346 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6347 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6348 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6349 pVCpu->iem.s.cActiveMappings += 1;
6350
6351 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6352 *ppvMem = pvMem;
6353
6354 return VINF_SUCCESS;
6355}
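/*
 * Illustrative sketch (not part of the original source): callers compose the
 * uAlignCtl argument from a natural-alignment mask in the low 16 bits plus the
 * IEM_MEMMAP_F_ALIGN_XXX flags, e.g. for a 16-byte SSE operand that must raise
 * \#GP(0) when misaligned (compare iemMemFetchDataU128AlignedSse further down):
 *
 *     PCRTUINT128U pu128Src;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
 *                                       IEM_ACCESS_DATA_R,
 *                                       (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
 *
 * Passing zero for uAlignCtl skips the alignment check entirely.
 */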
6356
6357
6358/**
6359 * Commits the guest memory if bounce buffered and unmaps it.
6360 *
6361 * @returns Strict VBox status code.
6362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6363 * @param pvMem The mapping.
6364 * @param fAccess The kind of access.
6365 */
6366VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6367{
6368 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6369 AssertReturn(iMemMap >= 0, iMemMap);
6370
6371 /* If it's bounce buffered, we may need to write back the buffer. */
6372 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6373 {
6374 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6375 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6376 }
6377 /* Otherwise unlock it. */
6378 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6379 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6380
6381 /* Free the entry. */
6382 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6383 Assert(pVCpu->iem.s.cActiveMappings != 0);
6384 pVCpu->iem.s.cActiveMappings--;
6385 return VINF_SUCCESS;
6386}
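/*
 * Illustrative usage sketch (not part of the original source): a read-modify-write
 * operand is typically handled by mapping it, updating it in place and committing,
 * here assuming the IEM_ACCESS_DATA_RW access constant defined elsewhere in IEM:
 *
 *     uint32_t *pu32Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
 *                                       IEM_ACCESS_DATA_RW, sizeof(*pu32Dst) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Dst |= uValue;  hypothetical modification
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_RW);
 *     }
 *
 * For a bounce buffered mapping it is this commit that actually writes the bytes
 * back to guest memory (possibly via access handlers).
 */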
6387
6388#ifdef IEM_WITH_SETJMP
6389
6390/**
6391 * Maps the specified guest memory for the given kind of access, longjmp on
6392 * error.
6393 *
6394 * This may be using bounce buffering of the memory if it's crossing a page
6395 * boundary or if there is an access handler installed for any of it. Because
6396 * of lock prefix guarantees, we're in for some extra clutter when this
6397 * happens.
6398 *
6399 * This may raise a \#GP, \#SS, \#PF or \#AC.
6400 *
6401 * @returns Pointer to the mapped memory.
6402 *
6403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6404 * @param cbMem The number of bytes to map. This is usually 1,
6405 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6406 * string operations it can be up to a page.
6407 * @param iSegReg The index of the segment register to use for
6408 * this access. The base and limits are checked.
6409 * Use UINT8_MAX to indicate that no segmentation
6410 * is required (for IDT, GDT and LDT accesses).
6411 * @param GCPtrMem The address of the guest memory.
6412 * @param fAccess How the memory is being accessed. The
6413 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6414 * how to map the memory, while the
6415 * IEM_ACCESS_WHAT_XXX bit is used when raising
6416 * exceptions.
6417 * @param uAlignCtl Alignment control:
6418 * - Bits 15:0 is the alignment mask.
6419 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6420 * IEM_MEMMAP_F_ALIGN_SSE, and
6421 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6422 * Pass zero to skip alignment.
6423 */
6424void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6425 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6426{
6427 /*
6428 * Check the input, check segment access and adjust address
6429 * with segment base.
6430 */
6431 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6432 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6433 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6434
6435 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6436 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6437 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6438
6439 /*
6440 * Alignment check.
6441 */
6442 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6443 { /* likelyish */ }
6444 else
6445 {
6446 /* Misaligned access. */
6447 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6448 {
6449 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6450 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6451 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6452 {
6453 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6454
6455 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6456 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6457 }
6458 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6459 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6460 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6461 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6462 * that's what FXSAVE does on a 10980xe. */
6463 && iemMemAreAlignmentChecksEnabled(pVCpu))
6464 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6465 else
6466 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6467 }
6468 }
6469
6470 /*
6471 * Figure out which mapping entry to use.
6472 */
6473 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6474 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6475 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6476 {
6477 iMemMap = iemMemMapFindFree(pVCpu);
6478 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6479 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6480 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6481 pVCpu->iem.s.aMemMappings[2].fAccess),
6482 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6483 }
6484
6485 /*
6486 * Crossing a page boundary?
6487 */
6488 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6489 { /* No (likely). */ }
6490 else
6491 {
6492 void *pvMem;
6493 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6494 if (rcStrict == VINF_SUCCESS)
6495 return pvMem;
6496 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6497 }
6498
6499#ifdef IEM_WITH_DATA_TLB
6500 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6501
6502 /*
6503 * Get the TLB entry for this page.
6504 */
6505 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6506 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6507 if (pTlbe->uTag == uTag)
6508 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6509 else
6510 {
6511 pVCpu->iem.s.DataTlb.cTlbMisses++;
6512 PGMPTWALK Walk;
6513 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6514 if (RT_FAILURE(rc))
6515 {
6516 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6517# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6518 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6519 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6520# endif
6521 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6522 }
6523
6524 Assert(Walk.fSucceeded);
6525 pTlbe->uTag = uTag;
6526 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6527 pTlbe->GCPhys = Walk.GCPhys;
6528 pTlbe->pbMappingR3 = NULL;
6529 }
6530
6531 /*
6532 * Check the flags and physical revision.
6533 */
6534 /** @todo make the caller pass these in with fAccess. */
6535 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6536 ? IEMTLBE_F_PT_NO_USER : 0;
6537 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6538 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6539 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6540 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6541 ? IEMTLBE_F_PT_NO_WRITE : 0)
6542 : 0;
6543 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6544 uint8_t *pbMem = NULL;
6545 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6546 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6547# ifdef IN_RING3
6548 pbMem = pTlbe->pbMappingR3;
6549# else
6550 pbMem = NULL;
6551# endif
6552 else
6553 {
6554 /*
6555 * Okay, something isn't quite right or needs refreshing.
6556 */
6557 /* Write to read only memory? */
6558 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6559 {
6560 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6561# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6562 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6563 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6564# endif
6565 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6566 }
6567
6568 /* Kernel memory accessed by userland? */
6569 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6570 {
6571 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6572# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6573 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6574 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6575# endif
6576 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6577 }
6578
6579 /* Set the dirty / access flags.
6580 ASSUMES this is set when the address is translated rather than on commit... */
6581 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6582 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6583 {
6584 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6585 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6586 AssertRC(rc2);
6587 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6588 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6589 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6590 }
6591
6592 /*
6593 * Check if the physical page info needs updating.
6594 */
6595 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6596# ifdef IN_RING3
6597 pbMem = pTlbe->pbMappingR3;
6598# else
6599 pbMem = NULL;
6600# endif
6601 else
6602 {
6603 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6604 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6605 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6606 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6607 pTlbe->pbMappingR3 = NULL;
6608 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6609 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6610 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6611 &pbMem, &pTlbe->fFlagsAndPhysRev);
6612 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6613# ifdef IN_RING3
6614 pTlbe->pbMappingR3 = pbMem;
6615# endif
6616 }
6617
6618 /*
6619 * Check the physical page level access and mapping.
6620 */
6621 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6622 { /* probably likely */ }
6623 else
6624 {
6625 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6626 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6627 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6628 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6629 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6630 if (rcStrict == VINF_SUCCESS)
6631 return pbMem;
6632 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6633 }
6634 }
6635 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6636
6637 if (pbMem)
6638 {
6639 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6640 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6641 fAccess |= IEM_ACCESS_NOT_LOCKED;
6642 }
6643 else
6644 {
6645 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6646 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6647 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6648 if (rcStrict == VINF_SUCCESS)
6649 return pbMem;
6650 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6651 }
6652
6653 void * const pvMem = pbMem;
6654
6655 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6656 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6657 if (fAccess & IEM_ACCESS_TYPE_READ)
6658 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6659
6660#else /* !IEM_WITH_DATA_TLB */
6661
6662
6663 RTGCPHYS GCPhysFirst;
6664 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6665 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6666 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6667
6668 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6669 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6670 if (fAccess & IEM_ACCESS_TYPE_READ)
6671 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6672
6673 void *pvMem;
6674 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6675 if (rcStrict == VINF_SUCCESS)
6676 { /* likely */ }
6677 else
6678 {
6679 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6680 if (rcStrict == VINF_SUCCESS)
6681 return pvMem;
6682 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6683 }
6684
6685#endif /* !IEM_WITH_DATA_TLB */
6686
6687 /*
6688 * Fill in the mapping table entry.
6689 */
6690 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6691 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6692 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6693 pVCpu->iem.s.cActiveMappings++;
6694
6695 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6696 return pvMem;
6697}
6698
6699
6700/**
6701 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6702 *
6703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6704 * @param pvMem The mapping.
6705 * @param fAccess The kind of access.
6706 */
6707void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6708{
6709 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6710 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6711
6712 /* If it's bounce buffered, we may need to write back the buffer. */
6713 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6714 {
6715 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6716 {
6717 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6718 if (rcStrict == VINF_SUCCESS)
6719 return;
6720 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6721 }
6722 }
6723 /* Otherwise unlock it. */
6724 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6725 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6726
6727 /* Free the entry. */
6728 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6729 Assert(pVCpu->iem.s.cActiveMappings != 0);
6730 pVCpu->iem.s.cActiveMappings--;
6731}
6732
6733#endif /* IEM_WITH_SETJMP */
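/*
 * Illustrative sketch (not part of the original source): the Jmp variants above
 * never return failure statuses, so callers dereference the mapping directly and
 * rely on the longjmp for error handling, just like the iemMemFetchDataXxxJmp
 * helpers below:
 *
 *     uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem,
 *                                                              IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *     uint16_t const uValue = *pu16Src;
 *     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 */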
6734
6735#ifndef IN_RING3
6736/**
6737 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6738 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
6739 *
6740 * Allows the instruction to be completed and retired, while the IEM user will
6741 * return to ring-3 immediately afterwards and do the postponed writes there.
6742 *
6743 * @returns VBox status code (no strict statuses). Caller must check
6744 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6746 * @param pvMem The mapping.
6747 * @param fAccess The kind of access.
6748 */
6749VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6750{
6751 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6752 AssertReturn(iMemMap >= 0, iMemMap);
6753
6754 /* If it's bounce buffered, we may need to write back the buffer. */
6755 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6756 {
6757 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6758 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6759 }
6760 /* Otherwise unlock it. */
6761 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6762 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6763
6764 /* Free the entry. */
6765 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6766 Assert(pVCpu->iem.s.cActiveMappings != 0);
6767 pVCpu->iem.s.cActiveMappings--;
6768 return VINF_SUCCESS;
6769}
6770#endif
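/*
 * Caller-side sketch (an assumption, not taken from this file): after using the
 * postponing variant above, the caller is expected to check the VMCPU_FF_IEM
 * force flag before repeating string instructions and similar, roughly:
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         break;  stop repeating; ring-3 performs the postponed writes
 */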
6771
6772
6773/**
6774 * Rolls back mappings, releasing page locks and such.
6775 *
6776 * The caller shall only call this after checking cActiveMappings.
6777 *
6778 * @returns Strict VBox status code to pass up.
6779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6780 */
6781void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6782{
6783 Assert(pVCpu->iem.s.cActiveMappings > 0);
6784
6785 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6786 while (iMemMap-- > 0)
6787 {
6788 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6789 if (fAccess != IEM_ACCESS_INVALID)
6790 {
6791 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6792 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6793 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6794 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6795 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6796 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6797 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6798 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6799 pVCpu->iem.s.cActiveMappings--;
6800 }
6801 }
6802}
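/*
 * Illustrative caller sketch (not part of the original source): as noted above,
 * the rollback is only invoked after checking the active mapping counter, e.g.
 * when an instruction bails out with an error status:
 *
 *     if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *         iemMemRollback(pVCpu);
 */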
6803
6804
6805/**
6806 * Fetches a data byte.
6807 *
6808 * @returns Strict VBox status code.
6809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6810 * @param pu8Dst Where to return the byte.
6811 * @param iSegReg The index of the segment register to use for
6812 * this access. The base and limits are checked.
6813 * @param GCPtrMem The address of the guest memory.
6814 */
6815VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6816{
6817 /* The lazy approach for now... */
6818 uint8_t const *pu8Src;
6819 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6820 if (rc == VINF_SUCCESS)
6821 {
6822 *pu8Dst = *pu8Src;
6823 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6824 }
6825 return rc;
6826}
6827
6828
6829#ifdef IEM_WITH_SETJMP
6830/**
6831 * Fetches a data byte, longjmp on error.
6832 *
6833 * @returns The byte.
6834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6835 * @param iSegReg The index of the segment register to use for
6836 * this access. The base and limits are checked.
6837 * @param GCPtrMem The address of the guest memory.
6838 */
6839uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6840{
6841 /* The lazy approach for now... */
6842 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6843 uint8_t const bRet = *pu8Src;
6844 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6845 return bRet;
6846}
6847#endif /* IEM_WITH_SETJMP */
6848
6849
6850/**
6851 * Fetches a data word.
6852 *
6853 * @returns Strict VBox status code.
6854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6855 * @param pu16Dst Where to return the word.
6856 * @param iSegReg The index of the segment register to use for
6857 * this access. The base and limits are checked.
6858 * @param GCPtrMem The address of the guest memory.
6859 */
6860VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6861{
6862 /* The lazy approach for now... */
6863 uint16_t const *pu16Src;
6864 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6865 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6866 if (rc == VINF_SUCCESS)
6867 {
6868 *pu16Dst = *pu16Src;
6869 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6870 }
6871 return rc;
6872}
6873
6874
6875#ifdef IEM_WITH_SETJMP
6876/**
6877 * Fetches a data word, longjmp on error.
6878 *
6879 * @returns The word
6880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6881 * @param iSegReg The index of the segment register to use for
6882 * this access. The base and limits are checked.
6883 * @param GCPtrMem The address of the guest memory.
6884 */
6885uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6886{
6887 /* The lazy approach for now... */
6888 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6889 sizeof(*pu16Src) - 1);
6890 uint16_t const u16Ret = *pu16Src;
6891 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6892 return u16Ret;
6893}
6894#endif
6895
6896
6897/**
6898 * Fetches a data dword.
6899 *
6900 * @returns Strict VBox status code.
6901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6902 * @param pu32Dst Where to return the dword.
6903 * @param iSegReg The index of the segment register to use for
6904 * this access. The base and limits are checked.
6905 * @param GCPtrMem The address of the guest memory.
6906 */
6907VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6908{
6909 /* The lazy approach for now... */
6910 uint32_t const *pu32Src;
6911 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6912 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6913 if (rc == VINF_SUCCESS)
6914 {
6915 *pu32Dst = *pu32Src;
6916 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6917 }
6918 return rc;
6919}
6920
6921
6922/**
6923 * Fetches a data dword and zero extends it to a qword.
6924 *
6925 * @returns Strict VBox status code.
6926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6927 * @param pu64Dst Where to return the qword.
6928 * @param iSegReg The index of the segment register to use for
6929 * this access. The base and limits are checked.
6930 * @param GCPtrMem The address of the guest memory.
6931 */
6932VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6933{
6934 /* The lazy approach for now... */
6935 uint32_t const *pu32Src;
6936 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6937 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6938 if (rc == VINF_SUCCESS)
6939 {
6940 *pu64Dst = *pu32Src;
6941 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6942 }
6943 return rc;
6944}
6945
6946
6947#ifdef IEM_WITH_SETJMP
6948
6949/**
6950 * Fetches a data dword, longjmp on error, fallback/safe version.
6951 *
6952 * @returns The dword
6953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6954 * @param iSegReg The index of the segment register to use for
6955 * this access. The base and limits are checked.
6956 * @param GCPtrMem The address of the guest memory.
6957 */
6958uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6959{
6960 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6961 sizeof(*pu32Src) - 1);
6962 uint32_t const u32Ret = *pu32Src;
6963 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6964 return u32Ret;
6965}
6966
6967
6968/**
6969 * Fetches a data dword, longjmp on error.
6970 *
6971 * @returns The dword
6972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6973 * @param iSegReg The index of the segment register to use for
6974 * this access. The base and limits are checked.
6975 * @param GCPtrMem The address of the guest memory.
6976 */
6977uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6978{
6979# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6980 /*
6981 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6982 */
6983 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6984 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6985 {
6986 /*
6987 * TLB lookup.
6988 */
6989 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6990 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6991 if (pTlbe->uTag == uTag)
6992 {
6993 /*
6994 * Check TLB page table level access flags.
6995 */
6996 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6997 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6998 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6999 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7000 {
7001 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7002
7003 /*
7004 * Alignment check:
7005 */
7006 /** @todo check priority \#AC vs \#PF */
7007 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7008 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7009 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7010 || pVCpu->iem.s.uCpl != 3)
7011 {
7012 /*
7013 * Fetch and return the dword
7014 */
7015 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7016 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7017 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7018 }
7019 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7020 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7021 }
7022 }
7023 }
7024
7025 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7026 outdated page pointer, or other troubles. */
7027 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7028 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7029
7030# else
7031 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7032 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7033 uint32_t const u32Ret = *pu32Src;
7034 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7035 return u32Ret;
7036# endif
7037}
7038#endif
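/*
 * Worked example (illustrative, not part of the original source): the fast path
 * in iemMemFetchDataU32Jmp only requires the 4-byte read to stay within a single
 * guest page. Assuming the usual 4 KiB guest page, a page offset of 0xffc still
 * qualifies (0xffc <= 0x1000 - 4), whereas 0xffd does not and drops to the safe
 * fallback, which handles the page crossing via bounce buffering.
 */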
7039
7040
7041#ifdef SOME_UNUSED_FUNCTION
7042/**
7043 * Fetches a data dword and sign extends it to a qword.
7044 *
7045 * @returns Strict VBox status code.
7046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7047 * @param pu64Dst Where to return the sign extended value.
7048 * @param iSegReg The index of the segment register to use for
7049 * this access. The base and limits are checked.
7050 * @param GCPtrMem The address of the guest memory.
7051 */
7052VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7053{
7054 /* The lazy approach for now... */
7055 int32_t const *pi32Src;
7056 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7057 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7058 if (rc == VINF_SUCCESS)
7059 {
7060 *pu64Dst = *pi32Src;
7061 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7062 }
7063#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7064 else
7065 *pu64Dst = 0;
7066#endif
7067 return rc;
7068}
7069#endif
7070
7071
7072/**
7073 * Fetches a data qword.
7074 *
7075 * @returns Strict VBox status code.
7076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7077 * @param pu64Dst Where to return the qword.
7078 * @param iSegReg The index of the segment register to use for
7079 * this access. The base and limits are checked.
7080 * @param GCPtrMem The address of the guest memory.
7081 */
7082VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7083{
7084 /* The lazy approach for now... */
7085 uint64_t const *pu64Src;
7086 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7087 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7088 if (rc == VINF_SUCCESS)
7089 {
7090 *pu64Dst = *pu64Src;
7091 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7092 }
7093 return rc;
7094}
7095
7096
7097#ifdef IEM_WITH_SETJMP
7098/**
7099 * Fetches a data qword, longjmp on error.
7100 *
7101 * @returns The qword.
7102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7103 * @param iSegReg The index of the segment register to use for
7104 * this access. The base and limits are checked.
7105 * @param GCPtrMem The address of the guest memory.
7106 */
7107uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7108{
7109 /* The lazy approach for now... */
7110 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7111 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7112 uint64_t const u64Ret = *pu64Src;
7113 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7114 return u64Ret;
7115}
7116#endif
7117
7118
7119/**
7120 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7121 *
7122 * @returns Strict VBox status code.
7123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7124 * @param pu64Dst Where to return the qword.
7125 * @param iSegReg The index of the segment register to use for
7126 * this access. The base and limits are checked.
7127 * @param GCPtrMem The address of the guest memory.
7128 */
7129VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7130{
7131 /* The lazy approach for now... */
7132 uint64_t const *pu64Src;
7133 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7134 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7135 if (rc == VINF_SUCCESS)
7136 {
7137 *pu64Dst = *pu64Src;
7138 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7139 }
7140 return rc;
7141}
7142
7143
7144#ifdef IEM_WITH_SETJMP
7145/**
7146 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7147 *
7148 * @returns The qword.
7149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7150 * @param iSegReg The index of the segment register to use for
7151 * this access. The base and limits are checked.
7152 * @param GCPtrMem The address of the guest memory.
7153 */
7154uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7155{
7156 /* The lazy approach for now... */
7157 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7158 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7159 uint64_t const u64Ret = *pu64Src;
7160 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7161 return u64Ret;
7162}
7163#endif
7164
7165
7166/**
7167 * Fetches a data tword.
7168 *
7169 * @returns Strict VBox status code.
7170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7171 * @param pr80Dst Where to return the tword.
7172 * @param iSegReg The index of the segment register to use for
7173 * this access. The base and limits are checked.
7174 * @param GCPtrMem The address of the guest memory.
7175 */
7176VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7177{
7178 /* The lazy approach for now... */
7179 PCRTFLOAT80U pr80Src;
7180 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7181 if (rc == VINF_SUCCESS)
7182 {
7183 *pr80Dst = *pr80Src;
7184 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7185 }
7186 return rc;
7187}
7188
7189
7190#ifdef IEM_WITH_SETJMP
7191/**
7192 * Fetches a data tword, longjmp on error.
7193 *
7194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7195 * @param pr80Dst Where to return the tword.
7196 * @param iSegReg The index of the segment register to use for
7197 * this access. The base and limits are checked.
7198 * @param GCPtrMem The address of the guest memory.
7199 */
7200void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7201{
7202 /* The lazy approach for now... */
7203 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7204 *pr80Dst = *pr80Src;
7205 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7206}
7207#endif
7208
7209
7210/**
7211 * Fetches a data decimal tword.
7212 *
7213 * @returns Strict VBox status code.
7214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7215 * @param pd80Dst Where to return the tword.
7216 * @param iSegReg The index of the segment register to use for
7217 * this access. The base and limits are checked.
7218 * @param GCPtrMem The address of the guest memory.
7219 */
7220VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7221{
7222 /* The lazy approach for now... */
7223 PCRTPBCD80U pd80Src;
7224 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7225 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7226 if (rc == VINF_SUCCESS)
7227 {
7228 *pd80Dst = *pd80Src;
7229 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7230 }
7231 return rc;
7232}
7233
7234
7235#ifdef IEM_WITH_SETJMP
7236/**
7237 * Fetches a data decimal tword, longjmp on error.
7238 *
7239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7240 * @param pd80Dst Where to return the tword.
7241 * @param iSegReg The index of the segment register to use for
7242 * this access. The base and limits are checked.
7243 * @param GCPtrMem The address of the guest memory.
7244 */
7245void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7246{
7247 /* The lazy approach for now... */
7248 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7249 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7250 *pd80Dst = *pd80Src;
7251 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7252}
7253#endif
7254
7255
7256/**
7257 * Fetches a data dqword (double qword), generally SSE related.
7258 *
7259 * @returns Strict VBox status code.
7260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7261 * @param pu128Dst Where to return the dqword.
7262 * @param iSegReg The index of the segment register to use for
7263 * this access. The base and limits are checked.
7264 * @param GCPtrMem The address of the guest memory.
7265 */
7266VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7267{
7268 /* The lazy approach for now... */
7269 PCRTUINT128U pu128Src;
7270 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7271 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7272 if (rc == VINF_SUCCESS)
7273 {
7274 pu128Dst->au64[0] = pu128Src->au64[0];
7275 pu128Dst->au64[1] = pu128Src->au64[1];
7276 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7277 }
7278 return rc;
7279}
7280
7281
7282#ifdef IEM_WITH_SETJMP
7283/**
7284 * Fetches a data dqword (double qword), generally SSE related.
7285 *
7286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7287 * @param pu128Dst Where to return the dqword.
7288 * @param iSegReg The index of the segment register to use for
7289 * this access. The base and limits are checked.
7290 * @param GCPtrMem The address of the guest memory.
7291 */
7292void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7293{
7294 /* The lazy approach for now... */
7295 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7296 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7297 pu128Dst->au64[0] = pu128Src->au64[0];
7298 pu128Dst->au64[1] = pu128Src->au64[1];
7299 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7300}
7301#endif
7302
7303
7304/**
7305 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7306 * related.
7307 *
7308 * Raises \#GP(0) if not aligned.
7309 *
7310 * @returns Strict VBox status code.
7311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7312 * @param pu128Dst Where to return the dqword.
7313 * @param iSegReg The index of the segment register to use for
7314 * this access. The base and limits are checked.
7315 * @param GCPtrMem The address of the guest memory.
7316 */
7317VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7318{
7319 /* The lazy approach for now... */
7320 PCRTUINT128U pu128Src;
7321 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7322 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7323 if (rc == VINF_SUCCESS)
7324 {
7325 pu128Dst->au64[0] = pu128Src->au64[0];
7326 pu128Dst->au64[1] = pu128Src->au64[1];
7327 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7328 }
7329 return rc;
7330}
7331
7332
7333#ifdef IEM_WITH_SETJMP
7334/**
7335 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7336 * related, longjmp on error.
7337 *
7338 * Raises \#GP(0) if not aligned.
7339 *
7340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7341 * @param pu128Dst Where to return the dqword.
7342 * @param iSegReg The index of the segment register to use for
7343 * this access. The base and limits are checked.
7344 * @param GCPtrMem The address of the guest memory.
7345 */
7346void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7347 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7348{
7349 /* The lazy approach for now... */
7350 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7351 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7352 pu128Dst->au64[0] = pu128Src->au64[0];
7353 pu128Dst->au64[1] = pu128Src->au64[1];
7354 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7355}
7356#endif
7357
7358
7359/**
7360 * Fetches a data oword (octo word), generally AVX related.
7361 *
7362 * @returns Strict VBox status code.
7363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7364 * @param pu256Dst Where to return the oword.
7365 * @param iSegReg The index of the segment register to use for
7366 * this access. The base and limits are checked.
7367 * @param GCPtrMem The address of the guest memory.
7368 */
7369VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7370{
7371 /* The lazy approach for now... */
7372 PCRTUINT256U pu256Src;
7373 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7374 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7375 if (rc == VINF_SUCCESS)
7376 {
7377 pu256Dst->au64[0] = pu256Src->au64[0];
7378 pu256Dst->au64[1] = pu256Src->au64[1];
7379 pu256Dst->au64[2] = pu256Src->au64[2];
7380 pu256Dst->au64[3] = pu256Src->au64[3];
7381 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7382 }
7383 return rc;
7384}
7385
7386
7387#ifdef IEM_WITH_SETJMP
7388/**
7389 * Fetches a data oword (octo word), generally AVX related.
7390 *
7391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7392 * @param pu256Dst Where to return the oword.
7393 * @param iSegReg The index of the segment register to use for
7394 * this access. The base and limits are checked.
7395 * @param GCPtrMem The address of the guest memory.
7396 */
7397void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7398{
7399 /* The lazy approach for now... */
7400 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7401 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7402 pu256Dst->au64[0] = pu256Src->au64[0];
7403 pu256Dst->au64[1] = pu256Src->au64[1];
7404 pu256Dst->au64[2] = pu256Src->au64[2];
7405 pu256Dst->au64[3] = pu256Src->au64[3];
7406 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7407}
7408#endif
7409
7410
7411/**
7412 * Fetches a data oword (octo word) at an aligned address, generally AVX
7413 * related.
7414 *
7415 * Raises \#GP(0) if not aligned.
7416 *
7417 * @returns Strict VBox status code.
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 * @param pu256Dst Where to return the oword.
7420 * @param iSegReg The index of the segment register to use for
7421 * this access. The base and limits are checked.
7422 * @param GCPtrMem The address of the guest memory.
7423 */
7424VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7425{
7426 /* The lazy approach for now... */
7427 PCRTUINT256U pu256Src;
7428 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7429 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7430 if (rc == VINF_SUCCESS)
7431 {
7432 pu256Dst->au64[0] = pu256Src->au64[0];
7433 pu256Dst->au64[1] = pu256Src->au64[1];
7434 pu256Dst->au64[2] = pu256Src->au64[2];
7435 pu256Dst->au64[3] = pu256Src->au64[3];
7436 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7437 }
7438 return rc;
7439}
7440
7441
7442#ifdef IEM_WITH_SETJMP
7443/**
7444 * Fetches a data oword (octo word) at an aligned address, generally AVX
7445 * related, longjmp on error.
7446 *
7447 * Raises \#GP(0) if not aligned.
7448 *
7449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7450 * @param pu256Dst Where to return the oword.
7451 * @param iSegReg The index of the segment register to use for
7452 * this access. The base and limits are checked.
7453 * @param GCPtrMem The address of the guest memory.
7454 */
7455void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7456 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7457{
7458 /* The lazy approach for now... */
7459 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7460 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7461 pu256Dst->au64[0] = pu256Src->au64[0];
7462 pu256Dst->au64[1] = pu256Src->au64[1];
7463 pu256Dst->au64[2] = pu256Src->au64[2];
7464 pu256Dst->au64[3] = pu256Src->au64[3];
7465 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7466}
7467#endif
7468
7469
7470
7471/**
7472 * Fetches a descriptor register (lgdt, lidt).
7473 *
7474 * @returns Strict VBox status code.
7475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7476 * @param pcbLimit Where to return the limit.
7477 * @param pGCPtrBase Where to return the base.
7478 * @param iSegReg The index of the segment register to use for
7479 * this access. The base and limits are checked.
7480 * @param GCPtrMem The address of the guest memory.
7481 * @param enmOpSize The effective operand size.
7482 */
7483VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7484 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7485{
7486 /*
7487 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7488 * little special:
7489 * - The two reads are done separately.
7490 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7491 * - We suspect the 386 actually commits the limit before the base in
7492 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7493 * don't try to emulate this eccentric behavior because it's not well
7494 * enough understood and is rather hard to trigger.
7495 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7496 */
7497 VBOXSTRICTRC rcStrict;
7498 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7499 {
7500 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7501 if (rcStrict == VINF_SUCCESS)
7502 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7503 }
7504 else
7505 {
7506 uint32_t uTmp = 0; /* (Silences a Visual C++ maybe-used-uninitialized warning.) */
7507 if (enmOpSize == IEMMODE_32BIT)
7508 {
7509 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7510 {
7511 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7512 if (rcStrict == VINF_SUCCESS)
7513 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7514 }
7515 else
7516 {
7517 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7518 if (rcStrict == VINF_SUCCESS)
7519 {
7520 *pcbLimit = (uint16_t)uTmp;
7521 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7522 }
7523 }
7524 if (rcStrict == VINF_SUCCESS)
7525 *pGCPtrBase = uTmp;
7526 }
7527 else
7528 {
7529 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7530 if (rcStrict == VINF_SUCCESS)
7531 {
7532 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7533 if (rcStrict == VINF_SUCCESS)
7534 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7535 }
7536 }
7537 }
7538 return rcStrict;
7539}
7540
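/*
 * Illustrative sketch only (iEffSeg, GCPtrEffSrc and enmEffOpSize are made-up
 * caller variables): an LGDT/LIDT style implementation would typically fetch
 * its memory operand like this and, on VINF_SUCCESS, load cbLimit and
 * GCPtrBase into the GDTR or IDTR:
 *
 *      uint16_t     cbLimit;
 *      RTGCPTR      GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                  iEffSeg, GCPtrEffSrc, enmEffOpSize);
 */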
7541
7542
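/*
 * The data store helpers that follow mirror the fetch helpers above: map the
 * guest bytes (directly or via a bounce buffer), write the value, then commit
 * and unmap.  The plain variants report trouble through the strict status
 * code, while the Jmp variants longjmp on error instead.  Minimal usage
 * sketch (illustrative only; GCPtrMem is a made-up caller variable):
 *
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataU8(pVCpu, X86_SREG_DS, GCPtrMem, 0x42);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */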
7543/**
7544 * Stores a data byte.
7545 *
7546 * @returns Strict VBox status code.
7547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7548 * @param iSegReg The index of the segment register to use for
7549 * this access. The base and limits are checked.
7550 * @param GCPtrMem The address of the guest memory.
7551 * @param u8Value The value to store.
7552 */
7553VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7554{
7555 /* The lazy approach for now... */
7556 uint8_t *pu8Dst;
7557 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7558 if (rc == VINF_SUCCESS)
7559 {
7560 *pu8Dst = u8Value;
7561 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7562 }
7563 return rc;
7564}
7565
7566
7567#ifdef IEM_WITH_SETJMP
7568/**
7569 * Stores a data byte, longjmp on error.
7570 *
7571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7572 * @param iSegReg The index of the segment register to use for
7573 * this access. The base and limits are checked.
7574 * @param GCPtrMem The address of the guest memory.
7575 * @param u8Value The value to store.
7576 */
7577void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7578{
7579 /* The lazy approach for now... */
7580 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7581 *pu8Dst = u8Value;
7582 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7583}
7584#endif
7585
7586
7587/**
7588 * Stores a data word.
7589 *
7590 * @returns Strict VBox status code.
7591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7592 * @param iSegReg The index of the segment register to use for
7593 * this access. The base and limits are checked.
7594 * @param GCPtrMem The address of the guest memory.
7595 * @param u16Value The value to store.
7596 */
7597VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7598{
7599 /* The lazy approach for now... */
7600 uint16_t *pu16Dst;
7601 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7602 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7603 if (rc == VINF_SUCCESS)
7604 {
7605 *pu16Dst = u16Value;
7606 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7607 }
7608 return rc;
7609}
7610
7611
7612#ifdef IEM_WITH_SETJMP
7613/**
7614 * Stores a data word, longjmp on error.
7615 *
7616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7617 * @param iSegReg The index of the segment register to use for
7618 * this access. The base and limits are checked.
7619 * @param GCPtrMem The address of the guest memory.
7620 * @param u16Value The value to store.
7621 */
7622void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7623{
7624 /* The lazy approach for now... */
7625 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7626 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7627 *pu16Dst = u16Value;
7628 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7629}
7630#endif
7631
7632
7633/**
7634 * Stores a data dword.
7635 *
7636 * @returns Strict VBox status code.
7637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7638 * @param iSegReg The index of the segment register to use for
7639 * this access. The base and limits are checked.
7640 * @param GCPtrMem The address of the guest memory.
7641 * @param u32Value The value to store.
7642 */
7643VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7644{
7645 /* The lazy approach for now... */
7646 uint32_t *pu32Dst;
7647 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7648 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7649 if (rc == VINF_SUCCESS)
7650 {
7651 *pu32Dst = u32Value;
7652 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7653 }
7654 return rc;
7655}
7656
7657
7658#ifdef IEM_WITH_SETJMP
7659/**
7660 * Stores a data dword, longjmp on error.
7661 *
7663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7664 * @param iSegReg The index of the segment register to use for
7665 * this access. The base and limits are checked.
7666 * @param GCPtrMem The address of the guest memory.
7667 * @param u32Value The value to store.
7668 */
7669void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7670{
7671 /* The lazy approach for now... */
7672 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7673 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7674 *pu32Dst = u32Value;
7675 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7676}
7677#endif
7678
7679
7680/**
7681 * Stores a data qword.
7682 *
7683 * @returns Strict VBox status code.
7684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7685 * @param iSegReg The index of the segment register to use for
7686 * this access. The base and limits are checked.
7687 * @param GCPtrMem The address of the guest memory.
7688 * @param u64Value The value to store.
7689 */
7690VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7691{
7692 /* The lazy approach for now... */
7693 uint64_t *pu64Dst;
7694 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7695 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7696 if (rc == VINF_SUCCESS)
7697 {
7698 *pu64Dst = u64Value;
7699 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7700 }
7701 return rc;
7702}
7703
7704
7705#ifdef IEM_WITH_SETJMP
7706/**
7707 * Stores a data qword, longjmp on error.
7708 *
7709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7710 * @param iSegReg The index of the segment register to use for
7711 * this access. The base and limits are checked.
7712 * @param GCPtrMem The address of the guest memory.
7713 * @param u64Value The value to store.
7714 */
7715void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7716{
7717 /* The lazy approach for now... */
7718 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7719 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7720 *pu64Dst = u64Value;
7721 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7722}
7723#endif
7724
7725
7726/**
7727 * Stores a data dqword.
7728 *
7729 * @returns Strict VBox status code.
7730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7731 * @param iSegReg The index of the segment register to use for
7732 * this access. The base and limits are checked.
7733 * @param GCPtrMem The address of the guest memory.
7734 * @param u128Value The value to store.
7735 */
7736VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7737{
7738 /* The lazy approach for now... */
7739 PRTUINT128U pu128Dst;
7740 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7741 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7742 if (rc == VINF_SUCCESS)
7743 {
7744 pu128Dst->au64[0] = u128Value.au64[0];
7745 pu128Dst->au64[1] = u128Value.au64[1];
7746 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7747 }
7748 return rc;
7749}
7750
7751
7752#ifdef IEM_WITH_SETJMP
7753/**
7754 * Stores a data dqword, longjmp on error.
7755 *
7756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7757 * @param iSegReg The index of the segment register to use for
7758 * this access. The base and limits are checked.
7759 * @param GCPtrMem The address of the guest memory.
7760 * @param u128Value The value to store.
7761 */
7762void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7763{
7764 /* The lazy approach for now... */
7765 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7766 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7767 pu128Dst->au64[0] = u128Value.au64[0];
7768 pu128Dst->au64[1] = u128Value.au64[1];
7769 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7770}
7771#endif
7772
7773
7774/**
7775 * Stores a data dqword, SSE aligned.
7776 *
7777 * @returns Strict VBox status code.
7778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7779 * @param iSegReg The index of the segment register to use for
7780 * this access. The base and limits are checked.
7781 * @param GCPtrMem The address of the guest memory.
7782 * @param u128Value The value to store.
7783 */
7784VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7785{
7786 /* The lazy approach for now... */
7787 PRTUINT128U pu128Dst;
7788 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7789 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7790 if (rc == VINF_SUCCESS)
7791 {
7792 pu128Dst->au64[0] = u128Value.au64[0];
7793 pu128Dst->au64[1] = u128Value.au64[1];
7794 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7795 }
7796 return rc;
7797}
7798
7799
7800#ifdef IEM_WITH_SETJMP
7801/**
7802 * Stores a data dqword, SSE aligned, longjmp on error.
7803 *
7805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7806 * @param iSegReg The index of the segment register to use for
7807 * this access. The base and limits are checked.
7808 * @param GCPtrMem The address of the guest memory.
7809 * @param u128Value The value to store.
7810 */
7811void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7812 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7813{
7814 /* The lazy approach for now... */
7815 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7816 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7817 pu128Dst->au64[0] = u128Value.au64[0];
7818 pu128Dst->au64[1] = u128Value.au64[1];
7819 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7820}
7821#endif
7822
7823
7824/**
7825 * Stores a data oword (octo word).
7826 *
7827 * @returns Strict VBox status code.
7828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7829 * @param iSegReg The index of the segment register to use for
7830 * this access. The base and limits are checked.
7831 * @param GCPtrMem The address of the guest memory.
7832 * @param pu256Value Pointer to the value to store.
7833 */
7834VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7835{
7836 /* The lazy approach for now... */
7837 PRTUINT256U pu256Dst;
7838 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7839 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7840 if (rc == VINF_SUCCESS)
7841 {
7842 pu256Dst->au64[0] = pu256Value->au64[0];
7843 pu256Dst->au64[1] = pu256Value->au64[1];
7844 pu256Dst->au64[2] = pu256Value->au64[2];
7845 pu256Dst->au64[3] = pu256Value->au64[3];
7846 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7847 }
7848 return rc;
7849}
7850
7851
7852#ifdef IEM_WITH_SETJMP
7853/**
7854 * Stores a data oword (octo word), longjmp on error.
7855 *
7856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7857 * @param iSegReg The index of the segment register to use for
7858 * this access. The base and limits are checked.
7859 * @param GCPtrMem The address of the guest memory.
7860 * @param pu256Value Pointer to the value to store.
7861 */
7862void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7863{
7864 /* The lazy approach for now... */
7865 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7866 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7867 pu256Dst->au64[0] = pu256Value->au64[0];
7868 pu256Dst->au64[1] = pu256Value->au64[1];
7869 pu256Dst->au64[2] = pu256Value->au64[2];
7870 pu256Dst->au64[3] = pu256Value->au64[3];
7871 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7872}
7873#endif
7874
7875
7876/**
7877 * Stores a data oword (octo word), AVX \#GP(0) aligned.
7878 *
7879 * @returns Strict VBox status code.
7880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7881 * @param iSegReg The index of the segment register to use for
7882 * this access. The base and limits are checked.
7883 * @param GCPtrMem The address of the guest memory.
7884 * @param pu256Value Pointer to the value to store.
7885 */
7886VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7887{
7888 /* The lazy approach for now... */
7889 PRTUINT256U pu256Dst;
7890 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7891 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7892 if (rc == VINF_SUCCESS)
7893 {
7894 pu256Dst->au64[0] = pu256Value->au64[0];
7895 pu256Dst->au64[1] = pu256Value->au64[1];
7896 pu256Dst->au64[2] = pu256Value->au64[2];
7897 pu256Dst->au64[3] = pu256Value->au64[3];
7898 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7899 }
7900 return rc;
7901}
7902
7903
7904#ifdef IEM_WITH_SETJMP
7905/**
7906 * Stores a data oword (octo word), AVX aligned, longjmp on error.
7907 *
7909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7910 * @param iSegReg The index of the segment register to use for
7911 * this access. The base and limits are checked.
7912 * @param GCPtrMem The address of the guest memory.
7913 * @param pu256Value Pointer to the value to store.
7914 */
7915void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7916 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7917{
7918 /* The lazy approach for now... */
7919 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7920 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7921 pu256Dst->au64[0] = pu256Value->au64[0];
7922 pu256Dst->au64[1] = pu256Value->au64[1];
7923 pu256Dst->au64[2] = pu256Value->au64[2];
7924 pu256Dst->au64[3] = pu256Value->au64[3];
7925 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7926}
7927#endif
7928
7929
7930/**
7931 * Stores a descriptor register (sgdt, sidt).
7932 *
7933 * @returns Strict VBox status code.
7934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7935 * @param cbLimit The limit.
7936 * @param GCPtrBase The base address.
7937 * @param iSegReg The index of the segment register to use for
7938 * this access. The base and limits are checked.
7939 * @param GCPtrMem The address of the guest memory.
7940 */
7941VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7942{
7943 /*
7944 * The SIDT and SGDT instructions actually store the data using two
7945 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7946 * do not respond to operand size prefixes.
7947 */
7948 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7949 if (rcStrict == VINF_SUCCESS)
7950 {
7951 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7952 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7953 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7954 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7955 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7956 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7957 else
7958 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7959 }
7960 return rcStrict;
7961}
7962
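/*
 * Resulting guest memory layout of the descriptor register store above:
 *
 *      GCPtrMem + 0:  limit (word)
 *      GCPtrMem + 2:  base  (dword in 16-bit and 32-bit mode, qword in 64-bit
 *                            mode; the 286 case forces the top base byte to 0xff)
 *
 * Since the two values are written with independent accesses, a fault on the
 * base write can leave the limit already stored.
 */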
7963
7964/**
7965 * Pushes a word onto the stack.
7966 *
7967 * @returns Strict VBox status code.
7968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7969 * @param u16Value The value to push.
7970 */
7971VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7972{
7973 /* Decrement the stack pointer. */
7974 uint64_t uNewRsp;
7975 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7976
7977 /* Write the word the lazy way. */
7978 uint16_t *pu16Dst;
7979 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7980 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7981 if (rc == VINF_SUCCESS)
7982 {
7983 *pu16Dst = u16Value;
7984 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7985 }
7986
7987 /* Commit the new RSP value unless an access handler made trouble. */
7988 if (rc == VINF_SUCCESS)
7989 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7990
7991 return rc;
7992}
7993
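/*
 * The regular stack push helpers all share this shape: iemRegGetRspForPush
 * calculates the new (lower) RSP and the corresponding top-of-stack address,
 * the value is written through an SS relative mapping, and RSP is only
 * committed once the write itself has been committed.  Illustrative use
 * (u16Value is a made-up caller variable):
 *
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16Value);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */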
7994
7995/**
7996 * Pushes a dword onto the stack.
7997 *
7998 * @returns Strict VBox status code.
7999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8000 * @param u32Value The value to push.
8001 */
8002VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8003{
8004 /* Decrement the stack pointer. */
8005 uint64_t uNewRsp;
8006 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8007
8008 /* Write the dword the lazy way. */
8009 uint32_t *pu32Dst;
8010 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8011 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8012 if (rc == VINF_SUCCESS)
8013 {
8014 *pu32Dst = u32Value;
8015 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8016 }
8017
8018 /* Commit the new RSP value unless an access handler made trouble. */
8019 if (rc == VINF_SUCCESS)
8020 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8021
8022 return rc;
8023}
8024
8025
8026/**
8027 * Pushes a dword segment register value onto the stack.
8028 *
8029 * @returns Strict VBox status code.
8030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8031 * @param u32Value The value to push.
8032 */
8033VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8034{
8035 /* Decrement the stack pointer. */
8036 uint64_t uNewRsp;
8037 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8038
8039 /* The Intel docs talk about zero extending the selector register
8040 value. My actual Intel CPU here might be zero extending the value
8041 but it still only writes the lower word... */
8042 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8043 * happens when crossing a page boundary: is the high word checked
8044 * for write accessibility or not? Probably it is. What about segment limits?
8045 * It appears this behavior is also shared with trap error codes.
8046 *
8047 * Docs indicate the behavior changed somewhere around the Pentium or Pentium Pro.
8048 * Check on ancient hardware to find out when it actually changed. */
8049 uint16_t *pu16Dst;
8050 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8051 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8052 if (rc == VINF_SUCCESS)
8053 {
8054 *pu16Dst = (uint16_t)u32Value;
8055 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8056 }
8057
8058 /* Commit the new RSP value unless an access handler made trouble. */
8059 if (rc == VINF_SUCCESS)
8060 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8061
8062 return rc;
8063}
8064
8065
8066/**
8067 * Pushes a qword onto the stack.
8068 *
8069 * @returns Strict VBox status code.
8070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8071 * @param u64Value The value to push.
8072 */
8073VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8074{
8075 /* Decrement the stack pointer. */
8076 uint64_t uNewRsp;
8077 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8078
8079 /* Write the qword the lazy way. */
8080 uint64_t *pu64Dst;
8081 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8082 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8083 if (rc == VINF_SUCCESS)
8084 {
8085 *pu64Dst = u64Value;
8086 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8087 }
8088
8089 /* Commit the new RSP value unless an access handler made trouble. */
8090 if (rc == VINF_SUCCESS)
8091 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8092
8093 return rc;
8094}
8095
8096
8097/**
8098 * Pops a word from the stack.
8099 *
8100 * @returns Strict VBox status code.
8101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8102 * @param pu16Value Where to store the popped value.
8103 */
8104VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8105{
8106 /* Increment the stack pointer. */
8107 uint64_t uNewRsp;
8108 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8109
8110 /* Read the word the lazy way. */
8111 uint16_t const *pu16Src;
8112 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8113 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8114 if (rc == VINF_SUCCESS)
8115 {
8116 *pu16Value = *pu16Src;
8117 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8118
8119 /* Commit the new RSP value. */
8120 if (rc == VINF_SUCCESS)
8121 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8122 }
8123
8124 return rc;
8125}
8126
8127
8128/**
8129 * Pops a dword from the stack.
8130 *
8131 * @returns Strict VBox status code.
8132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8133 * @param pu32Value Where to store the popped value.
8134 */
8135VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8136{
8137 /* Increment the stack pointer. */
8138 uint64_t uNewRsp;
8139 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8140
8141 /* Read the dword the lazy way. */
8142 uint32_t const *pu32Src;
8143 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8144 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8145 if (rc == VINF_SUCCESS)
8146 {
8147 *pu32Value = *pu32Src;
8148 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8149
8150 /* Commit the new RSP value. */
8151 if (rc == VINF_SUCCESS)
8152 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8153 }
8154
8155 return rc;
8156}
8157
8158
8159/**
8160 * Pops a qword from the stack.
8161 *
8162 * @returns Strict VBox status code.
8163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8164 * @param pu64Value Where to store the popped value.
8165 */
8166VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8167{
8168 /* Increment the stack pointer. */
8169 uint64_t uNewRsp;
8170 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8171
8172 /* Read the qword the lazy way. */
8173 uint64_t const *pu64Src;
8174 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8175 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8176 if (rc == VINF_SUCCESS)
8177 {
8178 *pu64Value = *pu64Src;
8179 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8180
8181 /* Commit the new RSP value. */
8182 if (rc == VINF_SUCCESS)
8183 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8184 }
8185
8186 return rc;
8187}
8188
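/*
 * Note the ordering in the pop helpers above: the value is copied out and the
 * mapping committed before RSP is updated, so a failing access leaves the
 * guest RSP untouched.  Illustrative use:
 *
 *      uint16_t     u16Value;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
 */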
8189
8190/**
8191 * Pushes a word onto the stack, using a temporary stack pointer.
8192 *
8193 * @returns Strict VBox status code.
8194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8195 * @param u16Value The value to push.
8196 * @param pTmpRsp Pointer to the temporary stack pointer.
8197 */
8198VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8199{
8200 /* Decrement the stack pointer. */
8201 RTUINT64U NewRsp = *pTmpRsp;
8202 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8203
8204 /* Write the word the lazy way. */
8205 uint16_t *pu16Dst;
8206 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8207 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8208 if (rc == VINF_SUCCESS)
8209 {
8210 *pu16Dst = u16Value;
8211 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8212 }
8213
8214 /* Commit the new RSP value unless an access handler made trouble. */
8215 if (rc == VINF_SUCCESS)
8216 *pTmpRsp = NewRsp;
8217
8218 return rc;
8219}
8220
8221
8222/**
8223 * Pushes a dword onto the stack, using a temporary stack pointer.
8224 *
8225 * @returns Strict VBox status code.
8226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8227 * @param u32Value The value to push.
8228 * @param pTmpRsp Pointer to the temporary stack pointer.
8229 */
8230VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8231{
8232 /* Decrement the stack pointer. */
8233 RTUINT64U NewRsp = *pTmpRsp;
8234 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8235
8236 /* Write the dword the lazy way. */
8237 uint32_t *pu32Dst;
8238 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8239 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8240 if (rc == VINF_SUCCESS)
8241 {
8242 *pu32Dst = u32Value;
8243 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8244 }
8245
8246 /* Commit the new RSP value unless an access handler made trouble. */
8247 if (rc == VINF_SUCCESS)
8248 *pTmpRsp = NewRsp;
8249
8250 return rc;
8251}
8252
8253
8254/**
8255 * Pushes a qword onto the stack, using a temporary stack pointer.
8256 *
8257 * @returns Strict VBox status code.
8258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8259 * @param u64Value The value to push.
8260 * @param pTmpRsp Pointer to the temporary stack pointer.
8261 */
8262VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8263{
8264 /* Decrement the stack pointer. */
8265 RTUINT64U NewRsp = *pTmpRsp;
8266 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8267
8268 /* Write the qword the lazy way. */
8269 uint64_t *pu64Dst;
8270 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8271 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8272 if (rc == VINF_SUCCESS)
8273 {
8274 *pu64Dst = u64Value;
8275 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8276 }
8277
8278 /* Commit the new RSP value unless an access handler made trouble. */
8279 if (rc == VINF_SUCCESS)
8280 *pTmpRsp = NewRsp;
8281
8282 return rc;
8283}
8284
8285
8286/**
8287 * Pops a word from the stack, using a temporary stack pointer.
8288 *
8289 * @returns Strict VBox status code.
8290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8291 * @param pu16Value Where to store the popped value.
8292 * @param pTmpRsp Pointer to the temporary stack pointer.
8293 */
8294VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8295{
8296 /* Increment the stack pointer. */
8297 RTUINT64U NewRsp = *pTmpRsp;
8298 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8299
8300 /* Read the word the lazy way. */
8301 uint16_t const *pu16Src;
8302 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8303 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8304 if (rc == VINF_SUCCESS)
8305 {
8306 *pu16Value = *pu16Src;
8307 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8308
8309 /* Commit the new RSP value. */
8310 if (rc == VINF_SUCCESS)
8311 *pTmpRsp = NewRsp;
8312 }
8313
8314 return rc;
8315}
8316
8317
8318/**
8319 * Pops a dword from the stack, using a temporary stack pointer.
8320 *
8321 * @returns Strict VBox status code.
8322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8323 * @param pu32Value Where to store the popped value.
8324 * @param pTmpRsp Pointer to the temporary stack pointer.
8325 */
8326VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8327{
8328 /* Increment the stack pointer. */
8329 RTUINT64U NewRsp = *pTmpRsp;
8330 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8331
8332 /* Read the dword the lazy way. */
8333 uint32_t const *pu32Src;
8334 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8335 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8336 if (rc == VINF_SUCCESS)
8337 {
8338 *pu32Value = *pu32Src;
8339 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8340
8341 /* Commit the new RSP value. */
8342 if (rc == VINF_SUCCESS)
8343 *pTmpRsp = NewRsp;
8344 }
8345
8346 return rc;
8347}
8348
8349
8350/**
8351 * Pops a qword from the stack, using a temporary stack pointer.
8352 *
8353 * @returns Strict VBox status code.
8354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8355 * @param pu64Value Where to store the popped value.
8356 * @param pTmpRsp Pointer to the temporary stack pointer.
8357 */
8358VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8359{
8360 /* Increment the stack pointer. */
8361 RTUINT64U NewRsp = *pTmpRsp;
8362 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8363
8364 /* Read the qword the lazy way. */
8365 uint64_t const *pu64Src;
8366 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8367 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8368 if (rcStrict == VINF_SUCCESS)
8369 {
8370 *pu64Value = *pu64Src;
8371 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8372
8373 /* Commit the new RSP value. */
8374 if (rcStrict == VINF_SUCCESS)
8375 *pTmpRsp = NewRsp;
8376 }
8377
8378 return rcStrict;
8379}
8380
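/*
 * The *Ex push/pop variants above work on a caller supplied RTUINT64U stack
 * pointer rather than on CPUMCTX::rsp, so a caller can stage a multi-item
 * sequence and only fold the final value back into the real RSP once the
 * whole sequence has succeeded.  Illustrative sketch (uValue1 and uValue2 are
 * made up):
 *
 *      RTUINT64U TmpRsp;
 *      TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, uValue1, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPushU16Ex(pVCpu, uValue2, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
 */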
8381
8382/**
8383 * Begin a special stack push (used by interrupts, exceptions and such).
8384 *
8385 * This will raise \#SS or \#PF if appropriate.
8386 *
8387 * @returns Strict VBox status code.
8388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8389 * @param cbMem The number of bytes to push onto the stack.
8390 * @param cbAlign The alignment mask (7, 3, 1).
8391 * @param ppvMem Where to return the pointer to the stack memory.
8392 * As with the other memory functions this could be
8393 * direct access or bounce buffered access, so
8394 * don't commit the register until the commit call
8395 * succeeds.
8396 * @param puNewRsp Where to return the new RSP value. This must be
8397 * passed unchanged to
8398 * iemMemStackPushCommitSpecial().
8399 */
8400VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8401 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8402{
8403 Assert(cbMem < UINT8_MAX);
8404 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8405 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8406 IEM_ACCESS_STACK_W, cbAlign);
8407}
8408
8409
8410/**
8411 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8412 *
8413 * This will update the rSP.
8414 *
8415 * @returns Strict VBox status code.
8416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8417 * @param pvMem The pointer returned by
8418 * iemMemStackPushBeginSpecial().
8419 * @param uNewRsp The new RSP value returned by
8420 * iemMemStackPushBeginSpecial().
8421 */
8422VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8423{
8424 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8425 if (rcStrict == VINF_SUCCESS)
8426 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8427 return rcStrict;
8428}
8429
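/*
 * Typical pairing of the special stack push functions (illustrative sketch
 * only; the 8 byte item size, the alignment mask 7 and uValue are made up):
 *
 *      void        *pvMem;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvMem, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *(uint64_t *)pvMem = uValue;
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvMem, uNewRsp);
 *      }
 */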
8430
8431/**
8432 * Begin a special stack pop (used by iret, retf and such).
8433 *
8434 * This will raise \#SS or \#PF if appropriate.
8435 *
8436 * @returns Strict VBox status code.
8437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8438 * @param cbMem The number of bytes to pop from the stack.
8439 * @param cbAlign The alignment mask (7, 3, 1).
8440 * @param ppvMem Where to return the pointer to the stack memory.
8441 * @param puNewRsp Where to return the new RSP value. This must be
8442 * assigned to CPUMCTX::rsp manually some time
8443 * after iemMemStackPopDoneSpecial() has been
8444 * called.
8445 */
8446VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8447 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8448{
8449 Assert(cbMem < UINT8_MAX);
8450 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8451 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8452}
8453
8454
8455/**
8456 * Continue a special stack pop (used by iret and retf), for the purpose of
8457 * retrieving a new stack pointer.
8458 *
8459 * This will raise \#SS or \#PF if appropriate.
8460 *
8461 * @returns Strict VBox status code.
8462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8463 * @param off Offset from the top of the stack. This is zero
8464 * except in the retf case.
8465 * @param cbMem The number of bytes to pop from the stack.
8466 * @param ppvMem Where to return the pointer to the stack memory.
8467 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8468 * return this because all use of this function is
8469 * to retrieve a new value and anything we return
8470 * here would be discarded.)
8471 */
8472VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8473 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8474{
8475 Assert(cbMem < UINT8_MAX);
8476
8477 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8478 RTGCPTR GCPtrTop;
8479 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8480 GCPtrTop = uCurNewRsp;
8481 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8482 GCPtrTop = (uint32_t)uCurNewRsp;
8483 else
8484 GCPtrTop = (uint16_t)uCurNewRsp;
8485
8486 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8487 0 /* checked in iemMemStackPopBeginSpecial */);
8488}
8489
8490
8491/**
8492 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8493 * iemMemStackPopContinueSpecial).
8494 *
8495 * The caller will manually commit the rSP.
8496 *
8497 * @returns Strict VBox status code.
8498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8499 * @param pvMem The pointer returned by
8500 * iemMemStackPopBeginSpecial() or
8501 * iemMemStackPopContinueSpecial().
8502 */
8503VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8504{
8505 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8506}
8507
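/*
 * Typical pairing of the special stack pop functions (illustrative sketch
 * only; the 8 byte item size and the alignment mask 7 are made up).  As
 * documented above, the caller commits RSP manually afterwards:
 *
 *      void const  *pvMem;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvMem, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uValue = *(uint64_t const *)pvMem;
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvMem);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;
 *      }
 */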
8508
8509/**
8510 * Fetches a system table byte.
8511 *
8512 * @returns Strict VBox status code.
8513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8514 * @param pbDst Where to return the byte.
8515 * @param iSegReg The index of the segment register to use for
8516 * this access. The base and limits are checked.
8517 * @param GCPtrMem The address of the guest memory.
8518 */
8519VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8520{
8521 /* The lazy approach for now... */
8522 uint8_t const *pbSrc;
8523 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8524 if (rc == VINF_SUCCESS)
8525 {
8526 *pbDst = *pbSrc;
8527 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8528 }
8529 return rc;
8530}
8531
8532
8533/**
8534 * Fetches a system table word.
8535 *
8536 * @returns Strict VBox status code.
8537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8538 * @param pu16Dst Where to return the word.
8539 * @param iSegReg The index of the segment register to use for
8540 * this access. The base and limits are checked.
8541 * @param GCPtrMem The address of the guest memory.
8542 */
8543VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8544{
8545 /* The lazy approach for now... */
8546 uint16_t const *pu16Src;
8547 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8548 if (rc == VINF_SUCCESS)
8549 {
8550 *pu16Dst = *pu16Src;
8551 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8552 }
8553 return rc;
8554}
8555
8556
8557/**
8558 * Fetches a system table dword.
8559 *
8560 * @returns Strict VBox status code.
8561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8562 * @param pu32Dst Where to return the dword.
8563 * @param iSegReg The index of the segment register to use for
8564 * this access. The base and limits are checked.
8565 * @param GCPtrMem The address of the guest memory.
8566 */
8567VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8568{
8569 /* The lazy approach for now... */
8570 uint32_t const *pu32Src;
8571 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8572 if (rc == VINF_SUCCESS)
8573 {
8574 *pu32Dst = *pu32Src;
8575 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8576 }
8577 return rc;
8578}
8579
8580
8581/**
8582 * Fetches a system table qword.
8583 *
8584 * @returns Strict VBox status code.
8585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8586 * @param pu64Dst Where to return the qword.
8587 * @param iSegReg The index of the segment register to use for
8588 * this access. The base and limits are checked.
8589 * @param GCPtrMem The address of the guest memory.
8590 */
8591VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8592{
8593 /* The lazy approach for now... */
8594 uint64_t const *pu64Src;
8595 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8596 if (rc == VINF_SUCCESS)
8597 {
8598 *pu64Dst = *pu64Src;
8599 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8600 }
8601 return rc;
8602}
8603
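/*
 * The system table fetch helpers above use IEM_ACCESS_SYS_R and, as the
 * descriptor fetching code below illustrates, are typically called with
 * UINT8_MAX as iSegReg and a GDT/LDT derived address as GCPtrMem.
 */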
8604
8605/**
8606 * Fetches a descriptor table entry with a caller-specified error code.
8607 *
8608 * @returns Strict VBox status code.
8609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8610 * @param pDesc Where to return the descriptor table entry.
8611 * @param uSel The selector which table entry to fetch.
8612 * @param uXcpt The exception to raise on table lookup error.
8613 * @param uErrorCode The error code associated with the exception.
8614 */
8615static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8616 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8617{
8618 AssertPtr(pDesc);
8619 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8620
8621 /** @todo did the 286 require all 8 bytes to be accessible? */
8622 /*
8623 * Get the selector table base and check bounds.
8624 */
8625 RTGCPTR GCPtrBase;
8626 if (uSel & X86_SEL_LDT)
8627 {
8628 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8629 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8630 {
8631 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8632 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8633 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8634 uErrorCode, 0);
8635 }
8636
8637 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8638 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8639 }
8640 else
8641 {
8642 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8643 {
8644 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8645 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8646 uErrorCode, 0);
8647 }
8648 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8649 }
8650
8651 /*
8652 * Read the legacy descriptor and maybe the long mode extensions if
8653 * required.
8654 */
8655 VBOXSTRICTRC rcStrict;
8656 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8657 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8658 else
8659 {
8660 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8661 if (rcStrict == VINF_SUCCESS)
8662 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8663 if (rcStrict == VINF_SUCCESS)
8664 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8665 if (rcStrict == VINF_SUCCESS)
8666 pDesc->Legacy.au16[3] = 0;
8667 else
8668 return rcStrict;
8669 }
8670
8671 if (rcStrict == VINF_SUCCESS)
8672 {
8673 if ( !IEM_IS_LONG_MODE(pVCpu)
8674 || pDesc->Legacy.Gen.u1DescType)
8675 pDesc->Long.au64[1] = 0;
8676 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8677 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8678 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8679 else
8680 {
8681 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8682 /** @todo is this the right exception? */
8683 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8684 }
8685 }
8686 return rcStrict;
8687}
8688
8689
8690/**
8691 * Fetches a descriptor table entry.
8692 *
8693 * @returns Strict VBox status code.
8694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8695 * @param pDesc Where to return the descriptor table entry.
8696 * @param uSel The selector which table entry to fetch.
8697 * @param uXcpt The exception to raise on table lookup error.
8698 */
8699VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8700{
8701 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8702}
8703
8704
8705/**
8706 * Marks the selector descriptor as accessed (only non-system descriptors).
8707 *
8708 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8709 * will therefore skip the limit checks.
8710 *
8711 * @returns Strict VBox status code.
8712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8713 * @param uSel The selector.
8714 */
8715VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8716{
8717 /*
8718 * Get the selector table base and calculate the entry address.
8719 */
8720 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8721 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8722 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8723 GCPtr += uSel & X86_SEL_MASK;
8724
8725 /*
8726 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8727 * ugly stuff to avoid this. This will make sure it's an atomic access
8728 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8729 */
8730 VBOXSTRICTRC rcStrict;
8731 uint32_t volatile *pu32;
8732 if ((GCPtr & 3) == 0)
8733 {
8734 /* The normal case, map the 32 bits around the accessed bit (40). */
8735 GCPtr += 2 + 2;
8736 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8737 if (rcStrict != VINF_SUCCESS)
8738 return rcStrict;
8739 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8740 }
8741 else
8742 {
8743 /* The misaligned GDT/LDT case, map the whole thing. */
8744 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8745 if (rcStrict != VINF_SUCCESS)
8746 return rcStrict;
8747 switch ((uintptr_t)pu32 & 3)
8748 {
8749 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8750 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8751 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8752 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8753 }
8754 }
8755
8756 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8757}
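/*
 * For reference: the accessed bit is bit 40 of the 8 byte descriptor, i.e.
 * bit 8 of the dword starting at offset 4.  That is why the aligned path
 * above maps GCPtr + 4 and sets bit 8, while the misaligned path maps all
 * 8 bytes and picks the byte/bit pair that still lands on descriptor bit 40.
 */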
8758
8759/** @} */
8760
8761/** @name Opcode Helpers.
8762 * @{
8763 */
8764
8765/**
8766 * Calculates the effective address of a ModR/M memory operand.
8767 *
8768 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8769 *
8770 * @return Strict VBox status code.
8771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8772 * @param bRm The ModRM byte.
8773 * @param cbImm The size of any immediate following the
8774 * effective address opcode bytes. Important for
8775 * RIP relative addressing.
8776 * @param pGCPtrEff Where to return the effective address.
8777 */
8778VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8779{
8780 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8781# define SET_SS_DEF() \
8782 do \
8783 { \
8784 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8785 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8786 } while (0)
8787
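/*
 * SET_SS_DEF: memory operands addressed via BP/EBP/RBP (and the ESP/RSP based
 * SIB forms) default to the SS segment unless a segment override prefix is
 * present; e.g. a 16-bit "mov ax, [bp+si]" is an SS access by default.  That
 * is what the SET_SS_DEF() invocations in the decoding below implement.
 */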
8788 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8789 {
8790/** @todo Check the effective address size crap! */
8791 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8792 {
8793 uint16_t u16EffAddr;
8794
8795 /* Handle the disp16 form with no registers first. */
8796 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8797 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8798 else
8799 {
8800 /* Get the displacement. */
8801 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8802 {
8803 case 0: u16EffAddr = 0; break;
8804 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8805 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8806 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8807 }
8808
8809 /* Add the base and index registers to the disp. */
8810 switch (bRm & X86_MODRM_RM_MASK)
8811 {
8812 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8813 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8814 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8815 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8816 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8817 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8818 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8819 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8820 }
8821 }
8822
8823 *pGCPtrEff = u16EffAddr;
8824 }
8825 else
8826 {
8827 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8828 uint32_t u32EffAddr;
8829
8830 /* Handle the disp32 form with no registers first. */
8831 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8832 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8833 else
8834 {
8835 /* Get the register (or SIB) value. */
8836 switch ((bRm & X86_MODRM_RM_MASK))
8837 {
8838 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8839 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8840 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8841 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8842 case 4: /* SIB */
8843 {
8844 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8845
8846 /* Get the index and scale it. */
8847 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8848 {
8849 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8850 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8851 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8852 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8853 case 4: u32EffAddr = 0; /*none */ break;
8854 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8855 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8856 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8857 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8858 }
8859 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8860
8861 /* add base */
8862 switch (bSib & X86_SIB_BASE_MASK)
8863 {
8864 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8865 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8866 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8867 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8868 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8869 case 5:
8870 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8871 {
8872 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8873 SET_SS_DEF();
8874 }
8875 else
8876 {
8877 uint32_t u32Disp;
8878 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8879 u32EffAddr += u32Disp;
8880 }
8881 break;
8882 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8883 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8884 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8885 }
8886 break;
8887 }
8888 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8889 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8890 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8891 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8892 }
8893
8894 /* Get and add the displacement. */
8895 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8896 {
8897 case 0:
8898 break;
8899 case 1:
8900 {
8901 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8902 u32EffAddr += i8Disp;
8903 break;
8904 }
8905 case 2:
8906 {
8907 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8908 u32EffAddr += u32Disp;
8909 break;
8910 }
8911 default:
8912 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8913 }
8914
8915 }
8916 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8917 *pGCPtrEff = u32EffAddr;
8918 else
8919 {
8920 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8921 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8922 }
8923 }
8924 }
8925 else
8926 {
8927 uint64_t u64EffAddr;
8928
8929 /* Handle the rip+disp32 form with no registers first. */
8930 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8931 {
8932 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8933 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8934 }
8935 else
8936 {
8937 /* Get the register (or SIB) value. */
8938 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8939 {
8940 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8941 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8942 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8943 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8944 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8945 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8946 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8947 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8948 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8949 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8950 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8951 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8952 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8953 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8954 /* SIB */
8955 case 4:
8956 case 12:
8957 {
8958 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8959
8960 /* Get the index and scale it. */
8961 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8962 {
8963 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8964 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8965 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8966 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8967 case 4: u64EffAddr = 0; /* none */ break;
8968 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8969 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8970 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8971 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8972 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8973 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8974 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8975 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8976 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8977 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8978 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8979 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8980 }
8981 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8982
8983 /* add base */
8984 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8985 {
8986 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8987 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8988 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8989 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8990 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8991 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8992 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8993 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8994 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8995 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8996 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8997 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8998 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8999 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9000 /* complicated encodings */
9001 case 5:
9002 case 13:
9003 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9004 {
9005 if (!pVCpu->iem.s.uRexB)
9006 {
9007 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9008 SET_SS_DEF();
9009 }
9010 else
9011 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9012 }
9013 else
9014 {
9015 uint32_t u32Disp;
9016 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9017 u64EffAddr += (int32_t)u32Disp;
9018 }
9019 break;
9020 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9021 }
9022 break;
9023 }
9024 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9025 }
9026
9027 /* Get and add the displacement. */
9028 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9029 {
9030 case 0:
9031 break;
9032 case 1:
9033 {
9034 int8_t i8Disp;
9035 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9036 u64EffAddr += i8Disp;
9037 break;
9038 }
9039 case 2:
9040 {
9041 uint32_t u32Disp;
9042 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9043 u64EffAddr += (int32_t)u32Disp;
9044 break;
9045 }
9046 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9047 }
9048
9049 }
9050
9051 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9052 *pGCPtrEff = u64EffAddr;
9053 else
9054 {
9055 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9056 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9057 }
9058 }
9059
9060 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9061 return VINF_SUCCESS;
9062}
9063
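/*
 * Worked example (illustrative only): for the 32-bit encoded instruction
 * 8B 44 8D 08, i.e. mov eax, [ebp+ecx*4+8], bRm is 0x44 (mod=1, reg=0,
 * rm=4/SIB), bSib is 0x8D (ss=2 for a scale factor of 4, index=1/ECX,
 * base=5/EBP) and the disp8 is 0x08.  The SIB handling above therefore
 * computes EffAddr = ECX*4 + EBP + 8, and because the base is EBP and no
 * segment prefix is present, SET_SS_DEF() makes SS the default segment.
 *
 * Ignoring segment defaults and displacement fetching, the 32-bit SIB math
 * boils down to the sketch below.  It is kept inside #if 0 as it is not part
 * of IEM; the helper name and the au32Gpr parameter (EAX..EDI in encoding
 * order, with u32Disp being the already fetched displacement) are made up
 * for the example.
 */
#if 0 /* illustrative sketch only */
static uint32_t iemExampleCalcSibEffAddrU32(uint32_t const au32Gpr[8], uint8_t bRm, uint8_t bSib, uint32_t u32Disp)
{
    uint8_t const iIndex     = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
    uint8_t const cShift     = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    uint8_t const iBase      = bSib & X86_SIB_BASE_MASK;
    uint32_t      u32EffAddr = iIndex == 4 ? 0 : au32Gpr[iIndex] << cShift; /* index 4 means no index register */
    if (iBase != 5 || (bRm & X86_MODRM_MOD_MASK) != 0)  /* base 5 with mod 0 means disp32 only, no base register */
        u32EffAddr += au32Gpr[iBase];
    return u32EffAddr + u32Disp;
}
#endif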
9064
9065/**
9066 * Calculates the effective address of a ModR/M memory operand.
9067 *
9068 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9069 *
9070 * @return Strict VBox status code.
9071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9072 * @param bRm The ModRM byte.
9073 * @param cbImm The size of any immediate following the
9074 * effective address opcode bytes. Important for
9075 * RIP relative addressing.
9076 * @param pGCPtrEff Where to return the effective address.
9077 * @param offRsp RSP displacement.
9078 */
9079VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
9080{
9081 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
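/* If no segment override prefix is active, stack related forms (xBP/xSP based) default to the SS segment. */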
9082# define SET_SS_DEF() \
9083 do \
9084 { \
9085 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9086 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9087 } while (0)
9088
9089 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9090 {
9091/** @todo Check the effective address size crap! */
9092 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9093 {
9094 uint16_t u16EffAddr;
9095
9096 /* Handle the disp16 form with no registers first. */
9097 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9098 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9099 else
9100 {
9101 /* Get the displacement. */
9102 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9103 {
9104 case 0: u16EffAddr = 0; break;
9105 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9106 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9107 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9108 }
9109
9110 /* Add the base and index registers to the disp. */
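/* This is the fixed 16-bit addressing table: BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP and BX,
   with the BP based forms defaulting to SS. */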
9111 switch (bRm & X86_MODRM_RM_MASK)
9112 {
9113 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9114 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9115 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9116 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9117 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9118 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9119 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9120 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9121 }
9122 }
9123
9124 *pGCPtrEff = u16EffAddr;
9125 }
9126 else
9127 {
9128 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9129 uint32_t u32EffAddr;
9130
9131 /* Handle the disp32 form with no registers first. */
9132 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9133 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9134 else
9135 {
9136 /* Get the register (or SIB) value. */
9137 switch ((bRm & X86_MODRM_RM_MASK))
9138 {
9139 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9140 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9141 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9142 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9143 case 4: /* SIB */
9144 {
9145 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9146
9147 /* Get the index and scale it. */
9148 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9149 {
9150 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9151 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9152 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9153 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9154 case 4: u32EffAddr = 0; /* none */ break;
9155 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9156 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9157 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9158 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9159 }
9160 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9161
9162 /* add base */
9163 switch (bSib & X86_SIB_BASE_MASK)
9164 {
9165 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9166 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9167 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9168 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9169 case 4:
9170 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
9171 SET_SS_DEF();
9172 break;
9173 case 5:
9174 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9175 {
9176 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9177 SET_SS_DEF();
9178 }
9179 else
9180 {
9181 uint32_t u32Disp;
9182 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9183 u32EffAddr += u32Disp;
9184 }
9185 break;
9186 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9187 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9188 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9189 }
9190 break;
9191 }
9192 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9193 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9194 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9195 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9196 }
9197
9198 /* Get and add the displacement. */
9199 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9200 {
9201 case 0:
9202 break;
9203 case 1:
9204 {
9205 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9206 u32EffAddr += i8Disp;
9207 break;
9208 }
9209 case 2:
9210 {
9211 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9212 u32EffAddr += u32Disp;
9213 break;
9214 }
9215 default:
9216 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9217 }
9218
9219 }
9220 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9221 *pGCPtrEff = u32EffAddr;
9222 else
9223 {
9224 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9225 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9226 }
9227 }
9228 }
9229 else
9230 {
9231 uint64_t u64EffAddr;
9232
9233 /* Handle the rip+disp32 form with no registers first. */
9234 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9235 {
9236 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9237 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9238 }
9239 else
9240 {
9241 /* Get the register (or SIB) value. */
9242 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9243 {
9244 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9245 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9246 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9247 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9248 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9249 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9250 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9251 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9252 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9253 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9254 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9255 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9256 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9257 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9258 /* SIB */
9259 case 4:
9260 case 12:
9261 {
9262 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9263
9264 /* Get the index and scale it. */
9265 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9266 {
9267 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9268 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9269 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9270 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9271 case 4: u64EffAddr = 0; /* none */ break;
9272 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9273 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9274 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9275 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9276 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9277 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9278 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9279 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9280 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9281 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9282 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9283 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9284 }
9285 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9286
9287 /* add base */
9288 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9289 {
9290 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9291 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9292 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9293 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9294 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
9295 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9296 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9297 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9298 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9299 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9300 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9301 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9302 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9303 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9304 /* complicated encodings */
9305 case 5:
9306 case 13:
9307 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9308 {
9309 if (!pVCpu->iem.s.uRexB)
9310 {
9311 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9312 SET_SS_DEF();
9313 }
9314 else
9315 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9316 }
9317 else
9318 {
9319 uint32_t u32Disp;
9320 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9321 u64EffAddr += (int32_t)u32Disp;
9322 }
9323 break;
9324 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9325 }
9326 break;
9327 }
9328 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9329 }
9330
9331 /* Get and add the displacement. */
9332 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9333 {
9334 case 0:
9335 break;
9336 case 1:
9337 {
9338 int8_t i8Disp;
9339 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9340 u64EffAddr += i8Disp;
9341 break;
9342 }
9343 case 2:
9344 {
9345 uint32_t u32Disp;
9346 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9347 u64EffAddr += (int32_t)u32Disp;
9348 break;
9349 }
9350 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9351 }
9352
9353 }
9354
9355 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9356 *pGCPtrEff = u64EffAddr;
9357 else
9358 {
9359 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9360 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9361 }
9362 }
9363
9364 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
9365 return VINF_SUCCESS;
9366}
9367
9368
9369#ifdef IEM_WITH_SETJMP
9370/**
9371 * Calculates the effective address of a ModR/M memory operand.
9372 *
9373 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9374 *
9375 * May longjmp on internal error.
9376 *
9377 * @return The effective address.
9378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9379 * @param bRm The ModRM byte.
9380 * @param cbImm The size of any immediate following the
9381 * effective address opcode bytes. Important for
9382 * RIP relative addressing.
9383 */
9384RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) IEM_NOEXCEPT_MAY_LONGJMP
9385{
9386 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9387# define SET_SS_DEF() \
9388 do \
9389 { \
9390 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9391 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9392 } while (0)
9393
9394 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9395 {
9396/** @todo Check the effective address size crap! */
9397 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9398 {
9399 uint16_t u16EffAddr;
9400
9401 /* Handle the disp16 form with no registers first. */
9402 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9403 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9404 else
9405 {
9406 /* Get the displacement. */
9407 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9408 {
9409 case 0: u16EffAddr = 0; break;
9410 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9411 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9412 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9413 }
9414
9415 /* Add the base and index registers to the disp. */
9416 switch (bRm & X86_MODRM_RM_MASK)
9417 {
9418 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9419 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9420 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9421 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9422 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9423 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9424 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9425 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9426 }
9427 }
9428
9429 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9430 return u16EffAddr;
9431 }
9432
9433 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9434 uint32_t u32EffAddr;
9435
9436 /* Handle the disp32 form with no registers first. */
9437 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9438 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9439 else
9440 {
9441 /* Get the register (or SIB) value. */
9442 switch ((bRm & X86_MODRM_RM_MASK))
9443 {
9444 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9445 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9446 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9447 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9448 case 4: /* SIB */
9449 {
9450 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9451
9452 /* Get the index and scale it. */
9453 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9454 {
9455 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9456 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9457 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9458 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9459 case 4: u32EffAddr = 0; /* none */ break;
9460 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9461 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9462 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9463 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9464 }
9465 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9466
9467 /* add base */
9468 switch (bSib & X86_SIB_BASE_MASK)
9469 {
9470 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9471 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9472 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9473 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9474 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9475 case 5:
9476 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9477 {
9478 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9479 SET_SS_DEF();
9480 }
9481 else
9482 {
9483 uint32_t u32Disp;
9484 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9485 u32EffAddr += u32Disp;
9486 }
9487 break;
9488 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9489 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9490 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9491 }
9492 break;
9493 }
9494 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9495 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9496 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9497 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9498 }
9499
9500 /* Get and add the displacement. */
9501 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9502 {
9503 case 0:
9504 break;
9505 case 1:
9506 {
9507 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9508 u32EffAddr += i8Disp;
9509 break;
9510 }
9511 case 2:
9512 {
9513 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9514 u32EffAddr += u32Disp;
9515 break;
9516 }
9517 default:
9518 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9519 }
9520 }
9521
9522 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9523 {
9524 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9525 return u32EffAddr;
9526 }
9527 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9528 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9529 return u32EffAddr & UINT16_MAX;
9530 }
9531
9532 uint64_t u64EffAddr;
9533
9534 /* Handle the rip+disp32 form with no registers first. */
9535 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9536 {
9537 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9538 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9539 }
9540 else
9541 {
9542 /* Get the register (or SIB) value. */
9543 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9544 {
9545 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9546 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9547 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9548 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9549 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9550 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9551 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9552 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9553 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9554 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9555 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9556 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9557 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9558 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9559 /* SIB */
9560 case 4:
9561 case 12:
9562 {
9563 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9564
9565 /* Get the index and scale it. */
9566 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9567 {
9568 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9569 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9570 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9571 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9572 case 4: u64EffAddr = 0; /* none */ break;
9573 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9574 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9575 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9576 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9577 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9578 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9579 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9580 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9581 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9582 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9583 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9584 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9585 }
9586 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9587
9588 /* add base */
9589 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9590 {
9591 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9592 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9593 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9594 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9595 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9596 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9597 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9598 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9599 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9600 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9601 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9602 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9603 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9604 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9605 /* complicated encodings */
9606 case 5:
9607 case 13:
9608 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9609 {
9610 if (!pVCpu->iem.s.uRexB)
9611 {
9612 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9613 SET_SS_DEF();
9614 }
9615 else
9616 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9617 }
9618 else
9619 {
9620 uint32_t u32Disp;
9621 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9622 u64EffAddr += (int32_t)u32Disp;
9623 }
9624 break;
9625 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9626 }
9627 break;
9628 }
9629 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9630 }
9631
9632 /* Get and add the displacement. */
9633 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9634 {
9635 case 0:
9636 break;
9637 case 1:
9638 {
9639 int8_t i8Disp;
9640 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9641 u64EffAddr += i8Disp;
9642 break;
9643 }
9644 case 2:
9645 {
9646 uint32_t u32Disp;
9647 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9648 u64EffAddr += (int32_t)u32Disp;
9649 break;
9650 }
9651 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9652 }
9653
9654 }
9655
9656 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9657 {
9658 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9659 return u64EffAddr;
9660 }
9661 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9662 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9663 return u64EffAddr & UINT32_MAX;
9664}
9665#endif /* IEM_WITH_SETJMP */
9666
9667/** @} */
9668
9669
9670#ifdef LOG_ENABLED
9671/**
9672 * Logs the current instruction.
9673 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9674 * @param fSameCtx Set if we have the same context information as the VMM,
9675 * clear if we may have already executed an instruction in
9676 * our debug context. When clear, we assume IEMCPU holds
9677 * valid CPU mode info.
9678 *
9679 * The @a fSameCtx parameter is now misleading and obsolete.
9680 * @param pszFunction The IEM function doing the execution.
9681 */
9682static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9683{
9684# ifdef IN_RING3
9685 if (LogIs2Enabled())
9686 {
9687 char szInstr[256];
9688 uint32_t cbInstr = 0;
9689 if (fSameCtx)
9690 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9691 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9692 szInstr, sizeof(szInstr), &cbInstr);
9693 else
9694 {
9695 uint32_t fFlags = 0;
9696 switch (pVCpu->iem.s.enmCpuMode)
9697 {
9698 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9699 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9700 case IEMMODE_16BIT:
9701 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9702 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9703 else
9704 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9705 break;
9706 }
9707 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9708 szInstr, sizeof(szInstr), &cbInstr);
9709 }
9710
9711 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9712 Log2(("**** %s\n"
9713 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9714 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9715 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9716 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9717 " %s\n"
9718 , pszFunction,
9719 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9720 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9721 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9722 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9723 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9724 szInstr));
9725
9726 if (LogIs3Enabled())
9727 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9728 }
9729 else
9730# endif
9731 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9732 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9733 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9734}
9735#endif /* LOG_ENABLED */
9736
9737
9738#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9739/**
9740 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9741 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9742 *
9743 * @returns Modified rcStrict.
9744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9745 * @param rcStrict The instruction execution status.
9746 */
9747static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9748{
9749 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9750 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9751 {
9752 /* VMX preemption timer takes priority over NMI-window exits. */
9753 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9754 {
9755 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9756 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9757 }
9758 /*
9759 * Check remaining intercepts.
9760 *
9761 * NMI-window and Interrupt-window VM-exits.
9762 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9763 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9764 *
9765 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9766 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9767 */
9768 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9769 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9770 && !TRPMHasTrap(pVCpu))
9771 {
9772 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9773 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9774 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9775 {
9776 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9777 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9778 }
9779 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9780 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9781 {
9782 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9783 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9784 }
9785 }
9786 }
9787 /* TPR-below threshold/APIC write has the highest priority. */
9788 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9789 {
9790 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9791 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9792 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9793 }
9794 /* MTF takes priority over VMX-preemption timer. */
9795 else
9796 {
9797 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9798 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9799 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9800 }
9801 return rcStrict;
9802}
9803#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9804
9805
9806/** @def IEM_TRY_SETJMP
9807 * Wrapper around setjmp / try, hiding all the ugly differences.
9808 *
9809 * @note Use with extreme care as this is a fragile macro.
9810 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
9811 * @param a_rcTarget The variable that should receive the status code in case
9812 * of a longjmp/throw.
9813 */
9814/** @def IEM_TRY_SETJMP_AGAIN
9815 * For when setjmp / try is used again in the same variable scope as a previous
9816 * IEM_TRY_SETJMP invocation.
9817 */
9818/** @def IEM_CATCH_LONGJMP_BEGIN
9819 * Start wrapper for catch / setjmp-else.
9820 *
9821 * This will set up a scope.
9822 *
9823 * @note Use with extreme care as this is a fragile macro.
9824 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
9825 * @param a_rcTarget The variable that should receive the status code in case
9826 * of a longjmp/throw.
9827 */
9828/** @def IEM_CATCH_LONGJMP_END
9829 * End wrapper for catch / setjmp-else.
9830 *
9831 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
9832 * state.
9833 *
9834 * @note Use with extreme care as this is a fragile macro.
9835 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
9836 */
9837#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
9838# ifdef IEM_WITH_THROW_CATCH
9839# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
9840 a_rcTarget = VINF_SUCCESS; \
9841 try
9842# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
9843 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
9844# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
9845 catch (int rcThrown) \
9846 { \
9847 a_rcTarget = rcThrown
9848# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
9849 } \
9850 ((void)0)
9851# else /* !IEM_WITH_THROW_CATCH */
9852 # define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
9853 jmp_buf JmpBuf; \
9854 jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
9855 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
9856 if ((a_rcTarget = setjmp(JmpBuf)) == 0)
9857 # define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
9858 pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
9859 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
9860 if ((a_rcTarget = setjmp(JmpBuf)) == 0)
9861# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
9862 else \
9863 { \
9864 ((void)0)
9865# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
9866 } \
9867 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
9868# endif /* !IEM_WITH_THROW_CATCH */
9869#endif /* IEM_WITH_SETJMP */
9870
9871
9872/**
9873 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9874 * IEMExecOneWithPrefetchedByPC.
9875 *
9876 * Similar code is found in IEMExecLots.
9877 *
9878 * @return Strict VBox status code.
9879 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9880 * @param fExecuteInhibit If set, execute the instruction following CLI,
9881 * POP SS and MOV SS,GR.
9882 * @param pszFunction The calling function name.
9883 */
9884DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9885{
9886 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9887 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9888 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9889 RT_NOREF_PV(pszFunction);
9890
9891#ifdef IEM_WITH_SETJMP
9892 VBOXSTRICTRC rcStrict;
9893 IEM_TRY_SETJMP(pVCpu, rcStrict)
9894 {
9895 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9896 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9897 }
9898 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9899 {
9900 pVCpu->iem.s.cLongJumps++;
9901 }
9902 IEM_CATCH_LONGJMP_END(pVCpu);
9903#else
9904 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9905 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9906#endif
9907 if (rcStrict == VINF_SUCCESS)
9908 pVCpu->iem.s.cInstructions++;
9909 if (pVCpu->iem.s.cActiveMappings > 0)
9910 {
9911 Assert(rcStrict != VINF_SUCCESS);
9912 iemMemRollback(pVCpu);
9913 }
9914 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9915 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9916 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9917
9918//#ifdef DEBUG
9919// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9920//#endif
9921
9922#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9923 /*
9924 * Perform any VMX nested-guest instruction boundary actions.
9925 *
9926 * If any of these causes a VM-exit, we must skip executing the next
9927 * instruction (would run into stale page tables). A VM-exit makes sure
9928 * there is no interrupt-inhibition, so that should ensure we don't go
9929 * on to execute the next instruction. Clearing fExecuteInhibit is
9930 * problematic because of the setjmp/longjmp clobbering above.
9931 */
9932 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9933 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9934 || rcStrict != VINF_SUCCESS)
9935 { /* likely */ }
9936 else
9937 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9938#endif
9939
9940 /* Execute the next instruction as well if a cli, pop ss or
9941 mov ss, Gr has just completed successfully. */
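/* This covers the one instruction wide interrupt shadow those instructions start,
   so that e.g. a MOV SS / MOV xSP pair isn't split up by returning to the caller in between. */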
9942 if ( fExecuteInhibit
9943 && rcStrict == VINF_SUCCESS
9944 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9945 {
9946 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9947 if (rcStrict == VINF_SUCCESS)
9948 {
9949#ifdef LOG_ENABLED
9950 iemLogCurInstr(pVCpu, false, pszFunction);
9951#endif
9952#ifdef IEM_WITH_SETJMP
9953 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9954 {
9955 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9956 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9957 }
9958 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9959 {
9960 pVCpu->iem.s.cLongJumps++;
9961 }
9962 IEM_CATCH_LONGJMP_END(pVCpu);
9963#else
9964 IEM_OPCODE_GET_FIRST_U8(&b);
9965 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9966#endif
9967 if (rcStrict == VINF_SUCCESS)
9968 {
9969 pVCpu->iem.s.cInstructions++;
9970#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9971 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9972 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9973 { /* likely */ }
9974 else
9975 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9976#endif
9977 }
9978 if (pVCpu->iem.s.cActiveMappings > 0)
9979 {
9980 Assert(rcStrict != VINF_SUCCESS);
9981 iemMemRollback(pVCpu);
9982 }
9983 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9984 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9985 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9986 }
9987 else if (pVCpu->iem.s.cActiveMappings > 0)
9988 iemMemRollback(pVCpu);
9989 /** @todo drop this after we bake this change into RIP advancing. */
9990 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9991 }
9992
9993 /*
9994 * Return value fiddling, statistics and sanity assertions.
9995 */
9996 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9997
9998 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9999 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10000 return rcStrict;
10001}
10002
10003
10004/**
10005 * Execute one instruction.
10006 *
10007 * @return Strict VBox status code.
10008 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10009 */
10010VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10011{
10012 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10013#ifdef LOG_ENABLED
10014 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10015#endif
10016
10017 /*
10018 * Do the decoding and emulation.
10019 */
10020 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10021 if (rcStrict == VINF_SUCCESS)
10022 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10023 else if (pVCpu->iem.s.cActiveMappings > 0)
10024 iemMemRollback(pVCpu);
10025
10026 if (rcStrict != VINF_SUCCESS)
10027 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10028 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10029 return rcStrict;
10030}
10031
10032
10033VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10034{
10035 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10036 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10037 if (rcStrict == VINF_SUCCESS)
10038 {
10039 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10040 if (pcbWritten)
10041 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10042 }
10043 else if (pVCpu->iem.s.cActiveMappings > 0)
10044 iemMemRollback(pVCpu);
10045
10046 return rcStrict;
10047}
10048
10049
10050VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10051 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10052{
10053 VBOXSTRICTRC rcStrict;
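/* The caller supplied opcode bytes can only be used when they were read at the
   current RIP; otherwise fall back to a normal opcode prefetch. */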
10054 if ( cbOpcodeBytes
10055 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10056 {
10057 iemInitDecoder(pVCpu, false, false);
10058#ifdef IEM_WITH_CODE_TLB
10059 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10060 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10061 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10062 pVCpu->iem.s.offCurInstrStart = 0;
10063 pVCpu->iem.s.offInstrNextByte = 0;
10064#else
10065 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10066 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10067#endif
10068 rcStrict = VINF_SUCCESS;
10069 }
10070 else
10071 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10072 if (rcStrict == VINF_SUCCESS)
10073 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10074 else if (pVCpu->iem.s.cActiveMappings > 0)
10075 iemMemRollback(pVCpu);
10076
10077 return rcStrict;
10078}
10079
10080
10081VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10082{
10083 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10084 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
10085 if (rcStrict == VINF_SUCCESS)
10086 {
10087 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10088 if (pcbWritten)
10089 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10090 }
10091 else if (pVCpu->iem.s.cActiveMappings > 0)
10092 iemMemRollback(pVCpu);
10093
10094 return rcStrict;
10095}
10096
10097
10098VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10099 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10100{
10101 VBOXSTRICTRC rcStrict;
10102 if ( cbOpcodeBytes
10103 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10104 {
10105 iemInitDecoder(pVCpu, true, false);
10106#ifdef IEM_WITH_CODE_TLB
10107 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10108 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10109 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10110 pVCpu->iem.s.offCurInstrStart = 0;
10111 pVCpu->iem.s.offInstrNextByte = 0;
10112#else
10113 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10114 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10115#endif
10116 rcStrict = VINF_SUCCESS;
10117 }
10118 else
10119 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
10120 if (rcStrict == VINF_SUCCESS)
10121 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10122 else if (pVCpu->iem.s.cActiveMappings > 0)
10123 iemMemRollback(pVCpu);
10124
10125 return rcStrict;
10126}
10127
10128
10129/**
10130 * For handling split cacheline lock operations when the host has split-lock
10131 * detection enabled.
10132 *
10133 * This will cause the interpreter to disregard the lock prefix and implicit
10134 * locking (xchg).
10135 *
10136 * @returns Strict VBox status code.
10137 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10138 */
10139VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10140{
10141 /*
10142 * Do the decoding and emulation.
10143 */
10144 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
10145 if (rcStrict == VINF_SUCCESS)
10146 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10147 else if (pVCpu->iem.s.cActiveMappings > 0)
10148 iemMemRollback(pVCpu);
10149
10150 if (rcStrict != VINF_SUCCESS)
10151 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10152 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10153 return rcStrict;
10154}
10155
10156
10157VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10158{
10159 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10160 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10161
10162 /*
10163 * See if there is an interrupt pending in TRPM, inject it if we can.
10164 */
10165 /** @todo What if we are injecting an exception and not an interrupt? Is that
10166 * possible here? For now we assert it is indeed only an interrupt. */
10167 if (!TRPMHasTrap(pVCpu))
10168 { /* likely */ }
10169 else
10170 {
10171 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10172 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10173 {
10174 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
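/* Interrupt delivery gating: the global interrupt flag (GIF) first, then either
   plain EFLAGS.IF or, for nested guests, the VMX/SVM physical interrupt rules. */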
10175#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10176 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10177 if (fIntrEnabled)
10178 {
10179 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10180 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10181 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10182 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10183 else
10184 {
10185 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10186 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10187 }
10188 }
10189#else
10190 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10191#endif
10192 if (fIntrEnabled)
10193 {
10194 uint8_t u8TrapNo;
10195 TRPMEVENT enmType;
10196 uint32_t uErrCode;
10197 RTGCPTR uCr2;
10198 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10199 AssertRC(rc2);
10200 Assert(enmType == TRPM_HARDWARE_INT);
10201 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10202
10203 TRPMResetTrap(pVCpu);
10204
10205#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10206 /* Injecting an event may cause a VM-exit. */
10207 if ( rcStrict != VINF_SUCCESS
10208 && rcStrict != VINF_IEM_RAISED_XCPT)
10209 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10210#else
10211 NOREF(rcStrict);
10212#endif
10213 }
10214 }
10215 }
10216
10217 /*
10218 * Initial decoder init w/ prefetch, then setup setjmp.
10219 */
10220 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10221 if (rcStrict == VINF_SUCCESS)
10222 {
10223#ifdef IEM_WITH_SETJMP
10224 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10225 IEM_TRY_SETJMP(pVCpu, rcStrict)
10226#endif
10227 {
10228 /*
10229 * The run loop. We limit ourselves to the caller specified instruction count.
10230 */
10231 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10232 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10233 for (;;)
10234 {
10235 /*
10236 * Log the state.
10237 */
10238#ifdef LOG_ENABLED
10239 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10240#endif
10241
10242 /*
10243 * Do the decoding and emulation.
10244 */
10245 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10246 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10247#ifdef VBOX_STRICT
10248 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10249#endif
10250 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10251 {
10252 Assert(pVCpu->iem.s.cActiveMappings == 0);
10253 pVCpu->iem.s.cInstructions++;
10254
10255#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10256 /* Perform any VMX nested-guest instruction boundary actions. */
10257 uint64_t fCpu = pVCpu->fLocalForcedActions;
10258 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10259 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10260 { /* likely */ }
10261 else
10262 {
10263 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10264 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10265 fCpu = pVCpu->fLocalForcedActions;
10266 else
10267 {
10268 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10269 break;
10270 }
10271 }
10272#endif
10273 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10274 {
10275#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10276 uint64_t fCpu = pVCpu->fLocalForcedActions;
10277#endif
10278 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10279 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10280 | VMCPU_FF_TLB_FLUSH
10281 | VMCPU_FF_UNHALT );
10282
10283 if (RT_LIKELY( ( !fCpu
10284 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10285 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10286 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10287 {
10288 if (cMaxInstructionsGccStupidity-- > 0)
10289 {
10290 /* Poll timers every now and then according to the caller's specs. */
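/* cPollRate + 1 is asserted to be a power of two at the top of the function, so the
   mask check below ends up calling TMTimerPollBool about once every cPollRate + 1 instructions. */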
10291 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10292 || !TMTimerPollBool(pVM, pVCpu))
10293 {
10294 Assert(pVCpu->iem.s.cActiveMappings == 0);
10295 iemReInitDecoder(pVCpu);
10296 continue;
10297 }
10298 }
10299 }
10300 }
10301 Assert(pVCpu->iem.s.cActiveMappings == 0);
10302 }
10303 else if (pVCpu->iem.s.cActiveMappings > 0)
10304 iemMemRollback(pVCpu);
10305 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10306 break;
10307 }
10308 }
10309#ifdef IEM_WITH_SETJMP
10310 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10311 {
10312 if (pVCpu->iem.s.cActiveMappings > 0)
10313 iemMemRollback(pVCpu);
10314# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10315 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10316# endif
10317 pVCpu->iem.s.cLongJumps++;
10318 }
10319 IEM_CATCH_LONGJMP_END(pVCpu);
10320#endif
10321
10322 /*
10323 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10324 */
10325 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10326 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10327 }
10328 else
10329 {
10330 if (pVCpu->iem.s.cActiveMappings > 0)
10331 iemMemRollback(pVCpu);
10332
10333#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10334 /*
10335 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10336 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10337 */
10338 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10339#endif
10340 }
10341
10342 /*
10343 * Maybe re-enter raw-mode and log.
10344 */
10345 if (rcStrict != VINF_SUCCESS)
10346 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10347 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10348 if (pcInstructions)
10349 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10350 return rcStrict;
10351}
10352
10353
10354/**
10355 * Interface used by EMExecuteExec, does exit statistics and limits.
10356 *
10357 * @returns Strict VBox status code.
10358 * @param pVCpu The cross context virtual CPU structure.
10359 * @param fWillExit To be defined.
10360 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10361 * @param cMaxInstructions Maximum number of instructions to execute.
10362 * @param cMaxInstructionsWithoutExits
10363 * The max number of instructions without exits.
10364 * @param pStats Where to return statistics.
10365 */
10366VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10367 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10368{
10369 NOREF(fWillExit); /** @todo define flexible exit crits */
10370
10371 /*
10372 * Initialize return stats.
10373 */
10374 pStats->cInstructions = 0;
10375 pStats->cExits = 0;
10376 pStats->cMaxExitDistance = 0;
10377 pStats->cReserved = 0;
10378
10379 /*
10380 * Initial decoder init w/ prefetch, then setup setjmp.
10381 */
10382 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10383 if (rcStrict == VINF_SUCCESS)
10384 {
10385#ifdef IEM_WITH_SETJMP
10386 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10387 IEM_TRY_SETJMP(pVCpu, rcStrict)
10388#endif
10389 {
10390#ifdef IN_RING0
10391 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10392#endif
10393 uint32_t cInstructionSinceLastExit = 0;
10394
10395 /*
10396 * The run loop. We limit ourselves to the caller specified instruction limits.
10397 */
10398 PVM pVM = pVCpu->CTX_SUFF(pVM);
10399 for (;;)
10400 {
10401 /*
10402 * Log the state.
10403 */
10404#ifdef LOG_ENABLED
10405 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10406#endif
10407
10408 /*
10409 * Do the decoding and emulation.
10410 */
10411 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10412
10413 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10414 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10415
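/* An exit is detected by cPotentialExits having changed while executing the instruction;
   the first one after (re)entering isn't counted towards the distance stats. */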
10416 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10417 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10418 {
10419 pStats->cExits += 1;
10420 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10421 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10422 cInstructionSinceLastExit = 0;
10423 }
10424
10425 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10426 {
10427 Assert(pVCpu->iem.s.cActiveMappings == 0);
10428 pVCpu->iem.s.cInstructions++;
10429 pStats->cInstructions++;
10430 cInstructionSinceLastExit++;
10431
10432#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10433 /* Perform any VMX nested-guest instruction boundary actions. */
10434 uint64_t fCpu = pVCpu->fLocalForcedActions;
10435 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10436 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10437 { /* likely */ }
10438 else
10439 {
10440 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10441 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10442 fCpu = pVCpu->fLocalForcedActions;
10443 else
10444 {
10445 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10446 break;
10447 }
10448 }
10449#endif
10450 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10451 {
10452#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10453 uint64_t fCpu = pVCpu->fLocalForcedActions;
10454#endif
10455 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10456 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10457 | VMCPU_FF_TLB_FLUSH
10458 | VMCPU_FF_UNHALT );
10459 if (RT_LIKELY( ( ( !fCpu
10460 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10461 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10462 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10463 || pStats->cInstructions < cMinInstructions))
10464 {
10465 if (pStats->cInstructions < cMaxInstructions)
10466 {
10467 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10468 {
10469#ifdef IN_RING0
10470 if ( !fCheckPreemptionPending
10471 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10472#endif
10473 {
10474 Assert(pVCpu->iem.s.cActiveMappings == 0);
10475 iemReInitDecoder(pVCpu);
10476 continue;
10477 }
10478#ifdef IN_RING0
10479 rcStrict = VINF_EM_RAW_INTERRUPT;
10480 break;
10481#endif
10482 }
10483 }
10484 }
10485 Assert(!(fCpu & VMCPU_FF_IEM));
10486 }
10487 Assert(pVCpu->iem.s.cActiveMappings == 0);
10488 }
10489 else if (pVCpu->iem.s.cActiveMappings > 0)
10490 iemMemRollback(pVCpu);
10491 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10492 break;
10493 }
10494 }
10495#ifdef IEM_WITH_SETJMP
10496 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10497 {
10498 if (pVCpu->iem.s.cActiveMappings > 0)
10499 iemMemRollback(pVCpu);
10500 pVCpu->iem.s.cLongJumps++;
10501 }
10502 IEM_CATCH_LONGJMP_END(pVCpu);
10503#endif
10504
10505 /*
10506 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10507 */
10508 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10509 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10510 }
10511 else
10512 {
10513 if (pVCpu->iem.s.cActiveMappings > 0)
10514 iemMemRollback(pVCpu);
10515
10516#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10517 /*
10518 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10519 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10520 */
10521 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10522#endif
10523 }
10524
10525 /*
10526 * Maybe re-enter raw-mode and log.
10527 */
10528 if (rcStrict != VINF_SUCCESS)
10529 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10530 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10531 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10532 return rcStrict;
10533}
10534
10535
10536/**
10537 * Injects a trap, fault, abort, software interrupt or external interrupt.
10538 *
10539 * The parameter list matches TRPMQueryTrapAll pretty closely.
10540 *
10541 * @returns Strict VBox status code.
10542 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10543 * @param u8TrapNo The trap number.
10544 * @param enmType What type is it (trap/fault/abort), software
10545 * interrupt or hardware interrupt.
10546 * @param uErrCode The error code if applicable.
10547 * @param uCr2 The CR2 value if applicable.
10548 * @param cbInstr The instruction length (only relevant for
10549 * software interrupts).
10550 */
10551VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10552 uint8_t cbInstr)
10553{
10554 iemInitDecoder(pVCpu, false, false);
10555#ifdef DBGFTRACE_ENABLED
10556 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10557 u8TrapNo, enmType, uErrCode, uCr2);
10558#endif
10559
10560 uint32_t fFlags;
10561 switch (enmType)
10562 {
10563 case TRPM_HARDWARE_INT:
10564 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10565 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10566 uErrCode = uCr2 = 0;
10567 break;
10568
10569 case TRPM_SOFTWARE_INT:
10570 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10571 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10572 uErrCode = uCr2 = 0;
10573 break;
10574
10575 case TRPM_TRAP:
10576 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10577 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10578 if (u8TrapNo == X86_XCPT_PF)
10579 fFlags |= IEM_XCPT_FLAGS_CR2;
10580 switch (u8TrapNo)
10581 {
10582 case X86_XCPT_DF:
10583 case X86_XCPT_TS:
10584 case X86_XCPT_NP:
10585 case X86_XCPT_SS:
10586 case X86_XCPT_PF:
10587 case X86_XCPT_AC:
10588 case X86_XCPT_GP:
10589 fFlags |= IEM_XCPT_FLAGS_ERR;
10590 break;
10591 }
10592 break;
10593
10594 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10595 }
10596
10597 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10598
10599 if (pVCpu->iem.s.cActiveMappings > 0)
10600 iemMemRollback(pVCpu);
10601
10602 return rcStrict;
10603}
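/*
 * A minimal usage sketch (illustrative only, not taken from any VMM caller):
 * reflecting a guest page fault through IEMInjectTrap().  The uErrCode and
 * uFaultAddr locals are assumed to be supplied by the caller; the status
 * handling mirrors what IEMInjectTrpmEvent() below does.
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                            uErrCode, uFaultAddr,
 *                                            0);  // cbInstr, only relevant for software interrupts
 *      if (   rcStrict == VINF_SUCCESS
 *          || rcStrict == VINF_IEM_RAISED_XCPT)
 *      {
 *          // event considered delivered to the guest
 *      }
 */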
10604
10605
10606/**
10607 * Injects the active TRPM event.
10608 *
10609 * @returns Strict VBox status code.
10610 * @param pVCpu The cross context virtual CPU structure.
10611 */
10612VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10613{
10614#ifndef IEM_IMPLEMENTS_TASKSWITCH
10615 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10616#else
10617 uint8_t u8TrapNo;
10618 TRPMEVENT enmType;
10619 uint32_t uErrCode;
10620 RTGCUINTPTR uCr2;
10621 uint8_t cbInstr;
10622 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10623 if (RT_FAILURE(rc))
10624 return rc;
10625
10626 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10627 * ICEBP \#DB injection as a special case. */
10628 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10629#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10630 if (rcStrict == VINF_SVM_VMEXIT)
10631 rcStrict = VINF_SUCCESS;
10632#endif
10633#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10634 if (rcStrict == VINF_VMX_VMEXIT)
10635 rcStrict = VINF_SUCCESS;
10636#endif
10637 /** @todo Are there any other codes that imply the event was successfully
10638 * delivered to the guest? See @bugref{6607}. */
10639 if ( rcStrict == VINF_SUCCESS
10640 || rcStrict == VINF_IEM_RAISED_XCPT)
10641 TRPMResetTrap(pVCpu);
10642
10643 return rcStrict;
10644#endif
10645}
10646
10647
10648VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10649{
10650 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10651 return VERR_NOT_IMPLEMENTED;
10652}
10653
10654
10655VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10656{
10657 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10658 return VERR_NOT_IMPLEMENTED;
10659}
10660
10661
10662/**
10663 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10664 *
10665 * This API ASSUMES that the caller has already verified that the guest code is
10666 * allowed to access the I/O port. (The I/O port is in the DX register in the
10667 * guest state.)
10668 *
10669 * @returns Strict VBox status code.
10670 * @param pVCpu The cross context virtual CPU structure.
10671 * @param cbValue The size of the I/O port access (1, 2, or 4).
10672 * @param enmAddrMode The addressing mode.
10673 * @param fRepPrefix Indicates whether a repeat prefix is used
10674 * (doesn't matter which for this instruction).
10675 * @param cbInstr The instruction length in bytes.
10676 * @param   iEffSeg     The effective segment register.
10677 * @param fIoChecked Whether the access to the I/O port has been
10678 * checked or not. It's typically checked in the
10679 * HM scenario.
10680 */
10681VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10682 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10683{
10684 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10685 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10686
10687 /*
10688 * State init.
10689 */
10690 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10691
10692 /*
10693 * Switch orgy for getting to the right handler.
10694 */
10695 VBOXSTRICTRC rcStrict;
10696 if (fRepPrefix)
10697 {
10698 switch (enmAddrMode)
10699 {
10700 case IEMMODE_16BIT:
10701 switch (cbValue)
10702 {
10703 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10704 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10705 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10706 default:
10707 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10708 }
10709 break;
10710
10711 case IEMMODE_32BIT:
10712 switch (cbValue)
10713 {
10714 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10715 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10716 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10717 default:
10718 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10719 }
10720 break;
10721
10722 case IEMMODE_64BIT:
10723 switch (cbValue)
10724 {
10725 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10726 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10727 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10728 default:
10729 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10730 }
10731 break;
10732
10733 default:
10734 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10735 }
10736 }
10737 else
10738 {
10739 switch (enmAddrMode)
10740 {
10741 case IEMMODE_16BIT:
10742 switch (cbValue)
10743 {
10744 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10745 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10746 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10747 default:
10748 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10749 }
10750 break;
10751
10752 case IEMMODE_32BIT:
10753 switch (cbValue)
10754 {
10755 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10756 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10757 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10758 default:
10759 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10760 }
10761 break;
10762
10763 case IEMMODE_64BIT:
10764 switch (cbValue)
10765 {
10766 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10767 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10768 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10769 default:
10770 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10771 }
10772 break;
10773
10774 default:
10775 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10776 }
10777 }
10778
10779 if (pVCpu->iem.s.cActiveMappings)
10780 iemMemRollback(pVCpu);
10781
10782 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10783}
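/*
 * A minimal usage sketch (illustrative only; cbInstr and the DS effective
 * segment are assumptions about the instruction the caller just decoded):
 * a REP OUTSB with 16-bit addressing whose I/O port access has already been
 * checked, as in the HM scenario described above.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
 *                                                   1,              // cbValue: byte access
 *                                                   IEMMODE_16BIT,  // enmAddrMode
 *                                                   true,           // fRepPrefix
 *                                                   cbInstr,
 *                                                   X86_SREG_DS,    // iEffSeg
 *                                                   true);          // fIoChecked
 */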
10784
10785
10786/**
10787 * Interface for HM and EM for executing string I/O IN (read) instructions.
10788 *
10789 * This API ASSUMES that the caller has already verified that the guest code is
10790 * allowed to access the I/O port. (The I/O port is in the DX register in the
10791 * guest state.)
10792 *
10793 * @returns Strict VBox status code.
10794 * @param pVCpu The cross context virtual CPU structure.
10795 * @param cbValue The size of the I/O port access (1, 2, or 4).
10796 * @param enmAddrMode The addressing mode.
10797 * @param fRepPrefix Indicates whether a repeat prefix is used
10798 * (doesn't matter which for this instruction).
10799 * @param cbInstr The instruction length in bytes.
10800 * @param fIoChecked Whether the access to the I/O port has been
10801 * checked or not. It's typically checked in the
10802 * HM scenario.
10803 */
10804VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10805 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10806{
10807 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10808
10809 /*
10810 * State init.
10811 */
10812 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10813
10814 /*
10815 * Switch orgy for getting to the right handler.
10816 */
10817 VBOXSTRICTRC rcStrict;
10818 if (fRepPrefix)
10819 {
10820 switch (enmAddrMode)
10821 {
10822 case IEMMODE_16BIT:
10823 switch (cbValue)
10824 {
10825 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10826 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10827 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10828 default:
10829 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10830 }
10831 break;
10832
10833 case IEMMODE_32BIT:
10834 switch (cbValue)
10835 {
10836 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10837 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10838 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10839 default:
10840 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10841 }
10842 break;
10843
10844 case IEMMODE_64BIT:
10845 switch (cbValue)
10846 {
10847 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10848 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10849 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10850 default:
10851 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10852 }
10853 break;
10854
10855 default:
10856 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10857 }
10858 }
10859 else
10860 {
10861 switch (enmAddrMode)
10862 {
10863 case IEMMODE_16BIT:
10864 switch (cbValue)
10865 {
10866 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10867 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10868 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10869 default:
10870 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10871 }
10872 break;
10873
10874 case IEMMODE_32BIT:
10875 switch (cbValue)
10876 {
10877 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10878 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10879 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10880 default:
10881 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10882 }
10883 break;
10884
10885 case IEMMODE_64BIT:
10886 switch (cbValue)
10887 {
10888 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10889 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10890 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10891 default:
10892 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10893 }
10894 break;
10895
10896 default:
10897 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10898 }
10899 }
10900
10901 if ( pVCpu->iem.s.cActiveMappings == 0
10902 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10903 { /* likely */ }
10904 else
10905 {
10906 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10907 iemMemRollback(pVCpu);
10908 }
10909 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10910}
10911
10912
10913/**
10914 * Interface for rawmode to execute an OUT (write) instruction.
10915 *
10916 * @returns Strict VBox status code.
10917 * @param pVCpu The cross context virtual CPU structure.
10918 * @param cbInstr The instruction length in bytes.
10919 * @param   u16Port     The port to write to.
10920 * @param fImm Whether the port is specified using an immediate operand or
10921 * using the implicit DX register.
10922 * @param cbReg The register size.
10923 *
10924 * @remarks In ring-0 not all of the state needs to be synced in.
10925 */
10926VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10927{
10928 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10929 Assert(cbReg <= 4 && cbReg != 3);
10930
10931 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10932 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10933 Assert(!pVCpu->iem.s.cActiveMappings);
10934 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10935}
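/*
 * A short sketch (illustrative; u16Port is assumed to hold the guest DX value
 * gathered by the caller): emulating "out dx, al".
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, cbInstr, u16Port,
 *                                                false,  // fImm: port comes from DX
 *                                                1);     // cbReg: byte-sized (AL)
 */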
10936
10937
10938/**
10939 * Interface for rawmode to execute an IN (read) instruction.
10940 *
10941 * @returns Strict VBox status code.
10942 * @param pVCpu The cross context virtual CPU structure.
10943 * @param cbInstr The instruction length in bytes.
10944 * @param u16Port The port to read.
10945 * @param fImm Whether the port is specified using an immediate operand or
10946 * using the implicit DX.
10947 * @param cbReg The register size.
10948 */
10949VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10950{
10951 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10952 Assert(cbReg <= 4 && cbReg != 3);
10953
10954 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10955 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10956 Assert(!pVCpu->iem.s.cActiveMappings);
10957 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10958}
10959
10960
10961/**
10962 * Interface for HM and EM to write to a CRx register.
10963 *
10964 * @returns Strict VBox status code.
10965 * @param pVCpu The cross context virtual CPU structure.
10966 * @param cbInstr The instruction length in bytes.
10967 * @param iCrReg The control register number (destination).
10968 * @param iGReg The general purpose register number (source).
10969 *
10970 * @remarks In ring-0 not all of the state needs to be synced in.
10971 */
10972VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10973{
10974 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10975 Assert(iCrReg < 16);
10976 Assert(iGReg < 16);
10977
10978 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10979 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10980 Assert(!pVCpu->iem.s.cActiveMappings);
10981 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10982}
10983
10984
10985/**
10986 * Interface for HM and EM to read from a CRx register.
10987 *
10988 * @returns Strict VBox status code.
10989 * @param pVCpu The cross context virtual CPU structure.
10990 * @param cbInstr The instruction length in bytes.
10991 * @param iGReg The general purpose register number (destination).
10992 * @param iCrReg The control register number (source).
10993 *
10994 * @remarks In ring-0 not all of the state needs to be synced in.
10995 */
10996VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10997{
10998 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10999 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11000 | CPUMCTX_EXTRN_APIC_TPR);
11001 Assert(iCrReg < 16);
11002 Assert(iGReg < 16);
11003
11004 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11005 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11006 Assert(!pVCpu->iem.s.cActiveMappings);
11007 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11008}
11009
11010
11011/**
11012 * Interface for HM and EM to write to a DRx register.
11013 *
11014 * @returns Strict VBox status code.
11015 * @param pVCpu The cross context virtual CPU structure.
11016 * @param cbInstr The instruction length in bytes.
11017 * @param iDrReg The debug register number (destination).
11018 * @param iGReg The general purpose register number (source).
11019 *
11020 * @remarks In ring-0 not all of the state needs to be synced in.
11021 */
11022VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11023{
11024 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11025 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11026 Assert(iDrReg < 8);
11027 Assert(iGReg < 16);
11028
11029 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11030 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11031 Assert(!pVCpu->iem.s.cActiveMappings);
11032 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11033}
11034
11035
11036/**
11037 * Interface for HM and EM to read from a DRx register.
11038 *
11039 * @returns Strict VBox status code.
11040 * @param pVCpu The cross context virtual CPU structure.
11041 * @param cbInstr The instruction length in bytes.
11042 * @param iGReg The general purpose register number (destination).
11043 * @param iDrReg The debug register number (source).
11044 *
11045 * @remarks In ring-0 not all of the state needs to be synced in.
11046 */
11047VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11048{
11049 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11050 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11051 Assert(iDrReg < 8);
11052 Assert(iGReg < 16);
11053
11054 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11055 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11056 Assert(!pVCpu->iem.s.cActiveMappings);
11057 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11058}
11059
11060
11061/**
11062 * Interface for HM and EM to clear the CR0[TS] bit.
11063 *
11064 * @returns Strict VBox status code.
11065 * @param pVCpu The cross context virtual CPU structure.
11066 * @param cbInstr The instruction length in bytes.
11067 *
11068 * @remarks In ring-0 not all of the state needs to be synced in.
11069 */
11070VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11071{
11072 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11073
11074 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11075 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11076 Assert(!pVCpu->iem.s.cActiveMappings);
11077 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11078}
11079
11080
11081/**
11082 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11083 *
11084 * @returns Strict VBox status code.
11085 * @param pVCpu The cross context virtual CPU structure.
11086 * @param cbInstr The instruction length in bytes.
11087 * @param uValue The value to load into CR0.
11088 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11089 * memory operand. Otherwise pass NIL_RTGCPTR.
11090 *
11091 * @remarks In ring-0 not all of the state needs to be synced in.
11092 */
11093VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11094{
11095 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11096
11097 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11098 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11099 Assert(!pVCpu->iem.s.cActiveMappings);
11100 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11101}
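/*
 * Usage sketch (illustrative; uMsw is assumed to be the decoded 16-bit source
 * operand): the register form of LMSW has no memory operand, so NIL_RTGCPTR
 * is passed for GCPtrEffDst as noted above.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, NIL_RTGCPTR);
 */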
11102
11103
11104/**
11105 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11106 *
11107 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11108 *
11109 * @returns Strict VBox status code.
11110 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11111 * @param cbInstr The instruction length in bytes.
11112 * @remarks In ring-0 not all of the state needs to be synced in.
11113 * @thread EMT(pVCpu)
11114 */
11115VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11116{
11117 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11118
11119 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11120 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11121 Assert(!pVCpu->iem.s.cActiveMappings);
11122 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11123}
11124
11125
11126/**
11127 * Interface for HM and EM to emulate the WBINVD instruction.
11128 *
11129 * @returns Strict VBox status code.
11130 * @param pVCpu The cross context virtual CPU structure.
11131 * @param cbInstr The instruction length in bytes.
11132 *
11133 * @remarks In ring-0 not all of the state needs to be synced in.
11134 */
11135VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11136{
11137 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11138
11139 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11140 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11141 Assert(!pVCpu->iem.s.cActiveMappings);
11142 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11143}
11144
11145
11146/**
11147 * Interface for HM and EM to emulate the INVD instruction.
11148 *
11149 * @returns Strict VBox status code.
11150 * @param pVCpu The cross context virtual CPU structure.
11151 * @param cbInstr The instruction length in bytes.
11152 *
11153 * @remarks In ring-0 not all of the state needs to be synced in.
11154 */
11155VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11156{
11157 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11158
11159 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11160 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11161 Assert(!pVCpu->iem.s.cActiveMappings);
11162 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11163}
11164
11165
11166/**
11167 * Interface for HM and EM to emulate the INVLPG instruction.
11168 *
11169 * @returns Strict VBox status code.
11170 * @retval VINF_PGM_SYNC_CR3
11171 *
11172 * @param pVCpu The cross context virtual CPU structure.
11173 * @param cbInstr The instruction length in bytes.
11174 * @param GCPtrPage The effective address of the page to invalidate.
11175 *
11176 * @remarks In ring-0 not all of the state needs to be synced in.
11177 */
11178VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11179{
11180 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11181
11182 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11183 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11184 Assert(!pVCpu->iem.s.cActiveMappings);
11185 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11186}
11187
11188
11189/**
11190 * Interface for HM and EM to emulate the INVPCID instruction.
11191 *
11192 * @returns Strict VBox status code.
11193 * @retval VINF_PGM_SYNC_CR3
11194 *
11195 * @param pVCpu The cross context virtual CPU structure.
11196 * @param cbInstr The instruction length in bytes.
11197 * @param iEffSeg The effective segment register.
11198 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11199 * @param uType The invalidation type.
11200 *
11201 * @remarks In ring-0 not all of the state needs to be synced in.
11202 */
11203VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11204 uint64_t uType)
11205{
11206 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11207
11208 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11209 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11210 Assert(!pVCpu->iem.s.cActiveMappings);
11211 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11212}
11213
11214
11215/**
11216 * Interface for HM and EM to emulate the CPUID instruction.
11217 *
11218 * @returns Strict VBox status code.
11219 *
11220 * @param pVCpu The cross context virtual CPU structure.
11221 * @param cbInstr The instruction length in bytes.
11222 *
11223 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
11224 */
11225VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11226{
11227 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11228 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11229
11230 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11231 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11232 Assert(!pVCpu->iem.s.cActiveMappings);
11233 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11234}
11235
11236
11237/**
11238 * Interface for HM and EM to emulate the RDPMC instruction.
11239 *
11240 * @returns Strict VBox status code.
11241 *
11242 * @param pVCpu The cross context virtual CPU structure.
11243 * @param cbInstr The instruction length in bytes.
11244 *
11245 * @remarks Not all of the state needs to be synced in.
11246 */
11247VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11248{
11249 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11250 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11251
11252 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11253 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11254 Assert(!pVCpu->iem.s.cActiveMappings);
11255 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11256}
11257
11258
11259/**
11260 * Interface for HM and EM to emulate the RDTSC instruction.
11261 *
11262 * @returns Strict VBox status code.
11263 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11264 *
11265 * @param pVCpu The cross context virtual CPU structure.
11266 * @param cbInstr The instruction length in bytes.
11267 *
11268 * @remarks Not all of the state needs to be synced in.
11269 */
11270VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11271{
11272 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11273 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11274
11275 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11276 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11277 Assert(!pVCpu->iem.s.cActiveMappings);
11278 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11279}
11280
11281
11282/**
11283 * Interface for HM and EM to emulate the RDTSCP instruction.
11284 *
11285 * @returns Strict VBox status code.
11286 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11287 *
11288 * @param pVCpu The cross context virtual CPU structure.
11289 * @param cbInstr The instruction length in bytes.
11290 *
11291 * @remarks Not all of the state needs to be synced in. Recommended
11292 *          to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
11293 */
11294VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11295{
11296 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11297 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11298
11299 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11300 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11301 Assert(!pVCpu->iem.s.cActiveMappings);
11302 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11303}
11304
11305
11306/**
11307 * Interface for HM and EM to emulate the RDMSR instruction.
11308 *
11309 * @returns Strict VBox status code.
11310 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11311 *
11312 * @param pVCpu The cross context virtual CPU structure.
11313 * @param cbInstr The instruction length in bytes.
11314 *
11315 * @remarks Not all of the state needs to be synced in. Requires RCX and
11316 * (currently) all MSRs.
11317 */
11318VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11319{
11320 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11321 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11322
11323 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11324 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11325 Assert(!pVCpu->iem.s.cActiveMappings);
11326 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11327}
11328
11329
11330/**
11331 * Interface for HM and EM to emulate the WRMSR instruction.
11332 *
11333 * @returns Strict VBox status code.
11334 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11335 *
11336 * @param pVCpu The cross context virtual CPU structure.
11337 * @param cbInstr The instruction length in bytes.
11338 *
11339 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11340 * and (currently) all MSRs.
11341 */
11342VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11343{
11344 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11345 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11346 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11347
11348 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11349 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11350 Assert(!pVCpu->iem.s.cActiveMappings);
11351 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11352}
11353
11354
11355/**
11356 * Interface for HM and EM to emulate the MONITOR instruction.
11357 *
11358 * @returns Strict VBox status code.
11359 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11360 *
11361 * @param pVCpu The cross context virtual CPU structure.
11362 * @param cbInstr The instruction length in bytes.
11363 *
11364 * @remarks Not all of the state needs to be synced in.
11365 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11366 * are used.
11367 */
11368VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11369{
11370 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11371 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11372
11373 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11374 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11375 Assert(!pVCpu->iem.s.cActiveMappings);
11376 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11377}
11378
11379
11380/**
11381 * Interface for HM and EM to emulate the MWAIT instruction.
11382 *
11383 * @returns Strict VBox status code.
11384 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11385 *
11386 * @param pVCpu The cross context virtual CPU structure.
11387 * @param cbInstr The instruction length in bytes.
11388 *
11389 * @remarks Not all of the state needs to be synced in.
11390 */
11391VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11392{
11393 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11394 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11395
11396 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11397 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11398 Assert(!pVCpu->iem.s.cActiveMappings);
11399 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11400}
11401
11402
11403/**
11404 * Interface for HM and EM to emulate the HLT instruction.
11405 *
11406 * @returns Strict VBox status code.
11407 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11408 *
11409 * @param pVCpu The cross context virtual CPU structure.
11410 * @param cbInstr The instruction length in bytes.
11411 *
11412 * @remarks Not all of the state needs to be synced in.
11413 */
11414VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11415{
11416 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11417
11418 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11419 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11420 Assert(!pVCpu->iem.s.cActiveMappings);
11421 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11422}
11423
11424
11425/**
11426 * Checks if IEM is in the process of delivering an event (interrupt or
11427 * exception).
11428 *
11429 * @returns true if we're in the process of raising an interrupt or exception,
11430 * false otherwise.
11431 * @param pVCpu The cross context virtual CPU structure.
11432 * @param puVector Where to store the vector associated with the
11433 * currently delivered event, optional.
11434 * @param   pfFlags     Where to store the event delivery flags (see
11435 * IEM_XCPT_FLAGS_XXX), optional.
11436 * @param puErr Where to store the error code associated with the
11437 * event, optional.
11438 * @param puCr2 Where to store the CR2 associated with the event,
11439 * optional.
11440 * @remarks The caller should check the flags to determine if the error code and
11441 * CR2 are valid for the event.
11442 */
11443VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11444{
11445 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11446 if (fRaisingXcpt)
11447 {
11448 if (puVector)
11449 *puVector = pVCpu->iem.s.uCurXcpt;
11450 if (pfFlags)
11451 *pfFlags = pVCpu->iem.s.fCurXcpt;
11452 if (puErr)
11453 *puErr = pVCpu->iem.s.uCurXcptErr;
11454 if (puCr2)
11455 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11456 }
11457 return fRaisingXcpt;
11458}
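/*
 * Usage sketch (illustrative only): the returned flags tell the caller whether
 * the error code and CR2 values are meaningful for the event being delivered.
 *
 *      uint8_t uVector; uint32_t fFlags, uErr; uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *      {
 *          bool const fErrValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *          bool const fCr2Valid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *          // ... record the pending event for the caller's exit handling ...
 *      }
 */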
11459
11460#ifdef IN_RING3
11461
11462/**
11463 * Handles the unlikely and probably fatal merge cases.
11464 *
11465 * @returns Merged status code.
11466 * @param rcStrict Current EM status code.
11467 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11468 * with @a rcStrict.
11469 * @param iMemMap The memory mapping index. For error reporting only.
11470 * @param pVCpu The cross context virtual CPU structure of the calling
11471 * thread, for error reporting only.
11472 */
11473DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11474 unsigned iMemMap, PVMCPUCC pVCpu)
11475{
11476 if (RT_FAILURE_NP(rcStrict))
11477 return rcStrict;
11478
11479 if (RT_FAILURE_NP(rcStrictCommit))
11480 return rcStrictCommit;
11481
11482 if (rcStrict == rcStrictCommit)
11483 return rcStrictCommit;
11484
11485 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11486 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11487 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11489 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11490 return VERR_IOM_FF_STATUS_IPE;
11491}
11492
11493
11494/**
11495 * Helper for IOMR3ProcessForceFlag.
11496 *
11497 * @returns Merged status code.
11498 * @param rcStrict Current EM status code.
11499 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11500 * with @a rcStrict.
11501 * @param iMemMap The memory mapping index. For error reporting only.
11502 * @param pVCpu The cross context virtual CPU structure of the calling
11503 * thread, for error reporting only.
11504 */
11505DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11506{
11507 /* Simple. */
11508 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11509 return rcStrictCommit;
11510
11511 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11512 return rcStrict;
11513
11514 /* EM scheduling status codes. */
11515 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11516 && rcStrict <= VINF_EM_LAST))
11517 {
11518 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11519 && rcStrictCommit <= VINF_EM_LAST))
11520 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11521 }
11522
11523 /* Unlikely */
11524 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11525}
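/*
 * Worked examples of the merge rules above (illustrative, derived directly
 * from the branches in iemR3MergeStatus):
 *      iemR3MergeStatus(VINF_SUCCESS,      rcCommit,     ...) -> rcCommit
 *      iemR3MergeStatus(VINF_EM_RAW_TO_R3, rcCommit,     ...) -> rcCommit
 *      iemR3MergeStatus(rcStrict,          VINF_SUCCESS, ...) -> rcStrict
 *      two EM scheduling codes                               -> the numerically smaller one
 *      anything else (failures, conflicting codes)           -> iemR3MergeStatusSlow
 */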
11526
11527
11528/**
11529 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11530 *
11531 * @returns Merge between @a rcStrict and what the commit operation returned.
11532 * @param pVM The cross context VM structure.
11533 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11534 * @param rcStrict The status code returned by ring-0 or raw-mode.
11535 */
11536VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11537{
11538 /*
11539 * Reset the pending commit.
11540 */
11541 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11542 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11543 ("%#x %#x %#x\n",
11544 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11545 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11546
11547 /*
11548 * Commit the pending bounce buffers (usually just one).
11549 */
11550 unsigned cBufs = 0;
11551 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11552 while (iMemMap-- > 0)
11553 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11554 {
11555 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11556 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11557 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11558
11559 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11560 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11561 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11562
11563 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11564 {
11565 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11566 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11567 pbBuf,
11568 cbFirst,
11569 PGMACCESSORIGIN_IEM);
11570 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11571 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11572 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11573 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11574 }
11575
11576 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11577 {
11578 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11579 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11580 pbBuf + cbFirst,
11581 cbSecond,
11582 PGMACCESSORIGIN_IEM);
11583 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11584 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11585 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11586 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11587 }
11588 cBufs++;
11589 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11590 }
11591
11592 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11593 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11594 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11595 pVCpu->iem.s.cActiveMappings = 0;
11596 return rcStrict;
11597}
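/*
 * Usage sketch (illustrative; the surrounding ring-3 handling is an assumption,
 * not code taken from EM): the pending write commits are only processed once
 * VMCPU_FF_IEM has been raised.
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */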
11598
11599#endif /* IN_RING3 */
11600