VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h@100804

Last change on this file since 100804 was 100804, checked in by vboxsync, 22 months ago

VMM/IEM: Made the rep-prefixed string instructions return the new status code VINF_IEM_YIELD_PENDING_FF if they yield and do not update RIP. This avoids trouble in the recompiler, as any non-zero status code will stop TB execution. This is more efficient than generating extra checks on the RIP value or something in the TB. The IEM_CIMPL_F_REP annotated instructions no longer need to trigger an end-of-TB. Annotate I/O instructions in case it comes in handy. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 120.0 KB
1/* $Id: IEMInline.h 100804 2023-08-05 01:01:32Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined Functions.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
29#define VMM_INCLUDED_SRC_include_IEMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35
36/**
37 * Makes status code adjustments (pass up from I/O and access handlers)
38 * as well as maintaining statistics.
39 *
40 * @returns Strict VBox status code to pass up.
41 * @param pVCpu The cross context virtual CPU structure of the calling thread.
42 * @param rcStrict The status from executing an instruction.
43 */
44DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
45{
46 if (rcStrict != VINF_SUCCESS)
47 {
48 /* Deal with the cases that should be treated as VINF_SUCCESS first. */
49 if ( rcStrict == VINF_IEM_YIELD_PENDING_FF
50#ifdef VBOX_WITH_NESTED_HWVIRT_VMX /** @todo r=bird: Why do we need TWO status codes here? */
51 || rcStrict == VINF_VMX_VMEXIT
52#endif
53#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
54 || rcStrict == VINF_SVM_VMEXIT
55#endif
56 )
57 {
58 if (pVCpu->iem.s.rcPassUp == VINF_SUCCESS)
59 rcStrict = VINF_SUCCESS;
60 else
61 {
62 pVCpu->iem.s.cRetPassUpStatus++;
63 rcStrict = pVCpu->iem.s.rcPassUp;
64 }
65 }
66 else if (RT_SUCCESS(rcStrict))
67 {
68 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
69 || rcStrict == VINF_IOM_R3_IOPORT_READ
70 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
71 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
72 || rcStrict == VINF_IOM_R3_MMIO_READ
73 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
74 || rcStrict == VINF_IOM_R3_MMIO_WRITE
75 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
76 || rcStrict == VINF_CPUM_R3_MSR_READ
77 || rcStrict == VINF_CPUM_R3_MSR_WRITE
78 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
79 || rcStrict == VINF_EM_RAW_TO_R3
80 || rcStrict == VINF_EM_TRIPLE_FAULT
81 || rcStrict == VINF_GIM_R3_HYPERCALL
82 /* raw-mode / virt handlers only: */
83 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
84 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
85 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
86 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
87 || rcStrict == VINF_SELM_SYNC_GDT
88 || rcStrict == VINF_CSAM_PENDING_ACTION
89 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
90 /* nested hw.virt codes: */
91 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
92 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
93 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
94/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
95 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
96 if (rcPassUp == VINF_SUCCESS)
97 pVCpu->iem.s.cRetInfStatuses++;
98 else if ( rcPassUp < VINF_EM_FIRST
99 || rcPassUp > VINF_EM_LAST
100 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
101 {
102 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
103 pVCpu->iem.s.cRetPassUpStatus++;
104 rcStrict = rcPassUp;
105 }
106 else
107 {
108 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
109 pVCpu->iem.s.cRetInfStatuses++;
110 }
111 }
112 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
113 pVCpu->iem.s.cRetAspectNotImplemented++;
114 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
115 pVCpu->iem.s.cRetInstrNotImplemented++;
116 else
117 pVCpu->iem.s.cRetErrStatuses++;
118 }
119 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
120 {
121 pVCpu->iem.s.cRetPassUpStatus++;
122 rcStrict = pVCpu->iem.s.rcPassUp;
123 }
124
125 return rcStrict;
126}
127
128
129/**
130 * Sets the pass up status.
131 *
132 * @returns VINF_SUCCESS.
133 * @param pVCpu The cross context virtual CPU structure of the
134 * calling thread.
135 * @param rcPassUp The pass up status. Must be informational.
136 * VINF_SUCCESS is not allowed.
137 */
138DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
139{
140 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
141
142 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
143 if (rcOldPassUp == VINF_SUCCESS)
144 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
145 /* If both are EM scheduling codes, use EM priority rules. */
146 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
147 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
148 {
149 if (rcPassUp < rcOldPassUp)
150 {
151 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
152 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
153 }
154 else
155 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
156 }
157 /* Override EM scheduling with specific status code. */
158 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
159 {
160 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
161 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
162 }
163 /* Don't override specific status code, first come first served. */
164 else
165 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
166 return VINF_SUCCESS;
167}
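/*
 * Usage sketch (explanatory note, not part of the original header): an
 * instruction helper that must finish the current instruction but wants a
 * scheduling status propagated afterwards records it with iemSetPassUpStatus(),
 * and iemExecStatusCodeFiddling() merges it into the final status on the way
 * out of the IEMExec* call.  The condition name below is made up purely for
 * illustration.
 *
 * @code
 *      // somewhere inside an instruction implementation:
 *      if (fSomeConditionRequiringRing3)                            // hypothetical
 *          rcStrict = iemSetPassUpStatus(pVCpu, VINF_EM_RAW_TO_R3); // returns VINF_SUCCESS
 *      // ... keep executing and committing the instruction ...
 *
 *      // at the outermost exit point:
 *      return iemExecStatusCodeFiddling(pVCpu, rcStrict);
 * @endcode
 */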
168
169
170/**
171 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag.
172 *
173 * Checks if CS, SS, DS and ES are all wide open flat 32-bit segments. This will
174 * reject expand down data segments and conforming code segments.
175 *
176 * ASSUMES that the CPU is in 32-bit mode.
177 *
178 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
179 * @param pVCpu The cross context virtual CPU structure of the
180 * calling thread.
181 * @sa iemCalc32BitFlatIndicatorEsDs
182 */
183DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicator(PVMCPUCC pVCpu) RT_NOEXCEPT
184{
185 AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
186 return ( ( pVCpu->cpum.GstCtx.es.Attr.u
187 | pVCpu->cpum.GstCtx.cs.Attr.u
188 | pVCpu->cpum.GstCtx.ss.Attr.u
189 | pVCpu->cpum.GstCtx.ds.Attr.u)
190 & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P))
191 == (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
192 && ( (pVCpu->cpum.GstCtx.es.u32Limit + 1)
193 | (pVCpu->cpum.GstCtx.cs.u32Limit + 1)
194 | (pVCpu->cpum.GstCtx.ss.u32Limit + 1)
195 | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
196 == 0
197 && ( pVCpu->cpum.GstCtx.es.u64Base
198 | pVCpu->cpum.GstCtx.cs.u64Base
199 | pVCpu->cpum.GstCtx.ss.u64Base
200 | pVCpu->cpum.GstCtx.ds.u64Base)
201 == 0
202 && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
203 ? IEM_F_MODE_X86_32BIT_FLAT : 0;
204}
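/*
 * Explanatory note on the expression above (added commentary): ORing the four
 * attribute words and masking lets a single compare require the ACCESSED, G, D
 * and P bits while rejecting any segment that has the expand-down/conforming or
 * unusable bits set.  The (u32Limit + 1) terms rely on 32-bit wrap-around: only
 * a limit of 0xffffffff yields 0, so ORing them and comparing against zero
 * checks that every limit is 4 GiB - 1, and ORing the bases checks that all of
 * them are zero, i.e. the segments are truly flat.
 */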
205
206
207/**
208 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag, ASSUMING the CS and SS are
209 * flat already.
210 *
211 * This is used by sysenter.
212 *
213 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
214 * @param pVCpu The cross context virtual CPU structure of the
215 * calling thread.
216 * @sa iemCalc32BitFlatIndicator
217 */
218DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicatorEsDs(PVMCPUCC pVCpu) RT_NOEXCEPT
219{
220 AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
221 return ( ( pVCpu->cpum.GstCtx.es.Attr.u
222 | pVCpu->cpum.GstCtx.ds.Attr.u)
223 & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P))
224 == (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
225 && ( (pVCpu->cpum.GstCtx.es.u32Limit + 1)
226 | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
227 == 0
228 && ( pVCpu->cpum.GstCtx.es.u64Base
229 | pVCpu->cpum.GstCtx.ds.u64Base)
230 == 0
231 && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
232 ? IEM_F_MODE_X86_32BIT_FLAT : 0;
233}
234
235
236/**
237 * Calculates the IEM_F_MODE_XXX and CPL flags.
238 *
239 * @returns IEM_F_MODE_XXX
240 * @param pVCpu The cross context virtual CPU structure of the
241 * calling thread.
242 */
243DECL_FORCE_INLINE(uint32_t) iemCalcExecModeAndCplFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
244{
245 /*
246 * We duplicate code from CPUMGetGuestCPL and CPUMIsGuestIn64BitCodeEx
247 * here to try to get this done as efficiently as possible.
248 */
249 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
250
251 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
252 {
253 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
254 {
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
256 uint32_t fExec = ((uint32_t)pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl << IEM_F_X86_CPL_SHIFT);
257 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
258 {
259 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA));
260 fExec |= IEM_F_MODE_X86_32BIT_PROT | iemCalc32BitFlatIndicator(pVCpu);
261 }
262 else if ( pVCpu->cpum.GstCtx.cs.Attr.n.u1Long
263 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA))
264 fExec |= IEM_F_MODE_X86_64BIT;
265 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
266 fExec |= IEM_F_MODE_X86_16BIT_PROT;
267 else
268 fExec |= IEM_F_MODE_X86_16BIT_PROT_PRE_386;
269 return fExec;
270 }
271 return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT);
272 }
273
274 /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
275 if (RT_LIKELY(!pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
276 {
277 if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
278 return IEM_F_MODE_X86_16BIT;
279 return IEM_F_MODE_X86_16BIT_PRE_386;
280 }
281
282 /* 32-bit unreal mode. */
283 return IEM_F_MODE_X86_32BIT | iemCalc32BitFlatIndicator(pVCpu);
284}
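/*
 * Illustrative example (added commentary, exact values depend on the IEM_F_XXX
 * definitions): for a protected-mode, ring-3 guest with flat 32-bit segments
 * the function above returns
 *      IEM_F_MODE_X86_32BIT_PROT | IEM_F_MODE_X86_32BIT_FLAT | (3 << IEM_F_X86_CPL_SHIFT)
 * while 64-bit kernel code (CS.L=1, EFER.LMA=1, CPL 0) yields just
 *      IEM_F_MODE_X86_64BIT
 * with a zero CPL field.
 */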
285
286
287/**
288 * Calculates the AMD-V and VT-x related context flags.
289 *
290 * @returns 0 or a combination of IEM_F_X86_CTX_IN_GUEST, IEM_F_X86_CTX_SVM and
291 * IEM_F_X86_CTX_VMX.
292 * @param pVCpu The cross context virtual CPU structure of the
293 * calling thread.
294 */
295DECL_FORCE_INLINE(uint32_t) iemCalcExecHwVirtFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
296{
297 /*
298 * This duplicates code from CPUMIsGuestVmxEnabled, CPUMIsGuestSvmEnabled
299 * and CPUMIsGuestInNestedHwvirtMode to some extent.
300 */
301 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
302
303 AssertCompile(X86_CR4_VMXE != MSR_K6_EFER_SVME);
304 uint64_t const fTmp = (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VMXE)
305 | (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SVME);
306 if (RT_LIKELY(!fTmp))
307 return 0; /* likely */
308
309 if (fTmp & X86_CR4_VMXE)
310 {
311 Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
312 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode)
313 return IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST;
314 return IEM_F_X86_CTX_VMX;
315 }
316
317 Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
318 if (pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN)
319 return IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST;
320 return IEM_F_X86_CTX_SVM;
321}
322
323
324/**
325 * Calculates IEM_F_PENDING_BRK_XXX (IEM_F_PENDING_BRK_MASK) flags.
326 *
327 * @returns IEM_F_PENDING_BRK_XXX or zero.
328 * @param pVCpu The cross context virtual CPU structure of the
329 * calling thread.
330 */
331DECL_FORCE_INLINE(uint32_t) iemCalcExecDbgFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
332{
333 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
334
335 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
336 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
337 return 0;
338 return iemCalcExecDbgFlagsSlow(pVCpu);
339}
340
341/**
342 * Calculates the IEM_F_XXX flags.
343 *
344 * @returns IEM_F_XXX combination matching the current CPU state.
345 * @param pVCpu The cross context virtual CPU structure of the
346 * calling thread.
347 */
348DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
349{
350 return iemCalcExecModeAndCplFlags(pVCpu)
351 | iemCalcExecHwVirtFlags(pVCpu)
352 /* SMM is not yet implemented */
353 | iemCalcExecDbgFlags(pVCpu)
354 ;
355}
356
357
358/**
359 * Re-calculates the MODE and CPL parts of IEMCPU::fExec.
360 *
361 * @param pVCpu The cross context virtual CPU structure of the
362 * calling thread.
363 */
364DECL_FORCE_INLINE(void) iemRecalcExecModeAndCplFlags(PVMCPUCC pVCpu)
365{
366 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
367 | iemCalcExecModeAndCplFlags(pVCpu);
368}
369
370
371/**
372 * Re-calculates the IEM_F_PENDING_BRK_MASK part of IEMCPU::fExec.
373 *
374 * @param pVCpu The cross context virtual CPU structure of the
375 * calling thread.
376 */
377DECL_FORCE_INLINE(void) iemRecalcExecDbgFlags(PVMCPUCC pVCpu)
378{
379 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_PENDING_BRK_MASK)
380 | iemCalcExecDbgFlags(pVCpu);
381}
382
383
384#ifndef IEM_WITH_OPAQUE_DECODER_STATE
385
386# if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */
387/**
388 * Initializes the execution state.
389 *
390 * @param pVCpu The cross context virtual CPU structure of the
391 * calling thread.
392 * @param fExecOpts Optional execution flags:
393 * - IEM_F_BYPASS_HANDLERS
394 * - IEM_F_X86_DISREGARD_LOCK
395 *
396 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
397 * side-effects in strict builds.
398 */
399DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
400{
401 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
402 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
403 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
404 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
405 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
406 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
407 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
408 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
409 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
410 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
411
412 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
413 pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
414 pVCpu->iem.s.cActiveMappings = 0;
415 pVCpu->iem.s.iNextMapping = 0;
416
417# ifdef VBOX_STRICT
418 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
419 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
420 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
421 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
422 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
423 pVCpu->iem.s.uRexReg = 127;
424 pVCpu->iem.s.uRexB = 127;
425 pVCpu->iem.s.offModRm = 127;
426 pVCpu->iem.s.uRexIndex = 127;
427 pVCpu->iem.s.iEffSeg = 127;
428 pVCpu->iem.s.idxPrefix = 127;
429 pVCpu->iem.s.uVex3rdReg = 127;
430 pVCpu->iem.s.uVexLength = 127;
431 pVCpu->iem.s.fEvexStuff = 127;
432 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
433# ifdef IEM_WITH_CODE_TLB
434 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
435 pVCpu->iem.s.pbInstrBuf = NULL;
436 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
437 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
438 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
439 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
440# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
441 pVCpu->iem.s.offOpcode = 127;
442# endif
443# else
444 pVCpu->iem.s.offOpcode = 127;
445 pVCpu->iem.s.cbOpcode = 127;
446# endif
447# endif /* VBOX_STRICT */
448}
449# endif /* VBOX_INCLUDED_vmm_dbgf_h */
450
451
452# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
453/**
454 * Performs a minimal reinitialization of the execution state.
455 *
456 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
457 * 'world-switch' type operations on the CPU. Currently only nested
458 * hardware-virtualization uses it.
459 *
460 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
461 * @param cbInstr The instruction length (for flushing).
462 */
463DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
464{
465 pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
466 iemOpcodeFlushHeavy(pVCpu, cbInstr);
467}
468# endif
469
470
471/**
472 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
473 *
474 * @param pVCpu The cross context virtual CPU structure of the
475 * calling thread.
476 */
477DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
478{
479 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
480# ifdef VBOX_STRICT
481# ifdef IEM_WITH_CODE_TLB
482 NOREF(pVCpu);
483# else
484 pVCpu->iem.s.cbOpcode = 0;
485# endif
486# else
487 NOREF(pVCpu);
488# endif
489}
490
491
492/**
493 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
494 *
495 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
496 *
497 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
499 * @param rcStrict The status code to fiddle.
500 */
501DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
502{
503 iemUninitExec(pVCpu);
504 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
505}
506
507
508/**
509 * Macro used by the IEMExec* methods to check the given instruction length.
510 *
511 * Will return on failure!
512 *
513 * @param a_cbInstr The given instruction length.
514 * @param a_cbMin The minimum length.
515 */
516# define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
517 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
518 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
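/*
 * Reading of the check above (added commentary): the single unsigned compare
 *      (unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin)
 * is equivalent to a_cbMin <= a_cbInstr <= 15, since values below a_cbMin wrap
 * around to huge unsigned numbers.  With a_cbMin = 2, for instance: a_cbInstr
 * of 1 gives 0xffffffff (rejected), 2 gives 0 (accepted), 15 gives 13
 * (accepted), and 16 gives 14 (rejected, as 15 - 2 = 13 is the upper bound).
 */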
519
520
521# ifndef IEM_WITH_SETJMP
522
523/**
524 * Fetches the first opcode byte.
525 *
526 * @returns Strict VBox status code.
527 * @param pVCpu The cross context virtual CPU structure of the
528 * calling thread.
529 * @param pu8 Where to return the opcode byte.
530 */
531DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
532{
533 /*
534 * Check for hardware instruction breakpoints.
535 */
536 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
537 { /* likely */ }
538 else
539 {
540 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
541 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
542 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
543 { /* likely */ }
544 else if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
545 return iemRaiseDebugException(pVCpu);
546 else
547 return rcStrict;
548 }
549
550 /*
551 * Fetch the first opcode byte.
552 */
553 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
554 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
555 {
556 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
557 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
558 return VINF_SUCCESS;
559 }
560 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
561}
562
563# else /* IEM_WITH_SETJMP */
564
565/**
566 * Fetches the first opcode byte, longjmp on error.
567 *
568 * @returns The opcode byte.
569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
570 */
571DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
572{
573 /*
574 * Check for hardware instruction breakpoints.
575 */
576 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
577 { /* likely */ }
578 else
579 {
580 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
581 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
582 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
583 { /* likely */ }
584 else
585 {
586 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
587 rcStrict = iemRaiseDebugException(pVCpu);
588 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
589 }
590 }
591
592 /*
593 * Fetch the first opcode byte.
594 */
595# ifdef IEM_WITH_CODE_TLB
596 uint8_t bRet;
597 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
598 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
599 if (RT_LIKELY( pbBuf != NULL
600 && offBuf < pVCpu->iem.s.cbInstrBuf))
601 {
602 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
603 bRet = pbBuf[offBuf];
604 }
605 else
606 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
607# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
608 Assert(pVCpu->iem.s.offOpcode == 0);
609 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
610# endif
611 return bRet;
612
613# else /* !IEM_WITH_CODE_TLB */
614 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
615 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
616 {
617 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
618 return pVCpu->iem.s.abOpcode[offOpcode];
619 }
620 return iemOpcodeGetNextU8SlowJmp(pVCpu);
621# endif
622}
623
624# endif /* IEM_WITH_SETJMP */
625
626/**
627 * Fetches the first opcode byte, returns/throws automatically on failure.
628 *
629 * @param a_pu8 Where to return the opcode byte.
630 * @remark Implicitly references pVCpu.
631 */
632# ifndef IEM_WITH_SETJMP
633# define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
634 do \
635 { \
636 VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
637 if (rcStrict2 == VINF_SUCCESS) \
638 { /* likely */ } \
639 else \
640 return rcStrict2; \
641 } while (0)
642# else
643# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
644# endif /* IEM_WITH_SETJMP */
645
646
647# ifndef IEM_WITH_SETJMP
648
649/**
650 * Fetches the next opcode byte.
651 *
652 * @returns Strict VBox status code.
653 * @param pVCpu The cross context virtual CPU structure of the
654 * calling thread.
655 * @param pu8 Where to return the opcode byte.
656 */
657DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
658{
659 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
660 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
661 {
662 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
663 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
664 return VINF_SUCCESS;
665 }
666 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
667}
668
669# else /* IEM_WITH_SETJMP */
670
671/**
672 * Fetches the next opcode byte, longjmp on error.
673 *
674 * @returns The opcode byte.
675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
676 */
677DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
678{
679# ifdef IEM_WITH_CODE_TLB
680 uint8_t bRet;
681 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
682 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
683 if (RT_LIKELY( pbBuf != NULL
684 && offBuf < pVCpu->iem.s.cbInstrBuf))
685 {
686 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
687 bRet = pbBuf[offBuf];
688 }
689 else
690 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
691# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
692 Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode));
693 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
694# endif
695 return bRet;
696
697# else /* !IEM_WITH_CODE_TLB */
698 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
699 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
700 {
701 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
702 return pVCpu->iem.s.abOpcode[offOpcode];
703 }
704 return iemOpcodeGetNextU8SlowJmp(pVCpu);
705# endif
706}
707
708# endif /* IEM_WITH_SETJMP */
709
710/**
711 * Fetches the next opcode byte, returns automatically on failure.
712 *
713 * @param a_pu8 Where to return the opcode byte.
714 * @remark Implicitly references pVCpu.
715 */
716# ifndef IEM_WITH_SETJMP
717# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
718 do \
719 { \
720 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
721 if (rcStrict2 == VINF_SUCCESS) \
722 { /* likely */ } \
723 else \
724 return rcStrict2; \
725 } while (0)
726# else
727# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
728# endif /* IEM_WITH_SETJMP */
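/*
 * Usage sketch (explanatory note, not part of the original header): decoder
 * functions fetch opcode bytes through these macros so the same source builds
 * both with and without IEM_WITH_SETJMP -- in the former case a fetch failure
 * longjmps, in the latter the macro returns the strict status code from the
 * enclosing function.  The opcode handler below is a made-up illustration.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_ExampleWithModRm)  // hypothetical handler name
 *      {
 *          uint8_t bRm;
 *          IEM_OPCODE_GET_NEXT_U8(&bRm);    // returns/longjmps on failure
 *          // ... decode bRm and emit the MC block ...
 *      }
 * @endcode
 */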
729
730
731# ifndef IEM_WITH_SETJMP
732/**
733 * Fetches the next signed byte from the opcode stream.
734 *
735 * @returns Strict VBox status code.
736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
737 * @param pi8 Where to return the signed byte.
738 */
739DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
740{
741 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
742}
743# endif /* !IEM_WITH_SETJMP */
744
745
746/**
747 * Fetches the next signed byte from the opcode stream, returning automatically
748 * on failure.
749 *
750 * @param a_pi8 Where to return the signed byte.
751 * @remark Implicitly references pVCpu.
752 */
753# ifndef IEM_WITH_SETJMP
754# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
755 do \
756 { \
757 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
758 if (rcStrict2 != VINF_SUCCESS) \
759 return rcStrict2; \
760 } while (0)
761# else /* IEM_WITH_SETJMP */
762# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
763
764# endif /* IEM_WITH_SETJMP */
765
766
767# ifndef IEM_WITH_SETJMP
768/**
769 * Fetches the next signed byte from the opcode stream, extending it to
770 * unsigned 16-bit.
771 *
772 * @returns Strict VBox status code.
773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
774 * @param pu16 Where to return the unsigned word.
775 */
776DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
777{
778 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
779 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
780 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
781
782 *pu16 = (uint16_t)(int16_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
783 pVCpu->iem.s.offOpcode = offOpcode + 1;
784 return VINF_SUCCESS;
785}
786# endif /* !IEM_WITH_SETJMP */
787
788/**
789 * Fetches the next signed byte from the opcode stream and sign-extends it to
790 * a word, returning automatically on failure.
791 *
792 * @param a_pu16 Where to return the word.
793 * @remark Implicitly references pVCpu.
794 */
795# ifndef IEM_WITH_SETJMP
796# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
797 do \
798 { \
799 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
800 if (rcStrict2 != VINF_SUCCESS) \
801 return rcStrict2; \
802 } while (0)
803# else
804# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
805# endif
806
807# ifndef IEM_WITH_SETJMP
808/**
809 * Fetches the next signed byte from the opcode stream, extending it to
810 * unsigned 32-bit.
811 *
812 * @returns Strict VBox status code.
813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
814 * @param pu32 Where to return the unsigned dword.
815 */
816DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
817{
818 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
819 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
820 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
821
822 *pu32 = (uint32_t)(int32_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
823 pVCpu->iem.s.offOpcode = offOpcode + 1;
824 return VINF_SUCCESS;
825}
826# endif /* !IEM_WITH_SETJMP */
827
828/**
829 * Fetches the next signed byte from the opcode stream and sign-extends it to
830 * a double word, returning automatically on failure.
831 *
832 * @param a_pu32 Where to return the double word.
833 * @remark Implicitly references pVCpu.
834 */
835# ifndef IEM_WITH_SETJMP
836# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
837 do \
838 { \
839 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
840 if (rcStrict2 != VINF_SUCCESS) \
841 return rcStrict2; \
842 } while (0)
843# else
844# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
845# endif
846
847
848# ifndef IEM_WITH_SETJMP
849/**
850 * Fetches the next signed byte from the opcode stream, extending it to
851 * unsigned 64-bit.
852 *
853 * @returns Strict VBox status code.
854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
855 * @param pu64 Where to return the unsigned qword.
856 */
857DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
858{
859 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
860 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
861 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
862
863 *pu64 = (uint64_t)(int64_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
864 pVCpu->iem.s.offOpcode = offOpcode + 1;
865 return VINF_SUCCESS;
866}
867# endif /* !IEM_WITH_SETJMP */
868
869/**
870 * Fetches the next signed byte from the opcode stream and sign-extends it to
871 * a quad word, returning automatically on failure.
872 *
873 * @param a_pu64 Where to return the quad word.
874 * @remark Implicitly references pVCpu.
875 */
876# ifndef IEM_WITH_SETJMP
877# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
878 do \
879 { \
880 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
881 if (rcStrict2 != VINF_SUCCESS) \
882 return rcStrict2; \
883 } while (0)
884# else
885# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
886# endif
887
888
889# ifndef IEM_WITH_SETJMP
890
891/**
892 * Fetches the next opcode word.
893 *
894 * @returns Strict VBox status code.
895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
896 * @param pu16 Where to return the opcode word.
897 */
898DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
899{
900 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
901 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
902 {
903 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
904# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
905 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
906# else
907 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
908# endif
909 return VINF_SUCCESS;
910 }
911 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
912}
913
914# else /* IEM_WITH_SETJMP */
915
916/**
917 * Fetches the next opcode word, longjmp on error.
918 *
919 * @returns The opcode word.
920 * @param pVCpu The cross context virtual CPU structure of the calling thread.
921 */
922DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
923{
924# ifdef IEM_WITH_CODE_TLB
925 uint16_t u16Ret;
926 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
927 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
928 if (RT_LIKELY( pbBuf != NULL
929 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
930 {
931 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
932# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
933 u16Ret = *(uint16_t const *)&pbBuf[offBuf];
934# else
935 u16Ret = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
936# endif
937 }
938 else
939 u16Ret = iemOpcodeGetNextU16SlowJmp(pVCpu);
940
941# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
942 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
943 Assert(offOpcode + 1 < sizeof(pVCpu->iem.s.abOpcode));
944# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
945 *(uint16_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u16Ret;
946# else
947 pVCpu->iem.s.abOpcode[offOpcode] = RT_LO_U8(u16Ret);
948 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_HI_U8(u16Ret);
949# endif
950 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)2;
951# endif
952
953 return u16Ret;
954
955# else /* !IEM_WITH_CODE_TLB */
956 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
957 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
958 {
959 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
960# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
961 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
962# else
963 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
964# endif
965 }
966 return iemOpcodeGetNextU16SlowJmp(pVCpu);
967# endif /* !IEM_WITH_CODE_TLB */
968}
969
970# endif /* IEM_WITH_SETJMP */
971
972/**
973 * Fetches the next opcode word, returns automatically on failure.
974 *
975 * @param a_pu16 Where to return the opcode word.
976 * @remark Implicitly references pVCpu.
977 */
978# ifndef IEM_WITH_SETJMP
979# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
980 do \
981 { \
982 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
983 if (rcStrict2 != VINF_SUCCESS) \
984 return rcStrict2; \
985 } while (0)
986# else
987# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
988# endif
989
990# ifndef IEM_WITH_SETJMP
991/**
992 * Fetches the next opcode word, zero extending it to a double word.
993 *
994 * @returns Strict VBox status code.
995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
996 * @param pu32 Where to return the opcode double word.
997 */
998DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
999{
1000 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1001 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
1002 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
1003
1004 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1005 pVCpu->iem.s.offOpcode = offOpcode + 2;
1006 return VINF_SUCCESS;
1007}
1008# endif /* !IEM_WITH_SETJMP */
1009
1010/**
1011 * Fetches the next opcode word and zero extends it to a double word, returns
1012 * automatically on failure.
1013 *
1014 * @param a_pu32 Where to return the opcode double word.
1015 * @remark Implicitly references pVCpu.
1016 */
1017# ifndef IEM_WITH_SETJMP
1018# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1019 do \
1020 { \
1021 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
1022 if (rcStrict2 != VINF_SUCCESS) \
1023 return rcStrict2; \
1024 } while (0)
1025# else
1026# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
1027# endif
1028
1029# ifndef IEM_WITH_SETJMP
1030/**
1031 * Fetches the next opcode word, zero extending it to a quad word.
1032 *
1033 * @returns Strict VBox status code.
1034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1035 * @param pu64 Where to return the opcode quad word.
1036 */
1037DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1038{
1039 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1040 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
1041 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
1042
1043 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1044 pVCpu->iem.s.offOpcode = offOpcode + 2;
1045 return VINF_SUCCESS;
1046}
1047# endif /* !IEM_WITH_SETJMP */
1048
1049/**
1050 * Fetches the next opcode word and zero extends it to a quad word, returns
1051 * automatically on failure.
1052 *
1053 * @param a_pu64 Where to return the opcode quad word.
1054 * @remark Implicitly references pVCpu.
1055 */
1056# ifndef IEM_WITH_SETJMP
1057# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1058 do \
1059 { \
1060 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
1061 if (rcStrict2 != VINF_SUCCESS) \
1062 return rcStrict2; \
1063 } while (0)
1064# else
1065# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
1066# endif
1067
1068
1069# ifndef IEM_WITH_SETJMP
1070/**
1071 * Fetches the next signed word from the opcode stream.
1072 *
1073 * @returns Strict VBox status code.
1074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1075 * @param pi16 Where to return the signed word.
1076 */
1077DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
1078{
1079 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
1080}
1081# endif /* !IEM_WITH_SETJMP */
1082
1083
1084/**
1085 * Fetches the next signed word from the opcode stream, returning automatically
1086 * on failure.
1087 *
1088 * @param a_pi16 Where to return the signed word.
1089 * @remark Implicitly references pVCpu.
1090 */
1091# ifndef IEM_WITH_SETJMP
1092# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1093 do \
1094 { \
1095 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
1096 if (rcStrict2 != VINF_SUCCESS) \
1097 return rcStrict2; \
1098 } while (0)
1099# else
1100# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
1101# endif
1102
1103# ifndef IEM_WITH_SETJMP
1104
1105/**
1106 * Fetches the next opcode dword.
1107 *
1108 * @returns Strict VBox status code.
1109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1110 * @param pu32 Where to return the opcode double word.
1111 */
1112DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1113{
1114 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1115 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1116 {
1117 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1118# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1119 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1120# else
1121 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1122 pVCpu->iem.s.abOpcode[offOpcode + 1],
1123 pVCpu->iem.s.abOpcode[offOpcode + 2],
1124 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1125# endif
1126 return VINF_SUCCESS;
1127 }
1128 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
1129}
1130
1131# else /* IEM_WITH_SETJMP */
1132
1133/**
1134 * Fetches the next opcode dword, longjmp on error.
1135 *
1136 * @returns The opcode dword.
1137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1138 */
1139DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1140{
1141# ifdef IEM_WITH_CODE_TLB
1142 uint32_t u32Ret;
1143 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1144 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1145 if (RT_LIKELY( pbBuf != NULL
1146 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
1147 {
1148 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
1149# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1150 u32Ret = *(uint32_t const *)&pbBuf[offBuf];
1151# else
1152 u32Ret = RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
1153 pbBuf[offBuf + 1],
1154 pbBuf[offBuf + 2],
1155 pbBuf[offBuf + 3]);
1156# endif
1157 }
1158 else
1159 u32Ret = iemOpcodeGetNextU32SlowJmp(pVCpu);
1160
1161# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1162 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1163 Assert(offOpcode + 3 < sizeof(pVCpu->iem.s.abOpcode));
1164# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1165 *(uint32_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u32Ret;
1166# else
1167 pVCpu->iem.s.abOpcode[offOpcode] = RT_BYTE1(u32Ret);
1168 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u32Ret);
1169 pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u32Ret);
1170 pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u32Ret);
1171# endif
1172 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)4;
1173# endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
1174
1175 return u32Ret;
1176
1177# else /* !IEM_WITH_CODE_TLB */
1178 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1179 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1180 {
1181 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1182# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1183 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1184# else
1185 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1186 pVCpu->iem.s.abOpcode[offOpcode + 1],
1187 pVCpu->iem.s.abOpcode[offOpcode + 2],
1188 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1189# endif
1190 }
1191 return iemOpcodeGetNextU32SlowJmp(pVCpu);
1192# endif
1193}
1194
1195# endif /* IEM_WITH_SETJMP */
1196
1197/**
1198 * Fetches the next opcode dword, returns automatically on failure.
1199 *
1200 * @param a_pu32 Where to return the opcode dword.
1201 * @remark Implicitly references pVCpu.
1202 */
1203# ifndef IEM_WITH_SETJMP
1204# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1205 do \
1206 { \
1207 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
1208 if (rcStrict2 != VINF_SUCCESS) \
1209 return rcStrict2; \
1210 } while (0)
1211# else
1212# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
1213# endif
1214
1215# ifndef IEM_WITH_SETJMP
1216/**
1217 * Fetches the next opcode dword, zero extending it to a quad word.
1218 *
1219 * @returns Strict VBox status code.
1220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1221 * @param pu64 Where to return the opcode quad word.
1222 */
1223DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1224{
1225 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1226 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1227 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
1228
1229 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1230 pVCpu->iem.s.abOpcode[offOpcode + 1],
1231 pVCpu->iem.s.abOpcode[offOpcode + 2],
1232 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1233 pVCpu->iem.s.offOpcode = offOpcode + 4;
1234 return VINF_SUCCESS;
1235}
1236# endif /* !IEM_WITH_SETJMP */
1237
1238/**
1239 * Fetches the next opcode dword and zero extends it to a quad word, returns
1240 * automatically on failure.
1241 *
1242 * @param a_pu64 Where to return the opcode quad word.
1243 * @remark Implicitly references pVCpu.
1244 */
1245# ifndef IEM_WITH_SETJMP
1246# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1247 do \
1248 { \
1249 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
1250 if (rcStrict2 != VINF_SUCCESS) \
1251 return rcStrict2; \
1252 } while (0)
1253# else
1254# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
1255# endif
1256
1257
1258# ifndef IEM_WITH_SETJMP
1259/**
1260 * Fetches the next signed double word from the opcode stream.
1261 *
1262 * @returns Strict VBox status code.
1263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1264 * @param pi32 Where to return the signed double word.
1265 */
1266DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
1267{
1268 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
1269}
1270# endif
1271
1272/**
1273 * Fetches the next signed double word from the opcode stream, returning
1274 * automatically on failure.
1275 *
1276 * @param a_pi32 Where to return the signed double word.
1277 * @remark Implicitly references pVCpu.
1278 */
1279# ifndef IEM_WITH_SETJMP
1280# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1281 do \
1282 { \
1283 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
1284 if (rcStrict2 != VINF_SUCCESS) \
1285 return rcStrict2; \
1286 } while (0)
1287# else
1288# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1289# endif
1290
1291# ifndef IEM_WITH_SETJMP
1292/**
1293 * Fetches the next opcode dword, sign extending it into a quad word.
1294 *
1295 * @returns Strict VBox status code.
1296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1297 * @param pu64 Where to return the opcode quad word.
1298 */
1299DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1300{
1301 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1302 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1303 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
1304
1305 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1306 pVCpu->iem.s.abOpcode[offOpcode + 1],
1307 pVCpu->iem.s.abOpcode[offOpcode + 2],
1308 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1309 *pu64 = (uint64_t)(int64_t)i32;
1310 pVCpu->iem.s.offOpcode = offOpcode + 4;
1311 return VINF_SUCCESS;
1312}
1313# endif /* !IEM_WITH_SETJMP */
1314
1315/**
1316 * Fetches the next opcode double word and sign extends it to a quad word,
1317 * returns automatically on failure.
1318 *
1319 * @param a_pu64 Where to return the opcode quad word.
1320 * @remark Implicitly references pVCpu.
1321 */
1322# ifndef IEM_WITH_SETJMP
1323# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1324 do \
1325 { \
1326 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
1327 if (rcStrict2 != VINF_SUCCESS) \
1328 return rcStrict2; \
1329 } while (0)
1330# else
1331# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1332# endif
1333
1334# ifndef IEM_WITH_SETJMP
1335
1336/**
1337 * Fetches the next opcode qword.
1338 *
1339 * @returns Strict VBox status code.
1340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1341 * @param pu64 Where to return the opcode qword.
1342 */
1343DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1344{
1345 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1346 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1347 {
1348# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1349 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1350# else
1351 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1352 pVCpu->iem.s.abOpcode[offOpcode + 1],
1353 pVCpu->iem.s.abOpcode[offOpcode + 2],
1354 pVCpu->iem.s.abOpcode[offOpcode + 3],
1355 pVCpu->iem.s.abOpcode[offOpcode + 4],
1356 pVCpu->iem.s.abOpcode[offOpcode + 5],
1357 pVCpu->iem.s.abOpcode[offOpcode + 6],
1358 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1359# endif
1360 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1361 return VINF_SUCCESS;
1362 }
1363 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
1364}
1365
1366# else /* IEM_WITH_SETJMP */
1367
1368/**
1369 * Fetches the next opcode qword, longjmp on error.
1370 *
1371 * @returns The opcode qword.
1372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1373 */
1374DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1375{
1376# ifdef IEM_WITH_CODE_TLB
1377 uint64_t u64Ret;
1378 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1379 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1380 if (RT_LIKELY( pbBuf != NULL
1381 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
1382 {
1383 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
1384# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1385 u64Ret = *(uint64_t const *)&pbBuf[offBuf];
1386# else
1387 u64Ret = RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
1388 pbBuf[offBuf + 1],
1389 pbBuf[offBuf + 2],
1390 pbBuf[offBuf + 3],
1391 pbBuf[offBuf + 4],
1392 pbBuf[offBuf + 5],
1393 pbBuf[offBuf + 6],
1394 pbBuf[offBuf + 7]);
1395# endif
1396 }
1397 else
1398 u64Ret = iemOpcodeGetNextU64SlowJmp(pVCpu);
1399
1400# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1401 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1402 Assert(offOpcode + 7 < sizeof(pVCpu->iem.s.abOpcode));
1403# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1404 *(uint64_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u64Ret;
1405# else
1406 pVCpu->iem.s.abOpcode[offOpcode] = RT_BYTE1(u64Ret);
1407 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u64Ret);
1408 pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u64Ret);
1409 pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u64Ret);
1410 pVCpu->iem.s.abOpcode[offOpcode + 4] = RT_BYTE5(u64Ret);
1411 pVCpu->iem.s.abOpcode[offOpcode + 5] = RT_BYTE6(u64Ret);
1412 pVCpu->iem.s.abOpcode[offOpcode + 6] = RT_BYTE7(u64Ret);
1413 pVCpu->iem.s.abOpcode[offOpcode + 7] = RT_BYTE8(u64Ret);
1414# endif
1415 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)8;
1416# endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
1417
1418 return u64Ret;
1419
1420# else /* !IEM_WITH_CODE_TLB */
1421 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1422 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1423 {
1424 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1425# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1426 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1427# else
1428 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1429 pVCpu->iem.s.abOpcode[offOpcode + 1],
1430 pVCpu->iem.s.abOpcode[offOpcode + 2],
1431 pVCpu->iem.s.abOpcode[offOpcode + 3],
1432 pVCpu->iem.s.abOpcode[offOpcode + 4],
1433 pVCpu->iem.s.abOpcode[offOpcode + 5],
1434 pVCpu->iem.s.abOpcode[offOpcode + 6],
1435 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1436# endif
1437 }
1438 return iemOpcodeGetNextU64SlowJmp(pVCpu);
1439# endif /* !IEM_WITH_CODE_TLB */
1440}
1441
1442# endif /* IEM_WITH_SETJMP */
1443
1444/**
1445 * Fetches the next opcode quad word, returns automatically on failure.
1446 *
1447 * @param a_pu64 Where to return the opcode quad word.
1448 * @remark Implicitly references pVCpu.
1449 */
1450# ifndef IEM_WITH_SETJMP
1451# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1452 do \
1453 { \
1454 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
1455 if (rcStrict2 != VINF_SUCCESS) \
1456 return rcStrict2; \
1457 } while (0)
1458# else
1459# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
1460# endif
1461
1462/**
1463 * For fetching the opcode bytes for a ModR/M effective address, but throwing
1464 * away the result.
1465 *
1466 * This is used when decoding undefined opcodes and such where we want to avoid
1467 * unnecessary MC blocks.
1468 *
1469 * @note The recompiler code overrides this one so iemOpHlpCalcRmEffAddrJmpEx is
1470 * used instead. At least for now...
1471 */
1472# ifndef IEM_WITH_SETJMP
1473# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
1474 RTGCPTR GCPtrEff; \
1475 VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, (a_bRm), 0, &GCPtrEff); \
1476 if (rcStrict != VINF_SUCCESS) \
1477 return rcStrict; \
1478 } while (0)
1479# else
1480# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
1481 (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), 0); \
1482 } while (0)
1483# endif
1484
1485#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1486
1487
1488/** @name Misc Worker Functions.
1489 * @{
1490 */
1491
1492/**
1493 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1494 * not (kind of obsolete now).
1495 *
1496 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1497 */
1498#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
1499
1500/**
1501 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
1502 *
1503 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1504 * @param a_fEfl The new EFLAGS.
1505 */
1506#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
1507
1508
1509/**
1510 * Loads a NULL data selector into a selector register, both the hidden and
1511 * visible parts, in protected mode.
1512 *
1513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1514 * @param pSReg Pointer to the segment register.
1515 * @param uRpl The RPL.
1516 */
1517DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
1518{
1519 /** @todo Testcase: write a testcase checking what happens when loading a NULL
1520 * data selector in protected mode. */
1521 pSReg->Sel = uRpl;
1522 pSReg->ValidSel = uRpl;
1523 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1524 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1525 {
1526 /* VT-x (Intel 3960x) observed doing something like this. */
1527 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT);
1528 pSReg->u32Limit = UINT32_MAX;
1529 pSReg->u64Base = 0;
1530 }
1531 else
1532 {
1533 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
1534 pSReg->u32Limit = 0;
1535 pSReg->u64Base = 0;
1536 }
1537}
1538
1539/** @} */
1540
1541
1542/*
1543 *
1544 * Helper routines.
1545 * Helper routines.
1546 * Helper routines.
1547 *
1548 */
1549
1550#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1551
1552/**
1553 * Recalculates the effective operand size.
1554 *
1555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1556 */
1557DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
1558{
1559 switch (IEM_GET_CPU_MODE(pVCpu))
1560 {
1561 case IEMMODE_16BIT:
1562 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1563 break;
1564 case IEMMODE_32BIT:
1565 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1566 break;
1567 case IEMMODE_64BIT:
1568 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1569 {
1570 case 0:
1571 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
1572 break;
1573 case IEM_OP_PRF_SIZE_OP:
1574 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1575 break;
1576 case IEM_OP_PRF_SIZE_REX_W:
1577 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1578 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1579 break;
1580 }
1581 break;
1582 default:
1583 AssertFailed();
1584 }
1585}
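/*
 * Worked example (added commentary): in 64-bit code with the default operand
 * size left at 32-bit, a lone 66h prefix (IEM_OP_PRF_SIZE_OP) selects a 16-bit
 * effective operand size, REX.W (IEM_OP_PRF_SIZE_REX_W) selects 64-bit and wins
 * even when 66h is also present, and no prefix keeps the 32-bit default.  In
 * 16-bit and 32-bit code the 66h prefix simply toggles between the two sizes,
 * as per the first two cases of the switch above.
 */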
1586
1587
1588/**
1589 * Sets the default operand size to 64-bit and recalculates the effective
1590 * operand size.
1591 *
1592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1593 */
1594DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
1595{
1596 Assert(IEM_IS_64BIT_CODE(pVCpu));
1597 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1598 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1599 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1600 else
1601 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1602}
1603
1604
1605/**
1606 * Sets the default operand size to 64-bit and recalculates the effective
1607 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
1608 *
1609 * This is for the relative jumps.
1610 *
1611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1612 */
1613DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
1614{
1615 Assert(IEM_IS_64BIT_CODE(pVCpu));
1616 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1617 if ( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
1618 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1619 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1620 else
1621 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1622}
1623
1624#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1625
1626
1627
1628/** @name Register Access.
1629 * @{
1630 */
1631
1632/**
1633 * Gets a reference (pointer) to the specified hidden segment register.
1634 *
1635 * @returns Hidden register reference.
1636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1637 * @param iSegReg The segment register.
1638 */
1639DECL_FORCE_INLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1640{
1641 Assert(iSegReg < X86_SREG_COUNT);
1642 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1643 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1644
1645 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1646 return pSReg;
1647}
1648
1649
1650/**
1651 * Ensures that the given hidden segment register is up to date.
1652 *
1653 * @returns Hidden register reference.
1654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1655 * @param pSReg The segment register.
1656 */
1657DECL_FORCE_INLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
1658{
1659 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1660 NOREF(pVCpu);
1661 return pSReg;
1662}
1663
1664
1665/**
1666 * Gets a reference (pointer) to the specified segment register (the selector
1667 * value).
1668 *
1669 * @returns Pointer to the selector variable.
1670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1671 * @param iSegReg The segment register.
1672 */
1673DECL_FORCE_INLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1674{
1675 Assert(iSegReg < X86_SREG_COUNT);
1676 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1677 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1678}
1679
1680
1681/**
1682 * Fetches the selector value of a segment register.
1683 *
1684 * @returns The selector value.
1685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1686 * @param iSegReg The segment register.
1687 */
1688DECL_FORCE_INLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1689{
1690 Assert(iSegReg < X86_SREG_COUNT);
1691 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1692 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1693}
1694
1695
1696/**
1697 * Fetches the base address value of a segment register.
1698 *
1699 * @returns The segment base address.
1700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1701 * @param iSegReg The segment register.
1702 */
1703DECL_FORCE_INLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1704{
1705 Assert(iSegReg < X86_SREG_COUNT);
1706 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1707 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1708}
1709
1710
1711/**
1712 * Gets a reference (pointer) to the specified general purpose register.
1713 *
1714 * @returns Register reference.
1715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1716 * @param iReg The general purpose register.
1717 */
1718DECL_FORCE_INLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1719{
1720 Assert(iReg < 16);
1721 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
1722}
1723
1724
1725#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1726/**
1727 * Gets a reference (pointer) to the specified 8-bit general purpose register.
1728 *
1729 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1730 *
1731 * @returns Register reference.
1732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1733 * @param iReg The register.
1734 */
1735DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1736{
1737 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
1738 {
1739 Assert(iReg < 16);
1740 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
1741 }
1742 /* high 8-bit register. */
1743 Assert(iReg < 8);
1744 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
1745}
1746#endif
1747
1748
1749/**
1750 * Gets a reference (pointer) to the specified 8-bit general purpose register,
1751 * alternative version with extended (20) register index.
1752 *
1753 * @returns Register reference.
1754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1755 * @param iRegEx The register. The first 16 are the regular ones,
1756 * whereas 16 thru 19 map to AH, CH, DH and BH.
1757 */
1758DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1759{
1760 /** @todo This could be done by double indexing on little endian hosts:
1761 * return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 15].ab[iRegEx >> 4]; */
1762 if (iRegEx < 16)
1763 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx].u8;
1764
1765 /* high 8-bit register. */
1766 Assert(iRegEx < 20);
1767 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 3].bHi;
1768}
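
/*
 * Illustrative example of the extended index encoding: indexes 0..15 address
 * the low bytes of the 16 general registers, while 16..19 alias AH, CH, DH
 * and BH:
 * @code
 *      uint8_t *pbAl = iemGRegRefU8Ex(pVCpu, X86_GREG_xAX);        // AL (index 0)
 *      uint8_t *pbAh = iemGRegRefU8Ex(pVCpu, X86_GREG_xAX + 16);   // AH (index 16)
 *      uint8_t *pbBh = iemGRegRefU8Ex(pVCpu, X86_GREG_xBX + 16);   // BH (index 19)
 * @endcode
 */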
1769
1770
1771/**
1772 * Gets a reference (pointer) to the specified 16-bit general purpose register.
1773 *
1774 * @returns Register reference.
1775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1776 * @param iReg The register.
1777 */
1778DECL_FORCE_INLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1779{
1780 Assert(iReg < 16);
1781 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1782}
1783
1784
1785/**
1786 * Gets a reference (pointer) to the specified 32-bit general purpose register.
1787 *
1788 * @returns Register reference.
1789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1790 * @param iReg The register.
1791 */
1792DECL_FORCE_INLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1793{
1794 Assert(iReg < 16);
1795 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1796}
1797
1798
1799/**
1800 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
1801 *
1802 * @returns Register reference.
1803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1804 * @param iReg The register.
1805 */
1806DECL_FORCE_INLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1807{
1808 Assert(iReg < 16);
1809 return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1810}
1811
1812
1813/**
1814 * Gets a reference (pointer) to the specified 64-bit general purpose register.
1815 *
1816 * @returns Register reference.
1817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1818 * @param iReg The register.
1819 */
1820DECL_FORCE_INLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1821{
1822 Assert(iReg < 16);
1823 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1824}
1825
1826
1827/**
1828 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
1829 *
1830 * @returns Register reference.
1831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1832 * @param iReg The register.
1833 */
1834DECL_FORCE_INLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1835{
1836 Assert(iReg < 16);
1837 return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1838}
1839
1840
1841/**
1842 * Gets a reference (pointer) to the specified segment register's base address.
1843 *
1844 * @returns Segment register base address reference.
1845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1846 * @param iSegReg The segment selector.
1847 */
1848DECL_FORCE_INLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1849{
1850 Assert(iSegReg < X86_SREG_COUNT);
1851 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1852 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1853}
1854
1855
1856#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1857/**
1858 * Fetches the value of an 8-bit general purpose register.
1859 *
1860 * @returns The register value.
1861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1862 * @param iReg The register.
1863 */
1864DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1865{
1866 return *iemGRegRefU8(pVCpu, iReg);
1867}
1868#endif
1869
1870
1871/**
1872 * Fetches the value of an 8-bit general purpose register, alternative version
1873 * with extended (20) register index.
1874 *
1875 * @returns The register value.
1876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1877 * @param iRegEx The register. The first 16 are the regular ones,
1878 * whereas 16 thru 19 map to AH, CH, DH and BH.
1879 */
1880DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1881{
1882 return *iemGRegRefU8Ex(pVCpu, iRegEx);
1883}
1884
1885
1886/**
1887 * Fetches the value of a 16-bit general purpose register.
1888 *
1889 * @returns The register value.
1890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1891 * @param iReg The register.
1892 */
1893DECL_FORCE_INLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1894{
1895 Assert(iReg < 16);
1896 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1897}
1898
1899
1900/**
1901 * Fetches the value of a 32-bit general purpose register.
1902 *
1903 * @returns The register value.
1904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1905 * @param iReg The register.
1906 */
1907DECL_FORCE_INLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1908{
1909 Assert(iReg < 16);
1910 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1911}
1912
1913
1914/**
1915 * Fetches the value of a 64-bit general purpose register.
1916 *
1917 * @returns The register value.
1918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1919 * @param iReg The register.
1920 */
1921DECL_FORCE_INLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1922{
1923 Assert(iReg < 16);
1924 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1925}
1926
1927
1928/**
1929 * Stores a 16-bit value to a general purpose register.
1930 *
1931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1932 * @param iReg The register.
1933 * @param uValue The value to store.
1934 */
1935DECL_FORCE_INLINE(void) iemGRegStoreU16(PVMCPUCC pVCpu, uint8_t iReg, uint16_t uValue) RT_NOEXCEPT
1936{
1937 Assert(iReg < 16);
1938 pVCpu->cpum.GstCtx.aGRegs[iReg].u16 = uValue;
1939}
1940
1941
1942/**
1943 * Stores a 32-bit value to a general purpose register, implicitly clearing the
1944 * high 32 bits of the full 64-bit register.
1945 *
1946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1947 * @param iReg The register.
1948 * @param uValue The value to store.
1949 */
1950DECL_FORCE_INLINE(void) iemGRegStoreU32(PVMCPUCC pVCpu, uint8_t iReg, uint32_t uValue) RT_NOEXCEPT
1951{
1952 Assert(iReg < 16);
1953 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
1954}
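
/*
 * Illustrative example of the differing write widths: a 32-bit store zero
 * extends into the whole 64-bit register, whereas a 16-bit store leaves
 * bits 63:16 untouched:
 * @code
 *      iemGRegStoreU64(pVCpu, X86_GREG_xAX, UINT64_MAX);
 *      iemGRegStoreU32(pVCpu, X86_GREG_xAX, 0x12345678);    // RAX = 0x0000000012345678
 *      iemGRegStoreU16(pVCpu, X86_GREG_xAX, 0xabcd);        // RAX = 0x000000001234abcd
 * @endcode
 */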
1955
1956
1957/**
1958 * Stores a 64-bit value to a general purpose register.
1959 *
1960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1961 * @param iReg The register.
1962 * @param uValue The value to store.
1963 */
1964DECL_FORCE_INLINE(void) iemGRegStoreU64(PVMCPUCC pVCpu, uint8_t iReg, uint64_t uValue) RT_NOEXCEPT
1965{
1966 Assert(iReg < 16);
1967 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
1968}
1969
1970
1971/**
1972 * Get the address of the top of the stack.
1973 *
1974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1975 */
1976DECL_FORCE_INLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
1977{
1978 if (IEM_IS_64BIT_CODE(pVCpu))
1979 return pVCpu->cpum.GstCtx.rsp;
1980 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1981 return pVCpu->cpum.GstCtx.esp;
1982 return pVCpu->cpum.GstCtx.sp;
1983}
1984
1985
1986/**
1987 * Updates the RIP/EIP/IP to point to the next instruction.
1988 *
1989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1990 * @param cbInstr The number of bytes to add.
1991 */
1992DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1993{
1994 /*
1995 * Advance RIP.
1996 *
1997 * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
1998 * while in all other modes except LM64 the updates are 32-bit. This means
1999 * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
2000 * 4GB and 64KB rollovers, and decide whether anything needs masking.
2001 *
2002 * See PC wrap around tests in bs3-cpu-weird-1.
2003 */
2004 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
2005 uint64_t const uRipNext = uRipPrev + cbInstr;
2006 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
2007 || IEM_IS_64BIT_CODE(pVCpu)))
2008 pVCpu->cpum.GstCtx.rip = uRipNext;
2009 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
2010 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
2011 else
2012 pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
2013}
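
/*
 * Worked example of the wrap handling above, assuming a 16-bit code segment
 * with IP=0xfffe and a 4 byte instruction (uRipNext = 0x10002):
 * @code
 *      //  pre-386 target:  RIP = (uint16_t)0x10002 = 0x0002   (IP wraps at 64KB)
 *      //  386+ target:     RIP = (uint32_t)0x10002 = 0x10002  (left to the limit check)
 * @endcode
 */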
2014
2015
2016/**
2017 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
2018 * following EFLAGS bits are set:
2019 * - X86_EFL_RF - clear it.
2020 * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
2021 * - X86_EFL_TF - generate single step \#DB trap.
2022 * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
2023 * instruction).
2024 *
2025 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
2026 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
2027 * takes priority over both NMIs and hardware interrupts. So, neither is
2028 * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
2029 * either unsupported or will be triggered on top of any \#DB raised here.)
2030 *
2031 * The RF flag only needs to be cleared here as it only suppresses instruction
2032 * breakpoints which are not raised here (happens synchronously during
2033 * instruction fetching).
2034 *
2035 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
2036 * status has no bearing on whether \#DB exceptions are raised.
2037 *
2038 * @note This must *NOT* be called by the two instructions setting the
2039 * CPUMCTX_INHIBIT_SHADOW_SS flag.
2040 *
2041 * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
2042 * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
2043 * Stacks}
2044 */
2045static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu) RT_NOEXCEPT
2046{
2047 /*
2048 * Normally we're just here to clear RF and/or interrupt shadow bits.
2049 */
2050 if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
2051 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
2052 else
2053 {
2054 /*
2055 * Raise a #DB or/and DBGF event.
2056 */
2057 VBOXSTRICTRC rcStrict;
2058 if (pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK))
2059 {
2060 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2061 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2062 if (pVCpu->cpum.GstCtx.eflags.uBoth & X86_EFL_TF)
2063 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
2064 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2065 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
2066 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2067 pVCpu->cpum.GstCtx.rflags.uBoth));
2068
2069 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
2070 rcStrict = iemRaiseDebugException(pVCpu);
2071
2072 /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
2073 if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_FAILURE(rcStrict))
2074 {
2075 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2076 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2077 }
2078 }
2079 else
2080 {
2081 Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
2082 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2083 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2084 }
2085 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
2086 return rcStrict;
2087 }
2088 return VINF_SUCCESS;
2089}
2090
2091
2092/**
2093 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
2094 *
2095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2096 */
2097DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
2098{
2099 /*
2100 * We assume that most of the time nothing actually needs doing here.
2101 */
2102 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2103 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
2104 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
2105 return VINF_SUCCESS;
2106 return iemFinishInstructionWithFlagsSet(pVCpu);
2107}
2108
2109
2110/**
2111 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
2112 * and CPUMCTX_INHIBIT_SHADOW.
2113 *
2114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2115 * @param cbInstr The number of bytes to add.
2116 */
2117DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2118{
2119 iemRegAddToRip(pVCpu, cbInstr);
2120 return iemRegFinishClearingRF(pVCpu);
2121}
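
/*
 * Usage sketch (illustrative only; real instruction implementations go
 * through the IEM_MC_* machinery): a non-branching instruction typically ends
 * by advancing RIP and handling RF/TF/shadow state in a single call:
 * @code
 *      iemGRegStoreU32(pVCpu, X86_GREG_xAX, uResult);
 *      return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
 * @endcode
 */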
2122
2123
2124/**
2125 * Updates the RIP to point to the next instruction and clears EFLAGS.RF
2126 * and CPUMCTX_INHIBIT_SHADOW.
2127 *
2128 * Only called from 64-bit code.
2129 *
2130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2131 * @param cbInstr The number of bytes to add.
2132 */
2133DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2134{
2135 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2136 return iemRegFinishClearingRF(pVCpu);
2137}
2138
2139
2140/**
2141 * Updates the EIP to point to the next instruction and clears EFLAGS.RF and
2142 * CPUMCTX_INHIBIT_SHADOW.
2143 *
2144 * This is never called from 64-bit code.
2145 *
2146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2147 * @param cbInstr The number of bytes to add.
2148 */
2149DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2150{
2151 pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
2152 return iemRegFinishClearingRF(pVCpu);
2153}
2154
2155
2156/**
2157 * Updates the IP to point to the next instruction and clears EFLAGS.RF and
2158 * CPUMCTX_INHIBIT_SHADOW.
2159 *
2160 * This is only ever used from 16-bit code on a pre-386 CPU.
2161 *
2162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2163 * @param cbInstr The number of bytes to add.
2164 */
2165DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2166{
2167 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
2168 return iemRegFinishClearingRF(pVCpu);
2169}
2170
2171
2172/**
2173 * Adds an 8-bit signed jump offset to RIP from 64-bit code.
2174 *
2175 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2176 * segment limit.
2177 *
2178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2179 * @param cbInstr Instruction size.
2180 * @param offNextInstr The offset of the next instruction.
2181 * @param enmEffOpSize Effective operand size.
2182 */
2183DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2184 IEMMODE enmEffOpSize) RT_NOEXCEPT
2185{
2186 Assert(IEM_IS_64BIT_CODE(pVCpu));
2187 Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
2188
2189 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2190 if (enmEffOpSize == IEMMODE_16BIT)
2191 uNewRip &= UINT16_MAX;
2192
2193 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2194 pVCpu->cpum.GstCtx.rip = uNewRip;
2195 else
2196 return iemRaiseGeneralProtectionFault0(pVCpu);
2197
2198#ifndef IEM_WITH_CODE_TLB
2199 iemOpcodeFlushLight(pVCpu, cbInstr);
2200#endif
2201
2202 /*
2203 * Clear RF and finish the instruction (maybe raise #DB).
2204 */
2205 return iemRegFinishClearingRF(pVCpu);
2206}
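
/*
 * Worked example (illustrative): a 2 byte short JMP at RIP 0x1000 with an
 * 8-bit displacement of -4 and 64-bit operand size targets
 * 0x1000 + 2 + (-4) = 0xffe, which is canonical, so RIP becomes 0xffe.
 */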
2207
2208
2209/**
2210 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
2211 * code (never 64-bit).
2212 *
2213 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2214 * segment limit.
2215 *
2216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2217 * @param cbInstr Instruction size.
2218 * @param offNextInstr The offset of the next instruction.
2219 * @param enmEffOpSize Effective operand size.
2220 */
2221DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2222 IEMMODE enmEffOpSize) RT_NOEXCEPT
2223{
2224 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2225 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2226
2227 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2228 if (enmEffOpSize == IEMMODE_16BIT)
2229 uNewEip &= UINT16_MAX;
2230 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2231 pVCpu->cpum.GstCtx.rip = uNewEip;
2232 else
2233 return iemRaiseGeneralProtectionFault0(pVCpu);
2234
2235#ifndef IEM_WITH_CODE_TLB
2236 iemOpcodeFlushLight(pVCpu, cbInstr);
2237#endif
2238
2239 /*
2240 * Clear RF and finish the instruction (maybe raise #DB).
2241 */
2242 return iemRegFinishClearingRF(pVCpu);
2243}
2244
2245
2246/**
2247 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU.
2248 *
2249 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2250 * segment limit.
2251 *
2252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2253 * @param cbInstr Instruction size.
2254 * @param offNextInstr The offset of the next instruction.
2255 */
2256DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2257 int8_t offNextInstr) RT_NOEXCEPT
2258{
2259 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2260
2261 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
2262 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2263 pVCpu->cpum.GstCtx.rip = uNewIp;
2264 else
2265 return iemRaiseGeneralProtectionFault0(pVCpu);
2266
2267#ifndef IEM_WITH_CODE_TLB
2268 iemOpcodeFlushLight(pVCpu, cbInstr);
2269#endif
2270
2271 /*
2272 * Clear RF and finish the instruction (maybe raise #DB).
2273 */
2274 return iemRegFinishClearingRF(pVCpu);
2275}
2276
2277
2278/**
2279 * Adds a 16-bit signed jump offset to RIP from 64-bit code.
2280 *
2281 * @returns Strict VBox status code.
2282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2283 * @param cbInstr Instruction size.
2284 * @param offNextInstr The offset of the next instruction.
2285 */
2286DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2287 int16_t offNextInstr) RT_NOEXCEPT
2288{
2289 Assert(IEM_IS_64BIT_CODE(pVCpu));
2290
2291 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
2292
2293#ifndef IEM_WITH_CODE_TLB
2294 iemOpcodeFlushLight(pVCpu, cbInstr);
2295#endif
2296
2297 /*
2298 * Clear RF and finish the instruction (maybe raise #DB).
2299 */
2300 return iemRegFinishClearingRF(pVCpu);
2301}
2302
2303
2304/**
2305 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code.
2306 *
2307 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2308 * segment limit.
2309 *
2310 * @returns Strict VBox status code.
2311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2312 * @param cbInstr Instruction size.
2313 * @param offNextInstr The offset of the next instruction.
2314 *
2315 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2316 * identical.
2317 */
2318DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2319 int16_t offNextInstr) RT_NOEXCEPT
2320{
2321 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2322
2323 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2324 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2325 pVCpu->cpum.GstCtx.rip = uNewIp;
2326 else
2327 return iemRaiseGeneralProtectionFault0(pVCpu);
2328
2329#ifndef IEM_WITH_CODE_TLB
2330 iemOpcodeFlushLight(pVCpu, cbInstr);
2331#endif
2332
2333 /*
2334 * Clear RF and finish the instruction (maybe raise #DB).
2335 */
2336 return iemRegFinishClearingRF(pVCpu);
2337}
2338
2339
2340/**
2341 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
2342 *
2343 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2344 * segment limit.
2345 *
2346 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
2347 * only alternative for relative jumps in 64-bit code and that is already
2348 * handled in the decoder stage.
2349 *
2350 * @returns Strict VBox status code.
2351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2352 * @param cbInstr Instruction size.
2353 * @param offNextInstr The offset of the next instruction.
2354 */
2355DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2356 int32_t offNextInstr) RT_NOEXCEPT
2357{
2358 Assert(IEM_IS_64BIT_CODE(pVCpu));
2359
2360 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2361 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2362 pVCpu->cpum.GstCtx.rip = uNewRip;
2363 else
2364 return iemRaiseGeneralProtectionFault0(pVCpu);
2365
2366#ifndef IEM_WITH_CODE_TLB
2367 iemOpcodeFlushLight(pVCpu, cbInstr);
2368#endif
2369
2370 /*
2371 * Clear RF and finish the instruction (maybe raise #DB).
2372 */
2373 return iemRegFinishClearingRF(pVCpu);
2374}
2375
2376
2377/**
2378 * Adds a 32-bit signed jump offset to EIP from 16-bit or 32-bit code.
2379 *
2380 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2381 * segment limit.
2382 *
2383 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
2384 * only alternative for relative jumps in 32-bit code and that is already
2385 * handled in the decoder stage.
2386 *
2387 * @returns Strict VBox status code.
2388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2389 * @param cbInstr Instruction size.
2390 * @param offNextInstr The offset of the next instruction.
2391 */
2392DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2393 int32_t offNextInstr) RT_NOEXCEPT
2394{
2395 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2396 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2397
2398 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
2399 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2400 pVCpu->cpum.GstCtx.rip = uNewEip;
2401 else
2402 return iemRaiseGeneralProtectionFault0(pVCpu);
2403
2404#ifndef IEM_WITH_CODE_TLB
2405 iemOpcodeFlushLight(pVCpu, cbInstr);
2406#endif
2407
2408 /*
2409 * Clear RF and finish the instruction (maybe raise #DB).
2410 */
2411 return iemRegFinishClearingRF(pVCpu);
2412}
2413
2414
2415/**
2416 * Extended version of iemFinishInstructionWithFlagsSet that goes with
2417 * iemRegAddToRipAndFinishingClearingRfEx.
2418 *
2419 * See iemFinishInstructionWithFlagsSet() for details.
2420 */
2421static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
2422{
2423 /*
2424 * Raise a #DB.
2425 */
2426 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2427 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2428 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
2429 | (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK) >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2430 /** @todo Do we set all pending \#DB events, or just one? */
2431 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
2432 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2433 pVCpu->cpum.GstCtx.rflags.uBoth));
2434 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
2435 return iemRaiseDebugException(pVCpu);
2436}
2437
2438
2439/**
2440 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
2441 * others potentially updating EFLAGS.TF.
2442 *
2443 * The single step event must be generated using the TF value at the start of
2444 * the instruction, not the new value set by it.
2445 *
2446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2447 * @param cbInstr The number of bytes to add.
2448 * @param fEflOld The EFLAGS at the start of the instruction
2449 * execution.
2450 */
2451DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
2452{
2453 iemRegAddToRip(pVCpu, cbInstr);
2454 if (!(fEflOld & X86_EFL_TF))
2455 return iemRegFinishClearingRF(pVCpu);
2456 return iemFinishInstructionWithTfSet(pVCpu);
2457}
2458
2459
2460#ifndef IEM_WITH_OPAQUE_DECODER_STATE
2461/**
2462 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
2463 *
2464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2465 */
2466DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
2467{
2468 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
2469}
2470#endif
2471
2472
2473/**
2474 * Adds to the stack pointer.
2475 *
2476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2477 * @param cbToAdd The number of bytes to add (8-bit!).
2478 */
2479DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
2480{
2481 if (IEM_IS_64BIT_CODE(pVCpu))
2482 pVCpu->cpum.GstCtx.rsp += cbToAdd;
2483 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2484 pVCpu->cpum.GstCtx.esp += cbToAdd;
2485 else
2486 pVCpu->cpum.GstCtx.sp += cbToAdd;
2487}
2488
2489
2490/**
2491 * Subtracts from the stack pointer.
2492 *
2493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2494 * @param cbToSub The number of bytes to subtract (8-bit!).
2495 */
2496DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
2497{
2498 if (IEM_IS_64BIT_CODE(pVCpu))
2499 pVCpu->cpum.GstCtx.rsp -= cbToSub;
2500 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2501 pVCpu->cpum.GstCtx.esp -= cbToSub;
2502 else
2503 pVCpu->cpum.GstCtx.sp -= cbToSub;
2504}
2505
2506
2507/**
2508 * Adds to the temporary stack pointer.
2509 *
2510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2511 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2512 * @param cbToAdd The number of bytes to add (16-bit).
2513 */
2514DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
2515{
2516 if (IEM_IS_64BIT_CODE(pVCpu))
2517 pTmpRsp->u += cbToAdd;
2518 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2519 pTmpRsp->DWords.dw0 += cbToAdd;
2520 else
2521 pTmpRsp->Words.w0 += cbToAdd;
2522}
2523
2524
2525/**
2526 * Subtracts from the temporary stack pointer.
2527 *
2528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2529 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2530 * @param cbToSub The number of bytes to subtract.
2531 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
2532 * expecting that.
2533 */
2534DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
2535{
2536 if (IEM_IS_64BIT_CODE(pVCpu))
2537 pTmpRsp->u -= cbToSub;
2538 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2539 pTmpRsp->DWords.dw0 -= cbToSub;
2540 else
2541 pTmpRsp->Words.w0 -= cbToSub;
2542}
2543
2544
2545/**
2546 * Calculates the effective stack address for a push of the specified size as
2547 * well as the new RSP value (upper bits may be masked).
2548 *
2549 * @returns Effective stack address for the push.
2550 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2551 * @param cbItem The size of the stack item to push.
2552 * @param puNewRsp Where to return the new RSP value.
2553 */
2554DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
2555{
2556 RTUINT64U uTmpRsp;
2557 RTGCPTR GCPtrTop;
2558 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2559
2560 if (IEM_IS_64BIT_CODE(pVCpu))
2561 GCPtrTop = uTmpRsp.u -= cbItem;
2562 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2563 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
2564 else
2565 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
2566 *puNewRsp = uTmpRsp.u;
2567 return GCPtrTop;
2568}
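
/*
 * Illustrative example: pushing an 8 byte item in 64-bit code with RSP=0x7000
 * returns 0x6ff8 as both the effective address and the new RSP; callers
 * typically only commit *puNewRsp to the guest context once the store to the
 * returned address has succeeded:
 * @code
 *      uint64_t uNewRsp;
 *      RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);   // 0x6ff8
 *      // ... write the item at GCPtrTop, then: pVCpu->cpum.GstCtx.rsp = uNewRsp;
 * @endcode
 */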
2569
2570
2571/**
2572 * Gets the current stack pointer and calculates the value after a pop of the
2573 * specified size.
2574 *
2575 * @returns Current stack pointer.
2576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2577 * @param cbItem The size of the stack item to pop.
2578 * @param puNewRsp Where to return the new RSP value.
2579 */
2580DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
2581{
2582 RTUINT64U uTmpRsp;
2583 RTGCPTR GCPtrTop;
2584 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
2585
2586 if (IEM_IS_64BIT_CODE(pVCpu))
2587 {
2588 GCPtrTop = uTmpRsp.u;
2589 uTmpRsp.u += cbItem;
2590 }
2591 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2592 {
2593 GCPtrTop = uTmpRsp.DWords.dw0;
2594 uTmpRsp.DWords.dw0 += cbItem;
2595 }
2596 else
2597 {
2598 GCPtrTop = uTmpRsp.Words.w0;
2599 uTmpRsp.Words.w0 += cbItem;
2600 }
2601 *puNewRsp = uTmpRsp.u;
2602 return GCPtrTop;
2603}
2604
2605
2606/**
2607 * Calculates the effective stack address for a push of the specified size as
2608 * well as the new temporary RSP value (upper bits may be masked).
2609 *
2610 * @returns Effective stack address for the push.
2611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2612 * @param pTmpRsp The temporary stack pointer. This is updated.
2613 * @param cbItem The size of the stack item to push.
2614 */
2615DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2616{
2617 RTGCPTR GCPtrTop;
2618
2619 if (IEM_IS_64BIT_CODE(pVCpu))
2620 GCPtrTop = pTmpRsp->u -= cbItem;
2621 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2622 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
2623 else
2624 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
2625 return GCPtrTop;
2626}
2627
2628
2629/**
2630 * Gets the effective stack address for a pop of the specified size and
2631 * calculates and updates the temporary RSP.
2632 *
2633 * @returns Current stack pointer.
2634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2635 * @param pTmpRsp The temporary stack pointer. This is updated.
2636 * @param cbItem The size of the stack item to pop.
2637 */
2638DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
2639{
2640 RTGCPTR GCPtrTop;
2641 if (IEM_IS_64BIT_CODE(pVCpu))
2642 {
2643 GCPtrTop = pTmpRsp->u;
2644 pTmpRsp->u += cbItem;
2645 }
2646 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2647 {
2648 GCPtrTop = pTmpRsp->DWords.dw0;
2649 pTmpRsp->DWords.dw0 += cbItem;
2650 }
2651 else
2652 {
2653 GCPtrTop = pTmpRsp->Words.w0;
2654 pTmpRsp->Words.w0 += cbItem;
2655 }
2656 return GCPtrTop;
2657}
2658
2659/** @} */
2660
2661
2662/** @name FPU access and helpers.
2663 *
2664 * @{
2665 */
2666
2667
2668/**
2669 * Hook for preparing to use the host FPU.
2670 *
2671 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2672 *
2673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2674 */
2675DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
2676{
2677#ifdef IN_RING3
2678 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2679#else
2680 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
2681#endif
2682 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2683}
2684
2685
2686/**
2687 * Hook for preparing to use the host FPU for SSE.
2688 *
2689 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2690 *
2691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2692 */
2693DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
2694{
2695 iemFpuPrepareUsage(pVCpu);
2696}
2697
2698
2699/**
2700 * Hook for preparing to use the host FPU for AVX.
2701 *
2702 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2703 *
2704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2705 */
2706DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
2707{
2708 iemFpuPrepareUsage(pVCpu);
2709}
2710
2711
2712/**
2713 * Hook for actualizing the guest FPU state before the interpreter reads it.
2714 *
2715 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2716 *
2717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2718 */
2719DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2720{
2721#ifdef IN_RING3
2722 NOREF(pVCpu);
2723#else
2724 CPUMRZFpuStateActualizeForRead(pVCpu);
2725#endif
2726 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2727}
2728
2729
2730/**
2731 * Hook for actualizing the guest FPU state before the interpreter changes it.
2732 *
2733 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2734 *
2735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2736 */
2737DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2738{
2739#ifdef IN_RING3
2740 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2741#else
2742 CPUMRZFpuStateActualizeForChange(pVCpu);
2743#endif
2744 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2745}
2746
2747
2748/**
2749 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
2750 * only.
2751 *
2752 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2753 *
2754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2755 */
2756DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2757{
2758#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2759 NOREF(pVCpu);
2760#else
2761 CPUMRZFpuStateActualizeSseForRead(pVCpu);
2762#endif
2763 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2764}
2765
2766
2767/**
2768 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
2769 * read+write.
2770 *
2771 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2772 *
2773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2774 */
2775DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2776{
2777#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
2778 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2779#else
2780 CPUMRZFpuStateActualizeForChange(pVCpu);
2781#endif
2782 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2783
2784 /* Make sure any changes are loaded the next time around. */
2785 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
2786}
2787
2788
2789/**
2790 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
2791 * only.
2792 *
2793 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2794 *
2795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2796 */
2797DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
2798{
2799#ifdef IN_RING3
2800 NOREF(pVCpu);
2801#else
2802 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
2803#endif
2804 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2805}
2806
2807
2808/**
2809 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
2810 * read+write.
2811 *
2812 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
2813 *
2814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2815 */
2816DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
2817{
2818#ifdef IN_RING3
2819 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
2820#else
2821 CPUMRZFpuStateActualizeForChange(pVCpu);
2822#endif
2823 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
2824
2825 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
2826 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
2827}
2828
2829
2830/**
2831 * Stores a QNaN value into a FPU register.
2832 *
2833 * @param pReg Pointer to the register.
2834 */
2835DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
2836{
2837 pReg->au32[0] = UINT32_C(0x00000000);
2838 pReg->au32[1] = UINT32_C(0xc0000000);
2839 pReg->au16[4] = UINT16_C(0xffff);
2840}
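
/*
 * Note: the pattern written above is the "real indefinite" QNaN (sign bit set,
 * exponent all ones, only the two top mantissa bits set), which is the value
 * the FPU itself produces for masked invalid-operation responses.
 */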
2841
2842
2843/**
2844 * Updates the FOP, FPU.CS and FPUIP registers, extended version.
2845 *
2846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2847 * @param pFpuCtx The FPU context.
2848 * @param uFpuOpcode The FPU opcode value (see IEMCPU::uFpuOpcode).
2849 */
2850DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorkerEx(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint16_t uFpuOpcode) RT_NOEXCEPT
2851{
2852 Assert(uFpuOpcode != UINT16_MAX);
2853 pFpuCtx->FOP = uFpuOpcode;
2854 /** @todo x87.CS and FPUIP need to be kept separately. */
2855 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2856 {
2857 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
2858 * happens in real mode here based on the fnsave and fnstenv images. */
2859 pFpuCtx->CS = 0;
2860 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
2861 }
2862 else if (!IEM_IS_LONG_MODE(pVCpu))
2863 {
2864 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
2865 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2866 }
2867 else
2868 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
2869}
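
/*
 * Illustrative example of the real/V86 mode encoding above: with CS=0x1234
 * and EIP=0x0010, the saved FPUIP becomes 0x0010 | (0x1234 << 4) = 0x12350,
 * i.e. the 20-bit linear address, while the FPU CS field is left zero.
 */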
2870
2871
2872/**
2873 * Marks the specified stack register as free (for FFREE).
2874 *
2875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2876 * @param iStReg The register to free.
2877 */
2878DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2879{
2880 Assert(iStReg < 8);
2881 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2882 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2883 pFpuCtx->FTW &= ~RT_BIT(iReg);
2884}
2885
2886
2887/**
2888 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
2889 *
2890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2891 */
2892DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2893{
2894 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2895 uint16_t uFsw = pFpuCtx->FSW;
2896 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2897 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2898 uFsw &= ~X86_FSW_TOP_MASK;
2899 uFsw |= uTop;
2900 pFpuCtx->FSW = uFsw;
2901}
2902
2903
2904/**
2905 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
2906 *
2907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2908 */
2909DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
2910{
2911 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2912 uint16_t uFsw = pFpuCtx->FSW;
2913 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
2914 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
2915 uFsw &= ~X86_FSW_TOP_MASK;
2916 uFsw |= uTop;
2917 pFpuCtx->FSW = uFsw;
2918}
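
/*
 * Note on the modulo-8 TOP arithmetic used by the two helpers above: adding 7
 * to the 3-bit TOP field is the same as subtracting 1, so a "push" from TOP=0
 * wraps around to TOP=7 and a "pop" from TOP=7 wraps back to TOP=0.
 * @code
 *      // TOP = 0: iemFpuStackDecTop() -> TOP = (0 + 7) & 7 = 7
 *      // TOP = 7: iemFpuStackIncTop() -> TOP = (7 + 1) & 7 = 0
 * @endcode
 */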
2919
2920
2921
2922
2923DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
2924{
2925 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2926 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2927 if (pFpuCtx->FTW & RT_BIT(iReg))
2928 return VINF_SUCCESS;
2929 return VERR_NOT_FOUND;
2930}
2931
2932
2933DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
2934{
2935 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2936 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
2937 if (pFpuCtx->FTW & RT_BIT(iReg))
2938 {
2939 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
2940 return VINF_SUCCESS;
2941 }
2942 return VERR_NOT_FOUND;
2943}
2944
2945
2946DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
2947 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
2948{
2949 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2950 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2951 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2952 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2953 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2954 {
2955 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2956 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
2957 return VINF_SUCCESS;
2958 }
2959 return VERR_NOT_FOUND;
2960}
2961
2962
2963DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
2964{
2965 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
2966 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2967 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
2968 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
2969 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
2970 {
2971 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
2972 return VINF_SUCCESS;
2973 }
2974 return VERR_NOT_FOUND;
2975}
2976
2977
2978/**
2979 * Rotates the stack registers when setting new TOS.
2980 *
2981 * @param pFpuCtx The FPU context.
2982 * @param iNewTop New TOS value.
2983 * @remarks We only do this to speed up fxsave/fxrstor which
2984 * arrange the FP registers in stack order.
2985 * MUST be done before writing the new TOS (FSW).
2986 */
2987DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
2988{
2989 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
2990 RTFLOAT80U ar80Temp[8];
2991
2992 if (iOldTop == iNewTop)
2993 return;
2994
2995 /* Unscrew the stack and get it into 'native' order. */
2996 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
2997 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
2998 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
2999 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
3000 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
3001 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
3002 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
3003 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
3004
3005 /* Now rotate the stack to the new position. */
3006 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
3007 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
3008 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
3009 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
3010 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
3011 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
3012 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
3013 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
3014}
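
/*
 * Illustrative example: the physical register contents are preserved, only
 * aRegs[] is reshuffled so that aRegs[i] again corresponds to ST(i) under the
 * new TOP. E.g. changing TOP from 2 to 0 moves the value that was aRegs[0]
 * (old ST(0), physical register 2) to aRegs[2] (new ST(2)).
 */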
3015
3016
3017/**
3018 * Updates the FPU exception status after FCW is changed.
3019 *
3020 * @param pFpuCtx The FPU context.
3021 */
3022DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
3023{
3024 uint16_t u16Fsw = pFpuCtx->FSW;
3025 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
3026 u16Fsw |= X86_FSW_ES | X86_FSW_B;
3027 else
3028 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
3029 pFpuCtx->FSW = u16Fsw;
3030}
3031
3032
3033/**
3034 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
3035 *
3036 * @returns The full FTW.
3037 * @param pFpuCtx The FPU context.
3038 */
3039DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
3040{
3041 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
3042 uint16_t u16Ftw = 0;
3043 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
3044 for (unsigned iSt = 0; iSt < 8; iSt++)
3045 {
3046 unsigned const iReg = (iSt + iTop) & 7;
3047 if (!(u8Ftw & RT_BIT(iReg)))
3048 u16Ftw |= 3 << (iReg * 2); /* empty */
3049 else
3050 {
3051 uint16_t uTag;
3052 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
3053 if (pr80Reg->s.uExponent == 0x7fff)
3054 uTag = 2; /* Exponent is all 1's => Special. */
3055 else if (pr80Reg->s.uExponent == 0x0000)
3056 {
3057 if (pr80Reg->s.uMantissa == 0x0000)
3058 uTag = 1; /* All bits are zero => Zero. */
3059 else
3060 uTag = 2; /* Must be special. */
3061 }
3062 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
3063 uTag = 0; /* Valid. */
3064 else
3065 uTag = 2; /* Must be special. */
3066
3067 u16Ftw |= uTag << (iReg * 2);
3068 }
3069 }
3070
3071 return u16Ftw;
3072}
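
/*
 * Worked example (illustrative): with TOP=7 and only ST(0) in use holding 1.0
 * (i.e. physical register 7 valid, all others empty), the expanded tag word is
 * 0x3fff: registers 0..6 are tagged 11b (empty) and register 7 is tagged 00b
 * (valid).
 */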
3073
3074
3075/**
3076 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
3077 *
3078 * @returns The compressed FTW.
3079 * @param u16FullFtw The full FTW to convert.
3080 */
3081DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
3082{
3083 uint8_t u8Ftw = 0;
3084 for (unsigned i = 0; i < 8; i++)
3085 {
3086 if ((u16FullFtw & 3) != 3 /*empty*/)
3087 u8Ftw |= RT_BIT(i);
3088 u16FullFtw >>= 2;
3089 }
3090
3091 return u8Ftw;
3092}
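
/*
 * Illustrative example, continuing the one above: compressing 0x3fff yields
 * 0x80, i.e. only physical register 7 is marked as occupied in the abridged
 * tag word.
 */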
3093
3094/** @} */
3095
3096
3097/** @name Memory access.
3098 *
3099 * @{
3100 */
3101
3102
3103/**
3104 * Checks whether alignment checks are enabled or not.
3105 *
3106 * @returns true if enabled, false if not.
3107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3108 */
3109DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
3110{
3111 AssertCompile(X86_CR0_AM == X86_EFL_AC);
3112 return IEM_GET_CPL(pVCpu) == 3
3113 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
3114}
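
/*
 * Note on the bit trick above: X86_CR0_AM and X86_EFL_AC are both bit 18
 * (hence the AssertCompile), so a single AND of CR0 with EFLAGS leaves the
 * bit set only when both the alignment-check mask and flag are set:
 * @code
 *      // alignment checks are active iff CPL == 3 && CR0.AM && EFLAGS.AC
 * @endcode
 */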
3115
3116/**
3117 * Checks if the given segment can be written to, raise the appropriate
3118 * exception if not.
3119 *
3120 * @returns VBox strict status code.
3121 *
3122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3123 * @param pHid Pointer to the hidden register.
3124 * @param iSegReg The register number.
3125 * @param pu64BaseAddr Where to return the base address to use for the
3126 * segment. (In 64-bit code it may differ from the
3127 * base in the hidden segment.)
3128 */
3129DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
3130 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
3131{
3132 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3133
3134 if (IEM_IS_64BIT_CODE(pVCpu))
3135 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
3136 else
3137 {
3138 if (!pHid->Attr.n.u1Present)
3139 {
3140 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
3141 AssertRelease(uSel == 0);
3142 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
3143 return iemRaiseGeneralProtectionFault0(pVCpu);
3144 }
3145
3146 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
3147 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3148 && !IEM_IS_64BIT_CODE(pVCpu) )
3149 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3150 *pu64BaseAddr = pHid->u64Base;
3151 }
3152 return VINF_SUCCESS;
3153}
3154
3155
3156/**
3157 * Checks if the given segment can be read from, raise the appropriate
3158 * exception if not.
3159 *
3160 * @returns VBox strict status code.
3161 *
3162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3163 * @param pHid Pointer to the hidden register.
3164 * @param iSegReg The register number.
3165 * @param pu64BaseAddr Where to return the base address to use for the
3166 * segment. (In 64-bit code it may differ from the
3167 * base in the hidden segment.)
3168 */
3169DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
3170 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
3171{
3172 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3173
3174 if (IEM_IS_64BIT_CODE(pVCpu))
3175 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
3176 else
3177 {
3178 if (!pHid->Attr.n.u1Present)
3179 {
3180 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
3181 AssertRelease(uSel == 0);
3182 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
3183 return iemRaiseGeneralProtectionFault0(pVCpu);
3184 }
3185
3186 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3187 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3188 *pu64BaseAddr = pHid->u64Base;
3189 }
3190 return VINF_SUCCESS;
3191}
3192
3193
3194/**
3195 * Maps a physical page.
3196 *
3197 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
3198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3199 * @param GCPhysMem The physical address.
3200 * @param fAccess The intended access.
3201 * @param ppvMem Where to return the mapping address.
3202 * @param pLock The PGM lock.
3203 */
3204DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
3205 void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
3206{
3207#ifdef IEM_LOG_MEMORY_WRITES
3208 if (fAccess & IEM_ACCESS_TYPE_WRITE)
3209 return VERR_PGM_PHYS_TLB_CATCH_ALL;
3210#endif
3211
3212 /** @todo This API may require some improving later. A private deal with PGM
3213 * regarding locking and unlocking needs to be struck. A couple of TLBs
3214 * living in PGM, but with publicly accessible inlined access methods
3215 * could perhaps be an even better solution. */
3216 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
3217 GCPhysMem,
3218 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
3219 RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
3220 ppvMem,
3221 pLock);
3222 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
3223 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3224
3225 return rc;
3226}
3227
3228
3229/**
3230 * Unmap a page previously mapped by iemMemPageMap.
3231 *
3232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3233 * @param GCPhysMem The physical address.
3234 * @param fAccess The intended access.
3235 * @param pvMem What iemMemPageMap returned.
3236 * @param pLock The PGM lock.
3237 */
3238DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
3239 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
3240{
3241 NOREF(pVCpu);
3242 NOREF(GCPhysMem);
3243 NOREF(fAccess);
3244 NOREF(pvMem);
3245 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
3246}
3247
3248#ifdef IEM_WITH_SETJMP
3249
3250/** @todo slim this down */
3251DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
3252 size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
3253{
3254 Assert(cbMem >= 1);
3255 Assert(iSegReg < X86_SREG_COUNT);
3256
3257 /*
3258 * 64-bit mode is simpler.
3259 */
3260 if (IEM_IS_64BIT_CODE(pVCpu))
3261 {
3262 if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
3263 {
3264 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3265 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
3266 GCPtrMem += pSel->u64Base;
3267 }
3268
3269 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
3270 return GCPtrMem;
3271 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
3272 }
3273 /*
3274 * 16-bit and 32-bit segmentation.
3275 */
3276 else if (iSegReg != UINT8_MAX)
3277 {
3278 /** @todo Does this apply to segments with 4G-1 limit? */
3279 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3280 if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
3281 {
3282 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3283 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
3284 switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
3285 | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
3286 | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
3287 | X86_SEL_TYPE_CODE))
3288 {
3289 case X86DESCATTR_P: /* readonly data, expand up */
3290 case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
3291 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
3292 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
3293 /* expand up */
3294 if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
3295 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3296 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
3297 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
3298 break;
3299
3300 case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
3301 case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
3302 /* expand down */
3303 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
3304 && ( pSel->Attr.n.u1DefBig
3305 || GCPtrLast32 <= UINT32_C(0xffff)) ))
3306 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3307 Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
3308 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
3309 break;
3310
3311 default:
3312 Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
3313 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3314 break;
3315 }
3316 }
3317 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
3318 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
3319 }
3320 /*
3321 * 32-bit flat address.
3322 */
3323 else
3324 return GCPtrMem;
3325}
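
/*
 * A worked example of the 32-bit limit checks above, using made-up numbers:
 * for a present, expand-up, read-only data segment with u64Base = 0x10000 and
 * u32Limit = 0xFFF, a 4-byte read at offset 0xFFC gives GCPtrLast32 = 0xFFF,
 * which passes the limit check and yields the flat address 0x10FFC:
 *
 * @code
 *      // Hypothetical call; DS is assumed to describe the segment above and
 *      // the function longjmps on any fault.
 *      RTGCPTR const GCPtrFlat = iemMemApplySegmentToReadJmp(pVCpu, X86_SREG_DS, 4, 0xffc);
 *      Assert(GCPtrFlat == UINT32_C(0x10ffc));
 * @endcode
 *
 * The same read at offset 0xFFD gives GCPtrLast32 = 0x1000 > u32Limit and ends
 * up in iemRaiseSelectorBoundsJmp instead; an expand-down segment with the same
 * limit accepts offsets above 0xFFF rather than below it.
 */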
3326
3327
3328/** @todo slim this down */
3329DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
3330 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
3331{
3332 Assert(cbMem >= 1);
3333 Assert(iSegReg < X86_SREG_COUNT);
3334
3335 /*
3336 * 64-bit mode is simpler.
3337 */
3338 if (IEM_IS_64BIT_CODE(pVCpu))
3339 {
3340 if (iSegReg >= X86_SREG_FS)
3341 {
3342 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3343 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
3344 GCPtrMem += pSel->u64Base;
3345 }
3346
3347 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
3348 return GCPtrMem;
3349 }
3350 /*
3351 * 16-bit and 32-bit segmentation.
3352 */
3353 else
3354 {
3355 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
3356 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
3357 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
3358 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
3359 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
3360 {
3361 /* expand up */
3362 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
3363 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
3364 && GCPtrLast32 >= (uint32_t)GCPtrMem))
3365 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3366 }
3367 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
3368 {
3369 /* expand down */
3370 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
3371 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
3372 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3373 && GCPtrLast32 > (uint32_t)GCPtrMem))
3374 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
3375 }
3376 else
3377 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3378 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
3379 }
3380 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
3381}
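
/*
 * In 64-bit code the only check either of these helpers performs is the
 * canonical-address check. As a made-up example, a 2-byte access starting at
 * 0x00007FFFFFFFFFFF has its last byte at 0x0000800000000000, which fails
 * X86_IS_CANONICAL and lands in iemRaiseGeneralProtectionFault0Jmp:
 *
 * @code
 *      // Hypothetical: flat DS in 64-bit mode; this call does not return, it
 *      // longjmps with #GP(0).
 *      iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_DS, 2, UINT64_C(0x00007fffffffffff));
 * @endcode
 */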
3382
3383#endif /* IEM_WITH_SETJMP */
3384
3385/**
3386 * Fakes a long mode stack selector for SS = 0.
3387 *
3388 * @param pDescSs Where to return the fake stack descriptor.
3389 * @param uDpl The DPL we want.
3390 */
3391DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
3392{
3393 pDescSs->Long.au64[0] = 0;
3394 pDescSs->Long.au64[1] = 0;
3395 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3396 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
3397 pDescSs->Long.Gen.u2Dpl = uDpl;
3398 pDescSs->Long.Gen.u1Present = 1;
3399 pDescSs->Long.Gen.u1Long = 1;
3400}
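
/*
 * For uDpl = 0 the descriptor built above decodes as a present, accessed,
 * read/write data segment with L=1 and base/limit zero. Working the bit
 * positions out by hand (type at bit 40, S at 44, DPL at 45-46, P at 47,
 * L at 53; an illustrative calculation, not a value quoted from elsewhere),
 * the raw low qword comes to 0x0020930000000000, or 0x0020F30000000000 for
 * uDpl = 3.
 */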
3401
3402/** @} */
3403
3404
3405#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3406
3407/**
3408 * Gets CR0 fixed-0 bits in VMX operation.
3409 *
3410 * We do this rather than fetching what we report to the guest (in the
3411 * IA32_VMX_CR0_FIXED0 MSR) because real hardware (and we do the same) reports the
3412 * same values regardless of whether the unrestricted-guest feature is available on the CPU.
3413 *
3414 * @returns CR0 fixed-0 bits.
3415 * @param pVCpu The cross context virtual CPU structure.
3416 * @param fVmxNonRootMode Whether the CR0 fixed-0 bits for VMX non-root mode
3417 * must be returned. When @c false, the CR0 fixed-0
3418 * bits for VMX root mode are returned.
3419 *
3420 */
3421DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu, bool fVmxNonRootMode) RT_NOEXCEPT
3422{
3423 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
3424
3425 PCVMXMSRS pMsrs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs;
3426 if ( fVmxNonRootMode
3427 && (pMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST))
3428 return VMX_V_CR0_FIXED0_UX;
3429 return VMX_V_CR0_FIXED0;
3430}
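
/*
 * A minimal sketch of how such a mask is typically consumed when validating a
 * CR0 value for VMX (uGuestCr0 is a made-up local here): every bit set in the
 * fixed-0 mask must also be set in CR0.
 *
 * @code
 *      // fVmxNonRootMode = true: checking CR0 for the nested guest.
 *      uint64_t const fCr0Fixed0 = iemVmxGetCr0Fixed0(pVCpu, true);
 *      bool     const fCr0Valid  = (uGuestCr0 & fCr0Fixed0) == fCr0Fixed0;
 * @endcode
 */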
3431
3432
3433/**
3434 * Sets virtual-APIC write emulation as pending.
3435 *
3436 * @param pVCpu The cross context virtual CPU structure.
3437 * @param offApic The offset in the virtual-APIC page that was written.
3438 */
3439DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
3440{
3441 Assert(offApic < XAPIC_OFF_END + 4);
3442
3443 /*
3444 * Record the currently updated APIC offset, as we need this later for figuring
3445 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
3446 * supplying the exit qualification when causing an APIC-write VM-exit.
3447 */
3448 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
3449
3450 /*
3451 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
3452 * virtualization or APIC-write emulation).
3453 */
3454 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3455 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
3456}
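
/*
 * A call-site sketch: after virtualizing a guest write to, say, the TPR field
 * of the virtual-APIC page, the offset is recorded so the pending force-flag
 * gets processed (TPR virtualization or an APIC-write VM-exit) before guest
 * execution resumes. XAPIC_OFF_TPR is the architectural xAPIC register offset:
 *
 * @code
 *      iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR);
 * @endcode
 */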
3457
3458#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3459
3460#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */