VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFMem.cpp

Last change on this file was 99051, checked in by vboxsync, 14 months ago

VMM: More ARMv8 x86/amd64 separation work, VBoxVMMArm compiles and links now, bugref:10385

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.6 KB
Line 
1/* $Id: DBGFMem.cpp 99051 2023-03-19 16:40:06Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Memory Methods.
4 */
5
6/*
7 * Copyright (C) 2007-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DBGF
33#include <VBox/vmm/dbgf.h>
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/selm.h>
36#include <VBox/vmm/hm.h>
37#include "DBGFInternal.h"
38#include <VBox/vmm/vm.h>
39#include <VBox/vmm/uvm.h>
40#include <VBox/err.h>
41#include <VBox/log.h>
42#include <VBox/vmm/mm.h>
43
44
45
46/**
47 * Scan guest memory for an exact byte string.
48 *
49 * @returns VBox status code.
50 * @param pUVM The user mode VM handle.
51 * @param idCpu The ID of the CPU context to search in.
52 * @param pAddress Where to store the mixed address.
53 * @param puAlign The alignment restriction imposed on the search result.
54 * @param pcbRange The number of bytes to scan. Passed as a pointer because
55 * it may be 64-bit.
56 * @param pabNeedle What to search for - exact search.
57 * @param cbNeedle Size of the search byte string.
58 * @param pHitAddress Where to put the address of the first hit.
59 */
60static DECLCALLBACK(int) dbgfR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, PCRTGCUINTPTR pcbRange,
61 RTGCUINTPTR *puAlign, const uint8_t *pabNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
62{
63 PVM pVM = pUVM->pVM;
64 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
65 Assert(idCpu == VMMGetCpuId(pVM));
66
67 /*
68 * Validate the input we use, PGM does the rest.
69 */
70 RTGCUINTPTR cbRange = *pcbRange;
71 if (!DBGFR3AddrIsValid(pUVM, pAddress))
72 return VERR_INVALID_POINTER;
73 if (!RT_VALID_PTR(pHitAddress))
74 return VERR_INVALID_POINTER;
75
76 /*
77 * Select DBGF worker by addressing mode.
78 */
79 int rc;
80 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
81 PGMMODE enmMode = PGMGetGuestMode(pVCpu);
82 if ( enmMode == PGMMODE_REAL
83 || enmMode == PGMMODE_PROTECTED
84 || DBGFADDRESS_IS_PHYS(pAddress)
85 )
86 {
87 RTGCPHYS GCPhysAlign = *puAlign;
88 if (GCPhysAlign != *puAlign)
89 return VERR_OUT_OF_RANGE;
90 RTGCPHYS PhysHit;
91 rc = PGMR3DbgScanPhysical(pVM, pAddress->FlatPtr, cbRange, GCPhysAlign, pabNeedle, cbNeedle, &PhysHit);
92 if (RT_SUCCESS(rc))
93 DBGFR3AddrFromPhys(pUVM, pHitAddress, PhysHit);
94 }
95 else
96 {
97#if GC_ARCH_BITS > 32
98 if ( ( pAddress->FlatPtr >= _4G
99 || pAddress->FlatPtr + cbRange > _4G)
100 && enmMode != PGMMODE_AMD64
101 && enmMode != PGMMODE_AMD64_NX)
102 return VERR_DBGF_MEM_NOT_FOUND;
103#endif
104 RTGCUINTPTR GCPtrHit;
105 rc = PGMR3DbgScanVirtual(pVM, pVCpu, pAddress->FlatPtr, cbRange, *puAlign, pabNeedle, cbNeedle, &GCPtrHit);
106 if (RT_SUCCESS(rc))
107 DBGFR3AddrFromFlat(pUVM, pHitAddress, GCPtrHit);
108 }
109
110 return rc;
111}
112
113
/**
 * Scan guest memory for an exact byte string.
 *
 * @returns VBox status codes:
 * @retval  VINF_SUCCESS and *pHitAddress on success.
 * @retval  VERR_DBGF_MEM_NOT_FOUND if not found.
 * @retval  VERR_INVALID_POINTER if any of the pointer arguments are invalid.
 * @retval  VERR_INVALID_ARGUMENT if any other arguments are invalid.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the CPU context to search in.
 * @param   pAddress    Where to start searching.
 * @param   cbRange     The number of bytes to scan.
 * @param   uAlign      The alignment restriction imposed on the result.
 *                      Usually set to 1.
 * @param   pvNeedle    What to search for - exact search.
 * @param   cbNeedle    Size of the search byte string.
 * @param   pHitAddress Where to put the address of the first hit.
 *
 * @thread  Any thread.
 */
VMMR3DECL(int) DBGFR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, RTGCUINTPTR cbRange, RTGCUINTPTR uAlign,
                             const void *pvNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    /* The scan runs on the EMT of the given CPU.  cbRange and uAlign are
       passed by address because they may be 64-bit (see worker docs). */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemScan, 8,
                                    pUVM, idCpu, pAddress, &cbRange, &uAlign, pvNeedle, cbNeedle, pHitAddress);

}
144
145
146/**
147 * Read guest memory.
148 *
149 * @returns VBox status code.
150 * @param pUVM The user mode VM handle.
151 * @param idCpu The ID of the CPU context to read memory from.
152 * @param pAddress Where to start reading.
153 * @param pvBuf Where to store the data we've read.
154 * @param cbRead The number of bytes to read.
155 */
156static DECLCALLBACK(int) dbgfR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
157{
158 PVM pVM = pUVM->pVM;
159 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
160 Assert(idCpu == VMMGetCpuId(pVM));
161
162 /*
163 * Validate the input we use, PGM does the rest.
164 */
165 if (!DBGFR3AddrIsValid(pUVM, pAddress))
166 return VERR_INVALID_POINTER;
167 if (!RT_VALID_PTR(pvBuf))
168 return VERR_INVALID_POINTER;
169
170 /*
171 * Select PGM worker by addressing mode.
172 */
173 int rc;
174 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
175 PGMMODE enmMode = PGMGetGuestMode(pVCpu);
176 if ( enmMode == PGMMODE_REAL
177 || enmMode == PGMMODE_PROTECTED
178 || DBGFADDRESS_IS_PHYS(pAddress) )
179 rc = PGMPhysSimpleReadGCPhys(pVM, pvBuf, pAddress->FlatPtr, cbRead);
180 else
181 {
182#if GC_ARCH_BITS > 32
183 if ( ( pAddress->FlatPtr >= _4G
184 || pAddress->FlatPtr + cbRead > _4G)
185 && enmMode != PGMMODE_AMD64
186 && enmMode != PGMMODE_AMD64_NX)
187 return VERR_PAGE_TABLE_NOT_PRESENT;
188#endif
189 rc = PGMPhysSimpleReadGCPtr(pVCpu, pvBuf, pAddress->FlatPtr, cbRead);
190 }
191 return rc;
192}
193
194
/**
 * Read guest memory.
 *
 * @returns VBox status code.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the source CPU context (for the address).
 * @param   pAddress    Where to start reading.
 * @param   pvBuf       Where to store the data we've read.
 * @param   cbRead      The number of bytes to read.
 */
VMMR3DECL(int) DBGFR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);

    /* Ring-0 stack addresses are serviced directly via VMM and never go
       through the guest paging structures. */
    if ((pAddress->fFlags & DBGFADDRESS_FLAGS_TYPE_MASK) == DBGFADDRESS_FLAGS_RING0)
    {
        /* The flat address must fit into a host ring-0 pointer. */
        AssertCompile(sizeof(RTHCUINTPTR) <= sizeof(pAddress->FlatPtr));
        VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
        return VMMR3ReadR0Stack(pUVM->pVM, idCpu, (RTHCUINTPTR)pAddress->FlatPtr, pvBuf, cbRead);
    }
    /* Guest addresses are resolved on the EMT of the given CPU. */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemRead, 5, pUVM, idCpu, pAddress, pvBuf, cbRead);
}
219
220
221/**
222 * Read a zero terminated string from guest memory.
223 *
224 * @returns VBox status code.
225 *
226 * @param pUVM The user mode VM handle.
227 * @param idCpu The ID of the source CPU context (for the address).
228 * @param pAddress Where to start reading.
229 * @param pszBuf Where to store the string.
230 * @param cchBuf The size of the buffer.
231 */
232static DECLCALLBACK(int) dbgfR3MemReadString(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
233{
234 /*
235 * Validate the input we use, PGM does the rest.
236 */
237 if (!DBGFR3AddrIsValid(pUVM, pAddress))
238 return VERR_INVALID_POINTER;
239 if (!RT_VALID_PTR(pszBuf))
240 return VERR_INVALID_POINTER;
241
242 /*
243 * Let dbgfR3MemRead do the job.
244 */
245 int rc = dbgfR3MemRead(pUVM, idCpu, pAddress, pszBuf, cchBuf);
246
247 /*
248 * Make sure the result is terminated and that overflow is signaled.
249 * This may look a bit reckless with the rc but, it should be fine.
250 */
251 if (!RTStrEnd(pszBuf, cchBuf))
252 {
253 pszBuf[cchBuf - 1] = '\0';
254 rc = VINF_BUFFER_OVERFLOW;
255 }
256 /*
257 * Handle partial reads (not perfect).
258 */
259 else if (RT_FAILURE(rc))
260 {
261 if (pszBuf[0])
262 rc = VINF_SUCCESS;
263 }
264
265 return rc;
266}
267
268
269/**
270 * Read a zero terminated string from guest memory.
271 *
272 * @returns VBox status code.
273 *
274 * @param pUVM The user mode VM handle.
275 * @param idCpu The ID of the source CPU context (for the address).
276 * @param pAddress Where to start reading.
277 * @param pszBuf Where to store the string.
278 * @param cchBuf The size of the buffer.
279 */
280VMMR3DECL(int) DBGFR3MemReadString(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
281{
282 /*
283 * Validate and zero output.
284 */
285 if (!RT_VALID_PTR(pszBuf))
286 return VERR_INVALID_POINTER;
287 if (cchBuf <= 0)
288 return VERR_INVALID_PARAMETER;
289 memset(pszBuf, 0, cchBuf);
290 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
291 AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
292
293 /*
294 * Pass it on to the EMT.
295 */
296 return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemReadString, 5, pUVM, idCpu, pAddress, pszBuf, cchBuf);
297}
298
299
300/**
301 * Writes guest memory.
302 *
303 * @returns VBox status code.
304 *
305 * @param pUVM The user mode VM handle.
306 * @param idCpu The ID of the target CPU context (for the address).
307 * @param pAddress Where to start writing.
308 * @param pvBuf The data to write.
309 * @param cbWrite The number of bytes to write.
310 */
311static DECLCALLBACK(int) dbgfR3MemWrite(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
312{
313 /*
314 * Validate the input we use, PGM does the rest.
315 */
316 if (!DBGFR3AddrIsValid(pUVM, pAddress))
317 return VERR_INVALID_POINTER;
318 if (!RT_VALID_PTR(pvBuf))
319 return VERR_INVALID_POINTER;
320 PVM pVM = pUVM->pVM;
321 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
322
323 /*
324 * Select PGM function by addressing mode.
325 */
326 int rc;
327 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
328 PGMMODE enmMode = PGMGetGuestMode(pVCpu);
329 if ( enmMode == PGMMODE_REAL
330 || enmMode == PGMMODE_PROTECTED
331 || DBGFADDRESS_IS_PHYS(pAddress) )
332 rc = PGMPhysSimpleWriteGCPhys(pVM, pAddress->FlatPtr, pvBuf, cbWrite);
333 else
334 {
335#if GC_ARCH_BITS > 32
336 if ( ( pAddress->FlatPtr >= _4G
337 || pAddress->FlatPtr + cbWrite > _4G)
338 && enmMode != PGMMODE_AMD64
339 && enmMode != PGMMODE_AMD64_NX)
340 return VERR_PAGE_TABLE_NOT_PRESENT;
341#endif
342 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pAddress->FlatPtr, pvBuf, cbWrite);
343 }
344 return rc;
345}
346
347
/**
 * Writes guest memory.
 *
 * @returns VBox status code.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the target CPU context (for the address).
 * @param   pAddress    Where to start writing.
 * @param   pvBuf       The data to write.
 * @param   cbWrite     The number of bytes to write.
 */
VMMR3DECL(int) DBGFR3MemWrite(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    /* The actual writing is done on the EMT of the given CPU. */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemWrite, 5, pUVM, idCpu, pAddress, pvBuf, cbWrite);
}
365
366
#if !defined(VBOX_VMM_TARGET_ARMV8)
/**
 * Worker for DBGFR3SelQueryInfo that calls into SELM.
 *
 * Runs on the EMT of the target CPU (dispatched from DBGFR3SelQueryInfo via
 * VMR3ReqPriorityCallWaitU).
 *
 * @returns VBox status code from SELMR3GetSelectorInfo, possibly overridden
 *          to VINF_SUCCESS by the 64-bit adjustments below.
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the virtual CPU to query on.
 * @param   Sel         The selector to get info about.
 * @param   fFlags      DBGFSELQI_FLAGS_XXX.
 * @param   pSelInfo    Where to store the selector information.
 */
static DECLCALLBACK(int) dbgfR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * Make the query.
     */
    PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
    VMCPU_ASSERT_EMT(pVCpu);
    int rc = SELMR3GetSelectorInfo(pVCpu, Sel, pSelInfo);

    /*
     * 64-bit mode HACKS for making data and stack selectors wide open when
     * queried. This is voodoo magic.
     */
    if (fFlags & DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)
    {
        /* Expand 64-bit data and stack selectors. The check is a bit bogus... */
        if (   RT_SUCCESS(rc)
            && (pSelInfo->fFlags & (  DBGFSELINFO_FLAGS_LONG_MODE | DBGFSELINFO_FLAGS_REAL_MODE | DBGFSELINFO_FLAGS_PROT_MODE
                                    | DBGFSELINFO_FLAGS_GATE | DBGFSELINFO_FLAGS_HYPER
                                    | DBGFSELINFO_FLAGS_INVALID | DBGFSELINFO_FLAGS_NOT_PRESENT))
               == DBGFSELINFO_FLAGS_LONG_MODE
            && pSelInfo->cbLimit != ~(RTGCPTR)0
            && CPUMIsGuestIn64BitCode(pVCpu) )
        {
            /* Plain long-mode selector with a finite limit: report a flat,
               wide-open segment instead of the raw base/limit values. */
            pSelInfo->GCPtrBase = 0;
            pSelInfo->cbLimit   = ~(RTGCPTR)0;
        }
        else if (   Sel == 0
                 && CPUMIsGuestIn64BitCode(pVCpu))
        {
            /* Null selector in 64-bit mode: synthesize a present, flat,
               long-mode data descriptor and force success. */
            pSelInfo->GCPtrBase = 0;
            pSelInfo->cbLimit   = ~(RTGCPTR)0;
            pSelInfo->Sel       = 0;
            pSelInfo->SelGate   = 0;
            pSelInfo->fFlags    = DBGFSELINFO_FLAGS_LONG_MODE;
            pSelInfo->u.Raw64.Gen.u1Present  = 1;
            pSelInfo->u.Raw64.Gen.u1Long     = 1;
            pSelInfo->u.Raw64.Gen.u1DescType = 1;
            rc = VINF_SUCCESS;
        }
    }
    return rc;
}
#endif
418
419
/**
 * Gets information about a selector.
 *
 * Intended for the debugger mostly and will prefer the guest
 * descriptor tables over the shadow ones.
 *
 * @returns VBox status code, the following are the common ones.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_INVALID_SELECTOR if the selector isn't fully inside the
 *          descriptor table.
 * @retval  VERR_SELECTOR_NOT_PRESENT if the LDT is invalid or not present. This
 *          is not returned if the selector itself isn't present, you have to
 *          check that for yourself (see DBGFSELINFO::fFlags).
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the
 *          pagetable or page backing the selector table wasn't present.
 * @retval  VERR_NOT_SUPPORTED on ARMv8 targets (no x86-style selectors).
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the virtual CPU context.
 * @param   Sel         The selector to get info about.
 * @param   fFlags      Flags, see DBGFSELQI_FLAGS_*.
 * @param   pSelInfo    Where to store the information. This will always be
 *                      updated.
 *
 * @remarks This is a wrapper around SELMR3GetSelectorInfo and
 *          SELMR3GetShadowSelectorInfo.
 */
VMMR3DECL(int) DBGFR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    AssertReturn(!(fFlags & ~(DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)), VERR_INVALID_PARAMETER);

    /* Clear the return data here on this thread. */
    memset(pSelInfo, 0, sizeof(*pSelInfo));

#if defined(VBOX_VMM_TARGET_ARMV8)
    /* ARMv8 has no segment selectors; the zeroed pSelInfo is all we return. */
    RT_NOREF(Sel);
    return VERR_NOT_SUPPORTED;
#else
    /*
     * Dispatch the request to a worker running on the target CPU.
     */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3SelQueryInfo, 5, pUVM, idCpu, Sel, fFlags, pSelInfo);
#endif
}
465
466
467/**
468 * Validates a CS selector.
469 *
470 * @returns VBox status code.
471 * @param pSelInfo Pointer to the selector information for the CS selector.
472 * @param SelCPL The selector defining the CPL (SS).
473 */
474VMMDECL(int) DBGFR3SelInfoValidateCS(PCDBGFSELINFO pSelInfo, RTSEL SelCPL)
475{
476 /*
477 * Check if present.
478 */
479 if (pSelInfo->u.Raw.Gen.u1Present)
480 {
481 /*
482 * Type check.
483 */
484 if ( pSelInfo->u.Raw.Gen.u1DescType == 1
485 && (pSelInfo->u.Raw.Gen.u4Type & X86_SEL_TYPE_CODE))
486 {
487 /*
488 * Check level.
489 */
490 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, pSelInfo->Sel & X86_SEL_RPL);
491 if ( !(pSelInfo->u.Raw.Gen.u4Type & X86_SEL_TYPE_CONF)
492 ? uLevel <= pSelInfo->u.Raw.Gen.u2Dpl
493 : uLevel >= pSelInfo->u.Raw.Gen.u2Dpl /* hope I got this right now... */
494 )
495 return VINF_SUCCESS;
496 return VERR_INVALID_RPL;
497 }
498 return VERR_NOT_CODE_SELECTOR;
499 }
500 return VERR_SELECTOR_NOT_PRESENT;
501}
502
503
504/**
505 * Converts a PGM paging mode to a set of DBGFPGDMP_XXX flags.
506 *
507 * @returns Flags. UINT32_MAX if the mode is invalid (asserted).
508 * @param enmMode The mode.
509 */
510static uint32_t dbgfR3PagingDumpModeToFlags(PGMMODE enmMode)
511{
512 switch (enmMode)
513 {
514 case PGMMODE_32_BIT:
515 return DBGFPGDMP_FLAGS_PSE;
516 case PGMMODE_PAE:
517 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE;
518 case PGMMODE_PAE_NX:
519 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_NXE;
520 case PGMMODE_AMD64:
521 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME;
522 case PGMMODE_AMD64_NX:
523 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_NXE;
524 case PGMMODE_NESTED_32BIT:
525 return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE;
526 case PGMMODE_NESTED_PAE:
527 return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_NXE;
528 case PGMMODE_NESTED_AMD64:
529 return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_NXE;
530 case PGMMODE_EPT:
531 return DBGFPGDMP_FLAGS_EPT;
532 case PGMMODE_NONE:
533 return 0;
534 default:
535 AssertFailedReturn(UINT32_MAX);
536 }
537}
538
539
/**
 * EMT worker for DBGFR3PagingDumpEx.
 *
 * @returns VBox status code.
 * @param   pUVM            The shared VM handle.
 * @param   idCpu           The current CPU ID.
 * @param   fFlags          The flags, DBGFPGDMP_FLAGS_XXX.  Valid.
 * @param   pcr3            The CR3 to use (unless we're getting the current
 *                          state, see @a fFlags).
 * @param   pu64FirstAddr   The first address.
 * @param   pu64LastAddr    The last address.
 * @param   cMaxDepth       The depth.
 * @param   pHlp            The output callbacks.
 */
static DECLCALLBACK(int) dbgfR3PagingDumpEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, uint64_t *pcr3,
                                            uint64_t *pu64FirstAddr, uint64_t *pu64LastAddr,
                                            uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /*
     * Implement dumping both context by means of recursion.
     */
    if ((fFlags & (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW)) == (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW))
    {
        /* Recurse once per context (shadow first, then guest) and report the
           first failure, otherwise the second call's status. */
        int rc1 = dbgfR3PagingDumpEx(pUVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_GUEST,
                                     pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
        int rc2 = dbgfR3PagingDumpEx(pUVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_SHADOW,
                                     pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
        return RT_FAILURE(rc1) ? rc1 : rc2;
    }

    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * Get the current CR3/mode if required.
     */
    uint64_t cr3 = *pcr3;
    if (fFlags & (DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE))
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
        {
            if (PGMGetShadowMode(pVCpu) == PGMMODE_NONE)
            {
                /* No shadow paging (NEM); nothing to dump. */
                pHlp->pfnPrintf(pHlp, "Shadow paging mode is 'none' (NEM)\n");
                return VINF_SUCCESS;
            }

#if !defined(VBOX_VMM_TARGET_ARMV8)
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
                cr3 = PGMGetHyperCR3(pVCpu);
#endif
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
                fFlags |= dbgfR3PagingDumpModeToFlags(PGMGetShadowMode(pVCpu));
        }
        else
        {
#if defined(VBOX_VMM_TARGET_ARMV8)
            /* x86 guest paging state (CR3/CR4/EFER) has no ARMv8 equivalent here. */
            AssertReleaseFailed();
#else
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
                cr3 = CPUMGetGuestCR3(pVCpu);
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
            {
                /* The dump flags match the hardware bit positions, so CR4 and
                   EFER values can be masked and OR'ed in directly. */
                AssertCompile(DBGFPGDMP_FLAGS_PSE == X86_CR4_PSE); AssertCompile(DBGFPGDMP_FLAGS_PAE == X86_CR4_PAE);
                fFlags |= CPUMGetGuestCR4(pVCpu) & (X86_CR4_PSE | X86_CR4_PAE);
                AssertCompile(DBGFPGDMP_FLAGS_LME == MSR_K6_EFER_LME); AssertCompile(DBGFPGDMP_FLAGS_NXE == MSR_K6_EFER_NXE);
                fFlags |= CPUMGetGuestEFER(pVCpu) & (MSR_K6_EFER_LME | MSR_K6_EFER_NXE);
            }
#endif
        }
    }
    /* The "current" requests are resolved now; strip the flags so PGM only
       sees concrete mode bits. */
    fFlags &= ~(DBGFPGDMP_FLAGS_CURRENT_MODE | DBGFPGDMP_FLAGS_CURRENT_CR3);

    /*
     * Call PGM to do the real work.
     */
    int rc;
    if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
        rc = PGMR3DumpHierarchyShw(pVM, cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
    else
        rc = PGMR3DumpHierarchyGst(pVM, cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
    return rc;
}
624
625
/**
 * Dump paging structures.
 *
 * This API can be used to dump both guest and shadow structures.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The current CPU ID.
 * @param   fFlags          The flags, DBGFPGDMP_FLAGS_XXX.
 * @param   cr3             The CR3 to use (unless we're getting the current
 *                          state, see @a fFlags).
 * @param   u64FirstAddr    The address to start dumping at.
 * @param   u64LastAddr     The address to end dumping after.
 * @param   cMaxDepth       The depth.
 * @param   pHlp            The output callbacks.  Defaults to the debug log if
 *                          NULL.
 */
VMMDECL(int) DBGFR3PagingDumpEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, uint64_t cr3, uint64_t u64FirstAddr,
                                uint64_t u64LastAddr, uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /*
     * Input validation.
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    AssertReturn(!(fFlags & ~DBGFPGDMP_FLAGS_VALID_MASK), VERR_INVALID_FLAGS);
    /* At least one of the two contexts (shadow/guest) must be selected. */
    AssertReturn(fFlags & (DBGFPGDMP_FLAGS_SHADOW | DBGFPGDMP_FLAGS_GUEST), VERR_INVALID_FLAGS);
    /* Either an explicit paging mode or a request for the current one. */
    AssertReturn((fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE) || (fFlags & DBGFPGDMP_FLAGS_MODE_MASK), VERR_INVALID_FLAGS);
    /* EPT is mutually exclusive with the legacy/long-mode bits. */
    AssertReturn(   !(fFlags & DBGFPGDMP_FLAGS_EPT)
                 || !(fFlags & (DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_NXE))
                 , VERR_INVALID_FLAGS);
    AssertReturn(cMaxDepth, VERR_INVALID_PARAMETER);

    /*
     * Forward the request to the target CPU.  cr3 and the address range are
     * passed by pointer as they may be 64-bit.
     */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3PagingDumpEx, 8,
                                    pUVM, idCpu, fFlags, &cr3, &u64FirstAddr, &u64LastAddr, cMaxDepth, pHlp ? pHlp : DBGFR3InfoLogHlp());
}
665
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
Contact · Privacy policy · Terms of Use