VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp

Last change on this file was 103515, checked in by vboxsync, 3 months ago

VMM/DBGF: Stop all VCpus on DBGFEVENT_DEV_STOP events.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 77.7 KB
Line 
1/* $Id: DBGF.cpp 103515 2024-02-22 03:51:37Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_dbgf DBGF - The Debugger Facility
30 *
31 * The purpose of the DBGF is to provide an interface for debuggers to
32 * manipulate the VMM without having to mess up the source code for each of
33 * them. The DBGF is always built in and will always work when a debugger
34 * attaches to the VM. The DBGF provides the basic debugger features, such as
35 * halting execution, handling breakpoints, single step execution, instruction
36 * disassembly, info querying, OS specific diggers, symbol and module
37 * management.
38 *
39 * The interface is working in a manner similar to the win32, linux and os2
40 * debugger interfaces. The interface has an asynchronous nature. This comes
41 * from the fact that the VMM and the Debugger are running in different threads.
42 * They are referred to as the "emulation thread" and the "debugger thread", or
 * 43 * as the "ping thread" and the "pong thread", respectively. (The last set of
44 * names comes from the use of the Ping-Pong synchronization construct from the
45 * RTSem API.)
46 *
47 * @see grp_dbgf
48 *
49 *
50 * @section sec_dbgf_scenario Usage Scenario
51 *
52 * The debugger starts by attaching to the VM. For practical reasons we limit the
53 * number of concurrently attached debuggers to 1 per VM. The action of
54 * attaching to the VM causes the VM to check and generate debug events.
55 *
56 * The debugger then will wait/poll for debug events and issue commands.
57 *
58 * The waiting and polling is done by the DBGFEventWait() function. It will wait
59 * for the emulation thread to send a ping, thus indicating that there is an
60 * event waiting to be processed.
61 *
62 * An event can be a response to a command issued previously, the hitting of a
63 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
64 * the ping and must respond to the event at hand - the VMM is waiting. This
65 * usually means that the user of the debugger must do something, but it doesn't
66 * have to. The debugger is free to call any DBGF function (nearly at least)
67 * while processing the event.
68 *
69 * Typically the user will issue a request for the execution to be resumed, so
70 * the debugger calls DBGFResume() and goes back to waiting/polling for events.
71 *
72 * When the user eventually terminates the debugging session or selects another
73 * VM, the debugger detaches from the VM. This means that breakpoints are
74 * disabled and that the emulation thread no longer polls for debugger commands.
75 *
76 */
77
78
79/*********************************************************************************************************************************
80* Header Files *
81*********************************************************************************************************************************/
82#define LOG_GROUP LOG_GROUP_DBGF
83#include <VBox/vmm/dbgf.h>
84#include <VBox/vmm/selm.h>
85#include <VBox/vmm/em.h>
86#include <VBox/vmm/hm.h>
87#include <VBox/vmm/mm.h>
88#include <VBox/vmm/nem.h>
89#include "DBGFInternal.h"
90#include <VBox/vmm/vm.h>
91#include <VBox/vmm/uvm.h>
92#include <VBox/err.h>
93
94#include <VBox/log.h>
95#include <iprt/semaphore.h>
96#include <iprt/thread.h>
97#include <iprt/asm.h>
98#include <iprt/time.h>
99#include <iprt/assert.h>
100#include <iprt/stream.h>
101#include <iprt/env.h>
102
103
104/*********************************************************************************************************************************
105* Structures and Typedefs *
106*********************************************************************************************************************************/
107/**
108 * Instruction type returned by dbgfStepGetCurInstrType.
109 */
110typedef enum DBGFSTEPINSTRTYPE
111{
112 DBGFSTEPINSTRTYPE_INVALID = 0,
113 DBGFSTEPINSTRTYPE_OTHER,
114 DBGFSTEPINSTRTYPE_RET,
115 DBGFSTEPINSTRTYPE_CALL,
116 DBGFSTEPINSTRTYPE_END,
117 DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
118} DBGFSTEPINSTRTYPE;
119
120
121/*********************************************************************************************************************************
122* Internal Functions *
123*********************************************************************************************************************************/
124DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
125DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
126static int dbgfR3CpuWait(PVMCPU pVCpu);
127static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
128static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
129static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
130static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude);
131
132
133
/**
 * Initializes the DBGF.
 *
 * Initializes the sub-components in a fixed order; on failure each
 * already-initialized component is torn down again in reverse order before
 * the error is propagated.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;
    AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
    AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));

    /* No stepping filter active until someone starts stepping. */
    pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;

    /*
     * The usual sideways mountain climbing style of init:
     */
    int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
    if (RT_SUCCESS(rc))
    {
        rc = dbgfR3TraceInit(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = dbgfR3RegInit(pUVM);
            if (RT_SUCCESS(rc))
            {
                rc = dbgfR3AsInit(pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = dbgfR3BpInit(pUVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = dbgfR3OSInit(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = dbgfR3PlugInInit(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                rc = dbgfR3BugCheckInit(pVM);
                                if (RT_SUCCESS(rc))
                                {
#ifdef VBOX_WITH_DBGF_TRACING
                                    rc = dbgfR3TracerInit(pVM);
#endif
                                    if (RT_SUCCESS(rc))
                                    {
                                        return VINF_SUCCESS;
                                    }
                                }
                                /* Bail out, undoing each successful init in reverse order: */
                                dbgfR3PlugInTerm(pUVM);
                            }
                            dbgfR3OSTermPart1(pUVM);
                            dbgfR3OSTermPart2(pUVM);
                        }
                        dbgfR3BpTerm(pUVM);
                    }
                    dbgfR3AsTerm(pUVM);
                }
                dbgfR3RegTerm(pUVM);
            }
            dbgfR3TraceTerm(pVM);
        }
        dbgfR3InfoTerm(pUVM);
    }
    return rc;
}
200
201
/**
 * Terminates and cleans up resources allocated by the DBGF.
 *
 * Tears down the sub-components in the reverse order of DBGFR3Init(), with
 * the exception of the OS digger termination being split in two parts around
 * the plug-in termination.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;

#ifdef VBOX_WITH_DBGF_TRACING
    dbgfR3TracerTerm(pVM);
#endif
    dbgfR3OSTermPart1(pUVM);
    dbgfR3PlugInTerm(pUVM);
    dbgfR3OSTermPart2(pUVM);
    dbgfR3BpTerm(pUVM);
    dbgfR3AsTerm(pUVM);
    dbgfR3RegTerm(pUVM);
    dbgfR3TraceTerm(pVM);
    dbgfR3InfoTerm(pUVM);

    return VINF_SUCCESS;
}
226
227
/**
 * This is for tstCFGM and others to avoid trigger leak detection.
 *
 * Only the info handler state is torn down here; the full teardown is done by
 * DBGFR3Term().
 *
 * @param   pUVM    The user mode VM structure.
 */
VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
{
    dbgfR3InfoTerm(pUVM);
}
237
238
239/**
240 * Called when the VM is powered off to detach debuggers.
241 *
242 * @param pVM The cross context VM structure.
243 */
244VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
245{
246 /*
247 * Send a termination event to any attached debugger.
248 */
249 if (pVM->dbgf.s.fAttached)
250 {
251 PVMCPU pVCpu = VMMGetCpu(pVM);
252 int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
253 AssertLogRelRC(rc);
254
255 /*
256 * Clear the FF so we won't get confused later on.
257 */
258 VM_FF_CLEAR(pVM, VM_FF_DBGF);
259 }
260}
261
262
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM need to relocate it self inside the GC.
 *
 * @param   pVM         The cross context VM structure.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    /* Only the trace buffer and the address spaces contain relocatable data. */
    dbgfR3TraceRelocate(pVM);
    dbgfR3AsRelocate(pVM->pUVM, offDelta);
}
276
277
/**
 * Waits a little while for a debugger to attach.
 *
 * Polls in 100ms ticks, servicing rendezvous and priority requests so an
 * attaching debugger (which uses a rendezvous) can actually get through.
 *
 * @returns True if a debugger has attached.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context per CPU structure.
 * @param   enmEvent    Event.
 *
 * @thread EMT(pVCpu)
 */
static bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
{
    /*
     * First a message.
     */
    /* cWait counts 100ms ticks: 10 => 1 second, 150 => 15 seconds. */
#if !defined(DEBUG)
    int cWait = 10;
#else
    int cWait = RTEnvExist("VBOX_DBGF_NO_WAIT_FOR_ATTACH")
             || (   (   enmEvent == DBGFEVENT_ASSERTION_HYPER
                     || enmEvent == DBGFEVENT_FATAL_ERROR)
                 && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH"))
              ? 10
              : 150;
#endif
    RTStrmPrintf(g_pStdErr,
                 "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n"
#ifdef DEBUG
                 " Set VBOX_DBGF_NO_WAIT_FOR_ATTACH=1 for short wait or VBOX_DBGF_WAIT_FOR_ATTACH=1 longer.\n"
#endif
                 ,
                 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
    RTStrmFlush(g_pStdErr);
    while (cWait > 0)
    {
        RTThreadSleep(100);
        if (pVM->dbgf.s.fAttached)
        {
            RTStrmPrintf(g_pStdErr, "Attached!\n");
            RTStrmFlush(g_pStdErr);
            return true;
        }

        /* Process rendezvous (debugger attaching involves such). */
        if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
        {
            int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
            if (rc != VINF_SUCCESS)
            {
                /** @todo Ignoring these could be bad. */
                RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
                RTStrmFlush(g_pStdErr);
            }
        }

        /* Process priority stuff. */
        if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
            || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
        {
            int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
            if (rc == VINF_SUCCESS)
                rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
            if (rc != VINF_SUCCESS)
            {
                /** @todo Ignoring these could be bad. */
                RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
                RTStrmFlush(g_pStdErr);
            }
        }

        /* next */
        /* Print a countdown digit once per second. */
        if (!(cWait % 10))
        {
            RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
            RTStrmFlush(g_pStdErr);
        }
        cWait--;
    }

    RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
    RTStrmFlush(g_pStdErr);
    return false;
}
361
362
/**
 * Forced action callback.
 *
 * The VMM will call this from it's main loop when either VM_FF_DBGF or
 * VMCPU_FF_DBGF are set.
 *
 * The function checks for and executes pending commands from the debugger.
 * Then it checks for pending debug events and serves these.
 *
 * @returns VINF_SUCCESS normally.
 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 */
VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    /*
     * Dispatch pending events.
     */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
    {
        /* Only the most recent event, and only if it is still current. */
        if (   pVCpu->dbgf.s.cEvents > 0
            && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
        {
            rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
            /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
        }

        /*
         * Command pending? Process it.
         */
        PUVMCPU pUVCpu = pVCpu->pUVCpu;
        if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
        {
            bool fResumeExecution;
            DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
            DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
            VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
            if (!fResumeExecution)
                rcStrict2 = dbgfR3CpuWait(pVCpu);
            /* Merge the status codes, keeping the more urgent (lower EM) one. */
            if (   rcStrict2 != VINF_SUCCESS
                && (   rcStrict == VINF_SUCCESS
                    || RT_FAILURE(rcStrict2)
                    || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
                rcStrict = rcStrict2;
        }
    }

    return VBOXSTRICTRC_TODO(rcStrict);
}
415
416
417/**
418 * Try to determine the event context.
419 *
420 * @returns debug event context.
421 * @param pVCpu The cross context vCPU structure.
422 */
423static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
424{
425 switch (EMGetState(pVCpu))
426 {
427 case EMSTATE_HM:
428 case EMSTATE_NEM:
429 case EMSTATE_DEBUG_GUEST_HM:
430 case EMSTATE_DEBUG_GUEST_NEM:
431 return DBGFEVENTCTX_HM;
432
433 case EMSTATE_IEM:
434 case EMSTATE_DEBUG_GUEST_IEM:
435 case EMSTATE_DEBUG_GUEST_RAW:
436 return DBGFEVENTCTX_RAW;
437
438
439 case EMSTATE_RECOMPILER:
440 case EMSTATE_DEBUG_GUEST_RECOMPILER:
441 return DBGFEVENTCTX_REM;
442
443 case EMSTATE_DEBUG_HYPER:
444 case EMSTATE_GURU_MEDITATION:
445 return DBGFEVENTCTX_HYPER;
446
447 default:
448 return DBGFEVENTCTX_OTHER;
449 }
450}
451
452
/**
 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The CPU sending the event.
 * @param   enmType     The event type to send.
 * @param   enmCtx      The event context, DBGFEVENTCTX_INVALID will be resolved.
 * @param   pvPayload   Event payload (DBGFEVENT::u data), optional.
 * @param   cbPayload   The size of the event payload, optional.
 */
static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
                                 void const *pvPayload, size_t cbPayload)
{
    PUVM pUVM = pVM->pUVM;
    pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */

    /*
     * Massage the input a little.
     */
    /* Clamp oversized payloads rather than overflowing DBGFEVENT::u. */
    AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
    if (enmCtx == DBGFEVENTCTX_INVALID)
        enmCtx = dbgfR3FigureEventCtx(pVCpu);

    /*
     * Put the event into the ring buffer.
     */
    /* The write mutex serializes producers; the reader only uses atomics. */
    RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);

    uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
    uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
    uint32_t const idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
    /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);

    PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];

#ifdef DEBUG
    /* Poison the slot in debug builds to catch uses of uninitialized fields. */
    ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
#endif
    pEvent->enmType = enmType;
    pEvent->enmCtx = enmCtx;
    pEvent->idCpu = pVCpu->idCpu;
    pEvent->uReserved = 0;
    if (cbPayload)
        memcpy(&pEvent->u, pvPayload, cbPayload);

    /* Publish the event by advancing the write index (wraps via modulo). */
    ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);

    RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);

    /*
     * Signal the debugger.
     */
    return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
}
508
509
/**
 * Send event and wait for the debugger to respond.
 *
 * Payload-less variant of dbgfR3SendEventWaitEx(); blocks in dbgfR3CpuWait()
 * until the debugger issues a command.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The CPU sending the event.
 * @param   enmType The event type to send.
 * @param   enmCtx  The event context, DBGFEVENTCTX_INVALID will be resolved.
 */
DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
{
    int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
    if (RT_SUCCESS(rc))
        rc = dbgfR3CpuWait(pVCpu);
    return rc;
}
526
527
/**
 * Send event and wait for the debugger to respond, extended version.
 *
 * Like dbgfR3SendEventWait() but with an event payload.
 *
 * @returns Strict VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The CPU sending the event.
 * @param   enmType     The event type to send.
 * @param   enmCtx      The event context, DBGFEVENTCTX_INVALID will be resolved.
 * @param   pvPayload   Event payload (DBGFEVENT::u data), optional.
 * @param   cbPayload   The size of the event payload, optional.
 */
DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
                                      void const *pvPayload, size_t cbPayload)
{
    int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
    if (RT_SUCCESS(rc))
        rc = dbgfR3CpuWait(pVCpu);
    return rc;
}
547
548
/**
 * Send event but do NOT wait for the debugger.
 *
 * Currently only used by dbgfR3CpuCmd().
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The CPU sending the event.
 * @param   enmType The event type to send.
 * @param   enmCtx  The event context, DBGFEVENTCTX_INVALID will be resolved.
 */
DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
{
    return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
}
563
564
/**
 * The common event prologue code.
 *
 * It will make sure someone is attached, and perhaps process any high priority
 * pending actions (none yet).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The vCPU cross context structure.
 * @param   enmEvent    The event to be sent.
 */
static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
{
    /*
     * Check if a debugger is attached.
     */
    /* Give a debugger a short window to attach before giving up. */
    if (   !pVM->dbgf.s.fAttached
        && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
    {
        Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
        return VERR_DBGF_NOT_ATTACHED;
    }

    /*
     * Look through pending commands and finish those which make sense now.
     */
    /** @todo Process/purge pending commands. */
    //int rc = DBGFR3VMMForcedAction(pVM);
    return VINF_SUCCESS;
}
595
596
/**
 * Processes a pending event on the current CPU.
 *
 * This is called by EM in response to VINF_EM_DBG_EVENT.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 *
 * @thread EMT(pVCpu)
 */
VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);

    /*
     * Check that we've got an event first.
     */
    AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
    AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
    PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;

    /*
     * Make sure we've got a debugger and is allowed to speak to it.
     */
    int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
    if (RT_FAILURE(rc))
    {
        /** @todo drop them events? */
        return rc; /** @todo this will cause trouble if we're here via an FF! */
    }

    /*
     * Send the event and mark it as ignore.
     * ASSUMES no new events get generate while dbgfR3CpuWait is executing!
     */
    VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
    pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
    return rcStrict;
}
638
639
/**
 * Send a generic debugger event which takes no data.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmEvent    The event to send.
 * @internal
 */
VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);

    /*
     * Do stepping filtering.
     */
    /** @todo Would be better if we did some of this inside the execution
     *        engines. */
    if (   enmEvent == DBGFEVENT_STEPPED
        || enmEvent == DBGFEVENT_STEPPED_HYPER)
    {
        /* Keep stepping until the filter says the destination is reached. */
        if (!dbgfStepAreWeThereYet(pVM, pVCpu))
            return VINF_EM_DBG_STEP;
    }

    int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Send the event and process the reply communication.
     */
    return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
}
674
675
/**
 * Send a debugger event which takes the full source file location.
 *
 * Variadic convenience wrapper around DBGFR3EventSrcV().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmEvent    The event to send.
 * @param   pszFile     Source file.
 * @param   uLine       Line number in source file.
 * @param   pszFunction Function name.
 * @param   pszFormat   Message which accompanies the event.
 * @param   ...         Message arguments.
 * @internal
 */
VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
{
    va_list args;
    va_start(args, pszFormat);
    int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
    va_end(args);
    return rc;
}
697
698
/**
 * Send a debugger event which takes the full source file location.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmEvent    The event to send.
 * @param   pszFile     Source file.
 * @param   uLine       Line number in source file.
 * @param   pszFunction Function name.
 * @param   pszFormat   Message which accompanies the event.
 * @param   args        Message arguments.
 * @internal
 */
VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);

    int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Stop other CPUs for some messages so we can inspect the state across
     * all CPUs as best as possible.
     */
    /** @todo This isn't entirely sane as we'd need a wait to back out of this
     *        if the debugger goes fishing and such. */
    switch (enmEvent)
    {
        default:
            break;
        case DBGFEVENT_DEV_STOP:
            rc = dbgfR3EventHaltAllVCpus(pVM, pVCpu);
            if (RT_SUCCESS(rc))
                break;
            return rc;
    }

    /*
     * Format the message.
     */
    /* NULL message when no format string was supplied. */
    char *pszMessage = NULL;
    char szMessage[8192];
    if (pszFormat && *pszFormat)
    {
        pszMessage = &szMessage[0];
        RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
    }

    /*
     * Send the event and process the reply communication.
     */
    DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
    DbgEvent.u.Src.pszFile = pszFile;
    DbgEvent.u.Src.uLine = uLine;
    DbgEvent.u.Src.pszFunction = pszFunction;
    DbgEvent.u.Src.pszMessage = pszMessage;
    return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
}
759
760
/**
 * Send a debugger event which takes the two assertion messages.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmEvent    The event to send.
 * @param   pszMsg1     First assertion message.
 * @param   pszMsg2     Second assertion message.
 */
VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);

    int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Send the event and process the reply communication.
     */
    DBGFEVENT DbgEvent;
    DbgEvent.u.Assert.pszMsg1 = pszMsg1;
    DbgEvent.u.Assert.pszMsg2 = pszMsg2;
    return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
}
787
788
/**
 * Breakpoint was hit somewhere.
 * Figure out which breakpoint it is and notify the debugger.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmEvent    DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
 */
VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);

    int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Halt all other vCPUs as well to give the user the ability to inspect other
     * vCPU states as well.
     */
    rc = dbgfR3EventHaltAllVCpus(pVM, pVCpu);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Send the event and process the reply communication.
     */
    DBGFEVENT DbgEvent;
    /* Consume the active breakpoint handle; it is one-shot per hit. */
    DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
    pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
    if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
    {
        DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
        return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
    }

    /* No active breakpoint recorded for this vCPU - internal processing error. */
    return VERR_DBGF_IPE_1;
}
828
829
/**
 * Returns whether the given vCPU is waiting for the debugger.
 *
 * @returns Flags whether the vCPU is currently waiting for the debugger.
 * @param   pUVCpu  The user mode vCPU structure.
 */
DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
{
    /* fStopped is set/cleared atomically by dbgfR3CpuWait(). */
    return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
}
840
841
842/**
843 * Checks whether the given vCPU is waiting in the debugger.
844 *
845 * @returns Flag whether the indicated vCPU is halted, when VMCPUID_ALL
846 * is given true is returned when at least one vCPU is halted.
847 * @param pUVM The user mode VM structure.
848 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
849 */
850DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
851{
852 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
853
854 /* Check that either the given vCPU or all are actually halted. */
855 if (idCpu != VMCPUID_ALL)
856 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
857
858 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
859 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
860 return true;
861 return false;
862}
863
864
/**
 * Gets the pending debug command for this EMT/CPU, replacing it with
 * DBGFCMD_NO_COMMAND.
 *
 * @returns Pending command.
 * @param   pUVCpu  The user mode virtual CPU structure.
 * @thread EMT(pUVCpu)
 */
DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
{
    /* Atomic exchange so the debugger cannot race us into losing a command. */
    DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
    Log2(("DBGF: Getting command: %d\n", enmCmd));
    return enmCmd;
}
879
880
/**
 * Send a debug command to a CPU, making sure to notify it.
 *
 * @returns VBox status code.
 * @param   pUVCpu  The user mode virtual CPU structure.
 * @param   enmCmd  The command to submit to the CPU.
 */
DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
{
    Log2(("DBGF: Setting command to %d\n", enmCmd));
    Assert(enmCmd != DBGFCMD_NO_COMMAND);
    /* There must be no unprocessed command pending; we'd overwrite it. */
    AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));

    /* Publish the command, raise the FF, then wake the EMT if it is sleeping. */
    ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
    VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);

    VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
    return VINF_SUCCESS;
}
900
901
902/**
903 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
904 */
905static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventHaltEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
906{
907 RT_NOREF(pvUser);
908
909 VMCPU_ASSERT_EMT(pVCpu);
910 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
911
912 PUVMCPU pUVCpu = pVCpu->pUVCpu;
913 if ( pVCpu != (PVMCPU)pvUser
914 && !dbgfR3CpuIsHalted(pUVCpu))
915 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
916
917 return VINF_SUCCESS;
918}
919
920
/**
 * Halts all vCPUs of the given VM except for the given one.
 *
 * Runs dbgfR3EventHaltEmtWorker on all EMTs at once; the excluded vCPU is
 * passed through as the rendezvous user argument.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpuExclude    The vCPU cross context structure of the vCPU to exclude.
 */
static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude)
{
    return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventHaltEmtWorker, pVCpuExclude);
}
932
933
/**
 * Waits for the debugger to respond.
 *
 * Marks the vCPU as stopped, then spins servicing forced actions (rendezvous,
 * requests, VM state checks) until a debugger command arrives, merging any
 * VINF_EM_* status codes encountered along the way.
 *
 * @returns VBox status code. (clarify)
 * @param   pVCpu   The cross context vCPU structure.
 */
static int dbgfR3CpuWait(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    PUVMCPU pUVCpu = pVCpu->pUVCpu;

    LogFlow(("dbgfR3CpuWait:\n"));
    int rcRet = VINF_SUCCESS;

    /* Mark this vCPU as halted so dbgfR3CpuIsHalted() reports it waiting. */
    ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);

    /*
     * Waits for the debugger to reply (i.e. issue an command).
     */
    for (;;)
    {
        /*
         * Wait.
         */
        for (;;)
        {
            /*
             * Process forced flags before we go sleep.
             */
            /* NOTE(review): VMCPU_FF_REQUEST appears in the VM_FF_IS_ANY_SET
               mask below (a per-VCPU flag in a per-VM mask) - looks like it
               was meant to be VM_FF_REQUEST; confirm the flag bit values
               before changing. */
            if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
                || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
            {
                /* A debugger command has arrived - go process it. */
                if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
                    break;

                int rc;
                if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
                    rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
                else if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
                         || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
                {
                    LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
                    rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                    if (rc == VINF_SUCCESS)
                        rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
                    LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
                }
                else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
                {
                    /* Map fatal/terminal VM states onto EM status codes. */
                    VMSTATE enmState = VMR3GetState(pVM);
                    switch (enmState)
                    {
                        case VMSTATE_FATAL_ERROR:
                        case VMSTATE_FATAL_ERROR_LS:
                        case VMSTATE_GURU_MEDITATION:
                        case VMSTATE_GURU_MEDITATION_LS:
                            rc = VINF_EM_SUSPEND;
                            break;
                        case VMSTATE_DESTROYING:
                            rc = VINF_EM_TERMINATE;
                            break;
                        default:
                            rc = VERR_DBGF_IPE_1;
                            AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
                    }
                }
                else
                    rc = VINF_SUCCESS;
                if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                {
                    switch (rc)
                    {
                        /* These should never be produced while halted here. */
                        case VINF_EM_DBG_BREAKPOINT:
                        case VINF_EM_DBG_STEPPED:
                        case VINF_EM_DBG_STEP:
                        case VINF_EM_DBG_STOP:
                        case VINF_EM_DBG_EVENT:
                            AssertMsgFailed(("rc=%Rrc\n", rc));
                            break;

                        /* return straight away */
                        case VINF_EM_TERMINATE:
                        case VINF_EM_OFF:
                            LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
                            ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                            return rc;

                        /* remember return code. */
                        default:
                            AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
                            RT_FALL_THRU();
                        case VINF_EM_RESET:
                        case VINF_EM_SUSPEND:
                        case VINF_EM_HALT:
                        case VINF_EM_RESUME:
                        case VINF_EM_RESCHEDULE:
                        case VINF_EM_RESCHEDULE_REM:
                            /* Keep the lowest (most urgent) EM status code. */
                            if (rc < rcRet || rcRet == VINF_SUCCESS)
                                rcRet = rc;
                            break;
                    }
                }
                else if (RT_FAILURE(rc))
                {
                    LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
                    ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                    return rc;
                }
            }
            else if (pVM->dbgf.s.fAttached)
            {
                /* Nothing to do; block until a FF is raised or we are poked. */
                int rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
                    ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                    return rc;
                }
            }
            else
            {
                LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
                ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                return rcRet;
            }
        }

        /*
         * Process the command.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
        bool fResumeExecution;
        /* Snapshot the command data before fetching (and clearing) the command. */
        DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
        DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
        int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
        if (fResumeExecution)
        {
            if (RT_FAILURE(rc))
                rcRet = rc;
            else if (   rc >= VINF_EM_FIRST
                     && rc <= VINF_EM_LAST
                     && (rc < rcRet || rcRet == VINF_SUCCESS))
                rcRet = rc;
            LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
            ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
            return rcRet;
        }
    }
}
1083
1084
1085/**
1086 * Executes command from debugger.
1087 *
1088 * The caller is responsible for waiting or resuming execution based on the
1089 * value returned in the *pfResumeExecution indicator.
1090 *
1091 * @returns VBox status code. (clearify!)
1092 * @param pVCpu The cross context vCPU structure.
1093 * @param enmCmd The command in question.
1094 * @param pCmdData Pointer to the command data.
1095 * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
1096 */
1097static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
1098{
1099 RT_NOREF(pCmdData); /* for later */
1100
1101 /*
1102 * The cases in this switch returns directly if no event to send.
1103 */
1104 DBGFEVENTTYPE enmEvent;
1105 DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
1106 switch (enmCmd)
1107 {
1108 /*
1109 * Halt is answered by an event say that we've halted.
1110 */
1111 case DBGFCMD_HALT:
1112 {
1113 *pfResumeExecution = false;
1114 enmEvent = DBGFEVENT_HALT_DONE;
1115 break;
1116 }
1117
1118
1119 /*
1120 * Resume is not answered, we just resume execution.
1121 */
1122 case DBGFCMD_GO:
1123 {
1124 pVCpu->dbgf.s.fSingleSteppingRaw = false;
1125 *pfResumeExecution = true;
1126 return VINF_SUCCESS;
1127 }
1128
1129 /** @todo implement (and define) the rest of the commands. */
1130
1131 /*
1132 * Single step, with trace into.
1133 */
1134 case DBGFCMD_SINGLE_STEP:
1135 {
1136 Log2(("Single step\n"));
1137 PVM pVM = pVCpu->CTX_SUFF(pVM);
1138 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1139 {
1140 if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
1141 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1142 }
1143 if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
1144 {
1145 pVCpu->dbgf.s.fSingleSteppingRaw = true;
1146 *pfResumeExecution = true;
1147 return VINF_EM_DBG_STEP;
1148 }
1149 /* Stop after zero steps. Nonsense, but whatever. */
1150 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1151 *pfResumeExecution = false;
1152 enmCtx = dbgfR3FigureEventCtx(pVCpu);
1153 enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
1154 break;
1155 }
1156
1157 /*
1158 * Default is to send an invalid command event.
1159 */
1160 default:
1161 {
1162 *pfResumeExecution = false;
1163 enmEvent = DBGFEVENT_INVALID_COMMAND;
1164 break;
1165 }
1166 }
1167
1168 /*
1169 * Send the pending event.
1170 */
1171 Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
1172 int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
1173 AssertRCStmt(rc, *pfResumeExecution = true);
1174 return rc;
1175}
1176
1177
1178/**
1179 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1180 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1181 */
1182static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1183{
1184 PUVM pUVM = pVM->pUVM;
1185 int *prcAttach = (int *)pvUser;
1186 RT_NOREF(pVCpu);
1187
1188 if (pVM->dbgf.s.fAttached)
1189 {
1190 Log(("dbgfR3Attach: Debugger already attached\n"));
1191 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1192 return VINF_SUCCESS;
1193 }
1194
1195 /*
1196 * The per-CPU bits.
1197 */
1198 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1199 {
1200 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1201
1202 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1203 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1204 }
1205
1206 /*
1207 * Init of the VM -> Debugger communication part living in the global VM structure.
1208 */
1209 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of event ring, increased when being full. */
1210 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1211 pUVM->dbgf.s.idxDbgEvtRead = 0;
1212 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1213 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1214 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1215 int rc;
1216 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1217 if (pUVM->dbgf.s.paDbgEvts)
1218 {
1219 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1220 if (RT_SUCCESS(rc))
1221 {
1222 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1223 if (RT_SUCCESS(rc))
1224 {
1225 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1226 if (RT_SUCCESS(rc))
1227 {
1228 /*
1229 * At last, set the attached flag.
1230 */
1231 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1232 *prcAttach = VINF_SUCCESS;
1233 return VINF_SUCCESS;
1234 }
1235
1236 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1237 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1238 }
1239 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1240 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1241 }
1242 }
1243 else
1244 rc = VERR_NO_MEMORY;
1245
1246 *prcAttach = rc;
1247 return VINF_SUCCESS;
1248}
1249
1250
1251/**
1252 * Attaches a debugger to the specified VM.
1253 *
1254 * Only one debugger at a time.
1255 *
1256 * @returns VBox status code.
1257 * @param pUVM The user mode VM handle.
1258 */
1259VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1260{
1261 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1262 PVM pVM = pUVM->pVM;
1263 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1264
1265 /*
1266 * Call the VM, use EMT rendezvous for serialization.
1267 */
1268 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1269 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1270 if (RT_SUCCESS(rc))
1271 rc = rcAttach;
1272
1273 return rc;
1274}
1275
1276
1277/**
1278 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1279 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1280 */
1281static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1282{
1283 if (pVCpu->idCpu == 0)
1284 {
1285 PUVM pUVM = (PUVM)pvUser;
1286
1287 /*
1288 * Per-CPU cleanup.
1289 */
1290 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1291 {
1292 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1293
1294 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1295 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1296 }
1297
1298 /*
1299 * De-init of the VM -> Debugger communication part living in the global VM structure.
1300 */
1301 if (pUVM->dbgf.s.paDbgEvts)
1302 {
1303 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1304 pUVM->dbgf.s.paDbgEvts = NULL;
1305 }
1306
1307 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1308 {
1309 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1310 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1311 }
1312
1313 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1314 {
1315 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1316 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1317 }
1318
1319 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1320 {
1321 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1322 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1323 }
1324
1325 pUVM->dbgf.s.cDbgEvtMax = 0;
1326 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1327 pUVM->dbgf.s.idxDbgEvtRead = 0;
1328 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1329 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1330 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1331
1332 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1333 }
1334
1335 return VINF_SUCCESS;
1336}
1337
1338
1339/**
1340 * Detaches a debugger from the specified VM.
1341 *
1342 * Caller must be attached to the VM.
1343 *
1344 * @returns VBox status code.
1345 * @param pUVM The user mode VM handle.
1346 */
1347VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1348{
1349 LogFlow(("DBGFR3Detach:\n"));
1350
1351 /*
1352 * Validate input. The UVM handle shall be valid, the VM handle might be
1353 * in the processes of being destroyed already, so deal quietly with that.
1354 */
1355 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1356 PVM pVM = pUVM->pVM;
1357 if (!VM_IS_VALID_EXT(pVM))
1358 return VERR_INVALID_VM_HANDLE;
1359
1360 /*
1361 * Check if attached.
1362 */
1363 if (!pVM->dbgf.s.fAttached)
1364 return VERR_DBGF_NOT_ATTACHED;
1365
1366 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1367}
1368
1369
1370/**
1371 * Wait for a debug event.
1372 *
1373 * @returns VBox status code. Will not return VBOX_INTERRUPTED.
1374 * @param pUVM The user mode VM handle.
1375 * @param cMillies Number of millis to wait.
1376 * @param pEvent Where to store the event data.
1377 */
1378VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
1379{
1380 /*
1381 * Check state.
1382 */
1383 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1384 PVM pVM = pUVM->pVM;
1385 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1386 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1387
1388 RT_BZERO(pEvent, sizeof(*pEvent));
1389
1390 /*
1391 * Wait for an event to arrive if there are none.
1392 */
1393 int rc = VINF_SUCCESS;
1394 uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
1395 if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
1396 {
1397 do
1398 {
1399 rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
1400 } while ( RT_SUCCESS(rc)
1401 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1402 }
1403
1404 if (RT_SUCCESS(rc))
1405 {
1406 Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1407
1408 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
1409 memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
1410 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
1411 }
1412
1413 Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
1414 return rc;
1415}
1416
1417
1418/**
1419 * Halts VM execution.
1420 *
1421 * After calling this the VM isn't actually halted till an DBGFEVENT_HALT_DONE
1422 * arrives. Until that time it's not possible to issue any new commands.
1423 *
1424 * @returns VBox status code.
1425 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1426 * are halted.
1427 * @param pUVM The user mode VM handle.
1428 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
1429 */
1430VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1431{
1432 /*
1433 * Check state.
1434 */
1435 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1436 PVM pVM = pUVM->pVM;
1437 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1438 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1439 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1440
1441 /*
1442 * Halt the requested CPUs as needed.
1443 */
1444 int rc;
1445 if (idCpu != VMCPUID_ALL)
1446 {
1447 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1448 if (!dbgfR3CpuIsHalted(pUVCpu))
1449 {
1450 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1451 rc = VINF_SUCCESS;
1452 }
1453 else
1454 rc = VWRN_DBGF_ALREADY_HALTED;
1455 }
1456 else
1457 {
1458 rc = VWRN_DBGF_ALREADY_HALTED;
1459 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1460 {
1461 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1462 if (!dbgfR3CpuIsHalted(pUVCpu))
1463 {
1464 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1465 rc = VINF_SUCCESS;
1466 }
1467 }
1468 }
1469
1470 return rc;
1471}
1472
1473
1474/**
1475 * Checks if any of the specified vCPUs have been halted by the debugger.
1476 *
1477 * @returns True if at least one halted vCPUs.
1478 * @returns False if no halted vCPUs.
1479 * @param pUVM The user mode VM handle.
1480 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1481 * at least a single vCPU is halted in the debugger.
1482 */
1483VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1484{
1485 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1486 PVM pVM = pUVM->pVM;
1487 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1488 AssertReturn(pVM->dbgf.s.fAttached, false);
1489
1490 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1491}
1492
1493
1494/**
1495 * Checks if the debugger can wait for events or not.
1496 *
1497 * This function is only used by lazy, multiplexing debuggers. :-)
1498 *
1499 * @returns VBox status code.
1500 * @retval VINF_SUCCESS if waitable.
1501 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1502 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1503 * (not asserted) or if the handle is invalid (asserted).
1504 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1505 *
1506 * @param pUVM The user mode VM handle.
1507 */
1508VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1509{
1510 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1511
1512 /* Note! There is a slight race here, unfortunately. */
1513 PVM pVM = pUVM->pVM;
1514 if (!RT_VALID_PTR(pVM))
1515 return VERR_INVALID_VM_HANDLE;
1516 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1517 return VERR_INVALID_VM_HANDLE;
1518 if (!pVM->dbgf.s.fAttached)
1519 return VERR_DBGF_NOT_ATTACHED;
1520
1521 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1522 return VINF_SUCCESS;
1523}
1524
1525
1526/**
1527 * Resumes VM execution.
1528 *
1529 * There is no receipt event on this command.
1530 *
1531 * @returns VBox status code.
1532 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1533 * @param pUVM The user mode VM handle.
1534 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1535 */
1536VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1537{
1538 /*
1539 * Validate input and attachment state.
1540 */
1541 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1542 PVM pVM = pUVM->pVM;
1543 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1544 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1545
1546 /*
1547 * Ping the halted emulation threads, telling them to run.
1548 */
1549 int rc = VWRN_DBGF_ALREADY_RUNNING;
1550 if (idCpu != VMCPUID_ALL)
1551 {
1552 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1553 if (dbgfR3CpuIsHalted(pUVCpu))
1554 {
1555 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1556 AssertRC(rc);
1557 }
1558 }
1559 else
1560 {
1561 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1562 {
1563 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1564 if (dbgfR3CpuIsHalted(pUVCpu))
1565 {
1566 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1567 AssertRC(rc2);
1568 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1569 rc = rc2;
1570 }
1571 }
1572 }
1573
1574 return rc;
1575}
1576
1577
1578/**
1579 * Classifies the current instruction.
1580 *
1581 * @returns Type of instruction.
1582 * @param pVM The cross context VM structure.
1583 * @param pVCpu The current CPU.
1584 * @thread EMT(pVCpu)
1585 */
1586static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
1587{
1588 /*
1589 * Read the instruction.
1590 */
1591 size_t cbRead = 0;
1592 uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1593 int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
1594 if (RT_SUCCESS(rc))
1595 {
1596 /*
1597 * Do minimal parsing. No real need to involve the disassembler here.
1598 */
1599 uint8_t *pb = abOpcode;
1600 for (;;)
1601 {
1602 switch (*pb++)
1603 {
1604 default:
1605 return DBGFSTEPINSTRTYPE_OTHER;
1606
1607 case 0xe8: /* call rel16/32 */
1608 case 0x9a: /* call farptr */
1609 case 0xcc: /* int3 */
1610 case 0xcd: /* int xx */
1611 // case 0xce: /* into */
1612 return DBGFSTEPINSTRTYPE_CALL;
1613
1614 case 0xc2: /* ret xx */
1615 case 0xc3: /* ret */
1616 case 0xca: /* retf xx */
1617 case 0xcb: /* retf */
1618 case 0xcf: /* iret */
1619 return DBGFSTEPINSTRTYPE_RET;
1620
1621 case 0xff:
1622 if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
1623 || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
1624 return DBGFSTEPINSTRTYPE_CALL;
1625 return DBGFSTEPINSTRTYPE_OTHER;
1626
1627 case 0x0f:
1628 switch (*pb++)
1629 {
1630 case 0x05: /* syscall */
1631 case 0x34: /* sysenter */
1632 return DBGFSTEPINSTRTYPE_CALL;
1633 case 0x07: /* sysret */
1634 case 0x35: /* sysexit */
1635 return DBGFSTEPINSTRTYPE_RET;
1636 }
1637 break;
1638
1639 /* Must handle some REX prefixes. So we do all normal prefixes. */
1640 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
1641 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
1642 if (!CPUMIsGuestIn64BitCode(pVCpu))
1643 return DBGFSTEPINSTRTYPE_OTHER;
1644 break;
1645
1646 case 0x2e: /* CS */
1647 case 0x36: /* SS */
1648 case 0x3e: /* DS */
1649 case 0x26: /* ES */
1650 case 0x64: /* FS */
1651 case 0x65: /* GS */
1652 case 0x66: /* op size */
1653 case 0x67: /* addr size */
1654 case 0xf0: /* lock */
1655 case 0xf2: /* REPNZ */
1656 case 0xf3: /* REPZ */
1657 break;
1658 }
1659 }
1660 }
1661
1662 return DBGFSTEPINSTRTYPE_INVALID;
1663}
1664
1665
1666/**
1667 * Checks if the stepping has reached a stop point.
1668 *
1669 * Called when raising a stepped event.
1670 *
1671 * @returns true if the event should be raised, false if we should take one more
1672 * step first.
1673 * @param pVM The cross context VM structure.
1674 * @param pVCpu The cross context per CPU structure of the calling EMT.
1675 * @thread EMT(pVCpu)
1676 */
1677static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
1678{
1679 /*
1680 * Check valid pVCpu and that it matches the CPU one stepping.
1681 */
1682 if (pVCpu)
1683 {
1684 if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
1685 {
1686 /*
1687 * Increase the number of steps and see if we've reached the max.
1688 */
1689 pVM->dbgf.s.SteppingFilter.cSteps++;
1690 if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
1691 {
1692 /*
1693 * Check PC and SP address filtering.
1694 */
1695 if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
1696 {
1697 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1698 && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
1699 return true;
1700 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1701 && CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
1702 < pVM->dbgf.s.SteppingFilter.cbStackPop)
1703 return true;
1704 }
1705
1706 /*
1707 * Do step-over filtering separate from the step-into one.
1708 */
1709 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1710 {
1711 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1712 switch (enmType)
1713 {
1714 default:
1715 if ( pVM->dbgf.s.SteppingFilter.uCallDepth != 0
1716 || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
1717 break;
1718 return true;
1719 case DBGFSTEPINSTRTYPE_CALL:
1720 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1721 && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1722 return true;
1723 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1724 break;
1725 case DBGFSTEPINSTRTYPE_RET:
1726 if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1727 {
1728 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1729 return true;
1730 /* If after return, we use the cMaxStep limit to stop the next time. */
1731 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1732 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1733 }
1734 else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
1735 pVM->dbgf.s.SteppingFilter.uCallDepth--;
1736 break;
1737 }
1738 return false;
1739 }
1740 /*
1741 * Filtered step-into.
1742 */
1743 else if ( pVM->dbgf.s.SteppingFilter.fFlags
1744 & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
1745 {
1746 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1747 switch (enmType)
1748 {
1749 default:
1750 break;
1751 case DBGFSTEPINSTRTYPE_CALL:
1752 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1753 return true;
1754 break;
1755 case DBGFSTEPINSTRTYPE_RET:
1756 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1757 return true;
1758 /* If after return, we use the cMaxStep limit to stop the next time. */
1759 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1760 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1761 break;
1762 }
1763 return false;
1764 }
1765 }
1766 }
1767 }
1768
1769 return true;
1770}
1771
1772
1773/**
1774 * Step Into.
1775 *
1776 * A single step event is generated from this command.
1777 * The current implementation is not reliable, so don't rely on the event coming.
1778 *
1779 * @returns VBox status code.
1780 * @param pUVM The user mode VM handle.
1781 * @param idCpu The ID of the CPU to single step on.
1782 */
1783VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1784{
1785 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1786}
1787
1788
1789/**
1790 * Full fleged step.
1791 *
1792 * This extended stepping API allows for doing multiple steps before raising an
1793 * event, helping implementing step over, step out and other more advanced
1794 * features.
1795 *
1796 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
1797 * DBGFEVENT_STEPPED_EVENT. However the stepping may be interrupted by other
1798 * events, which will abort the stepping.
1799 *
1800 * The stop on pop area feature is for safeguarding step out.
1801 *
1802 * Please note though, that it will always use stepping and never breakpoints.
1803 * While this allows for a much greater flexibility it can at times be rather
1804 * slow.
1805 *
1806 * @returns VBox status code.
1807 * @param pUVM The user mode VM handle.
1808 * @param idCpu The ID of the CPU to single step on.
1809 * @param fFlags Flags controlling the stepping, DBGF_STEP_F_XXX.
1810 * Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
1811 * always be specified.
1812 * @param pStopPcAddr Address to stop executing at. Completely ignored
1813 * unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
1814 * @param pStopPopAddr Stack address that SP must be lower than when
1815 * performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
1816 * @param cbStopPop The range starting at @a pStopPopAddr which is
1817 * considered to be within the same thread stack. Note
1818 * that the API allows @a pStopPopAddr and @a cbStopPop
1819 * to form an area that wraps around and it will
1820 * consider the part starting at 0 as included.
1821 * @param cMaxSteps The maximum number of steps to take. This is to
1822 * prevent stepping for ever, so passing UINT32_MAX is
1823 * not recommended.
1824 *
1825 * @remarks The two address arguments must be guest context virtual addresses,
1826 * or HMA. The code doesn't make much of a point of out HMA, though.
1827 */
1828VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
1829 PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
1830{
1831 /*
1832 * Check state.
1833 */
1834 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1835 PVM pVM = pUVM->pVM;
1836 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1837 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
1838 AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
1839 AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
1840 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1841 {
1842 AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
1843 AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
1844 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
1845 }
1846 AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1847 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1848 {
1849 AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1850 AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
1851 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
1852 AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
1853 }
1854
1855 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1856 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1857 if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
1858 { /* likely */ }
1859 else
1860 return VERR_SEM_OUT_OF_TURN;
1861 Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);
1862
1863 /*
1864 * Send the emulation thread a single-step command.
1865 */
1866 if (fFlags == DBGF_STEP_F_INTO)
1867 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1868 else
1869 pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
1870 pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
1871 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1872 pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
1873 else
1874 pVM->dbgf.s.SteppingFilter.AddrPc = 0;
1875 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1876 {
1877 pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
1878 pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
1879 }
1880 else
1881 {
1882 pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
1883 pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
1884 }
1885
1886 pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
1887 pVM->dbgf.s.SteppingFilter.cSteps = 0;
1888 pVM->dbgf.s.SteppingFilter.uCallDepth = 0;
1889
1890 Assert(dbgfR3CpuIsHalted(pUVCpu));
1891 return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
1892}
1893
1894
1895
1896/**
1897 * dbgfR3EventConfigEx argument packet.
1898 */
1899typedef struct DBGFR3EVENTCONFIGEXARGS
1900{
1901 PCDBGFEVENTCONFIG paConfigs;
1902 size_t cConfigs;
1903 int rc;
1904} DBGFR3EVENTCONFIGEXARGS;
1905/** Pointer to a dbgfR3EventConfigEx argument packet. */
1906typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1907
1908
1909/**
1910 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
1911 */
1912static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
1913{
1914 if (pVCpu->idCpu == 0)
1915 {
1916 PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
1917 DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
1918 size_t cConfigs = pArgs->cConfigs;
1919
1920 /*
1921 * Apply the changes.
1922 */
1923 unsigned cChanges = 0;
1924 for (uint32_t i = 0; i < cConfigs; i++)
1925 {
1926 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1927 AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1928 if (paConfigs[i].fEnabled)
1929 cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
1930 else
1931 cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
1932 }
1933
1934 /*
1935 * Inform HM about changes.
1936 */
1937 if (cChanges > 0)
1938 {
1939 if (HMIsEnabled(pVM))
1940 {
1941 HMR3NotifyDebugEventChanged(pVM);
1942 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1943 }
1944 else if (VM_IS_NEM_ENABLED(pVM))
1945 {
1946 NEMR3NotifyDebugEventChanged(pVM);
1947 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1948 }
1949 }
1950 }
1951 else if (HMIsEnabled(pVM))
1952 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1953 else if (VM_IS_NEM_ENABLED(pVM))
1954 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1955
1956 return VINF_SUCCESS;
1957}
1958
1959
1960/**
1961 * Configures (enables/disables) multiple selectable debug events.
1962 *
1963 * @returns VBox status code.
1964 * @param pUVM The user mode VM handle.
1965 * @param paConfigs The event to configure and their new state.
1966 * @param cConfigs Number of entries in @a paConfigs.
1967 */
1968VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1969{
1970 /*
1971 * Validate input.
1972 */
1973 size_t i = cConfigs;
1974 while (i-- > 0)
1975 {
1976 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
1977 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1978 }
1979 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1980 PVM pVM = pUVM->pVM;
1981 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1982
1983 /*
1984 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
1985 * can sync their data and execution with new debug state.
1986 */
1987 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
1988 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
1989 dbgfR3EventConfigEx, &Args);
1990 if (RT_SUCCESS(rc))
1991 rc = Args.rc;
1992 return rc;
1993}
1994
1995
1996/**
1997 * Enables or disables a selectable debug event.
1998 *
1999 * @returns VBox status code.
2000 * @param pUVM The user mode VM handle.
2001 * @param enmEvent The selectable debug event.
2002 * @param fEnabled The new state.
2003 */
2004VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
2005{
2006 /*
2007 * Convert to an array call.
2008 */
2009 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
2010 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
2011}
2012
2013
2014/**
2015 * Checks if the given selectable event is enabled.
2016 *
2017 * @returns true if enabled, false if not or invalid input.
2018 * @param pUVM The user mode VM handle.
2019 * @param enmEvent The selectable debug event.
2020 * @sa DBGFR3EventQuery
2021 */
2022VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
2023{
2024 /*
2025 * Validate input.
2026 */
2027 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
2028 && enmEvent < DBGFEVENT_END, false);
2029 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
2030 || enmEvent == DBGFEVENT_BREAKPOINT
2031 || enmEvent == DBGFEVENT_BREAKPOINT_IO
2032 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
2033
2034 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2035 PVM pVM = pUVM->pVM;
2036 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2037
2038 /*
2039 * Check the event status.
2040 */
2041 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
2042}
2043
2044
2045/**
2046 * Queries the status of a set of events.
2047 *
2048 * @returns VBox status code.
2049 * @param pUVM The user mode VM handle.
2050 * @param paConfigs The events to query and where to return the state.
2051 * @param cConfigs The number of elements in @a paConfigs.
2052 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
2053 */
2054VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
2055{
2056 /*
2057 * Validate input.
2058 */
2059 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2060 PVM pVM = pUVM->pVM;
2061 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2062
2063 for (size_t i = 0; i < cConfigs; i++)
2064 {
2065 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
2066 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
2067 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2068 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
2069 || enmType == DBGFEVENT_BREAKPOINT
2070 || enmType == DBGFEVENT_BREAKPOINT_IO
2071 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
2072 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
2073 }
2074
2075 return VINF_SUCCESS;
2076}
2077
2078
/**
 * dbgfR3InterruptConfigEx argument packet.
 *
 * Passed through the EMT rendezvous to the dbgfR3InterruptConfigEx worker.
 */
typedef struct DBGFR3INTERRUPTCONFIGEXARGS
{
    /** The interrupt interception configurations to apply. */
    PCDBGFINTERRUPTCONFIG   paConfigs;
    /** Number of entries in paConfigs. */
    size_t                  cConfigs;
    /** Status code for returning the worker's result (currently always
     *  VINF_SUCCESS as initialized by the caller). */
    int                     rc;
} DBGFR3INTERRUPTCONFIGEXARGS;
/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2090
2091/**
2092 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2093 * Worker for DBGFR3InterruptConfigEx.}
2094 */
2095static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2096{
2097 if (pVCpu->idCpu == 0)
2098 {
2099 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2100 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2101 size_t cConfigs = pArgs->cConfigs;
2102
2103 /*
2104 * Apply the changes.
2105 */
2106 bool fChanged = false;
2107 bool fThis;
2108 for (uint32_t i = 0; i < cConfigs; i++)
2109 {
2110 /*
2111 * Hardware interrupts.
2112 */
2113 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2114 {
2115 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2116 if (fThis)
2117 {
2118 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2119 pVM->dbgf.s.cHardIntBreakpoints++;
2120 }
2121 }
2122 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2123 {
2124 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2125 if (fThis)
2126 {
2127 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2128 pVM->dbgf.s.cHardIntBreakpoints--;
2129 }
2130 }
2131
2132 /*
2133 * Software interrupts.
2134 */
2135 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2136 {
2137 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2138 if (fThis)
2139 {
2140 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2141 pVM->dbgf.s.cSoftIntBreakpoints++;
2142 }
2143 }
2144 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2145 {
2146 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2147 if (fThis)
2148 {
2149 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2150 pVM->dbgf.s.cSoftIntBreakpoints--;
2151 }
2152 }
2153 }
2154
2155 /*
2156 * Update the event bitmap entries.
2157 */
2158 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2159 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2160 else
2161 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2162
2163 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2164 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2165 else
2166 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2167
2168 /*
2169 * Inform HM about changes.
2170 */
2171 if (fChanged)
2172 {
2173 if (HMIsEnabled(pVM))
2174 {
2175 HMR3NotifyDebugEventChanged(pVM);
2176 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2177 }
2178 else if (VM_IS_NEM_ENABLED(pVM))
2179 {
2180 NEMR3NotifyDebugEventChanged(pVM);
2181 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2182 }
2183 }
2184 }
2185 else if (HMIsEnabled(pVM))
2186 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2187 else if (VM_IS_NEM_ENABLED(pVM))
2188 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2189
2190 return VINF_SUCCESS;
2191}
2192
2193
2194/**
2195 * Changes
2196 *
2197 * @returns VBox status code.
2198 * @param pUVM The user mode VM handle.
2199 * @param paConfigs The events to query and where to return the state.
2200 * @param cConfigs The number of elements in @a paConfigs.
2201 * @sa DBGFR3InterruptConfigHardware, DBGFR3InterruptConfigSoftware
2202 */
2203VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2204{
2205 /*
2206 * Validate input.
2207 */
2208 size_t i = cConfigs;
2209 while (i-- > 0)
2210 {
2211 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2212 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2213 }
2214
2215 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2216 PVM pVM = pUVM->pVM;
2217 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2218
2219 /*
2220 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2221 * can sync their data and execution with new debug state.
2222 */
2223 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2224 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2225 dbgfR3InterruptConfigEx, &Args);
2226 if (RT_SUCCESS(rc))
2227 rc = Args.rc;
2228 return rc;
2229}
2230
2231
2232/**
2233 * Configures interception of a hardware interrupt.
2234 *
2235 * @returns VBox status code.
2236 * @param pUVM The user mode VM handle.
2237 * @param iInterrupt The interrupt number.
2238 * @param fEnabled Whether interception is enabled or not.
2239 * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
2240 */
2241VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2242{
2243 /*
2244 * Convert to DBGFR3InterruptConfigEx call.
2245 */
2246 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
2247 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2248}
2249
2250
2251/**
2252 * Configures interception of a software interrupt.
2253 *
2254 * @returns VBox status code.
2255 * @param pUVM The user mode VM handle.
2256 * @param iInterrupt The interrupt number.
2257 * @param fEnabled Whether interception is enabled or not.
2258 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
2259 */
2260VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2261{
2262 /*
2263 * Convert to DBGFR3InterruptConfigEx call.
2264 */
2265 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
2266 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2267}
2268
2269
2270/**
2271 * Checks whether interception is enabled for a hardware interrupt.
2272 *
2273 * @returns true if enabled, false if not or invalid input.
2274 * @param pUVM The user mode VM handle.
2275 * @param iInterrupt The interrupt number.
2276 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2277 * DBGF_IS_SOFTWARE_INT_ENABLED
2278 */
2279VMMR3DECL(int) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2280{
2281 /*
2282 * Validate input.
2283 */
2284 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2285 PVM pVM = pUVM->pVM;
2286 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2287
2288 /*
2289 * Check it.
2290 */
2291 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2292}
2293
2294
2295/**
2296 * Checks whether interception is enabled for a software interrupt.
2297 *
2298 * @returns true if enabled, false if not or invalid input.
2299 * @param pUVM The user mode VM handle.
2300 * @param iInterrupt The interrupt number.
2301 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2302 * DBGF_IS_HARDWARE_INT_ENABLED,
2303 */
2304VMMR3DECL(int) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2305{
2306 /*
2307 * Validate input.
2308 */
2309 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2310 PVM pVM = pUVM->pVM;
2311 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2312
2313 /*
2314 * Check it.
2315 */
2316 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2317}
2318
2319
2320
2321/**
2322 * Call this to single step programmatically.
2323 *
2324 * You must pass down the return code to the EM loop! That's
2325 * where the actual single stepping take place (at least in the
2326 * current implementation).
2327 *
2328 * @returns VINF_EM_DBG_STEP
2329 *
2330 * @param pVCpu The cross context virtual CPU structure.
2331 *
2332 * @thread VCpu EMT
2333 * @internal
2334 */
2335VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
2336{
2337 VMCPU_ASSERT_EMT(pVCpu);
2338
2339 pVCpu->dbgf.s.fSingleSteppingRaw = true;
2340 return VINF_EM_DBG_STEP;
2341}
2342
2343
2344/**
2345 * Inject an NMI into a running VM (only VCPU 0!)
2346 *
2347 * @returns VBox status code.
2348 * @param pUVM The user mode VM structure.
2349 * @param idCpu The ID of the CPU to inject the NMI on.
2350 */
2351VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
2352{
2353 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2354 PVM pVM = pUVM->pVM;
2355 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2356 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
2357
2358 /** @todo Implement generic NMI injection. */
2359 /** @todo NEM: NMI injection */
2360 if (!HMIsEnabled(pVM))
2361 return VERR_NOT_SUP_BY_NEM;
2362
2363 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
2364 return VINF_SUCCESS;
2365}
2366
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use