VirtualBox

source: vbox/trunk/src/VBox/VMM/VMInternal.h@13762

Last change on this file since 13762 was 13755, checked in by vboxsync, 16 years ago

Started with VM request API changes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 21.0 KB
/* $Id: VMInternal.h 13755 2008-11-03 15:49:06Z vboxsync $ */
/** @file
 * VM - Internal header file.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VMInternal_h
#define ___VMInternal_h

#include <VBox/cdefs.h>
#include <VBox/vmapi.h>
#include <setjmp.h>



/** @defgroup grp_vm_int Internals
 * @ingroup grp_vm
 * @internal
 * @{
 */

/**
 * At-reset callback type.
 */
typedef enum VMATRESETTYPE
{
    /** Device callback. */
    VMATRESETTYPE_DEV = 1,
    /** Internal callback. */
    VMATRESETTYPE_INTERNAL,
    /** External callback. */
    VMATRESETTYPE_EXTERNAL
} VMATRESETTYPE;


/** Pointer to an at-reset callback record. */
typedef struct VMATRESET *PVMATRESET;

/**
 * At-reset callback.
 */
typedef struct VMATRESET
{
    /** Pointer to the next one in the list. */
    PVMATRESET          pNext;
    /** Callback type. */
    VMATRESETTYPE       enmType;
    /** User argument for the callback. */
    void               *pvUser;
    /** Description. */
    const char         *pszDesc;
    /** Type specific data. */
    union
    {
        /** VMATRESETTYPE_DEV. */
        struct
        {
            /** Callback. */
            PFNVMATRESET    pfnCallback;
            /** Device instance. */
            PPDMDEVINS      pDevIns;
        } Dev;

        /** VMATRESETTYPE_INTERNAL. */
        struct
        {
            /** Callback. */
            PFNVMATRESETINT pfnCallback;
        } Internal;

        /** VMATRESETTYPE_EXTERNAL. */
        struct
        {
            /** Callback. */
            PFNVMATRESETEXT pfnCallback;
        } External;
    } u;
} VMATRESET;
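
/*
 * Illustrative sketch, not part of the original header: invoking one entry of
 * the at-reset list simply switches on enmType and calls the matching member
 * of the union. The function name, the callback parameter lists and the
 * status handling below are assumptions made for illustration only.
 */
#if 0
static int vmR3AtResetInvokeOne(PVM pVM, PVMATRESET pCur)
{
    switch (pCur->enmType)
    {
        case VMATRESETTYPE_DEV:
            /* Device callbacks get their device instance back. */
            return pCur->u.Dev.pfnCallback(pCur->u.Dev.pDevIns, pCur->pvUser);
        case VMATRESETTYPE_INTERNAL:
            /* Internal callbacks are handed the VM handle. */
            return pCur->u.Internal.pfnCallback(pVM, pCur->pvUser);
        case VMATRESETTYPE_EXTERNAL:
            /* External callbacks only see the user argument. */
            pCur->u.External.pfnCallback(pCur->pvUser);
            return VINF_SUCCESS;
        default:
            return VERR_INTERNAL_ERROR;
    }
}
#endif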


/**
 * VM state change callback.
 */
typedef struct VMATSTATE
{
    /** Pointer to the next one. */
    struct VMATSTATE   *pNext;
    /** Pointer to the callback. */
    PFNVMATSTATE        pfnAtState;
    /** The user argument. */
    void               *pvUser;
} VMATSTATE;
/** Pointer to a VM state change callback. */
typedef VMATSTATE *PVMATSTATE;


/**
 * VM error callback.
 */
typedef struct VMATERROR
{
    /** Pointer to the next one. */
    struct VMATERROR   *pNext;
    /** Pointer to the callback. */
    PFNVMATERROR        pfnAtError;
    /** The user argument. */
    void               *pvUser;
} VMATERROR;
/** Pointer to a VM error callback. */
typedef VMATERROR *PVMATERROR;


/**
 * Chunk of memory allocated off the hypervisor heap in which
 * we copy the error details.
 */
typedef struct VMERROR
{
    /** The size of the chunk. */
    uint32_t    cbAllocated;
    /** The current offset into the chunk.
     * We start by putting the filename and function immediately
     * after the end of the buffer. */
    uint32_t    off;
    /** Offset from the start of this structure to the file name. */
    uint32_t    offFile;
    /** The line number. */
    uint32_t    iLine;
    /** Offset from the start of this structure to the function name. */
    uint32_t    offFunction;
    /** Offset from the start of this structure to the formatted message text. */
    uint32_t    offMessage;
    /** The VBox status code. */
    int32_t     rc;
} VMERROR, *PVMERROR;
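
/*
 * Illustrative sketch, not part of the original header: the file name,
 * function name and message text live in the same heap chunk as the VMERROR
 * header, so they can be recovered by adding the stored offsets to the
 * structure address. The helper name below is hypothetical.
 */
#if 0
DECLINLINE(const char *) vmErrorGetMessage(PVMERROR pErr)
{
    /* An offset of zero means the field was never written into the chunk. */
    return pErr->offMessage ? (const char *)pErr + pErr->offMessage : NULL;
}
#endif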


/**
 * VM runtime error callback.
 */
typedef struct VMATRUNTIMEERROR
{
    /** Pointer to the next one. */
    struct VMATRUNTIMEERROR    *pNext;
    /** Pointer to the callback. */
    PFNVMATRUNTIMEERROR         pfnAtRuntimeError;
    /** The user argument. */
    void                       *pvUser;
} VMATRUNTIMEERROR;
/** Pointer to a VM runtime error callback. */
typedef VMATRUNTIMEERROR *PVMATRUNTIMEERROR;


/**
 * Chunk of memory allocated off the hypervisor heap in which
 * we copy the runtime error details.
 */
typedef struct VMRUNTIMEERROR
{
    /** The size of the chunk. */
    uint32_t    cbAllocated;
    /** The current offset into the chunk.
     * We start by putting the error ID immediately
     * after the end of the buffer. */
    uint32_t    off;
    /** Offset from the start of this structure to the error ID. */
    uint32_t    offErrorID;
    /** Offset from the start of this structure to the formatted message text. */
    uint32_t    offMessage;
    /** Whether the error is fatal or not. */
    bool        fFatal;
} VMRUNTIMEERROR, *PVMRUNTIMEERROR;

/** The halt method. */
typedef enum
{
    /** The usual invalid value. */
    VMHALTMETHOD_INVALID = 0,
    /** Use the method used during bootstrapping. */
    VMHALTMETHOD_BOOTSTRAP,
    /** Use the default method. */
    VMHALTMETHOD_DEFAULT,
    /** The old spin/yield/block method. */
    VMHALTMETHOD_OLD,
    /** The first go at a block/spin method. */
    VMHALTMETHOD_1,
    /** The first go at a more global approach. */
    VMHALTMETHOD_GLOBAL_1,
    /** The end of valid methods. (not inclusive of course) */
    VMHALTMETHOD_END,
    /** The usual 32-bit max value. */
    VMHALTMETHOD_32BIT_HACK = 0x7fffffff
} VMHALTMETHOD;
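
/*
 * Illustrative sketch, not part of the original header: the kind of
 * name-to-value table a configuration reader could consult when resolving the
 * 'VM/HaltMethod' CFGM string (mentioned further below) to a VMHALTMETHOD
 * value. The table and the strings in it are hypothetical.
 */
#if 0
static const struct { const char *pszName; VMHALTMETHOD enmMethod; } g_aHaltMethodNames[] =
{
    { "bootstrap", VMHALTMETHOD_BOOTSTRAP },
    { "default",   VMHALTMETHOD_DEFAULT   },
    { "old",       VMHALTMETHOD_OLD       },
    { "1",         VMHALTMETHOD_1         },
    { "global1",   VMHALTMETHOD_GLOBAL_1  },
};
#endif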


/**
 * VM Internal Data (part of the VM structure).
 *
 * @todo Move this and all related things to VMM. The VM component was, to some
 *       extent at least, a bad ad hoc design which should all have been put in
 *       VMM. @see pg_vm.
 */
typedef struct VMINT
{
    /** VM Error Message. */
    R3PTRTYPE(PVMERROR)         pErrorR3;
    /** VM Runtime Error Message. */
    R3PTRTYPE(PVMRUNTIMEERROR)  pRuntimeErrorR3;
    /** Set by VMR3SuspendNoSave; cleared by VMR3Resume; signals the VM is in an
     * inconsistent state and saving is not allowed. */
    bool                        fPreventSaveState;
} VMINT;
/** Pointer to the VM Internal Data (part of the VM structure). */
typedef VMINT *PVMINT;


/**
 * VM internal data kept in the UVM.
 */
typedef struct VMINTUSERPERVM
{
    /** Head of the request queue. Atomic. */
    volatile PVMREQ         pReqs;
    /** The last index used during alloc/free. */
    volatile uint32_t       iReqFree;
    /** Number of free request packets. */
    volatile uint32_t       cReqFree;
    /** Array of pointers to lists of free request packets. Atomic.
     * (See the illustrative free-list push sketch further below.) */
    volatile PVMREQ         apReqFree[9];

#ifdef VBOX_WITH_STATISTICS
    /** Number of VMR3ReqAlloc returning a new packet. */
    STAMCOUNTER             StatReqAllocNew;
    /** Number of VMR3ReqAlloc causing races. */
    STAMCOUNTER             StatReqAllocRaces;
    /** Number of VMR3ReqAlloc returning a recycled packet. */
    STAMCOUNTER             StatReqAllocRecycled;
    /** Number of VMR3ReqFree calls. */
    STAMCOUNTER             StatReqFree;
    /** Number of times the request was actually freed. */
    STAMCOUNTER             StatReqFreeOverflow;
#endif

    /** Pointer to the support library session.
     * Mainly for creation and destruction. */
    PSUPDRVSESSION          pSession;

    /** The handle to the EMT thread. */
    RTTHREAD                ThreadEMT;
    /** The native handle of the EMT thread. */
    RTNATIVETHREAD          NativeThreadEMT;
    /** Wait event semaphore. */
    RTSEMEVENT              EventSemWait;
    /** Wait/Idle indicator. */
    bool volatile           fWait;
    /** Force EMT to terminate. */
    bool volatile           fTerminateEMT;
    /** If set, the EMT does the final VM cleanup when it exits.
     * If clear, the VMR3Destroy() caller does so. */
    bool                    fEMTDoesTheCleanup;

    /** @name Generic Halt data
     * @{
     */
    /** The current halt method.
     * Can be selected by the CFGM option 'VM/HaltMethod'. */
    VMHALTMETHOD            enmHaltMethod;
    /** The index into g_aHaltMethods of the current halt method. */
    uint32_t volatile       iHaltMethod;
    /** The average time (ns) between two halts in the last second. (updated once per second) */
    uint32_t                HaltInterval;
    /** The average halt frequency for the last second. (updated once per second) */
    uint32_t                HaltFrequency;
    /** The number of halts in the current period. */
    uint32_t                cHalts;
    uint32_t                padding; /**< alignment padding. */
    /** When we started counting halts in cHalts (RTTimeNanoTS). */
    uint64_t                u64HaltsStartTS;
    /** @} */

    /** Union containing data and config for the different halt algorithms. */
    union
    {
        /**
         * Method 1 & 2 - Block whenever possible, and when lagging behind
         * switch to spinning with regular blocking every 5-200ms (defaults)
         * depending on the accumulated lag. The blocking interval is adjusted
         * with the average oversleeping of the last 64 times.
         *
         * The difference between 1 and 2 is that we use native absolute
         * time APIs for the blocking instead of the millisecond based IPRT
         * interface. (An illustrative blocking-interval computation is
         * sketched right after this structure.)
         */
        struct
        {
            /** How many times we've blocked while cNSBlocked and cNSBlockedTooLong have been accumulating. */
            uint32_t            cBlocks;
            /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
            uint64_t            cNSBlockedTooLongAvg;
            /** Total time spent oversleeping when blocking. */
            uint64_t            cNSBlockedTooLong;
            /** Total time spent blocking. */
            uint64_t            cNSBlocked;
            /** The timestamp (RTTimeNanoTS) of the last block. */
            uint64_t            u64LastBlockTS;

            /** When we started spinning relentlessly in order to catch up some of the oversleeping.
             * This is 0 when we're not spinning. */
            uint64_t            u64StartSpinTS;

            /** The minimum interval between blocking (when spinning). */
            uint32_t            u32MinBlockIntervalCfg;
            /** The maximum interval between blocking (when spinning). */
            uint32_t            u32MaxBlockIntervalCfg;
            /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
            uint32_t            u32LagBlockIntervalDivisorCfg;
            /** When to start spinning (lag in nanoseconds). */
            uint32_t            u32StartSpinningCfg;
            /** When to stop spinning (lag in nanoseconds). */
            uint32_t            u32StopSpinningCfg;
        } Method12;

#if 0
        /**
         * Method 3 & 4 - Same as method 1 & 2 respectively, except that we
         * sprinkle it with yields.
         */
        struct
        {
            /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS have been accumulating. */
            uint32_t            cBlocks;
            /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
            uint64_t            cBlockedTooLongNSAvg;
            /** Total time spent oversleeping when blocking. */
            uint64_t            cBlockedTooLongNS;
            /** Total time spent blocking. */
            uint64_t            cBlockedNS;
            /** The timestamp (RTTimeNanoTS) of the last block. */
            uint64_t            u64LastBlockTS;

            /** How many times we've yielded while cYieldedNS and cYieldTooLongNS have been accumulating. */
            uint32_t            cYields;
            /** Avg. time spent oversleeping when yielding. */
            uint32_t            cYieldTooLongNSAvg;
            /** Total time spent oversleeping when yielding. */
            uint64_t            cYieldTooLongNS;
            /** Total time spent yielding. */
            uint64_t            cYieldedNS;
            /** The timestamp (RTTimeNanoTS) of the last yield. */
            uint64_t            u64LastYieldTS;

            /** When we started spinning relentlessly in order to catch up some of the oversleeping. */
            uint64_t            u64StartSpinTS;
        } Method34;
#endif
    } Halt;

    /** Profiling the halted state; yielding vs blocking.
     * @{ */
    STAMPROFILE             StatHaltYield;
    STAMPROFILE             StatHaltBlock;
    STAMPROFILE             StatHaltTimers;
    STAMPROFILE             StatHaltPoll;
    /** @} */


    /** List of registered reset callbacks. */
    PVMATRESET              pAtReset;
    /** Where to insert the next registered reset callback. */
    PVMATRESET             *ppAtResetNext;

    /** List of registered state change callbacks. */
    PVMATSTATE              pAtState;
    /** Where to insert the next registered state change callback. */
    PVMATSTATE             *ppAtStateNext;

    /** List of registered error callbacks. */
    PVMATERROR              pAtError;
    /** Where to insert the next registered error callback. */
    PVMATERROR             *ppAtErrorNext;

    /** List of registered runtime error callbacks. */
    PVMATRUNTIMEERROR       pAtRuntimeError;
    /** Where to insert the next registered runtime error callback. */
    PVMATRUNTIMEERROR      *ppAtRuntimeErrorNext;

    /** Pointer to the DBGC instance data. */
    void                   *pvDBGC;


    /** vmR3EmulationThread longjmp buffer. Must be last in the structure. */
    jmp_buf                 emtJumpEnv;
} VMINTUSERPERVM;
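
/*
 * Illustrative sketch, an assumption based on the Method12 field comments
 * above rather than the actual vmR3HaltMethod12 code: while spinning, the raw
 * blocking interval is derived from the accumulated lag, clamped to the
 * configured bounds, and reduced by the measured average oversleep so the
 * next block tends not to overshoot again. The helper name, the cNSLag
 * parameter and the pUVM->vm.s access path are hypothetical.
 */
#if 0
static uint64_t vmR3HaltMethod12CalcIntervalSketch(PUVM pUVM, uint64_t cNSLag)
{
    /* Raw interval from the lag, then clamp to the configured min/max. */
    uint64_t cNSInterval = cNSLag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg;
    cNSInterval = RT_MAX(cNSInterval, pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg);
    cNSInterval = RT_MIN(cNSInterval, pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg);
    /* Compensate for the average oversleep observed so far. */
    if (cNSInterval > pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
        cNSInterval -= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
    return cNSInterval;
}
#endif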

/** Pointer to the VM internal data kept in the UVM. */
typedef VMINTUSERPERVM *PVMINTUSERPERVM;
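
/*
 * Illustrative sketch, not the actual VMReq.cpp code: freeing a request
 * packet can push it onto one of the apReqFree lists with an atomic compare
 * and exchange, which is why no lock is needed; iReqFree merely spreads the
 * traffic across the array slots. The helper name is hypothetical, and the
 * pUVM->vm.s access path plus the iprt/asm.h ASMAtomic* calls are assumptions.
 */
#if 0
static void vmReqFreePushSketch(PUVM pUVM, PVMREQ pReq)
{
    /* Pick a slot; the exact slot does not matter, it only spreads contention. */
    const uint32_t iSlot = ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree);
    PVMREQ pHead;
    do
    {
        pHead = pUVM->vm.s.apReqFree[iSlot];
        pReq->pNext = pHead;
    } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVM->vm.s.apReqFree[iSlot], pReq, pHead));
}
#endif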


/**
 * VMCPU internal data kept in the UVM.
 *
 * Almost a copy of VMINTUSERPERVM; the data will be separated out properly later on.
 */
typedef struct VMINTUSERPERVMCPU
{
    /** Head of the request queue. Atomic. */
    volatile PVMREQ         pReqs;

    /** The handle to the EMT thread. */
    RTTHREAD                ThreadEMT;
    /** The native handle of the EMT thread. */
    RTNATIVETHREAD          NativeThreadEMT;
    /** Wait event semaphore. */
    RTSEMEVENT              EventSemWait;
    /** Wait/Idle indicator. */
    bool volatile           fWait;
    /** Force EMT to terminate. */
    bool volatile           fTerminateEMT;
    /** If set, the EMT does the final VM cleanup when it exits.
     * If clear, the VMR3Destroy() caller does so. */
    bool                    fEMTDoesTheCleanup;

    /** @name Generic Halt data
     * @{
     */
    /** The current halt method.
     * Can be selected by the CFGM option 'VM/HaltMethod'. */
    VMHALTMETHOD            enmHaltMethod;
    /** The index into g_aHaltMethods of the current halt method. */
    uint32_t volatile       iHaltMethod;
    /** The average time (ns) between two halts in the last second. (updated once per second) */
    uint32_t                HaltInterval;
    /** The average halt frequency for the last second. (updated once per second) */
    uint32_t                HaltFrequency;
    /** The number of halts in the current period. */
    uint32_t                cHalts;
    uint32_t                padding; /**< alignment padding. */
    /** When we started counting halts in cHalts (RTTimeNanoTS). */
    uint64_t                u64HaltsStartTS;
    /** @} */

    /** Union containing data and config for the different halt algorithms. */
    union
    {
        /**
         * Method 1 & 2 - Block whenever possible, and when lagging behind
         * switch to spinning with regular blocking every 5-200ms (defaults)
         * depending on the accumulated lag. The blocking interval is adjusted
         * with the average oversleeping of the last 64 times.
         *
         * The difference between 1 and 2 is that we use native absolute
         * time APIs for the blocking instead of the millisecond based IPRT
         * interface.
         */
        struct
        {
            /** How many times we've blocked while cNSBlocked and cNSBlockedTooLong have been accumulating. */
            uint32_t            cBlocks;
            /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
            uint64_t            cNSBlockedTooLongAvg;
            /** Total time spent oversleeping when blocking. */
            uint64_t            cNSBlockedTooLong;
            /** Total time spent blocking. */
            uint64_t            cNSBlocked;
            /** The timestamp (RTTimeNanoTS) of the last block. */
            uint64_t            u64LastBlockTS;

            /** When we started spinning relentlessly in order to catch up some of the oversleeping.
             * This is 0 when we're not spinning. */
            uint64_t            u64StartSpinTS;

            /** The minimum interval between blocking (when spinning). */
            uint32_t            u32MinBlockIntervalCfg;
            /** The maximum interval between blocking (when spinning). */
            uint32_t            u32MaxBlockIntervalCfg;
            /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
            uint32_t            u32LagBlockIntervalDivisorCfg;
            /** When to start spinning (lag in nanoseconds). */
            uint32_t            u32StartSpinningCfg;
            /** When to stop spinning (lag in nanoseconds). */
            uint32_t            u32StopSpinningCfg;
        } Method12;

#if 0
        /**
         * Method 3 & 4 - Same as method 1 & 2 respectively, except that we
         * sprinkle it with yields.
         */
        struct
        {
            /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS have been accumulating. */
            uint32_t            cBlocks;
            /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
            uint64_t            cBlockedTooLongNSAvg;
            /** Total time spent oversleeping when blocking. */
            uint64_t            cBlockedTooLongNS;
            /** Total time spent blocking. */
            uint64_t            cBlockedNS;
            /** The timestamp (RTTimeNanoTS) of the last block. */
            uint64_t            u64LastBlockTS;

            /** How many times we've yielded while cYieldedNS and cYieldTooLongNS have been accumulating. */
            uint32_t            cYields;
            /** Avg. time spent oversleeping when yielding. */
            uint32_t            cYieldTooLongNSAvg;
            /** Total time spent oversleeping when yielding. */
            uint64_t            cYieldTooLongNS;
            /** Total time spent yielding. */
            uint64_t            cYieldedNS;
            /** The timestamp (RTTimeNanoTS) of the last yield. */
            uint64_t            u64LastYieldTS;

            /** When we started spinning relentlessly in order to catch up some of the oversleeping. */
            uint64_t            u64StartSpinTS;
        } Method34;
#endif
    } Halt;

    /** Profiling the halted state; yielding vs blocking.
     * @{ */
    STAMPROFILE             StatHaltYield;
    STAMPROFILE             StatHaltBlock;
    STAMPROFILE             StatHaltTimers;
    STAMPROFILE             StatHaltPoll;
    /** @} */

    /** Pointer to the DBGC instance data. */
    void                   *pvDBGC;


    /** vmR3EmulationThread longjmp buffer. Must be last in the structure. */
    jmp_buf                 emtJumpEnv;
} VMINTUSERPERVMCPU;

/** Pointer to the VMCPU internal data kept in the UVM. */
typedef VMINTUSERPERVMCPU *PVMINTUSERPERVMCPU;

DECLCALLBACK(int)  vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArg);
int                vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod);
DECLCALLBACK(int)  vmR3Destroy(PVM pVM);
DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *args);
void               vmSetErrorCopy(PVM pVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list args);
DECLCALLBACK(void) vmR3SetRuntimeErrorV(PVM pVM, bool fFatal, const char *pszErrorID, const char *pszFormat, va_list *args);
void               vmSetRuntimeErrorCopy(PVM pVM, bool fFatal, const char *pszErrorID, const char *pszFormat, va_list args);
void               vmR3DestroyFinalBitFromEMT(PUVM pUVM);
void               vmR3SetState(PVM pVM, VMSTATE enmStateNew);


/** @} */

#endif
