VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDRVShared.c@2676

Last change on this file since 2676 was 2515, checked in by vboxsync, 17 years ago

Implemented non-IDT patching mode for windows (not used anywhere yet but useful for debugging).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 146.1 KB
1/** @file
2 *
3 * VBox host drivers - Ring-0 support drivers - Shared code:
4 * Driver code for all host platforms
5 */
6
7/*
8 * Copyright (C) 2006 InnoTek Systemberatung GmbH
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License as published by the Free Software Foundation,
14 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
15 * distribution. VirtualBox OSE is distributed in the hope that it will
16 * be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * If you received this file as part of a commercial VirtualBox
19 * distribution, then only the terms of your commercial VirtualBox
20 * license agreement apply instead of the previous paragraph.
21 */
22
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#include "SUPDRV.h"
28#ifndef PAGE_SHIFT
29# include <iprt/param.h>
30#endif
31#include <iprt/alloc.h>
32#include <iprt/semaphore.h>
33#include <iprt/spinlock.h>
34#include <iprt/thread.h>
35#include <iprt/process.h>
36#include <iprt/log.h>
37#ifdef VBOX_WITHOUT_IDT_PATCHING
38# include <VBox/vmm.h>
39# include <VBox/err.h>
40#endif
41
42
43/*******************************************************************************
44* Defined Constants And Macros *
45*******************************************************************************/
46/* from x86.h - clashes with Linux, thus this duplication */
47#undef X86_CR0_PG
48#define X86_CR0_PG BIT(31)
49#undef X86_CR0_PE
50#define X86_CR0_PE BIT(0)
51#undef X86_CPUID_AMD_FEATURE_EDX_NX
52#define X86_CPUID_AMD_FEATURE_EDX_NX BIT(20)
53#undef MSR_K6_EFER
54#define MSR_K6_EFER 0xc0000080
55#undef MSR_K6_EFER_NXE
56#define MSR_K6_EFER_NXE BIT(11)
57#undef MSR_K6_EFER_LMA
58#define MSR_K6_EFER_LMA BIT(10)
59#undef X86_CR4_PGE
60#define X86_CR4_PGE BIT(7)
61#undef X86_CR4_PAE
62#define X86_CR4_PAE BIT(5)
63#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
64#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE BIT(29)
65
66
67/** The frequency by which we recalculate the u32UpdateHz and
68 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
69#define GIP_UPDATEHZ_RECALC_FREQ 0x800
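/* Illustrative sketch (added commentary, not part of the original source): because the
 * recalculation frequency is a power of two, the GIP update code can detect a
 * recalculation tick with a cheap mask instead of a modulo, e.g.:
 *
 *     if (!(u64TickCount & (GIP_UPDATEHZ_RECALC_FREQ - 1)))
 *         // recalculate u32UpdateHz / u32UpdateIntervalNS here
 *
 * u64TickCount is a hypothetical name for the running GIP update counter. */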
70
71
72/*******************************************************************************
73* Global Variables *
74*******************************************************************************/
75/**
76 * Array of the R0 SUP API.
77 */
78static SUPFUNC g_aFunctions[] =
79{
80 /* name function */
81 { "SUPR0ObjRegister", (void *)SUPR0ObjRegister },
82 { "SUPR0ObjAddRef", (void *)SUPR0ObjAddRef },
83 { "SUPR0ObjRelease", (void *)SUPR0ObjRelease },
84 { "SUPR0ObjVerifyAccess", (void *)SUPR0ObjVerifyAccess },
85 { "SUPR0LockMem", (void *)SUPR0LockMem },
86 { "SUPR0UnlockMem", (void *)SUPR0UnlockMem },
87 { "SUPR0ContAlloc", (void *)SUPR0ContAlloc },
88 { "SUPR0ContFree", (void *)SUPR0ContFree },
89 { "SUPR0MemAlloc", (void *)SUPR0MemAlloc },
90 { "SUPR0MemGetPhys", (void *)SUPR0MemGetPhys },
91 { "SUPR0MemFree", (void *)SUPR0MemFree },
92 { "SUPR0Printf", (void *)SUPR0Printf },
93 { "RTMemAlloc", (void *)RTMemAlloc },
94 { "RTMemAllocZ", (void *)RTMemAllocZ },
95 { "RTMemFree", (void *)RTMemFree },
96/* These don't work yet on Linux - use fast mutexes!
97 { "RTSemMutexCreate", (void *)RTSemMutexCreate },
98 { "RTSemMutexRequest", (void *)RTSemMutexRequest },
99 { "RTSemMutexRelease", (void *)RTSemMutexRelease },
100 { "RTSemMutexDestroy", (void *)RTSemMutexDestroy },
101*/
102 { "RTSemFastMutexCreate", (void *)RTSemFastMutexCreate },
103 { "RTSemFastMutexDestroy", (void *)RTSemFastMutexDestroy },
104 { "RTSemFastMutexRequest", (void *)RTSemFastMutexRequest },
105 { "RTSemFastMutexRelease", (void *)RTSemFastMutexRelease },
106 { "RTSemEventCreate", (void *)RTSemEventCreate },
107 { "RTSemEventSignal", (void *)RTSemEventSignal },
108 { "RTSemEventWait", (void *)RTSemEventWait },
109 { "RTSemEventDestroy", (void *)RTSemEventDestroy },
110 { "RTSpinlockCreate", (void *)RTSpinlockCreate },
111 { "RTSpinlockDestroy", (void *)RTSpinlockDestroy },
112 { "RTSpinlockAcquire", (void *)RTSpinlockAcquire },
113 { "RTSpinlockRelease", (void *)RTSpinlockRelease },
114 { "RTSpinlockAcquireNoInts", (void *)RTSpinlockAcquireNoInts },
115 { "RTSpinlockReleaseNoInts", (void *)RTSpinlockReleaseNoInts },
116 { "RTThreadNativeSelf", (void *)RTThreadNativeSelf },
117 { "RTThreadSleep", (void *)RTThreadSleep },
118 { "RTThreadYield", (void *)RTThreadYield },
119#if 0 /* Thread APIs, Part 2. */
120 { "RTThreadSelf", (void *)RTThreadSelf },
121 { "RTThreadCreate", (void *)RTThreadCreate },
122 { "RTThreadGetNative", (void *)RTThreadGetNative },
123 { "RTThreadWait", (void *)RTThreadWait },
124 { "RTThreadWaitNoResume", (void *)RTThreadWaitNoResume },
125 { "RTThreadGetName", (void *)RTThreadGetName },
126 { "RTThreadSelfName", (void *)RTThreadSelfName },
127 { "RTThreadGetType", (void *)RTThreadGetType },
128 { "RTThreadUserSignal", (void *)RTThreadUserSignal },
129 { "RTThreadUserReset", (void *)RTThreadUserReset },
130 { "RTThreadUserWait", (void *)RTThreadUserWait },
131 { "RTThreadUserWaitNoResume", (void *)RTThreadUserWaitNoResume },
132#endif
133 { "RTLogDefaultInstance", (void *)RTLogDefaultInstance },
134 { "RTLogRelDefaultInstance", (void *)RTLogRelDefaultInstance },
135 { "RTLogSetDefaultInstanceThread", (void *)RTLogSetDefaultInstanceThread },
136 { "RTLogLogger", (void *)RTLogLogger },
137 { "RTLogLoggerEx", (void *)RTLogLoggerEx },
138 { "RTLogLoggerExV", (void *)RTLogLoggerExV },
139 { "AssertMsg1", (void *)AssertMsg1 },
140 { "AssertMsg2", (void *)AssertMsg2 },
141};
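/* Illustrative sketch (not part of the driver): ring-3 receives a copy of this table
 * via SUP_IOCTL_QUERY_FUNCS and can resolve an entry point by name with a linear
 * scan. The helper below is hypothetical; the SUPFUNC member names szName/pfn are
 * assumed from the initializers above, and the standard strcmp is assumed available. */
#if 0
static void *supExampleQueryFuncByName(const SUPFUNC *paFuncs, unsigned cFuncs, const char *pszName)
{
    unsigned i;
    for (i = 0; i < cFuncs; i++)
        if (!strcmp(paFuncs[i].szName, pszName))
            return paFuncs[i].pfn;      /* found the exported R0 function */
    return NULL;                        /* not exported by this driver */
}
#endif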
142
143
144/*******************************************************************************
145* Internal Functions *
146*******************************************************************************/
147__BEGIN_DECLS
148static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
149static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
150#ifndef VBOX_WITHOUT_IDT_PATCHING
151static int supdrvIOCtl_IdtInstall(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPIDTINSTALL_IN pIn, PSUPIDTINSTALL_OUT pOut);
152static PSUPDRVPATCH supdrvIdtPatchOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch);
153static int supdrvIOCtl_IdtRemoveAll(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession);
154static void supdrvIdtRemoveOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch);
155static void supdrvIdtWrite(volatile void *pvIdtEntry, const SUPDRVIDTE *pNewIDTEntry);
156#endif /* !VBOX_WITHOUT_IDT_PATCHING */
157static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN_IN pIn, PSUPLDROPEN_OUT pOut);
158static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD_IN pIn);
159static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE_IN pIn);
160static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL_IN pIn, PSUPLDRGETSYMBOL_OUT pOut);
161static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry);
162static void supdrvLdrUnsetR0EP(PSUPDRVDEVEXT pDevExt);
163static void supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
164static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
165static int supdrvIOCtl_GetPagingMode(PSUPGETPAGINGMODE_OUT pOut);
166static SUPGIPMODE supdrvGipDeterminTscMode(void);
167#ifdef USE_NEW_OS_INTERFACE
168static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
169static int supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
170static DECLCALLBACK(void) supdrvGipTimer(PRTTIMER pTimer, void *pvUser);
171#endif
172
173__END_DECLS
174
175
176/**
177 * Initializes the device extension structure.
178 *
179 * @returns 0 on success.
180 * @returns SUPDRV_ERR_* on failure.
181 * @param pDevExt The device extension to initialize.
182 */
183int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
184{
185 /*
186 * Initialize it.
187 */
188 int rc;
189 memset(pDevExt, 0, sizeof(*pDevExt));
190 rc = RTSpinlockCreate(&pDevExt->Spinlock);
191 if (!rc)
192 {
193 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
194 if (!rc)
195 {
196 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
197 if (!rc)
198 {
199#ifdef USE_NEW_OS_INTERFACE
200 rc = supdrvGipCreate(pDevExt);
201 if (RT_SUCCESS(rc))
202 {
203 pDevExt->u32Cookie = BIRD;
204 return 0;
205 }
206#else
207 pDevExt->u32Cookie = BIRD;
208 return 0;
209#endif
210 }
211 RTSemFastMutexDestroy(pDevExt->mtxLdr);
212 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
213 }
214 RTSpinlockDestroy(pDevExt->Spinlock);
215 pDevExt->Spinlock = NIL_RTSPINLOCK;
216 }
217 return rc;
218}
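/* Illustrative sketch (not part of the driver): the OS specific glue is expected to
 * pair supdrvInitDevExt() at module load with supdrvDeleteDevExt() at unload. The
 * entry point names below are hypothetical and error translation is omitted. */
#if 0
static SUPDRVDEVEXT g_ExampleDevExt;

static int supExampleOsModuleInit(void)
{
    int rc = supdrvInitDevExt(&g_ExampleDevExt);
    if (rc)
        return rc;                          /* translate to an OS status as needed */
    /* ... register the device node and ioctl entry points here ... */
    return 0;
}

static void supExampleOsModuleTerm(void)
{
    /* ... deregister the device node first so no new sessions can appear ... */
    supdrvDeleteDevExt(&g_ExampleDevExt);
}
#endif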
219
220/**
221 * Delete the device extension (e.g. cleanup members).
222 *
223 * @returns 0.
224 * @param pDevExt The device extension to delete.
225 */
226int VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
227{
228#ifndef VBOX_WITHOUT_IDT_PATCHING
229 PSUPDRVPATCH pPatch;
230#endif
231 PSUPDRVOBJ pObj;
232 PSUPDRVUSAGE pUsage;
233
234 /*
235 * Kill mutexes and spinlocks.
236 */
237 RTSemFastMutexDestroy(pDevExt->mtxGip);
238 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
239 RTSemFastMutexDestroy(pDevExt->mtxLdr);
240 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
241 RTSpinlockDestroy(pDevExt->Spinlock);
242 pDevExt->Spinlock = NIL_RTSPINLOCK;
243
244 /*
245 * Free lists.
246 */
247
248#ifndef VBOX_WITHOUT_IDT_PATCHING
249 /* patches */
250 /** @todo make sure we don't uninstall patches which have been patched by someone else. */
251 pPatch = pDevExt->pIdtPatchesFree;
252 pDevExt->pIdtPatchesFree = NULL;
253 while (pPatch)
254 {
255 void *pvFree = pPatch;
256 pPatch = pPatch->pNext;
257 RTMemExecFree(pvFree);
258 }
259#endif /* !VBOX_WITHOUT_IDT_PATCHING */
260
261 /* objects. */
262 pObj = pDevExt->pObjs;
263#if !defined(DEBUG_bird) || !defined(__LINUX__) /* breaks unloading, temporary, remove me! */
264 Assert(!pObj); /* (can trigger on forced unloads) */
265#endif
266 pDevExt->pObjs = NULL;
267 while (pObj)
268 {
269 void *pvFree = pObj;
270 pObj = pObj->pNext;
271 RTMemFree(pvFree);
272 }
273
274 /* usage records. */
275 pUsage = pDevExt->pUsageFree;
276 pDevExt->pUsageFree = NULL;
277 while (pUsage)
278 {
279 void *pvFree = pUsage;
280 pUsage = pUsage->pNext;
281 RTMemFree(pvFree);
282 }
283
284#ifdef USE_NEW_OS_INTERFACE
285 /* kill the GIP */
286 supdrvGipDestroy(pDevExt);
287#endif
288
289 return 0;
290}
291
292
293/**
294 * Create session.
295 *
296 * @returns 0 on success.
297 * @returns SUPDRV_ERR_* on failure.
298 * @param pDevExt Device extension.
299 * @param ppSession Where to store the pointer to the session data.
300 */
301int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
302{
303 /*
304 * Allocate memory for the session data.
305 */
306 int rc = SUPDRV_ERR_NO_MEMORY;
307 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
308 if (pSession)
309 {
310 /* Initialize session data. */
311 rc = RTSpinlockCreate(&pSession->Spinlock);
312 if (!rc)
313 {
314 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
315 pSession->pDevExt = pDevExt;
316 pSession->u32Cookie = BIRD_INV;
317 /*pSession->pLdrUsage = NULL;
318 pSession->pPatchUsage = NULL;
319 pSession->pUsage = NULL;
320 pSession->pGip = NULL;
321 pSession->fGipReferenced = false;
322 pSession->Bundle.cUsed = 0 */
323
324 dprintf(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
325 return 0;
326 }
327
328 RTMemFree(pSession);
329 *ppSession = NULL;
330 }
331
332 dprintf(("Failed to create spinlock, rc=%d!\n", rc));
333 return rc;
334}
335
336
337/**
338 * Shared code for cleaning up a session.
339 *
340 * @param pDevExt Device extension.
341 * @param pSession Session data.
342 * This data will be freed by this routine.
343 */
344void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
345{
346 /*
347 * Cleanup the session first.
348 */
349 supdrvCleanupSession(pDevExt, pSession);
350
351 /*
352 * Free the rest of the session stuff.
353 */
354 RTSpinlockDestroy(pSession->Spinlock);
355 pSession->Spinlock = NIL_RTSPINLOCK;
356 pSession->pDevExt = NULL;
357 RTMemFree(pSession);
358 dprintf2(("supdrvCloseSession: returns\n"));
359}
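/* Illustrative sketch (not part of the driver): the per-OS file-close handler normally
 * just calls supdrvCloseSession(), which both cleans up and frees the session; platforms
 * that must release memory before the handle goes away use supdrvCleanupSession() (below)
 * first and free later. The handler name is hypothetical. */
#if 0
static void supExampleOsClose(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    supdrvCloseSession(pDevExt, pSession);      /* also frees pSession */
}
#endif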
360
361
362/**
363 * Shared code for cleaning up a session (but not quite freeing it).
364 *
365 * This is primarily intended for Mac OS X where we have to clean up the memory
366 * stuff before the file handle is closed.
367 *
368 * @param pDevExt Device extension.
369 * @param pSession Session data.
370 * This data will be freed by this routine.
371 */
372void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
373{
374 PSUPDRVBUNDLE pBundle;
375 dprintf(("supdrvCleanupSession: pSession=%p\n", pSession));
376
377 /*
378 * Remove logger instances related to this session.
379 * (This assumes the dprintf and dprintf2 macros don't use the normal logging.)
380 */
381 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
382
383#ifndef VBOX_WITHOUT_IDT_PATCHING
384 /*
385 * Uninstall any IDT patches installed for this session.
386 */
387 supdrvIOCtl_IdtRemoveAll(pDevExt, pSession);
388#endif
389
390 /*
391 * Release object references made in this session.
392 * In theory there should be no one racing us in this session.
393 */
394 dprintf2(("release objects - start\n"));
395 if (pSession->pUsage)
396 {
397 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
398 PSUPDRVUSAGE pUsage;
399 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
400
401 while ((pUsage = pSession->pUsage) != NULL)
402 {
403 PSUPDRVOBJ pObj = pUsage->pObj;
404 pSession->pUsage = pUsage->pNext;
405
406 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
407 if (pUsage->cUsage < pObj->cUsage)
408 {
409 pObj->cUsage -= pUsage->cUsage;
410 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
411 }
412 else
413 {
414 /* Destroy the object and free the record. */
415 if (pDevExt->pObjs == pObj)
416 pDevExt->pObjs = pObj->pNext;
417 else
418 {
419 PSUPDRVOBJ pObjPrev;
420 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
421 if (pObjPrev->pNext == pObj)
422 {
423 pObjPrev->pNext = pObj->pNext;
424 break;
425 }
426 Assert(pObjPrev);
427 }
428 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
429
430 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
431 RTMemFree(pObj);
432 }
433
434 /* free it and continue. */
435 RTMemFree(pUsage);
436
437 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
438 }
439
440 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
441 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
442 }
443 dprintf2(("release objects - done\n"));
444
445 /*
446 * Release memory allocated in the session.
447 *
448 * We do not serialize this as we assume that the application will
449 * not allocate memory while closing the file handle object.
450 */
451 dprintf2(("freeing memory:\n"));
452 pBundle = &pSession->Bundle;
453 while (pBundle)
454 {
455 PSUPDRVBUNDLE pToFree;
456 unsigned i;
457
458 /*
459 * Check and unlock all entries in the bundle.
460 */
461 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
462 {
463#ifdef USE_NEW_OS_INTERFACE
464 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
465 {
466 int rc;
467 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
468 {
469 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
470 AssertRC(rc); /** @todo figure out how to handle this. */
471 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
472 }
473 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, false);
474 AssertRC(rc); /** @todo figure out how to handle this. */
475 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
476 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
477 }
478
479#else /* !USE_NEW_OS_INTERFACE */
480 if ( pBundle->aMem[i].pvR0
481 || pBundle->aMem[i].pvR3)
482 {
483 dprintf2(("eType=%d pvR0=%p pvR3=%p cb=%d\n", pBundle->aMem[i].eType,
484 pBundle->aMem[i].pvR0, pBundle->aMem[i].pvR3, pBundle->aMem[i].cb));
485 switch (pBundle->aMem[i].eType)
486 {
487 case MEMREF_TYPE_LOCKED:
488 supdrvOSUnlockMemOne(&pBundle->aMem[i]);
489 break;
490 case MEMREF_TYPE_CONT:
491 supdrvOSContFreeOne(&pBundle->aMem[i]);
492 break;
493 case MEMREF_TYPE_LOW:
494 supdrvOSLowFreeOne(&pBundle->aMem[i]);
495 break;
496 case MEMREF_TYPE_MEM:
497 supdrvOSMemFreeOne(&pBundle->aMem[i]);
498 break;
499 default:
500 break;
501 }
502 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
503 }
504#endif /* !USE_NEW_OS_INTERFACE */
505 }
506
507 /*
508 * Advance and free previous bundle.
509 */
510 pToFree = pBundle;
511 pBundle = pBundle->pNext;
512
513 pToFree->pNext = NULL;
514 pToFree->cUsed = 0;
515 if (pToFree != &pSession->Bundle)
516 RTMemFree(pToFree);
517 }
518 dprintf2(("freeing memory - done\n"));
519
520 /*
521 * Loaded images need to be dereferenced and possibly freed up.
522 */
523 RTSemFastMutexRequest(pDevExt->mtxLdr);
524 dprintf2(("freeing images:\n"));
525 if (pSession->pLdrUsage)
526 {
527 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
528 pSession->pLdrUsage = NULL;
529 while (pUsage)
530 {
531 void *pvFree = pUsage;
532 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
533 if (pImage->cUsage > pUsage->cUsage)
534 pImage->cUsage -= pUsage->cUsage;
535 else
536 supdrvLdrFree(pDevExt, pImage);
537 pUsage->pImage = NULL;
538 pUsage = pUsage->pNext;
539 RTMemFree(pvFree);
540 }
541 }
542 RTSemFastMutexRelease(pDevExt->mtxLdr);
543 dprintf2(("freeing images - done\n"));
544
545 /*
546 * Unmap the GIP.
547 */
548 dprintf2(("unmapping GIP:\n"));
549#ifdef USE_NEW_OS_INTERFACE
550 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
551#else
552 if (pSession->pGip)
553#endif
554 {
555 SUPR0GipUnmap(pSession);
556#ifndef USE_NEW_OS_INTERFACE
557 pSession->pGip = NULL;
558#endif
559 pSession->fGipReferenced = 0;
560 }
561 dprintf2(("unmapping GIP - done\n"));
562}
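/* Note on ordering (added commentary, not part of the original source): the cleanup above
 * runs from the outside in - logger instances first, then any IDT patches, then object
 * references (which may run destructors), then the locked/allocated memory bundles, then
 * loader image references, and finally the GIP mapping. Keeping this order means object
 * destructors and image code still see their memory while they run. */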
563
564
565#ifdef VBOX_WITHOUT_IDT_PATCHING
566/**
567 * Fast path I/O Control worker.
568 *
569 * @returns 0 on success.
570 * @returns One of the SUPDRV_ERR_* on failure.
571 * @param uIOCtl Function number.
572 * @param pDevExt Device extension.
573 * @param pSession Session data.
574 */
575int VBOXCALL supdrvIOCtlFast(unsigned uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
576{
577 /*
578 * Disable interrupts before invoking VMMR0Entry() because it ASSUMES
579 * that interrupts are disabled. (We check the two prereqs after doing
580 * this only to allow the compiler to optimize things better.)
581 */
582 int rc;
583 RTCCUINTREG uFlags = ASMGetFlags();
584 ASMIntDisable();
585
586 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0Entry))
587 {
588 switch (uIOCtl)
589 {
590 case SUP_IOCTL_FAST_DO_RAW_RUN:
591 rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_RAW_RUN, NULL);
592 break;
593 case SUP_IOCTL_FAST_DO_HWACC_RUN:
594 rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_HWACC_RUN, NULL);
595 break;
596 case SUP_IOCTL_FAST_DO_NOP:
597 rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_NOP, NULL);
598 break;
599 default:
600 rc = VERR_INTERNAL_ERROR;
601 break;
602 }
603 }
604 else
605 rc = VERR_INTERNAL_ERROR;
606
607 ASMSetFlags(uFlags);
608 return rc;
609}
610#endif /* VBOX_WITHOUT_IDT_PATCHING */
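/* Illustrative sketch (not part of the driver): when VBOX_WITHOUT_IDT_PATCHING is in
 * effect, the OS specific ioctl handler is expected to route the dedicated fast requests
 * to supdrvIOCtlFast() and everything else to supdrvIOCtl(). The dispatcher name below
 * is hypothetical. */
#if 0
static int supExampleOsIOCtl(unsigned uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession,
                             void *pvIn, unsigned cbIn, void *pvOut, unsigned cbOut, unsigned *pcbReturned)
{
    if (    uIOCtl == SUP_IOCTL_FAST_DO_RAW_RUN
        ||  uIOCtl == SUP_IOCTL_FAST_DO_HWACC_RUN
        ||  uIOCtl == SUP_IOCTL_FAST_DO_NOP)
        return supdrvIOCtlFast(uIOCtl, pDevExt, pSession);      /* no buffers involved */
    return supdrvIOCtl(uIOCtl, pDevExt, pSession, pvIn, cbIn, pvOut, cbOut, pcbReturned);
}
#endif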
611
612
613/**
614 * I/O Control worker.
615 *
616 * @returns 0 on success.
617 * @returns One of the SUPDRV_ERR_* on failure.
618 * @param uIOCtl Function number.
619 * @param pDevExt Device extension.
620 * @param pSession Session data.
621 * @param pvIn Input data.
622 * @param cbIn Size of input data.
623 * @param pvOut Output data.
624 * IMPORTANT! This buffer may be shared with the input
625 * data, so do not write to it before you are
626 * done reading the input data!
627 * @param cbOut Size of output data.
628 * @param pcbReturned Size of the returned data.
629 */
630int VBOXCALL supdrvIOCtl(unsigned int uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession,
631 void *pvIn, unsigned cbIn, void *pvOut, unsigned cbOut, unsigned *pcbReturned)
632{
633 *pcbReturned = 0;
634 switch (uIOCtl)
635 {
636 case SUP_IOCTL_COOKIE:
637 {
638 PSUPCOOKIE_IN pIn = (PSUPCOOKIE_IN)pvIn;
639 PSUPCOOKIE_OUT pOut = (PSUPCOOKIE_OUT)pvOut;
640
641 /*
642 * Validate.
643 */
644 if ( cbIn != sizeof(*pIn)
645 || cbOut != sizeof(*pOut))
646 {
647 OSDBGPRINT(("SUP_IOCTL_COOKIE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
648 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
649 return SUPDRV_ERR_INVALID_PARAM;
650 }
651 if (strncmp(pIn->szMagic, SUPCOOKIE_MAGIC, sizeof(pIn->szMagic)))
652 {
653 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pIn->szMagic));
654 return SUPDRV_ERR_INVALID_MAGIC;
655 }
656
657 /*
658 * Match the version.
659 * The current logic is very simple, match the major interface version.
660 */
661 if ( pIn->u32MinVersion > SUPDRVIOC_VERSION
662 || (pIn->u32MinVersion & 0xffff0000) != (SUPDRVIOC_VERSION & 0xffff0000))
663 {
664 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
665 pIn->u32ReqVersion, pIn->u32MinVersion, SUPDRVIOC_VERSION));
666 pOut->u32Cookie = 0xffffffff;
667 pOut->u32SessionCookie = 0xffffffff;
668 pOut->u32SessionVersion = 0xffffffff;
669 pOut->u32DriverVersion = SUPDRVIOC_VERSION;
670 pOut->pSession = NULL;
671 pOut->cFunctions = 0;
672 *pcbReturned = sizeof(*pOut);
673 return SUPDRV_ERR_VERSION_MISMATCH;
674 }
675
676 /*
677 * Fill in return data and be gone.
678 * N.B. The first one to change SUPDRVIOC_VERSION shall make sure that
679 * u32SessionVersion <= u32ReqVersion!
680 */
681 /** @todo A more secure cookie negotiation? */
682 pOut->u32Cookie = pDevExt->u32Cookie;
683 pOut->u32SessionCookie = pSession->u32Cookie;
684 pOut->u32SessionVersion = SUPDRVIOC_VERSION;
685 pOut->u32DriverVersion = SUPDRVIOC_VERSION;
686 pOut->pSession = pSession;
687 pOut->cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
688 *pcbReturned = sizeof(*pOut);
689 return 0;
690 }
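        /* Illustrative note (added commentary, not part of the original source): the version
         * check above splits SUPDRVIOC_VERSION into a major part (top 16 bits, must match
         * exactly) and a minor part (bottom 16 bits, the driver must be at least the requested
         * minimum). For example, with a hypothetical driver version 0x00040001, a client asking
         * for minimum 0x00040000 is accepted, while 0x00030000 (different major) or 0x00040002
         * (newer than the driver) is rejected with SUPDRV_ERR_VERSION_MISMATCH. */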
691
692
693 case SUP_IOCTL_QUERY_FUNCS:
694 {
695 unsigned cFunctions;
696 PSUPQUERYFUNCS_IN pIn = (PSUPQUERYFUNCS_IN)pvIn;
697 PSUPQUERYFUNCS_OUT pOut = (PSUPQUERYFUNCS_OUT)pvOut;
698
699 /*
700 * Validate.
701 */
702 if ( cbIn != sizeof(*pIn)
703 || cbOut < sizeof(*pOut))
704 {
705 dprintf(("SUP_IOCTL_QUERY_FUNCS: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
706 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
707 return SUPDRV_ERR_INVALID_PARAM;
708 }
709 if ( pIn->u32Cookie != pDevExt->u32Cookie
710 || pIn->u32SessionCookie != pSession->u32Cookie )
711 {
712 dprintf(("SUP_IOCTL_QUERY_FUNCS: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
713 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
714 return SUPDRV_ERR_INVALID_MAGIC;
715 }
716
717 /*
718 * Copy the functions.
719 */
720 cFunctions = (cbOut - RT_OFFSETOF(SUPQUERYFUNCS_OUT, aFunctions)) / sizeof(pOut->aFunctions[0]);
721 cFunctions = RT_MIN(cFunctions, ELEMENTS(g_aFunctions));
722 AssertMsg(cFunctions == ELEMENTS(g_aFunctions),
723 ("Why aren't R3 querying all the functions!?! cFunctions=%d while there are %d available\n",
724 cFunctions, ELEMENTS(g_aFunctions)));
725 pOut->cFunctions = cFunctions;
726 memcpy(&pOut->aFunctions[0], g_aFunctions, sizeof(pOut->aFunctions[0]) * cFunctions);
727 *pcbReturned = RT_OFFSETOF(SUPQUERYFUNCS_OUT, aFunctions[cFunctions]);
728 return 0;
729 }
730
731
732 case SUP_IOCTL_IDT_INSTALL:
733 {
734 PSUPIDTINSTALL_IN pIn = (PSUPIDTINSTALL_IN)pvIn;
735 PSUPIDTINSTALL_OUT pOut = (PSUPIDTINSTALL_OUT)pvOut;
736
737 /*
738 * Validate.
739 */
740 if ( cbIn != sizeof(*pIn)
741 || cbOut != sizeof(*pOut))
742 {
743 dprintf(("SUP_IOCTL_INSTALL: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
744 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
745 return SUPDRV_ERR_INVALID_PARAM;
746 }
747 if ( pIn->u32Cookie != pDevExt->u32Cookie
748 || pIn->u32SessionCookie != pSession->u32Cookie )
749 {
750 dprintf(("SUP_IOCTL_INSTALL: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
751 pIn->u32Cookie, pDevExt->u32Cookie,
752 pIn->u32SessionCookie, pSession->u32Cookie));
753 return SUPDRV_ERR_INVALID_MAGIC;
754 }
755
756 *pcbReturned = sizeof(*pOut);
757#ifndef VBOX_WITHOUT_IDT_PATCHING
758 return supdrvIOCtl_IdtInstall(pDevExt, pSession, pIn, pOut);
759#else
760 pOut->u8Idt = 3;
761 return 0;
762#endif
763 }
764
765
766 case SUP_IOCTL_IDT_REMOVE:
767 {
768 PSUPIDTREMOVE_IN pIn = (PSUPIDTREMOVE_IN)pvIn;
769
770 /*
771 * Validate.
772 */
773 if ( cbIn != sizeof(*pIn)
774 || cbOut != 0)
775 {
776 dprintf(("SUP_IOCTL_REMOVE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
777 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
778 return SUPDRV_ERR_INVALID_PARAM;
779 }
780 if ( pIn->u32Cookie != pDevExt->u32Cookie
781 || pIn->u32SessionCookie != pSession->u32Cookie )
782 {
783 dprintf(("SUP_IOCTL_REMOVE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
784 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
785 return SUPDRV_ERR_INVALID_MAGIC;
786 }
787
788#ifndef VBOX_WITHOUT_IDT_PATCHING
789 return supdrvIOCtl_IdtRemoveAll(pDevExt, pSession);
790#else
791 return 0;
792#endif
793 }
794
795
796 case SUP_IOCTL_PINPAGES:
797 {
798 int rc;
799 PSUPPINPAGES_IN pIn = (PSUPPINPAGES_IN)pvIn;
800 PSUPPINPAGES_OUT pOut = (PSUPPINPAGES_OUT)pvOut;
801
802 /*
803 * Validate.
804 */
805 if ( cbIn != sizeof(*pIn)
806 || cbOut < sizeof(*pOut))
807 {
808 dprintf(("SUP_IOCTL_PINPAGES: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
809 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
810 return SUPDRV_ERR_INVALID_PARAM;
811 }
812 if ( pIn->u32Cookie != pDevExt->u32Cookie
813 || pIn->u32SessionCookie != pSession->u32Cookie )
814 {
815 dprintf(("SUP_IOCTL_PINPAGES: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
816 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
817 return SUPDRV_ERR_INVALID_MAGIC;
818 }
819 if (pIn->cPages <= 0 || !pIn->pvR3)
820 {
821 dprintf(("SUP_IOCTL_PINPAGES: Illegal request %p %d\n", (void *)pIn->pvR3, pIn->cPages));
822 return SUPDRV_ERR_INVALID_PARAM;
823 }
824 if ((unsigned)RT_OFFSETOF(SUPPINPAGES_OUT, aPages[pIn->cPages]) > cbOut)
825 {
826 dprintf(("SUP_IOCTL_PINPAGES: Output buffer is too small! %d required %d passed in.\n",
827 RT_OFFSETOF(SUPPINPAGES_OUT, aPages[pIn->cPages]), cbOut));
828 return SUPDRV_ERR_INVALID_PARAM;
829 }
830
831 /*
832 * Execute.
833 */
834 *pcbReturned = RT_OFFSETOF(SUPPINPAGES_OUT, aPages[pIn->cPages]);
835 rc = SUPR0LockMem(pSession, pIn->pvR3, pIn->cPages, &pOut->aPages[0]);
836 if (rc)
837 *pcbReturned = 0;
838 return rc;
839 }
840
841
842 case SUP_IOCTL_UNPINPAGES:
843 {
844 PSUPUNPINPAGES_IN pIn = (PSUPUNPINPAGES_IN)pvIn;
845
846 /*
847 * Validate.
848 */
849 if ( cbIn != sizeof(*pIn)
850 || cbOut != 0)
851 {
852 dprintf(("SUP_IOCTL_UNPINPAGES: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
853 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
854 return SUPDRV_ERR_INVALID_PARAM;
855 }
856 if ( pIn->u32Cookie != pDevExt->u32Cookie
857 || pIn->u32SessionCookie != pSession->u32Cookie)
858 {
859 dprintf(("SUP_IOCTL_UNPINPAGES: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
860 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
861 return SUPDRV_ERR_INVALID_MAGIC;
862 }
863
864 /*
865 * Execute.
866 */
867 return SUPR0UnlockMem(pSession, pIn->pvR3);
868 }
869
870 case SUP_IOCTL_CONT_ALLOC:
871 {
872 int rc;
873 PSUPCONTALLOC_IN pIn = (PSUPCONTALLOC_IN)pvIn;
874 PSUPCONTALLOC_OUT pOut = (PSUPCONTALLOC_OUT)pvOut;
875
876 /*
877 * Validate.
878 */
879 if ( cbIn != sizeof(*pIn)
880 || cbOut < sizeof(*pOut))
881 {
882 dprintf(("SUP_IOCTL_CONT_ALLOC: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
883 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
884 return SUPDRV_ERR_INVALID_PARAM;
885 }
886 if ( pIn->u32Cookie != pDevExt->u32Cookie
887 || pIn->u32SessionCookie != pSession->u32Cookie )
888 {
889 dprintf(("SUP_IOCTL_CONT_ALLOC: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
890 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
891 return SUPDRV_ERR_INVALID_MAGIC;
892 }
893
894 /*
895 * Execute.
896 */
897 rc = SUPR0ContAlloc(pSession, pIn->cPages, &pOut->pvR0, &pOut->pvR3, &pOut->HCPhys);
898 if (!rc)
899 *pcbReturned = sizeof(*pOut);
900 return rc;
901 }
902
903
904 case SUP_IOCTL_CONT_FREE:
905 {
906 PSUPCONTFREE_IN pIn = (PSUPCONTFREE_IN)pvIn;
907
908 /*
909 * Validate.
910 */
911 if ( cbIn != sizeof(*pIn)
912 || cbOut != 0)
913 {
914 dprintf(("SUP_IOCTL_CONT_FREE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
915 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
916 return SUPDRV_ERR_INVALID_PARAM;
917 }
918 if ( pIn->u32Cookie != pDevExt->u32Cookie
919 || pIn->u32SessionCookie != pSession->u32Cookie)
920 {
921 dprintf(("SUP_IOCTL_CONT_FREE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
922 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
923 return SUPDRV_ERR_INVALID_MAGIC;
924 }
925
926 /*
927 * Execute.
928 */
929 return SUPR0ContFree(pSession, (RTHCUINTPTR)pIn->pvR3);
930 }
931
932
933 case SUP_IOCTL_LDR_OPEN:
934 {
935 PSUPLDROPEN_IN pIn = (PSUPLDROPEN_IN)pvIn;
936 PSUPLDROPEN_OUT pOut = (PSUPLDROPEN_OUT)pvOut;
937
938 /*
939 * Validate.
940 */
941 if ( cbIn != sizeof(*pIn)
942 || cbOut != sizeof(*pOut))
943 {
944 dprintf(("SUP_IOCTL_LDR_OPEN: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
945 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
946 return SUPDRV_ERR_INVALID_PARAM;
947 }
948 if ( pIn->u32Cookie != pDevExt->u32Cookie
949 || pIn->u32SessionCookie != pSession->u32Cookie)
950 {
951 dprintf(("SUP_IOCTL_LDR_OPEN: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
952 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
953 return SUPDRV_ERR_INVALID_MAGIC;
954 }
955 if ( pIn->cbImage <= 0
956 || pIn->cbImage >= 16*1024*1024 /*16MB*/)
957 {
958 dprintf(("SUP_IOCTL_LDR_OPEN: Invalid size %d. (max is 16MB)\n", pIn->cbImage));
959 return SUPDRV_ERR_INVALID_PARAM;
960 }
961 if (!memchr(pIn->szName, '\0', sizeof(pIn->szName)))
962 {
963 dprintf(("SUP_IOCTL_LDR_OPEN: The image name isn't terminated!\n"));
964 return SUPDRV_ERR_INVALID_PARAM;
965 }
966 if (!pIn->szName[0])
967 {
968 dprintf(("SUP_IOCTL_LDR_OPEN: The image name is too short\n"));
969 return SUPDRV_ERR_INVALID_PARAM;
970 }
971 if (strpbrk(pIn->szName, ";:()[]{}/\\|&*%#@!~`\"'"))
972 {
973 dprintf(("SUP_IOCTL_LDR_OPEN: The name is invalid '%s'\n", pIn->szName));
974 return SUPDRV_ERR_INVALID_PARAM;
975 }
976
977 *pcbReturned = sizeof(*pOut);
978 return supdrvIOCtl_LdrOpen(pDevExt, pSession, pIn, pOut);
979 }
980
981
982 case SUP_IOCTL_LDR_LOAD:
983 {
984 PSUPLDRLOAD_IN pIn = (PSUPLDRLOAD_IN)pvIn;
985
986 /*
987 * Validate.
988 */
989 if ( cbIn <= sizeof(*pIn)
990 || cbOut != 0)
991 {
992 dprintf(("SUP_IOCTL_LDR_LOAD: Invalid input/output sizes. cbIn=%ld expected greater than %ld. cbOut=%ld expected %ld.\n",
993 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
994 return SUPDRV_ERR_INVALID_PARAM;
995 }
996 if ( pIn->u32Cookie != pDevExt->u32Cookie
997 || pIn->u32SessionCookie != pSession->u32Cookie)
998 {
999 dprintf(("SUP_IOCTL_LDR_LOAD: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1000 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1001 return SUPDRV_ERR_INVALID_MAGIC;
1002 }
1003 if ((unsigned)RT_OFFSETOF(SUPLDRLOAD_IN, achImage[pIn->cbImage]) > cbIn)
1004 {
1005 dprintf(("SUP_IOCTL_LDR_LOAD: Invalid size %d. InputBufferLength=%d\n",
1006 pIn->cbImage, cbIn));
1007 return SUPDRV_ERR_INVALID_PARAM;
1008 }
1009 if (pIn->cSymbols > 16384)
1010 {
1011 dprintf(("SUP_IOCTL_LDR_LOAD: Too many symbols. cSymbols=%u max=16384\n", pIn->cSymbols));
1012 return SUPDRV_ERR_INVALID_PARAM;
1013 }
1014 if ( pIn->cSymbols
1015 && ( pIn->offSymbols >= pIn->cbImage
1016 || pIn->offSymbols + pIn->cSymbols * sizeof(SUPLDRSYM) > pIn->cbImage)
1017 )
1018 {
1019 dprintf(("SUP_IOCTL_LDR_LOAD: symbol table is outside the image bits! offSymbols=%u cSymbols=%d cbImage=%d\n",
1020 pIn->offSymbols, pIn->cSymbols, pIn->cbImage));
1021 return SUPDRV_ERR_INVALID_PARAM;
1022 }
1023 if ( pIn->cbStrTab
1024 && ( pIn->offStrTab >= pIn->cbImage
1025 || pIn->offStrTab + pIn->cbStrTab > pIn->cbImage
1026 || pIn->offStrTab + pIn->cbStrTab < pIn->offStrTab)
1027 )
1028 {
1029 dprintf(("SUP_IOCTL_LDR_LOAD: string table is outside the image bits! offStrTab=%u cbStrTab=%d cbImage=%d\n",
1030 pIn->offStrTab, pIn->cbStrTab, pIn->cbImage));
1031 return SUPDRV_ERR_INVALID_PARAM;
1032 }
1033
1034 if (pIn->cSymbols)
1035 {
1036 uint32_t i;
1037 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pIn->achImage[pIn->offSymbols];
1038 for (i = 0; i < pIn->cSymbols; i++)
1039 {
1040 if (paSyms[i].offSymbol >= pIn->cbImage)
1041 {
1042 dprintf(("SUP_IOCTL_LDR_LOAD: symbol i=%d has an invalid symbol offset: %#x (max=%#x)\n",
1043 i, paSyms[i].offSymbol, pIn->cbImage));
1044 return SUPDRV_ERR_INVALID_PARAM;
1045 }
1046 if (paSyms[i].offName >= pIn->cbStrTab)
1047 {
1048 dprintf(("SUP_IOCTL_LDR_LOAD: symbol i=%d has an invalid name offset: %#x (max=%#x)\n",
1049 i, paSyms[i].offName, pIn->cbStrTab));
1050 return SUPDRV_ERR_INVALID_PARAM;
1051 }
1052 if (!memchr(&pIn->achImage[pIn->offStrTab + paSyms[i].offName], '\0', pIn->cbStrTab - paSyms[i].offName))
1053 {
1054 dprintf(("SUP_IOCTL_LDR_LOAD: symbol i=%d has an unterminated name! offName=%#x (max=%#x)\n",
1055 i, paSyms[i].offName, pIn->cbStrTab));
1056 return SUPDRV_ERR_INVALID_PARAM;
1057 }
1058 }
1059 }
1060
1061 return supdrvIOCtl_LdrLoad(pDevExt, pSession, pIn);
1062 }
1063
1064
1065 case SUP_IOCTL_LDR_FREE:
1066 {
1067 PSUPLDRFREE_IN pIn = (PSUPLDRFREE_IN)pvIn;
1068
1069 /*
1070 * Validate.
1071 */
1072 if ( cbIn != sizeof(*pIn)
1073 || cbOut != 0)
1074 {
1075 dprintf(("SUP_IOCTL_LDR_FREE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1076 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
1077 return SUPDRV_ERR_INVALID_PARAM;
1078 }
1079 if ( pIn->u32Cookie != pDevExt->u32Cookie
1080 || pIn->u32SessionCookie != pSession->u32Cookie)
1081 {
1082 dprintf(("SUP_IOCTL_LDR_FREE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1083 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1084 return SUPDRV_ERR_INVALID_MAGIC;
1085 }
1086
1087 return supdrvIOCtl_LdrFree(pDevExt, pSession, pIn);
1088 }
1089
1090
1091 case SUP_IOCTL_LDR_GET_SYMBOL:
1092 {
1093 PSUPLDRGETSYMBOL_IN pIn = (PSUPLDRGETSYMBOL_IN)pvIn;
1094 PSUPLDRGETSYMBOL_OUT pOut = (PSUPLDRGETSYMBOL_OUT)pvOut;
1095 char *pszEnd;
1096
1097 /*
1098 * Validate.
1099 */
1100 if ( cbIn < (unsigned)RT_OFFSETOF(SUPLDRGETSYMBOL_IN, szSymbol[2])
1101 || cbOut != sizeof(*pOut))
1102 {
1103 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: Invalid input/output sizes. cbIn=%d expected >=%d. cbOut=%d expected %d.\n",
1104 cbIn, RT_OFFSETOF(SUPLDRGETSYMBOL_IN, szSymbol[2]), cbOut, (int)sizeof(*pOut)));
1105 return SUPDRV_ERR_INVALID_PARAM;
1106 }
1107 if ( pIn->u32Cookie != pDevExt->u32Cookie
1108 || pIn->u32SessionCookie != pSession->u32Cookie)
1109 {
1110 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1111 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1112 return SUPDRV_ERR_INVALID_MAGIC;
1113 }
1114 pszEnd = memchr(pIn->szSymbol, '\0', cbIn - RT_OFFSETOF(SUPLDRGETSYMBOL_IN, szSymbol));
1115 if (!pszEnd)
1116 {
1117 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: The symbol name isn't terminated!\n"));
1118 return SUPDRV_ERR_INVALID_PARAM;
1119 }
1120 if (pszEnd - &pIn->szSymbol[0] >= 1024)
1121 {
1122 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: The symbol name is too long (%ld chars, max is %d)!\n",
1123 (long)(pszEnd - &pIn->szSymbol[0]), 1024));
1124 return SUPDRV_ERR_INVALID_PARAM;
1125 }
1126
1127 pOut->pvSymbol = NULL;
1128 *pcbReturned = sizeof(*pOut);
1129 return supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pIn, pOut);
1130 }
1131
1132
1133 /** @todo this interface needs re-doing, we're accessing Ring-3 buffers directly here! */
1134 case SUP_IOCTL_CALL_VMMR0:
1135 {
1136 PSUPCALLVMMR0_IN pIn = (PSUPCALLVMMR0_IN)pvIn;
1137 PSUPCALLVMMR0_OUT pOut = (PSUPCALLVMMR0_OUT)pvOut;
1138
1139 /*
1140 * Validate.
1141 */
1142 if ( cbIn != sizeof(*pIn)
1143 || cbOut != sizeof(*pOut))
1144 {
1145 dprintf(("SUP_IOCTL_CALL_VMMR0: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1146 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
1147 return SUPDRV_ERR_INVALID_PARAM;
1148 }
1149 if ( pIn->u32Cookie != pDevExt->u32Cookie
1150 || pIn->u32SessionCookie != pSession->u32Cookie )
1151 {
1152 dprintf(("SUP_IOCTL_CALL_VMMR0: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1153 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1154 return SUPDRV_ERR_INVALID_MAGIC;
1155 }
1156
1157 /*
1158 * Do we have an entrypoint?
1159 */
1160 if (!pDevExt->pfnVMMR0Entry)
1161 return SUPDRV_ERR_GENERAL_FAILURE;
1162
1163 /*
1164 * Execute.
1165 */
1166 pOut->rc = pDevExt->pfnVMMR0Entry(pIn->pVMR0, pIn->uOperation, (void *)pIn->pvArg); /** @todo address the pvArg problem! */
1167 *pcbReturned = sizeof(*pOut);
1168 return 0;
1169 }
1170
1171
1172 case SUP_IOCTL_GET_PAGING_MODE:
1173 {
1174 int rc;
1175 PSUPGETPAGINGMODE_IN pIn = (PSUPGETPAGINGMODE_IN)pvIn;
1176 PSUPGETPAGINGMODE_OUT pOut = (PSUPGETPAGINGMODE_OUT)pvOut;
1177
1178 /*
1179 * Validate.
1180 */
1181 if ( cbIn != sizeof(*pIn)
1182 || cbOut != sizeof(*pOut))
1183 {
1184 dprintf(("SUP_IOCTL_GET_PAGING_MODE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1185 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
1186 return SUPDRV_ERR_INVALID_PARAM;
1187 }
1188 if ( pIn->u32Cookie != pDevExt->u32Cookie
1189 || pIn->u32SessionCookie != pSession->u32Cookie )
1190 {
1191 dprintf(("SUP_IOCTL_GET_PAGING_MODE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1192 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1193 return SUPDRV_ERR_INVALID_MAGIC;
1194 }
1195
1196 /*
1197 * Execute.
1198 */
1199 *pcbReturned = sizeof(*pOut);
1200 rc = supdrvIOCtl_GetPagingMode(pOut);
1201 if (rc)
1202 *pcbReturned = 0;
1203 return rc;
1204 }
1205
1206
1207 case SUP_IOCTL_LOW_ALLOC:
1208 {
1209 int rc;
1210 PSUPLOWALLOC_IN pIn = (PSUPLOWALLOC_IN)pvIn;
1211 PSUPLOWALLOC_OUT pOut = (PSUPLOWALLOC_OUT)pvOut;
1212
1213 /*
1214 * Validate.
1215 */
1216 if ( cbIn != sizeof(*pIn)
1217 || cbOut < sizeof(*pOut))
1218 {
1219 dprintf(("SUP_IOCTL_LOW_ALLOC: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1220 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
1221 return SUPDRV_ERR_INVALID_PARAM;
1222 }
1223 if ( pIn->u32Cookie != pDevExt->u32Cookie
1224 || pIn->u32SessionCookie != pSession->u32Cookie )
1225 {
1226 dprintf(("SUP_IOCTL_LOW_ALLOC: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1227 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1228 return SUPDRV_ERR_INVALID_MAGIC;
1229 }
1230 if ((unsigned)RT_OFFSETOF(SUPLOWALLOC_OUT, aPages[pIn->cPages]) > cbOut)
1231 {
1232 dprintf(("SUP_IOCTL_LOW_ALLOC: Output buffer is too small! %d required %d passed in.\n",
1233 RT_OFFSETOF(SUPLOWALLOC_OUT, aPages[pIn->cPages]), cbOut));
1234 return SUPDRV_ERR_INVALID_PARAM;
1235 }
1236
1237 /*
1238 * Execute.
1239 */
1240 *pcbReturned = RT_OFFSETOF(SUPLOWALLOC_OUT, aPages[pIn->cPages]);
1241 rc = SUPR0LowAlloc(pSession, pIn->cPages, &pOut->pvR0, &pOut->pvR3, &pOut->aPages[0]);
1242 if (rc)
1243 *pcbReturned = 0;
1244 return rc;
1245 }
1246
1247
1248 case SUP_IOCTL_LOW_FREE:
1249 {
1250 PSUPLOWFREE_IN pIn = (PSUPLOWFREE_IN)pvIn;
1251
1252 /*
1253 * Validate.
1254 */
1255 if ( cbIn != sizeof(*pIn)
1256 || cbOut != 0)
1257 {
1258 dprintf(("SUP_IOCTL_LOW_FREE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1259 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
1260 return SUPDRV_ERR_INVALID_PARAM;
1261 }
1262 if ( pIn->u32Cookie != pDevExt->u32Cookie
1263 || pIn->u32SessionCookie != pSession->u32Cookie)
1264 {
1265 dprintf(("SUP_IOCTL_LOW_FREE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1266 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1267 return SUPDRV_ERR_INVALID_MAGIC;
1268 }
1269
1270 /*
1271 * Execute.
1272 */
1273 return SUPR0LowFree(pSession, (RTHCUINTPTR)pIn->pvR3);
1274 }
1275
1276
1277 case SUP_IOCTL_GIP_MAP:
1278 {
1279 int rc;
1280 PSUPGIPMAP_IN pIn = (PSUPGIPMAP_IN)pvIn;
1281 PSUPGIPMAP_OUT pOut = (PSUPGIPMAP_OUT)pvOut;
1282
1283 /*
1284 * Validate.
1285 */
1286 if ( cbIn != sizeof(*pIn)
1287 || cbOut != sizeof(*pOut))
1288 {
1289 dprintf(("SUP_IOCTL_GIP_MAP: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1290 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
1291 return SUPDRV_ERR_INVALID_PARAM;
1292 }
1293 if ( pIn->u32Cookie != pDevExt->u32Cookie
1294 || pIn->u32SessionCookie != pSession->u32Cookie)
1295 {
1296 dprintf(("SUP_IOCTL_GIP_MAP: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1297 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1298 return SUPDRV_ERR_INVALID_MAGIC;
1299 }
1300
1301 /*
1302 * Execute.
1303 */
1304 rc = SUPR0GipMap(pSession, &pOut->pGipR3, &pOut->HCPhysGip);
1305 if (!rc)
1306 {
1307 pOut->pGipR0 = pDevExt->pGip;
1308 *pcbReturned = sizeof(*pOut);
1309 }
1310 return rc;
1311 }
1312
1313
1314 case SUP_IOCTL_GIP_UNMAP:
1315 {
1316 PSUPGIPUNMAP_IN pIn = (PSUPGIPUNMAP_IN)pvIn;
1317
1318 /*
1319 * Validate.
1320 */
1321 if ( cbIn != sizeof(*pIn)
1322 || cbOut != 0)
1323 {
1324 dprintf(("SUP_IOCTL_GIP_UNMAP: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1325 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
1326 return SUPDRV_ERR_INVALID_PARAM;
1327 }
1328 if ( pIn->u32Cookie != pDevExt->u32Cookie
1329 || pIn->u32SessionCookie != pSession->u32Cookie)
1330 {
1331 dprintf(("SUP_IOCTL_GIP_UNMAP: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1332 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1333 return SUPDRV_ERR_INVALID_MAGIC;
1334 }
1335
1336 /*
1337 * Execute.
1338 */
1339 return SUPR0GipUnmap(pSession);
1340 }
1341
1342
1343 case SUP_IOCTL_SET_VM_FOR_FAST:
1344 {
1345 PSUPSETVMFORFAST_IN pIn = (PSUPSETVMFORFAST_IN)pvIn;
1346
1347 /*
1348 * Validate.
1349 */
1350 if ( cbIn != sizeof(*pIn)
1351 || cbOut != 0)
1352 {
1353 dprintf(("SUP_IOCTL_SET_VM_FOR_FAST: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1354 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
1355 return SUPDRV_ERR_INVALID_PARAM;
1356 }
1357 if ( pIn->u32Cookie != pDevExt->u32Cookie
1358 || pIn->u32SessionCookie != pSession->u32Cookie)
1359 {
1360 dprintf(("SUP_IOCTL_SET_VM_FOR_FAST: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1361 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1362 return SUPDRV_ERR_INVALID_MAGIC;
1363 }
1364 if ( pIn->pVMR0 != NULL
1365 && ( !VALID_PTR(pIn->pVMR0)
1366 || ((uintptr_t)pIn->pVMR0 & (PAGE_SIZE - 1))
1367 )
1368 )
1369 {
1370 dprintf(("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p! Must be a valid, page aligned, pointer.\n", pIn->pVMR0));
1371 return SUPDRV_ERR_INVALID_POINTER;
1372 }
1373
1374 /*
1375 * Execute.
1376 */
1377#ifndef VBOX_WITHOUT_IDT_PATCHING
1378 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: !VBOX_WITHOUT_IDT_PATCHING\n"));
1379 return SUPDRV_ERR_GENERAL_FAILURE;
1380#else
1381 pSession->pVM = pIn->pVMR0;
1382 return 0;
1383#endif
1384 }
1385
1386
1387 default:
1388 dprintf(("Unknown IOCTL %#x\n", uIOCtl));
1389 break;
1390 }
1391 return SUPDRV_ERR_GENERAL_FAILURE;
1392}
1393
1394
1395/**
1396 * Register an object for reference counting.
1397 * The object is registered with one reference in the specified session.
1398 *
1399 * @returns Unique identifier on success (pointer).
1400 * All future references must use this identifier.
1401 * @returns NULL on failure.
1402 * @param pfnDestructor The destructor function which will be called when the reference count reaches 0.
1403 * @param pvUser1 The first user argument.
1404 * @param pvUser2 The second user argument.
1405 */
1406SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1407{
1408 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1409 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1410 PSUPDRVOBJ pObj;
1411 PSUPDRVUSAGE pUsage;
1412
1413 /*
1414 * Validate the input.
1415 */
1416 if (!pSession)
1417 {
1418 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1419 return NULL;
1420 }
1421 if ( enmType <= SUPDRVOBJTYPE_INVALID
1422 || enmType >= SUPDRVOBJTYPE_END)
1423 {
1424 AssertMsgFailed(("Invalid enmType=%d\n", enmType));
1425 return NULL;
1426 }
1427 if (!pfnDestructor)
1428 {
1429 AssertMsgFailed(("Invalid pfnDestructor=%d\n", pfnDestructor));
1430 return NULL;
1431 }
1432
1433 /*
1434 * Allocate and initialize the object.
1435 */
1436 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1437 if (!pObj)
1438 return NULL;
1439 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1440 pObj->enmType = enmType;
1441 pObj->pNext = NULL;
1442 pObj->cUsage = 1;
1443 pObj->pfnDestructor = pfnDestructor;
1444 pObj->pvUser1 = pvUser1;
1445 pObj->pvUser2 = pvUser2;
1446 pObj->CreatorUid = pSession->Uid;
1447 pObj->CreatorGid = pSession->Gid;
1448 pObj->CreatorProcess= pSession->Process;
1449 supdrvOSObjInitCreator(pObj, pSession);
1450
1451 /*
1452 * Allocate the usage record.
1453 * (We keep freed usage records around to simplify SUPR0ObjAddRef().)
1454 */
1455 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1456
1457 pUsage = pDevExt->pUsageFree;
1458 if (pUsage)
1459 pDevExt->pUsageFree = pUsage->pNext;
1460 else
1461 {
1462 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1463 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1464 if (!pUsage)
1465 {
1466 RTMemFree(pObj);
1467 return NULL;
1468 }
1469 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1470 }
1471
1472 /*
1473 * Insert the object and create the session usage record.
1474 */
1475 /* The object. */
1476 pObj->pNext = pDevExt->pObjs;
1477 pDevExt->pObjs = pObj;
1478
1479 /* The session record. */
1480 pUsage->cUsage = 1;
1481 pUsage->pObj = pObj;
1482 pUsage->pNext = pSession->pUsage;
1483 dprintf(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1484 pSession->pUsage = pUsage;
1485
1486 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1487
1488 dprintf(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
1489 return pObj;
1490}
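/* Illustrative sketch (not part of the driver): typical use of the object tracking API
 * from another ring-0 module. The destructor runs once the last reference is released,
 * whichever session held it, and the driver frees the object record itself afterwards.
 * All supExample names are hypothetical, and SUPDRVOBJTYPE_VM is assumed to be one of
 * the valid SUPDRVOBJTYPE values. */
#if 0
static DECLCALLBACK(void) supExampleObjDtor(void *pvObj, void *pvUser1, void *pvUser2)
{
    /* pvUser1/pvUser2 are the values passed to SUPR0ObjRegister(); do not free pvObj here. */
    RTMemFree(pvUser1);
}

static int supExampleUseObj(PSUPDRVSESSION pSession)
{
    void *pvPayload = RTMemAllocZ(256);
    void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, supExampleObjDtor, pvPayload, NULL);
    if (!pvObj)
    {
        RTMemFree(pvPayload);
        return SUPDRV_ERR_NO_MEMORY;
    }
    /* ... hand pvObj to other sessions; they take their own references via SUPR0ObjAddRef() ... */
    return SUPR0ObjRelease(pvObj, pSession);    /* drops the initial reference */
}
#endif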
1491
1492
1493/**
1494 * Increment the reference counter for the object associating the reference
1495 * with the specified session.
1496 *
1497 * @returns 0 on success.
1498 * @returns SUPDRV_ERR_* on failure.
1499 * @param pvObj The identifier returned by SUPR0ObjRegister().
1500 * @param pSession The session which is referencing the object.
1501 */
1502SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1503{
1504 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1505 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1506 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1507 PSUPDRVUSAGE pUsagePre;
1508 PSUPDRVUSAGE pUsage;
1509
1510 /*
1511 * Validate the input.
1512 */
1513 if (!pSession)
1514 {
1515 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1516 return SUPDRV_ERR_INVALID_PARAM;
1517 }
1518 if (!pObj || pObj->u32Magic != SUPDRVOBJ_MAGIC)
1519 {
1520 AssertMsgFailed(("Invalid pvObj=%p magic=%#x (expected %#x)\n",
1521 pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC));
1522 return SUPDRV_ERR_INVALID_PARAM;
1523 }
1524
1525 /*
1526 * Preallocate the usage record.
1527 */
1528 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1529
1530 pUsagePre = pDevExt->pUsageFree;
1531 if (pUsagePre)
1532 pDevExt->pUsageFree = pUsagePre->pNext;
1533 else
1534 {
1535 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1536 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1537 if (!pUsagePre)
1538 return SUPDRV_ERR_NO_MEMORY;
1539 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1540 }
1541
1542 /*
1543 * Reference the object.
1544 */
1545 pObj->cUsage++;
1546
1547 /*
1548 * Look for the session record.
1549 */
1550 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1551 {
1552 dprintf(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1553 if (pUsage->pObj == pObj)
1554 break;
1555 }
1556 if (pUsage)
1557 pUsage->cUsage++;
1558 else
1559 {
1560 /* create a new session record. */
1561 pUsagePre->cUsage = 1;
1562 pUsagePre->pObj = pObj;
1563 pUsagePre->pNext = pSession->pUsage;
1564 pSession->pUsage = pUsagePre;
1565 dprintf(("SUPR0ObjAddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));
1566
1567 pUsagePre = NULL;
1568 }
1569
1570 /*
1571 * Put any unused usage record into the free list.
1572 */
1573 if (pUsagePre)
1574 {
1575 pUsagePre->pNext = pDevExt->pUsageFree;
1576 pDevExt->pUsageFree = pUsagePre;
1577 }
1578
1579 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1580
1581 return 0;
1582}
1583
1584
1585/**
1586 * Decrement / destroy a reference counter record for an object.
1587 *
1588 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
1589 *
1590 * @returns 0 on success.
1591 * @returns SUPDRV_ERR_* on failure.
1592 * @param pvObj The identifier returned by SUPR0ObjRegister().
1593 * @param pSession The session which is referencing the object.
1594 */
1595SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
1596{
1597 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1598 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1599 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1600 bool fDestroy = false;
1601 PSUPDRVUSAGE pUsage;
1602 PSUPDRVUSAGE pUsagePrev;
1603
1604 /*
1605 * Validate the input.
1606 */
1607 if (!pSession)
1608 {
1609 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1610 return SUPDRV_ERR_INVALID_PARAM;
1611 }
1612 if (!pObj || pObj->u32Magic != SUPDRVOBJ_MAGIC)
1613 {
1614 AssertMsgFailed(("Invalid pvObj=%p magic=%#x (expected %#x)\n",
1615 pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC));
1616 return SUPDRV_ERR_INVALID_PARAM;
1617 }
1618
1619 /*
1620 * Acquire the spinlock and look for the usage record.
1621 */
1622 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1623
1624 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
1625 pUsage;
1626 pUsagePrev = pUsage, pUsage = pUsage->pNext)
1627 {
1628 dprintf(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1629 if (pUsage->pObj == pObj)
1630 {
1631 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1632 if (pUsage->cUsage > 1)
1633 {
1634 pObj->cUsage--;
1635 pUsage->cUsage--;
1636 }
1637 else
1638 {
1639 /*
1640 * Free the session record.
1641 */
1642 if (pUsagePrev)
1643 pUsagePrev->pNext = pUsage->pNext;
1644 else
1645 pSession->pUsage = pUsage->pNext;
1646 pUsage->pNext = pDevExt->pUsageFree;
1647 pDevExt->pUsageFree = pUsage;
1648
1649 /* What about the object? */
1650 if (pObj->cUsage > 1)
1651 pObj->cUsage--;
1652 else
1653 {
1654 /*
1655 * Object is to be destroyed, unlink it.
1656 */
1657 fDestroy = true;
1658 if (pDevExt->pObjs == pObj)
1659 pDevExt->pObjs = pObj->pNext;
1660 else
1661 {
1662 PSUPDRVOBJ pObjPrev;
1663 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
1664 if (pObjPrev->pNext == pObj)
1665 {
1666 pObjPrev->pNext = pObj->pNext;
1667 break;
1668 }
1669 Assert(pObjPrev);
1670 }
1671 }
1672 }
1673 break;
1674 }
1675 }
1676
1677 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1678
1679 /*
1680 * Call the destructor and free the object if required.
1681 */
1682 if (fDestroy)
1683 {
1684 pObj->u32Magic++;
1685 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
1686 RTMemFree(pObj);
1687 }
1688
1689 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
1690 return pUsage ? 0 : SUPDRV_ERR_INVALID_PARAM;
1691}
1692
1693/**
1694 * Verifies that the current process can access the specified object.
1695 *
1696 * @returns 0 if access is granted.
1697 * @returns SUPDRV_ERR_PERMISSION_DENIED if denied access.
1698 * @returns SUPDRV_ERR_INVALID_PARAM if invalid parameter.
1699 *
1700 * @param pvObj The identifier returned by SUPR0ObjRegister().
1701 * @param pSession The session which wishes to access the object.
1702 * @param pszObjName Object string name. This is optional and depends on the object type.
1703 *
1704 * @remark The caller is responsible for making sure the object isn't removed while
1705 * we're inside this function. If uncertain about this, just call AddRef before calling us.
1706 */
1707SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
1708{
1709 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1710 int rc = SUPDRV_ERR_GENERAL_FAILURE;
1711
1712 /*
1713 * Validate the input.
1714 */
1715 if (!pSession)
1716 {
1717 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1718 return SUPDRV_ERR_INVALID_PARAM;
1719 }
1720 if (!pObj || pObj->u32Magic != SUPDRVOBJ_MAGIC)
1721 {
1722 AssertMsgFailed(("Invalid pvObj=%p magic=%#x (expected %#x)\n",
1723 pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC));
1724 return SUPDRV_ERR_INVALID_PARAM;
1725 }
1726
1727 /*
1728 * Check access. (returns true if a decision has been made.)
1729 */
1730 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
1731 return rc;
1732
1733 /*
1734 * Default policy is to allow the user to access his own
1735 * stuff but nothing else.
1736 */
1737 if (pObj->CreatorUid == pSession->Uid)
1738 return 0;
1739 return SUPDRV_ERR_PERMISSION_DENIED;
1740}
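/* Illustrative sketch (not part of the driver): following the remark above, a caller
 * that cannot otherwise guarantee the object stays alive takes a reference around the
 * access check. The supExample name is hypothetical. */
#if 0
static int supExampleCheckedAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
{
    int rc = SUPR0ObjAddRef(pvObj, pSession);
    if (!rc)
    {
        rc = SUPR0ObjVerifyAccess(pvObj, pSession, pszObjName);
        if (!rc)
        {
            /* ... access granted, use the object ... */
        }
        SUPR0ObjRelease(pvObj, pSession);       /* drop the temporary reference */
    }
    return rc;
}
#endif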
1741
1742
1743/**
1744 * Lock pages.
1745 *
1746 * @param pSession Session to which the locked memory should be associated.
1747 * @param pvR3 Start of the memory range to lock.
1748 * This must be page aligned.
1749 * @param cPages Number of pages in the range to lock.
1750 * @param paPages Where to store the physical addresses of the locked pages.
1751 */
1752SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PSUPPAGE paPages)
1753{
1754 int rc;
1755 SUPDRVMEMREF Mem = {0};
1756 const size_t cb = (size_t)cPages << PAGE_SHIFT;
1757 dprintf(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n",
1758 pSession, (void *)pvR3, cPages, paPages));
1759
1760 /*
1761 * Verify input.
1762 */
1763 if (RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3 || !pvR3)
1764 {
1765 dprintf(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
1766 return SUPDRV_ERR_INVALID_PARAM;
1767 }
1768 if (!paPages)
1769 {
1770 dprintf(("paPages is NULL!\n"));
1771 return SUPDRV_ERR_INVALID_PARAM;
1772 }
1773
1774#ifdef USE_NEW_OS_INTERFACE
1775 /*
1776 * Let IPRT do the job.
1777 */
1778 Mem.eType = MEMREF_TYPE_LOCKED;
1779 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
1780 if (RT_SUCCESS(rc))
1781 {
1782 AssertMsg(RTR0MemObjAddress(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddress(Mem.MemObj), pvR3));
1783 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
1784
1785 unsigned iPage = cPages;
1786 while (iPage-- > 0)
1787 {
1788 paPages[iPage].uReserved = 0;
1789 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
1790 if (RT_UNLIKELY(paPages[iPage].Phys == NIL_RTCCPHYS))
1791 {
1792 AssertMsgFailed(("iPage=%d\n", iPage));
1793 rc = VERR_INTERNAL_ERROR;
1794 break;
1795 }
1796 }
1797 if (RT_SUCCESS(rc))
1798 rc = supdrvMemAdd(&Mem, pSession);
1799 if (RT_FAILURE(rc))
1800 {
1801 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
1802 AssertRC(rc2);
1803 }
1804 }
1805
1806#else /* !USE_NEW_OS_INTERFACE */
1807
1808 /*
1809 * Let the OS specific code have a go.
1810 */
1811 Mem.pvR0 = NULL;
1812 Mem.pvR3 = pvR3;
1813 Mem.eType = MEMREF_TYPE_LOCKED;
1814 Mem.cb = cb;
1815 rc = supdrvOSLockMemOne(&Mem, paPages);
1816 if (rc)
1817 return rc;
1818
1819 /*
1820     * Everything went fine; add the memory reference to the session.
1821 */
1822 rc = supdrvMemAdd(&Mem, pSession);
1823 if (rc)
1824 supdrvOSUnlockMemOne(&Mem);
1825#endif /* !USE_NEW_OS_INTERFACE */
1826 return rc;
1827}
1828
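/*
 * Illustrative sketch (not built): locking a page aligned ring-3 buffer and
 * picking up the physical address of each page. The buffer address, page count
 * and the paPages array are assumed to be supplied by the caller;
 * "ExampleLockUserBuffer" is a hypothetical name used only for illustration.
 */
#if 0
static int ExampleLockUserBuffer(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PSUPPAGE paPages)
{
    /* pvR3 must be page aligned; paPages must have room for cPages entries. */
    int rc = SUPR0LockMem(pSession, pvR3, cPages, paPages);
    if (!rc)
    {
        uint32_t iPage;
        for (iPage = 0; iPage < cPages; iPage++)
            dprintf(("page %u -> physical %VHp\n", iPage, paPages[iPage].Phys));

        /* ... use the pages ... */

        rc = SUPR0UnlockMem(pSession, pvR3);    /* release the lock when done */
    }
    return rc;
}
#endif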
1829
1830/**
1831 * Unlocks the memory pointed to by pv.
1832 *
1833 * @returns 0 on success.
1834 * @returns SUPDRV_ERR_* on failure
1835 * @param pSession Session to which the memory was locked.
1836 * @param pvR3 Memory to unlock.
1837 */
1838SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
1839{
1840 dprintf(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
1841 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
1842}
1843
1844
1845/**
1846 * Allocates a chunk of page aligned memory with contiguous and fixed physical
1847 * backing.
1848 *
1849 * @returns 0 on success.
1850 * @returns SUPDRV_ERR_* on failure.
1851 * @param pSession Session data.
1852 * @param cPages Number of pages to allocate.
1853 * @param ppvR0 Where to put the address of the Ring-0 mapping of the allocated memory.
1854 * @param ppvR3 Where to put the address of the Ring-3 mapping of the allocated memory.
1855 * @param pHCPhys Where to put the physical address of the allocated memory.
1856 */
1857SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
1858{
1859 int rc;
1860 SUPDRVMEMREF Mem = {0};
1861 dprintf(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
1862
1863 /*
1864 * Validate input.
1865 */
1866 if (!pSession || !ppvR3 || !ppvR0 || !pHCPhys)
1867 {
1868 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
1869 pSession, ppvR0, ppvR3, pHCPhys));
1870 return SUPDRV_ERR_INVALID_PARAM;
1871
1872 }
1873 if (cPages == 0 || cPages >= 256)
1874 {
1875 dprintf(("Illegal request cPages=%d, must be greater than 0 and smaller than 256\n", cPages));
1876 return SUPDRV_ERR_INVALID_PARAM;
1877 }
1878
1879#ifdef USE_NEW_OS_INTERFACE
1880 /*
1881 * Let IPRT do the job.
1882 */
1883 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
1884 if (RT_SUCCESS(rc))
1885 {
1886 int rc2;
1887 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (void *)-1, 0,
1888 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
1889 if (RT_SUCCESS(rc))
1890 {
1891 Mem.eType = MEMREF_TYPE_CONT;
1892 rc = supdrvMemAdd(&Mem, pSession);
1893 if (!rc)
1894 {
1895 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
1896 *ppvR3 = (RTR3PTR)RTR0MemObjAddress(Mem.MapObjR3);
1897 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
1898 return 0;
1899 }
1900
1901 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
1902 AssertRC(rc2);
1903 }
1904 rc2 = RTR0MemObjFree(Mem.MemObj, false);
1905 AssertRC(rc2);
1906 }
1907
1908#else /* !USE_NEW_OS_INTERFACE */
1909
1910 /*
1911 * Let the OS specific code have a go.
1912 */
1913 Mem.pvR0 = NULL;
1914 Mem.pvR3 = NIL_RTR3PTR;
1915 Mem.eType = MEMREF_TYPE_CONT;
1916 Mem.cb = cPages << PAGE_SHIFT;
1917 rc = supdrvOSContAllocOne(&Mem, ppvR0, ppvR3, pHCPhys);
1918 if (rc)
1919 return rc;
1920    AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)) && !(*pHCPhys & (PAGE_SIZE - 1)),
1921 ("Memory is not page aligned! *ppvR0=%p *ppvR3=%p phys=%VHp\n", ppvR0 ? *ppvR0 : NULL, *ppvR3, *pHCPhys));
1922
1923 /*
1924     * Everything went fine; add the memory reference to the session.
1925 */
1926 rc = supdrvMemAdd(&Mem, pSession);
1927 if (rc)
1928 supdrvOSContFreeOne(&Mem);
1929#endif /* !USE_NEW_OS_INTERFACE */
1930
1931 return rc;
1932}
1933
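/*
 * Illustrative sketch (not built): allocating a physically contiguous buffer
 * that is visible in both ring-0 and ring-3, then freeing it again. The page
 * count is an arbitrary example value and "ExampleContAlloc" is a hypothetical
 * helper name.
 */
#if 0
static int ExampleContAlloc(PSUPDRVSESSION pSession)
{
    RTR0PTR  pvR0;
    RTR3PTR  pvR3 = NIL_RTR3PTR;
    RTHCPHYS HCPhys = NIL_RTHCPHYS;
    int rc = SUPR0ContAlloc(pSession, 4 /* pages */, &pvR0, &pvR3, &HCPhys);
    if (!rc)
    {
        dprintf(("cont alloc: R0=%p R3=%p Phys=%VHp\n", (void *)pvR0, (void *)pvR3, HCPhys));
        /* ... use the buffer ... */
        rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pvR0);    /* the R3 address works too */
    }
    return rc;
}
#endif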
1934
1935/**
1936 * Frees memory allocated using SUPR0ContAlloc().
1937 *
1938 * @returns 0 on success.
1939 * @returns SUPDRV_ERR_* on failure.
1940 * @param pSession The session to which the memory was allocated.
1941 * @param uPtr Pointer to the memory (ring-3 or ring-0).
1942 */
1943SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
1944{
1945 dprintf(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
1946 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
1947}
1948
1949
1950/**
1951 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
1952 *
1953 * @returns 0 on success.
1954 * @returns SUPDRV_ERR_* on failure.
1955 * @param pSession Session data.
1956 * @param cPages Number of pages to allocate.
1957 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
1958 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
1959 * @param paPages Where to put the physical addresses of allocated memory.
1960 */
1961SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PSUPPAGE paPages)
1962{
1963 unsigned iPage;
1964 int rc;
1965 SUPDRVMEMREF Mem = {0};
1966 dprintf(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
1967
1968 /*
1969 * Validate input.
1970 */
1971 if (!pSession || !ppvR3 || !ppvR0 || !paPages)
1972 {
1973 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
1974 pSession, ppvR3, ppvR0, paPages));
1975 return SUPDRV_ERR_INVALID_PARAM;
1976
1977 }
1978 if (cPages < 1 || cPages > 256)
1979 {
1980        dprintf(("Illegal request cPages=%d, must be between 1 and 256.\n", cPages));
1981 return SUPDRV_ERR_INVALID_PARAM;
1982 }
1983
1984#ifdef USE_NEW_OS_INTERFACE
1985 /*
1986 * Let IPRT do the work.
1987 */
1988 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
1989 if (RT_SUCCESS(rc))
1990 {
1991 int rc2;
1992 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (void *)-1, 0,
1993 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
1994 if (RT_SUCCESS(rc))
1995 {
1996 Mem.eType = MEMREF_TYPE_LOW;
1997 rc = supdrvMemAdd(&Mem, pSession);
1998 if (!rc)
1999 {
2000 for (iPage = 0; iPage < cPages; iPage++)
2001 {
2002 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2003 paPages[iPage].uReserved = 0;
2004 AssertMsg(!(paPages[iPage].Phys & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPages[iPage].Phys));
2005 }
2006 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2007 *ppvR3 = RTR0MemObjAddress(Mem.MapObjR3);
2008 return 0;
2009 }
2010
2011 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2012 AssertRC(rc2);
2013 }
2014
2015 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2016 AssertRC(rc2);
2017 }
2018
2019#else /* !USE_NEW_OS_INTERFACE */
2020
2021 /*
2022 * Let the OS specific code have a go.
2023 */
2024 Mem.pvR0 = NULL;
2025 Mem.pvR3 = NIL_RTR3PTR;
2026 Mem.eType = MEMREF_TYPE_LOW;
2027 Mem.cb = cPages << PAGE_SHIFT;
2028 rc = supdrvOSLowAllocOne(&Mem, ppvR0, ppvR3, paPages);
2029 if (rc)
2030 return rc;
2031 AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)), ("Memory is not page aligned! virt=%p\n", *ppvR3));
2032 AssertMsg(!((uintptr_t)*ppvR0 & (PAGE_SIZE - 1)), ("Memory is not page aligned! virt=%p\n", *ppvR0));
2033 for (iPage = 0; iPage < cPages; iPage++)
2034 AssertMsg(!(paPages[iPage].Phys & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPages[iPage].Phys));
2035
2036 /*
2037     * Everything went fine; add the memory reference to the session.
2038 */
2039 rc = supdrvMemAdd(&Mem, pSession);
2040 if (rc)
2041 supdrvOSLowFreeOne(&Mem);
2042#endif /* !USE_NEW_OS_INTERFACE */
2043 return rc;
2044}
2045
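/*
 * Illustrative sketch (not built): allocating pages with physical backing below
 * 4GB and inspecting the per-page physical addresses. The array size and the
 * "ExampleLowAlloc" name are arbitrary choices for illustration only.
 */
#if 0
static int ExampleLowAlloc(PSUPDRVSESSION pSession)
{
    SUPPAGE  aPages[8];             /* one entry per requested page */
    RTR0PTR  pvR0;
    RTR3PTR  pvR3;
    int rc = SUPR0LowAlloc(pSession, RT_ELEMENTS(aPages), &pvR0, &pvR3, &aPages[0]);
    if (!rc)
    {
        unsigned iPage;
        for (iPage = 0; iPage < RT_ELEMENTS(aPages); iPage++)
            dprintf(("low page %u at %VHp\n", iPage, aPages[iPage].Phys));
        rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pvR0);
    }
    return rc;
}
#endif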
2046
2047/**
2048 * Frees memory allocated using SUPR0LowAlloc().
2049 *
2050 * @returns 0 on success.
2051 * @returns SUPDRV_ERR_* on failure.
2052 * @param pSession The session to which the memory was allocated.
2053 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2054 */
2055SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2056{
2057 dprintf(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2058 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2059}
2060
2061
2062/**
2063 * Allocates a chunk of memory with both R0 and R3 mappings.
2064 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2065 *
2066 * @returns 0 on success.
2067 * @returns SUPDRV_ERR_* on failure.
2068 * @param pSession The session to associate the allocation with.
2069 * @param cb Number of bytes to allocate.
2070 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2071 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2072 */
2073SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
2074{
2075 int rc;
2076 SUPDRVMEMREF Mem = {0};
2077 dprintf(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2078
2079 /*
2080 * Validate input.
2081 */
2082 if (!pSession || !ppvR0 || !ppvR3)
2083 {
2084 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p\n",
2085 pSession, ppvR0, ppvR3));
2086 return SUPDRV_ERR_INVALID_PARAM;
2087
2088 }
2089 if (cb < 1 || cb >= PAGE_SIZE * 256)
2090 {
2091        dprintf(("Illegal request cb=%u; must be greater than 0 and smaller than PAGE_SIZE * 256.\n", cb));
2092 return SUPDRV_ERR_INVALID_PARAM;
2093 }
2094
2095#ifdef USE_NEW_OS_INTERFACE
2096 /*
2097 * Let IPRT do the work.
2098 */
2099 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2100 if (RT_SUCCESS(rc))
2101 {
2102 int rc2;
2103 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (void*)-1, 0,
2104 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2105 if (RT_SUCCESS(rc))
2106 {
2107 Mem.eType = MEMREF_TYPE_MEM;
2108 rc = supdrvMemAdd(&Mem, pSession);
2109 if (!rc)
2110 {
2111 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2112 *ppvR3 = (RTR3PTR)RTR0MemObjAddress(Mem.MapObjR3);
2113 return 0;
2114 }
2115 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2116 AssertRC(rc2);
2117 }
2118
2119 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2120 AssertRC(rc2);
2121 }
2122
2123#else /* !USE_NEW_OS_INTERFACE */
2124
2125 /*
2126 * Let the OS specific code have a go.
2127 */
2128 Mem.pvR0 = NULL;
2129 Mem.pvR3 = NIL_RTR3PTR;
2130 Mem.eType = MEMREF_TYPE_MEM;
2131 Mem.cb = cb;
2132 rc = supdrvOSMemAllocOne(&Mem, ppvR0, ppvR3);
2133 if (rc)
2134 return rc;
2135 AssertMsg(!((uintptr_t)*ppvR0 & (PAGE_SIZE - 1)), ("Memory is not page aligned! pvR0=%p\n", *ppvR0));
2136 AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)), ("Memory is not page aligned! pvR3=%p\n", *ppvR3));
2137
2138 /*
2139     * Everything went fine; add the memory reference to the session.
2140 */
2141 rc = supdrvMemAdd(&Mem, pSession);
2142 if (rc)
2143 supdrvOSMemFreeOne(&Mem);
2144#endif /* !USE_NEW_OS_INTERFACE */
2145 return rc;
2146}
2147
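/*
 * Illustrative sketch (not built): allocating a buffer with both ring-0 and
 * ring-3 mappings and releasing it again. "ExampleMemAlloc" is a hypothetical
 * helper name and the size is an arbitrary example value.
 */
#if 0
static int ExampleMemAlloc(PSUPDRVSESSION pSession)
{
    RTR0PTR pvR0;
    RTR3PTR pvR3;
    int rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
    if (!rc)
    {
        /* Both addresses refer to the same physical pages. */
        dprintf(("mem alloc: R0=%p R3=%p\n", (void *)pvR0, (void *)pvR3));
        rc = SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
    }
    return rc;
}
#endif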
2148
2149/**
2150 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2151 *
2152 * @returns 0 on success.
2153 * @returns SUPDRV_ERR_* on failure.
2154 * @param pSession The session to which the memory was allocated.
2155 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2156 * @param paPages Where to store the physical addresses.
2157 */
2158SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages)
2159{
2160 PSUPDRVBUNDLE pBundle;
2161 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2162 dprintf(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
2163
2164 /*
2165 * Validate input.
2166 */
2167 if (!pSession)
2168 {
2169 dprintf(("pSession must not be NULL!"));
2170 return SUPDRV_ERR_INVALID_PARAM;
2171 }
2172 if (!uPtr || !paPages)
2173 {
2174        dprintf(("Illegal address uPtr=%p and/or paPages=%p\n", (void *)uPtr, paPages));
2175 return SUPDRV_ERR_INVALID_PARAM;
2176 }
2177
2178 /*
2179 * Search for the address.
2180 */
2181 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2182 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2183 {
2184 if (pBundle->cUsed > 0)
2185 {
2186 unsigned i;
2187 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
2188 {
2189#ifdef USE_NEW_OS_INTERFACE
2190 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2191 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2192 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2193 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2194 && (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MapObjR3) == uPtr)
2195 )
2196 )
2197 {
2198 const unsigned cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2199 unsigned iPage;
2200 for (iPage = 0; iPage < cPages; iPage++)
2201 {
2202 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2203 paPages[iPage].uReserved = 0;
2204 }
2205 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2206 return 0;
2207 }
2208#else /* !USE_NEW_OS_INTERFACE */
2209 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2210 && ( (RTHCUINTPTR)pBundle->aMem[i].pvR0 == uPtr
2211 || (RTHCUINTPTR)pBundle->aMem[i].pvR3 == uPtr))
2212 {
2213 supdrvOSMemGetPages(&pBundle->aMem[i], paPages);
2214 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2215 return 0;
2216 }
2217#endif
2218 }
2219 }
2220 }
2221 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2222 dprintf(("Failed to find %p!!!\n", (void *)uPtr));
2223 return SUPDRV_ERR_INVALID_PARAM;
2224}
2225
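/*
 * Illustrative sketch (not built): querying the physical pages behind an
 * existing SUPR0MemAlloc() allocation. pvR0, cPages and paPages are assumed to
 * describe an allocation owned by pSession; "ExampleQueryPhys" is hypothetical.
 */
#if 0
static int ExampleQueryPhys(PSUPDRVSESSION pSession, RTR0PTR pvR0, uint32_t cPages, PSUPPAGE paPages)
{
    /* Either the ring-0 or the ring-3 address of the allocation is accepted. */
    int rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, paPages);
    if (!rc)
    {
        uint32_t iPage;
        for (iPage = 0; iPage < cPages; iPage++)
            dprintf(("page %u -> %VHp\n", iPage, paPages[iPage].Phys));
    }
    return rc;
}
#endif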
2226
2227/**
2228 * Free memory allocated by SUPR0MemAlloc().
2229 *
2230 * @returns 0 on success.
2231 * @returns SUPDRV_ERR_* on failure.
2232 * @param pSession The session owning the allocation.
2233 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2234 */
2235SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2236{
2237 dprintf(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2238 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
2239}
2240
2241
2242/**
2243 * Maps the GIP into userspace and/or get the physical address of the GIP.
2244 *
2245 * @returns 0 on success.
2246 * @returns SUPDRV_ERR_* on failure.
2247 * @param pSession Session to which the GIP mapping should belong.
2248 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
2249 * @param pHCPhysGid Where to store the physical address. (optional)
2250 *
2251 * @remark There is no reference counting on the mapping, so one call to this function
2252 * counts globally as one reference. One call to SUPR0GipUnmap() will unmap the GIP
2253 * and remove the session as a GIP user.
2254 */
2255SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGid)
2256{
2257 int rc = 0;
2258 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2259 RTR3PTR pGip = NIL_RTR3PTR;
2260 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2261 dprintf(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGid=%p\n", pSession, ppGipR3, pHCPhysGid));
2262
2263 /*
2264 * Validate
2265 */
2266 if (!ppGipR3 && !pHCPhysGid)
2267 return 0;
2268
2269 RTSemFastMutexRequest(pDevExt->mtxGip);
2270 if (pDevExt->pGip)
2271 {
2272 /*
2273 * Map it?
2274 */
2275 if (ppGipR3)
2276 {
2277#ifdef USE_NEW_OS_INTERFACE
2278 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2279 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (void*)-1, 0,
2280 RTMEM_PROT_READ, RTR0ProcHandleSelf());
2281 if (RT_SUCCESS(rc))
2282 {
2283 pGip = (RTR3PTR)RTR0MemObjAddress(pSession->GipMapObjR3);
2284 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
2285 }
2286#else /* !USE_NEW_OS_INTERFACE */
2287 if (!pSession->pGip)
2288 rc = supdrvOSGipMap(pSession->pDevExt, &pSession->pGip);
2289 if (!rc)
2290 pGip = (RTR3PTR)pSession->pGip;
2291#endif /* !USE_NEW_OS_INTERFACE */
2292 }
2293
2294 /*
2295 * Get physical address.
2296 */
2297 if (pHCPhysGid && !rc)
2298 HCPhys = pDevExt->HCPhysGip;
2299
2300 /*
2301 * Reference globally.
2302 */
2303 if (!pSession->fGipReferenced && !rc)
2304 {
2305 pSession->fGipReferenced = 1;
2306 pDevExt->cGipUsers++;
2307 if (pDevExt->cGipUsers == 1)
2308 {
2309 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2310 unsigned i;
2311
2312 dprintf(("SUPR0GipMap: Resumes GIP updating\n"));
2313
2314 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
2315 ASMAtomicXchgU32(&pGip->aCPUs[i].u32TransactionId, pGip->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2316 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, 0);
2317
2318#ifdef USE_NEW_OS_INTERFACE
2319 rc = RTTimerStart(pDevExt->pGipTimer, 0);
2320 AssertRC(rc); rc = 0;
2321#else
2322 supdrvOSGipResume(pDevExt);
2323#endif
2324 }
2325 }
2326 }
2327 else
2328 {
2329 rc = SUPDRV_ERR_GENERAL_FAILURE;
2330 dprintf(("SUPR0GipMap: GIP is not available!\n"));
2331 }
2332 RTSemFastMutexRelease(pDevExt->mtxGip);
2333
2334 /*
2335 * Write returns.
2336 */
2337 if (pHCPhysGid)
2338 *pHCPhysGid = HCPhys;
2339 if (ppGipR3)
2340 *ppGipR3 = pGip;
2341
2342#ifdef DEBUG_DARWIN_GIP
2343    OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGid=%lx *ppGip=%p GipMapObjR3=%p\n", rc, (unsigned long)HCPhys, pGip, pSession->GipMapObjR3));
2344#else
2345 dprintf(("SUPR0GipMap: returns %d *pHCPhysGid=%lx *ppGipR3=%p\n", rc, (unsigned long)HCPhys, (void *)(uintptr_t)pGip));
2346#endif
2347 return rc;
2348}
2349
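/*
 * Illustrative sketch (not built): mapping the GIP into the calling process and
 * dropping the mapping again. "ExampleUseGip" is a hypothetical helper name
 * used only for illustration.
 */
#if 0
static int ExampleUseGip(PSUPDRVSESSION pSession)
{
    RTR3PTR  pGipR3 = NIL_RTR3PTR;
    RTHCPHYS HCPhysGip = NIL_RTHCPHYS;
    int rc = SUPR0GipMap(pSession, &pGipR3, &HCPhysGip);
    if (!rc)
    {
        dprintf(("GIP mapped at %p (phys %VHp)\n", (void *)(uintptr_t)pGipR3, HCPhysGip));
        /* ... read time stamp data from the ring-3 mapping ... */
        rc = SUPR0GipUnmap(pSession);   /* drops this session's GIP reference */
    }
    return rc;
}
#endif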
2350
2351/**
2352 * Unmaps any user mapping of the GIP and terminates all GIP access
2353 * from this session.
2354 *
2355 * @returns 0 on success.
2356 * @returns SUPDRV_ERR_* on failure.
2357 * @param pSession Session to which the GIP mapping should belong.
2358 */
2359SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
2360{
2361 int rc = 0;
2362 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2363#ifdef DEBUG_DARWIN_GIP
2364 OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
2365 pSession,
2366 pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
2367 pSession->GipMapObjR3));
2368#else
2369 dprintf(("SUPR0GipUnmap: pSession=%p\n", pSession));
2370#endif
2371
2372 RTSemFastMutexRequest(pDevExt->mtxGip);
2373
2374 /*
2375 * Unmap anything?
2376 */
2377#ifdef USE_NEW_OS_INTERFACE
2378 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
2379 {
2380 rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
2381 AssertRC(rc);
2382 if (RT_SUCCESS(rc))
2383 pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
2384 }
2385#else
2386 if (pSession->pGip)
2387 {
2388 rc = supdrvOSGipUnmap(pDevExt, pSession->pGip);
2389 if (!rc)
2390 pSession->pGip = NULL;
2391 }
2392#endif
2393
2394 /*
2395 * Dereference global GIP.
2396 */
2397 if (pSession->fGipReferenced && !rc)
2398 {
2399 pSession->fGipReferenced = 0;
2400 if ( pDevExt->cGipUsers > 0
2401 && !--pDevExt->cGipUsers)
2402 {
2403 dprintf(("SUPR0GipUnmap: Suspends GIP updating\n"));
2404#ifdef USE_NEW_OS_INTERFACE
2405 rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = 0;
2406#else
2407 supdrvOSGipSuspend(pDevExt);
2408#endif
2409 }
2410 }
2411
2412 RTSemFastMutexRelease(pDevExt->mtxGip);
2413
2414 return rc;
2415}
2416
2417
2418/**
2419 * Adds a memory object to the session.
2420 *
2421 * @returns 0 on success.
2422 * @returns SUPDRV_ERR_* on failure.
2423 * @param pMem Memory tracking structure containing the
2424 * information to track.
2425 * @param pSession The session.
2426 */
2427static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
2428{
2429 PSUPDRVBUNDLE pBundle;
2430 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2431
2432 /*
2433 * Find free entry and record the allocation.
2434 */
2435 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2436 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2437 {
2438 if (pBundle->cUsed < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]))
2439 {
2440 unsigned i;
2441 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
2442 {
2443#ifdef USE_NEW_OS_INTERFACE
2444 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
2445#else /* !USE_NEW_OS_INTERFACE */
2446 if ( !pBundle->aMem[i].pvR0
2447 && !pBundle->aMem[i].pvR3)
2448#endif /* !USE_NEW_OS_INTERFACE */
2449 {
2450 pBundle->cUsed++;
2451 pBundle->aMem[i] = *pMem;
2452 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2453 return 0;
2454 }
2455 }
2456 AssertFailed(); /* !!this can't be happening!!! */
2457 }
2458 }
2459 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2460
2461 /*
2462 * Need to allocate a new bundle.
2463 * Insert into the last entry in the bundle.
2464 */
2465 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
2466 if (!pBundle)
2467 return SUPDRV_ERR_NO_MEMORY;
2468
2469 /* take last entry. */
2470 pBundle->cUsed++;
2471 pBundle->aMem[sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]) - 1] = *pMem;
2472
2473 /* insert into list. */
2474 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2475 pBundle->pNext = pSession->Bundle.pNext;
2476 pSession->Bundle.pNext = pBundle;
2477 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2478
2479 return 0;
2480}
2481
2482
2483/**
2484 * Releases a memory object referenced by pointer and type.
2485 *
2486 * @returns 0 on success.
2487 * @returns SUPDRV_ERR_INVALID_PARAM on failure.
2488 * @param pSession Session data.
2489 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
2490 * @param eType Memory type.
2491 */
2492static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
2493{
2494 PSUPDRVBUNDLE pBundle;
2495 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2496
2497 /*
2498 * Validate input.
2499 */
2500 if (!pSession)
2501 {
2502 dprintf(("pSession must not be NULL!"));
2503 return SUPDRV_ERR_INVALID_PARAM;
2504 }
2505 if (!uPtr)
2506 {
2507 dprintf(("Illegal address %p\n", (void *)uPtr));
2508 return SUPDRV_ERR_INVALID_PARAM;
2509 }
2510
2511 /*
2512 * Search for the address.
2513 */
2514 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2515 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2516 {
2517 if (pBundle->cUsed > 0)
2518 {
2519 unsigned i;
2520 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
2521 {
2522#ifdef USE_NEW_OS_INTERFACE
2523 if ( pBundle->aMem[i].eType == eType
2524 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2525 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2526 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2527 && (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MapObjR3) == uPtr))
2528 )
2529 {
2530 /* Make a copy of it and release it outside the spinlock. */
2531 SUPDRVMEMREF Mem = pBundle->aMem[i];
2532 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
2533 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
2534 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
2535 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2536
2537 if (Mem.MapObjR3)
2538 {
2539 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
2540 AssertRC(rc); /** @todo figure out how to handle this. */
2541 }
2542 if (Mem.MemObj)
2543 {
2544 int rc = RTR0MemObjFree(Mem.MemObj, false);
2545 AssertRC(rc); /** @todo figure out how to handle this. */
2546 }
2547 return 0;
2548 }
2549#else /* !USE_NEW_OS_INTERFACE */
2550 if ( pBundle->aMem[i].eType == eType
2551 && ( (RTHCUINTPTR)pBundle->aMem[i].pvR0 == uPtr
2552 || (RTHCUINTPTR)pBundle->aMem[i].pvR3 == uPtr))
2553 {
2554 /* Make a copy of it and release it outside the spinlock. */
2555 SUPDRVMEMREF Mem = pBundle->aMem[i];
2556 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
2557 pBundle->aMem[i].pvR0 = NULL;
2558 pBundle->aMem[i].pvR3 = NIL_RTR3PTR;
2559 pBundle->aMem[i].cb = 0;
2560 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2561
2562 /* Type specific free operation. */
2563 switch (Mem.eType)
2564 {
2565 case MEMREF_TYPE_LOCKED:
2566 supdrvOSUnlockMemOne(&Mem);
2567 break;
2568 case MEMREF_TYPE_CONT:
2569 supdrvOSContFreeOne(&Mem);
2570 break;
2571 case MEMREF_TYPE_LOW:
2572 supdrvOSLowFreeOne(&Mem);
2573 break;
2574 case MEMREF_TYPE_MEM:
2575 supdrvOSMemFreeOne(&Mem);
2576 break;
2577 default:
2578 case MEMREF_TYPE_UNUSED:
2579 break;
2580 }
2581 return 0;
2582 }
2583#endif /* !USE_NEW_OS_INTERFACE */
2584 }
2585 }
2586 }
2587 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2588 dprintf(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
2589 return SUPDRV_ERR_INVALID_PARAM;
2590}
2591
2592
2593#ifndef VBOX_WITHOUT_IDT_PATCHING
2594/**
2595 * Install IDT for the current CPU.
2596 *
2597 * @returns 0 on success.
2598 * @returns SUPDRV_ERR_NO_MEMORY or SUPDRV_ERROR_IDT_FAILED on failure.
2599 * @param pIn Input data.
2600 * @param pOut Output data.
2601 */
2602static int supdrvIOCtl_IdtInstall(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPIDTINSTALL_IN pIn, PSUPIDTINSTALL_OUT pOut)
2603{
2604 PSUPDRVPATCHUSAGE pUsagePre;
2605 PSUPDRVPATCH pPatchPre;
2606 RTIDTR Idtr;
2607 PSUPDRVPATCH pPatch;
2608 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2609 dprintf(("supdrvIOCtl_IdtInstall\n"));
2610
2611 /*
2612     * Preallocate an entry for this CPU because we don't want to do
2613     * that inside the spinlock!
2614 */
2615 pUsagePre = (PSUPDRVPATCHUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2616 if (!pUsagePre)
2617 return SUPDRV_ERR_NO_MEMORY;
2618
2619 /*
2620 * Take the spinlock and see what we need to do.
2621 */
2622 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
2623
2624 /* check if we already got a free patch. */
2625 if (!pDevExt->pIdtPatchesFree)
2626 {
2627 /*
2628 * Allocate a patch - outside the spinlock of course.
2629 */
2630 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
2631
2632 pPatchPre = (PSUPDRVPATCH)RTMemExecAlloc(sizeof(*pPatchPre));
2633 if (!pPatchPre)
2634 return SUPDRV_ERR_NO_MEMORY;
2635
2636 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
2637 }
2638 else
2639 {
2640 pPatchPre = pDevExt->pIdtPatchesFree;
2641 pDevExt->pIdtPatchesFree = pPatchPre->pNext;
2642 }
2643
2644 /* look for matching patch entry */
2645 ASMGetIDTR(&Idtr);
2646 pPatch = pDevExt->pIdtPatches;
2647 while (pPatch && pPatch->pvIdt != (void *)Idtr.pIdt)
2648 pPatch = pPatch->pNext;
2649
2650 if (!pPatch)
2651 {
2652 /*
2653 * Create patch.
2654 */
2655 pPatch = supdrvIdtPatchOne(pDevExt, pPatchPre);
2656 if (pPatch)
2657 pPatchPre = NULL; /* mark as used. */
2658 }
2659 else
2660 {
2661 /*
2662 * Simply increment patch usage.
2663 */
2664 pPatch->cUsage++;
2665 }
2666
2667 if (pPatch)
2668 {
2669 /*
2670 * Increment and add if need be the session usage record for this patch.
2671 */
2672 PSUPDRVPATCHUSAGE pUsage = pSession->pPatchUsage;
2673 while (pUsage && pUsage->pPatch != pPatch)
2674 pUsage = pUsage->pNext;
2675
2676 if (!pUsage)
2677 {
2678 /*
2679 * Add usage record.
2680 */
2681 pUsagePre->cUsage = 1;
2682 pUsagePre->pPatch = pPatch;
2683 pUsagePre->pNext = pSession->pPatchUsage;
2684 pSession->pPatchUsage = pUsagePre;
2685 pUsagePre = NULL; /* mark as used. */
2686 }
2687 else
2688 {
2689 /*
2690 * Increment usage count.
2691 */
2692 pUsage->cUsage++;
2693 }
2694 }
2695
2696    /* free patch - we accumulate them for paranoid safety reasons. */
2697 if (pPatchPre)
2698 {
2699 pPatchPre->pNext = pDevExt->pIdtPatchesFree;
2700 pDevExt->pIdtPatchesFree = pPatchPre;
2701 }
2702
2703 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
2704
2705 /*
2706 * Free unused preallocated buffers.
2707 */
2708 if (pUsagePre)
2709 RTMemFree(pUsagePre);
2710
2711 pOut->u8Idt = pDevExt->u8Idt;
2712
2713 return pPatch ? 0 : SUPDRV_ERR_IDT_FAILED;
2714}
2715
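/*
 * Illustrative sketch (not built) of the allocate-before-locking pattern used
 * above: memory is allocated outside the spinlock, the lock is only held for
 * the cheap pointer work of linking the record in, and an unused preallocation
 * is freed afterwards. The EXAMPLENODE type and ExampleAddNode() helper are
 * hypothetical and only stand in for the patch/usage records.
 */
#if 0
typedef struct EXAMPLENODE { struct EXAMPLENODE *pNext; } EXAMPLENODE, *PEXAMPLENODE;

static int ExampleAddNode(PSUPDRVDEVEXT pDevExt, PEXAMPLENODE *ppList)
{
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    PEXAMPLENODE  pNew = (PEXAMPLENODE)RTMemAlloc(sizeof(*pNew));   /* never allocate inside the lock */
    if (!pNew)
        return SUPDRV_ERR_NO_MEMORY;

    RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
    pNew->pNext = *ppList;                      /* only link manipulation while locked */
    *ppList = pNew;
    RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
    return 0;
}
#endif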
2716
2717/**
2718 * This creates an IDT patch entry.
2719 * If it's the first patch being installed, it'll also determine the IDT entry
2720 * to use.
2721 *
2722 * @returns pPatch on success.
2723 * @returns NULL on failure.
2724 * @param pDevExt Pointer to globals.
2725 * @param pPatch Patch entry to use.
2726 * This will be linked into SUPDRVDEVEXT::pIdtPatches on
2727 * successful return.
2728 * @remark Caller must own the SUPDRVDEVEXT::Spinlock!
2729 */
2730static PSUPDRVPATCH supdrvIdtPatchOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch)
2731{
2732 RTIDTR Idtr;
2733 PSUPDRVIDTE paIdt;
2734 dprintf(("supdrvIOCtl_IdtPatchOne: pPatch=%p\n", pPatch));
2735
2736 /*
2737 * Get IDT.
2738 */
2739 ASMGetIDTR(&Idtr);
2740 paIdt = (PSUPDRVIDTE)Idtr.pIdt;
2741 /*
2742 * Recent Linux kernels can be configured to 1G user /3G kernel.
2743 */
2744 if ((uintptr_t)paIdt < 0x40000000)
2745 {
2746 AssertMsgFailed(("bad paIdt=%p\n", paIdt));
2747 return NULL;
2748 }
2749
2750 if (!pDevExt->u8Idt)
2751 {
2752 /*
2753 * Test out the alternatives.
2754 *
2755 * At the moment we do not support chaining thus we ASSUME that one of
2756 * these 48 entries is unused (which is not a problem on Win32 and
2757 * Linux to my knowledge).
2758 */
2759        /** @todo we MUST change this detection to try to grab an entry which is NOT in use. This can be
2760 * combined with gathering info about which guest system call gates we can hook up directly. */
2761 unsigned i;
2762 uint8_t u8Idt = 0;
2763 static uint8_t au8Ints[] =
2764 {
2765#ifdef __WIN__ /* We don't use 0xef and above because they are system stuff on linux (ef is IPI,
2766               * local apic timer, or some other frequently firing thing). */
2767 0xef, 0xee, 0xed, 0xec,
2768#endif
2769 0xeb, 0xea, 0xe9, 0xe8,
2770 0xdf, 0xde, 0xdd, 0xdc,
2771 0x7b, 0x7a, 0x79, 0x78,
2772 0xbf, 0xbe, 0xbd, 0xbc,
2773 };
2774#if defined(__AMD64__) && defined(DEBUG)
2775 static int s_iWobble = 0;
2776 unsigned iMax = !(s_iWobble++ % 2) ? 0x80 : 0x100;
2777 dprintf(("IDT: Idtr=%p:%#x\n", (void *)Idtr.pIdt, (unsigned)Idtr.cbIdt));
2778 for (i = iMax - 0x80; i*16+15 < Idtr.cbIdt && i < iMax; i++)
2779 {
2780 dprintf(("%#x: %04x:%08x%04x%04x P=%d DPL=%d IST=%d Type1=%#x u32Reserved=%#x u5Reserved=%#x\n",
2781 i, paIdt[i].u16SegSel, paIdt[i].u32OffsetTop, paIdt[i].u16OffsetHigh, paIdt[i].u16OffsetLow,
2782 paIdt[i].u1Present, paIdt[i].u2DPL, paIdt[i].u3IST, paIdt[i].u5Type2,
2783 paIdt[i].u32Reserved, paIdt[i].u5Reserved));
2784 }
2785#endif
2786 /* look for entries which are not present or otherwise unused. */
2787 for (i = 0; i < sizeof(au8Ints) / sizeof(au8Ints[0]); i++)
2788 {
2789 u8Idt = au8Ints[i];
2790 if ( u8Idt * sizeof(SUPDRVIDTE) < Idtr.cbIdt
2791 && ( !paIdt[u8Idt].u1Present
2792 || paIdt[u8Idt].u5Type2 == 0))
2793 break;
2794 u8Idt = 0;
2795 }
2796 if (!u8Idt)
2797 {
2798            /* try again, look for a compatible entry. */
2799 for (i = 0; i < sizeof(au8Ints) / sizeof(au8Ints[0]); i++)
2800 {
2801 u8Idt = au8Ints[i];
2802 if ( u8Idt * sizeof(SUPDRVIDTE) < Idtr.cbIdt
2803 && paIdt[u8Idt].u1Present
2804 && paIdt[u8Idt].u5Type2 == SUPDRV_IDTE_TYPE2_INTERRUPT_GATE
2805 && !(paIdt[u8Idt].u16SegSel & 3))
2806 break;
2807 u8Idt = 0;
2808 }
2809 if (!u8Idt)
2810 {
2811                dprintf(("Failed to find an appropriate IDT entry!!\n"));
2812 return NULL;
2813 }
2814 }
2815 pDevExt->u8Idt = u8Idt;
2816 dprintf(("supdrvIOCtl_IdtPatchOne: u8Idt=%x\n", u8Idt));
2817 }
2818
2819 /*
2820 * Prepare the patch
2821 */
2822 memset(pPatch, 0, sizeof(*pPatch));
2823 pPatch->pvIdt = paIdt;
2824 pPatch->cUsage = 1;
2825 pPatch->pIdtEntry = &paIdt[pDevExt->u8Idt];
2826 pPatch->SavedIdt = paIdt[pDevExt->u8Idt];
2827 pPatch->ChangedIdt.u16OffsetLow = (uint32_t)((uintptr_t)&pPatch->auCode[0] & 0xffff);
2828 pPatch->ChangedIdt.u16OffsetHigh = (uint32_t)((uintptr_t)&pPatch->auCode[0] >> 16);
2829#ifdef __AMD64__
2830 pPatch->ChangedIdt.u32OffsetTop = (uint32_t)((uintptr_t)&pPatch->auCode[0] >> 32);
2831#endif
2832 pPatch->ChangedIdt.u16SegSel = ASMGetCS();
2833#ifdef __AMD64__
2834 pPatch->ChangedIdt.u3IST = 0;
2835 pPatch->ChangedIdt.u5Reserved = 0;
2836#else /* x86 */
2837 pPatch->ChangedIdt.u5Reserved = 0;
2838 pPatch->ChangedIdt.u3Type1 = 0;
2839#endif /* x86 */
2840 pPatch->ChangedIdt.u5Type2 = SUPDRV_IDTE_TYPE2_INTERRUPT_GATE;
2841 pPatch->ChangedIdt.u2DPL = 3;
2842 pPatch->ChangedIdt.u1Present = 1;
2843
2844 /*
2845 * Generate the patch code.
2846 */
2847 {
2848#ifdef __AMD64__
2849 union
2850 {
2851 uint8_t *pb;
2852 uint32_t *pu32;
2853 uint64_t *pu64;
2854 } u, uFixJmp, uFixCall, uNotNested;
2855 u.pb = &pPatch->auCode[0];
2856
2857 /* check the cookie */
2858 *u.pb++ = 0x3d; // cmp eax, GLOBALCOOKIE
2859 *u.pu32++ = pDevExt->u32Cookie;
2860
2861 *u.pb++ = 0x74; // jz @VBoxCall
2862 *u.pb++ = 2;
2863
2864 /* jump to forwarder code. */
2865 *u.pb++ = 0xeb;
2866 uFixJmp = u;
2867 *u.pb++ = 0xfe;
2868
2869 // @VBoxCall:
2870 *u.pb++ = 0x0f; // swapgs
2871 *u.pb++ = 0x01;
2872 *u.pb++ = 0xf8;
2873
2874 /*
2875 * Call VMMR0Entry
2876         * We don't have to push the arguments here, but we have to
2877 * reserve some stack space for the interrupt forwarding.
2878 */
2879# ifdef __WIN__
2880 *u.pb++ = 0x50; // push rax ; alignment filler.
2881 *u.pb++ = 0x41; // push r8 ; uArg
2882 *u.pb++ = 0x50;
2883 *u.pb++ = 0x52; // push rdx ; uOperation
2884 *u.pb++ = 0x51; // push rcx ; pVM
2885# else
2886 *u.pb++ = 0x51; // push rcx ; alignment filler.
2887 *u.pb++ = 0x52; // push rdx ; uArg
2888 *u.pb++ = 0x56; // push rsi ; uOperation
2889 *u.pb++ = 0x57; // push rdi ; pVM
2890# endif
2891
2892 *u.pb++ = 0xff; // call qword [pfnVMMR0Entry wrt rip]
2893 *u.pb++ = 0x15;
2894 uFixCall = u;
2895 *u.pu32++ = 0;
2896
2897 *u.pb++ = 0x48; // add rsp, 20h ; remove call frame.
2898 *u.pb++ = 0x81;
2899 *u.pb++ = 0xc4;
2900 *u.pu32++ = 0x20;
2901
2902 *u.pb++ = 0x0f; // swapgs
2903 *u.pb++ = 0x01;
2904 *u.pb++ = 0xf8;
2905
2906 /* Return to R3. */
2907 uNotNested = u;
2908 *u.pb++ = 0x48; // iretq
2909 *u.pb++ = 0xcf;
2910
2911 while ((uintptr_t)u.pb & 0x7) // align 8
2912 *u.pb++ = 0xcc;
2913
2914 /* Pointer to the VMMR0Entry. */ // pfnVMMR0Entry dq StubVMMR0Entry
2915 *uFixCall.pu32 = (uint32_t)(u.pb - uFixCall.pb - 4); uFixCall.pb = NULL;
2916 pPatch->offVMMR0EntryFixup = (uint16_t)(u.pb - &pPatch->auCode[0]);
2917 *u.pu64++ = pDevExt->pvVMMR0 ? (uint64_t)pDevExt->pfnVMMR0Entry : (uint64_t)u.pb + 8;
2918
2919 /* stub entry. */ // StubVMMR0Entry:
2920 pPatch->offStub = (uint16_t)(u.pb - &pPatch->auCode[0]);
2921 *u.pb++ = 0x33; // xor eax, eax
2922 *u.pb++ = 0xc0;
2923
2924 *u.pb++ = 0x48; // dec rax
2925 *u.pb++ = 0xff;
2926 *u.pb++ = 0xc8;
2927
2928 *u.pb++ = 0xc3; // ret
2929
2930 /* forward to the original handler using a retf. */
2931 *uFixJmp.pb = (uint8_t)(u.pb - uFixJmp.pb - 1); uFixJmp.pb = NULL;
2932
2933 *u.pb++ = 0x68; // push <target cs>
2934 *u.pu32++ = !pPatch->SavedIdt.u5Type2 ? ASMGetCS() : pPatch->SavedIdt.u16SegSel;
2935
2936 *u.pb++ = 0x68; // push <low target rip>
2937 *u.pu32++ = !pPatch->SavedIdt.u5Type2
2938 ? (uint32_t)(uintptr_t)uNotNested.pb
2939 : (uint32_t)pPatch->SavedIdt.u16OffsetLow
2940 | (uint32_t)pPatch->SavedIdt.u16OffsetHigh << 16;
2941
2942 *u.pb++ = 0xc7; // mov dword [rsp + 4], <high target rip>
2943 *u.pb++ = 0x44;
2944 *u.pb++ = 0x24;
2945 *u.pb++ = 0x04;
2946 *u.pu32++ = !pPatch->SavedIdt.u5Type2
2947 ? (uint32_t)((uint64_t)uNotNested.pb >> 32)
2948 : pPatch->SavedIdt.u32OffsetTop;
2949
2950 *u.pb++ = 0x48; // retf ; does this require prefix?
2951 *u.pb++ = 0xcb;
2952
2953#else /* __X86__ */
2954
2955 union
2956 {
2957 uint8_t *pb;
2958 uint16_t *pu16;
2959 uint32_t *pu32;
2960 } u, uFixJmpNotNested, uFixJmp, uFixCall, uNotNested;
2961 u.pb = &pPatch->auCode[0];
2962
2963 /* check the cookie */
2964 *u.pb++ = 0x81; // cmp esi, GLOBALCOOKIE
2965 *u.pb++ = 0xfe;
2966 *u.pu32++ = pDevExt->u32Cookie;
2967
2968 *u.pb++ = 0x74; // jz VBoxCall
2969 uFixJmp = u;
2970 *u.pb++ = 0;
2971
2972 /* jump (far) to the original handler / not-nested-stub. */
2973 *u.pb++ = 0xea; // jmp far NotNested
2974 uFixJmpNotNested = u;
2975 *u.pu32++ = 0;
2976 *u.pu16++ = 0;
2977
2978 /* save selector registers. */ // VBoxCall:
2979 *uFixJmp.pb = (uint8_t)(u.pb - uFixJmp.pb - 1);
2980 *u.pb++ = 0x0f; // push fs
2981 *u.pb++ = 0xa0;
2982
2983 *u.pb++ = 0x1e; // push ds
2984
2985 *u.pb++ = 0x06; // push es
2986
2987 /* call frame */
2988 *u.pb++ = 0x51; // push ecx
2989
2990 *u.pb++ = 0x52; // push edx
2991
2992 *u.pb++ = 0x50; // push eax
2993
2994 /* load ds, es and perhaps fs before call. */
2995 *u.pb++ = 0xb8; // mov eax, KernelDS
2996 *u.pu32++ = ASMGetDS();
2997
2998 *u.pb++ = 0x8e; // mov ds, eax
2999 *u.pb++ = 0xd8;
3000
3001 *u.pb++ = 0x8e; // mov es, eax
3002 *u.pb++ = 0xc0;
3003
3004#ifdef __WIN__
3005 *u.pb++ = 0xb8; // mov eax, KernelFS
3006 *u.pu32++ = ASMGetFS();
3007
3008 *u.pb++ = 0x8e; // mov fs, eax
3009 *u.pb++ = 0xe0;
3010#endif
3011
3012 /* do the call. */
3013 *u.pb++ = 0xe8; // call _VMMR0Entry / StubVMMR0Entry
3014 uFixCall = u;
3015 pPatch->offVMMR0EntryFixup = (uint16_t)(u.pb - &pPatch->auCode[0]);
3016 *u.pu32++ = 0xfffffffb;
3017
3018 *u.pb++ = 0x83; // add esp, 0ch ; cdecl
3019 *u.pb++ = 0xc4;
3020 *u.pb++ = 0x0c;
3021
3022 /* restore selector registers. */
3023 *u.pb++ = 0x07; // pop es
3024 //
3025 *u.pb++ = 0x1f; // pop ds
3026
3027 *u.pb++ = 0x0f; // pop fs
3028 *u.pb++ = 0xa1;
3029
3030 uNotNested = u; // NotNested:
3031 *u.pb++ = 0xcf; // iretd
3032
3033 /* the stub VMMR0Entry. */ // StubVMMR0Entry:
3034 pPatch->offStub = (uint16_t)(u.pb - &pPatch->auCode[0]);
3035 *u.pb++ = 0x33; // xor eax, eax
3036 *u.pb++ = 0xc0;
3037
3038 *u.pb++ = 0x48; // dec eax
3039
3040 *u.pb++ = 0xc3; // ret
3041
3042 /* Fixup the VMMR0Entry call. */
3043 if (pDevExt->pvVMMR0)
3044 *uFixCall.pu32 = (uint32_t)pDevExt->pfnVMMR0Entry - (uint32_t)(uFixCall.pu32 + 1);
3045 else
3046 *uFixCall.pu32 = (uint32_t)&pPatch->auCode[pPatch->offStub] - (uint32_t)(uFixCall.pu32 + 1);
3047
3048 /* Fixup the forward / nested far jump. */
3049 if (!pPatch->SavedIdt.u5Type2)
3050 {
3051 *uFixJmpNotNested.pu32++ = (uint32_t)uNotNested.pb;
3052 *uFixJmpNotNested.pu16++ = ASMGetCS();
3053 }
3054 else
3055 {
3056 *uFixJmpNotNested.pu32++ = ((uint32_t)pPatch->SavedIdt.u16OffsetHigh << 16) | pPatch->SavedIdt.u16OffsetLow;
3057 *uFixJmpNotNested.pu16++ = pPatch->SavedIdt.u16SegSel;
3058 }
3059#endif /* __X86__ */
3060 Assert(u.pb <= &pPatch->auCode[sizeof(pPatch->auCode)]);
3061#if 0
3062 /* dump the patch code */
3063 dprintf(("patch code: %p\n", &pPatch->auCode[0]));
3064 for (uFixCall.pb = &pPatch->auCode[0]; uFixCall.pb < u.pb; uFixCall.pb++)
3065 dprintf(("0x%02x,\n", *uFixCall.pb));
3066#endif
3067 }
3068
3069 /*
3070 * Install the patch.
3071 */
3072 supdrvIdtWrite(pPatch->pIdtEntry, &pPatch->ChangedIdt);
3073 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)), ("The stupid change code didn't work!!!!!\n"));
3074
3075 /*
3076 * Link in the patch.
3077 */
3078 pPatch->pNext = pDevExt->pIdtPatches;
3079 pDevExt->pIdtPatches = pPatch;
3080
3081 return pPatch;
3082}
3083
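/*
 * Illustrative sketch (not built) of the gate offset encoding used when
 * preparing ChangedIdt above: the patch code address is split into the low,
 * high and (on AMD64) top offset fields of the gate descriptor. The local
 * variable and function names here are arbitrary.
 */
#if 0
static void ExampleSplitGateOffset(uintptr_t uHandler)
{
    uint16_t u16Low  = (uint16_t)(uHandler & 0xffff);           /* bits 0..15  -> u16OffsetLow  */
    uint16_t u16High = (uint16_t)((uHandler >> 16) & 0xffff);   /* bits 16..31 -> u16OffsetHigh */
# ifdef __AMD64__
    uint32_t u32Top  = (uint32_t)(uHandler >> 32);              /* bits 32..63 -> u32OffsetTop  */
    dprintf(("gate offset: low=%#x high=%#x top=%#x\n", u16Low, u16High, u32Top));
# else
    dprintf(("gate offset: low=%#x high=%#x\n", u16Low, u16High));
# endif
}
#endif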
3084
3085/**
3086 * Removes the session's IDT references.
3087 * This will uninstall our IDT patch if it's left unreferenced.
3088 *
3089 * @returns 0 indicating success.
3090 * @param pDevExt Device globals.
3091 * @param pSession Session data.
3092 */
3093static int supdrvIOCtl_IdtRemoveAll(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
3094{
3095 PSUPDRVPATCHUSAGE pUsage;
3096 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3097 dprintf(("supdrvIOCtl_IdtRemoveAll: pSession=%p\n", pSession));
3098
3099 /*
3100 * Take the spinlock.
3101 */
3102 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
3103
3104 /*
3105 * Walk usage list.
3106 */
3107 pUsage = pSession->pPatchUsage;
3108 while (pUsage)
3109 {
3110 if (pUsage->pPatch->cUsage <= pUsage->cUsage)
3111 supdrvIdtRemoveOne(pDevExt, pUsage->pPatch);
3112 else
3113 pUsage->pPatch->cUsage -= pUsage->cUsage;
3114
3115 /* next */
3116 pUsage = pUsage->pNext;
3117 }
3118
3119 /*
3120 * Empty the usage chain and we're done inside the spinlock.
3121 */
3122 pUsage = pSession->pPatchUsage;
3123 pSession->pPatchUsage = NULL;
3124
3125 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
3126
3127 /*
3128 * Free usage entries.
3129 */
3130 while (pUsage)
3131 {
3132 void *pvToFree = pUsage;
3133 pUsage->cUsage = 0;
3134 pUsage->pPatch = NULL;
3135 pUsage = pUsage->pNext;
3136 RTMemFree(pvToFree);
3137 }
3138
3139 return 0;
3140}
3141
3142
3143/**
3144 * Remove one patch.
3145 *
3146 * @param pDevExt Device globals.
3147 * @param pPatch Patch entry to remove.
3148 * @remark Caller must own SUPDRVDEVEXT::Spinlock!
3149 */
3150static void supdrvIdtRemoveOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch)
3151{
3152 dprintf(("supdrvIdtRemoveOne: pPatch=%p\n", pPatch));
3153
3154 pPatch->cUsage = 0;
3155
3156 /*
3157     * If the IDT entry was changed it has to stick around forever!
3158     * We will attempt to free it again; perhaps next time we'll succeed :-)
3159 */
3160 if (memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)))
3161 {
3162 AssertMsgFailed(("The hijacked IDT entry has CHANGED!!!\n"));
3163 return;
3164 }
3165
3166 /*
3167 * Unlink it.
3168 */
3169 if (pDevExt->pIdtPatches != pPatch)
3170 {
3171 PSUPDRVPATCH pPatchPrev = pDevExt->pIdtPatches;
3172 while (pPatchPrev)
3173 {
3174 if (pPatchPrev->pNext == pPatch)
3175 {
3176 pPatchPrev->pNext = pPatch->pNext;
3177 break;
3178 }
3179 pPatchPrev = pPatchPrev->pNext;
3180 }
3181        Assert(pPatchPrev);
3182 }
3183 else
3184 pDevExt->pIdtPatches = pPatch->pNext;
3185 pPatch->pNext = NULL;
3186
3187
3188 /*
3189 * Verify and restore the IDT.
3190 */
3191 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)), ("The hijacked IDT entry has CHANGED!!!\n"));
3192 supdrvIdtWrite(pPatch->pIdtEntry, &pPatch->SavedIdt);
3193 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->SavedIdt, sizeof(pPatch->SavedIdt)), ("The hijacked IDT entry has CHANGED!!!\n"));
3194
3195 /*
3196 * Put it in the free list.
3197 * (This free list stuff is to calm my paranoia.)
3198 */
3199 pPatch->pvIdt = NULL;
3200 pPatch->pIdtEntry = NULL;
3201
3202 pPatch->pNext = pDevExt->pIdtPatchesFree;
3203 pDevExt->pIdtPatchesFree = pPatch;
3204}
3205
3206
3207/**
3208 * Write to an IDT entry.
3209 *
3210 * @param pvIdtEntry Where to write.
3211 * @param pNewIDTEntry What to write.
3212 */
3213static void supdrvIdtWrite(volatile void *pvIdtEntry, const SUPDRVIDTE *pNewIDTEntry)
3214{
3215 RTUINTREG uCR0;
3216 RTUINTREG uFlags;
3217
3218 /*
3219     * On SMP machines (P4 hyperthreading included) we must perform a
3220 * 64-bit locked write when updating the IDT entry.
3221 *
3222     * The F00F bugfix for linux (and probably other OSes) causes
3223     * the IDT to point to a read-only mapping. We get around that
3224     * by temporarily turning off WP. Since we're inside a spinlock at this
3225 * point, interrupts are disabled and there isn't any way the WP bit
3226 * flipping can cause any trouble.
3227 */
3228
3229 /* Save & Clear interrupt flag; Save & clear WP. */
3230 uFlags = ASMGetFlags();
3231 ASMSetFlags(uFlags & ~(RTUINTREG)(1 << 9)); /*X86_EFL_IF*/
3232 Assert(!(ASMGetFlags() & (1 << 9)));
3233 uCR0 = ASMGetCR0();
3234 ASMSetCR0(uCR0 & ~(RTUINTREG)(1 << 16)); /*X86_CR0_WP*/
3235
3236 /* Update IDT Entry */
3237#ifdef __AMD64__
3238 ASMAtomicXchgU128((volatile uint128_t *)pvIdtEntry, *(uint128_t *)(uintptr_t)pNewIDTEntry);
3239#else
3240 ASMAtomicXchgU64((volatile uint64_t *)pvIdtEntry, *(uint64_t *)(uintptr_t)pNewIDTEntry);
3241#endif
3242
3243 /* Restore CR0 & Flags */
3244 ASMSetCR0(uCR0);
3245 ASMSetFlags(uFlags);
3246}
3247#endif /* !VBOX_WITHOUT_IDT_PATCHING */
3248
3249
3250/**
3251 * Opens an image. If it's the first time it's opened, the caller must upload
3252 * the bits using the supdrvIOCtl_LdrLoad() / SUP_IOCTL_LDR_LOAD function.
3253 *
3254 * This is the 1st step of the loading.
3255 *
3256 * @returns 0 on success.
3257 * @returns SUPDRV_ERR_* on failure.
3258 * @param pDevExt Device globals.
3259 * @param pSession Session data.
3260 * @param pIn Input.
3261 * @param pOut Output. (May overlap pIn.)
3262 */
3263static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN_IN pIn, PSUPLDROPEN_OUT pOut)
3264{
3265 PSUPDRVLDRIMAGE pImage;
3266 unsigned cb;
3267 void *pv;
3268 dprintf(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pIn->szName, pIn->cbImage));
3269
3270 /*
3271 * Check if we got an instance of the image already.
3272 */
3273 RTSemFastMutexRequest(pDevExt->mtxLdr);
3274 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3275 {
3276 if (!strcmp(pImage->szName, pIn->szName))
3277 {
3278 pImage->cUsage++;
3279 pOut->pvImageBase = pImage->pvImage;
3280 pOut->fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
3281 supdrvLdrAddUsage(pSession, pImage);
3282 RTSemFastMutexRelease(pDevExt->mtxLdr);
3283 return 0;
3284 }
3285 }
3286 /* (not found - add it!) */
3287
3288 /*
3289 * Allocate memory.
3290 */
3291 cb = pIn->cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
3292 pv = RTMemExecAlloc(cb);
3293 if (!pv)
3294 {
3295 RTSemFastMutexRelease(pDevExt->mtxLdr);
3296 return SUPDRV_ERR_NO_MEMORY;
3297 }
3298
3299 /*
3300 * Setup and link in the LDR stuff.
3301 */
3302 pImage = (PSUPDRVLDRIMAGE)pv;
3303 pImage->pvImage = ALIGNP(pImage + 1, 32);
3304 pImage->cbImage = pIn->cbImage;
3305 pImage->pfnModuleInit = NULL;
3306 pImage->pfnModuleTerm = NULL;
3307 pImage->uState = SUP_IOCTL_LDR_OPEN;
3308 pImage->cUsage = 1;
3309 strcpy(pImage->szName, pIn->szName);
3310
3311 pImage->pNext = pDevExt->pLdrImages;
3312 pDevExt->pLdrImages = pImage;
3313
3314 supdrvLdrAddUsage(pSession, pImage);
3315
3316 pOut->pvImageBase = pImage->pvImage;
3317 pOut->fNeedsLoading = 1;
3318 RTSemFastMutexRelease(pDevExt->mtxLdr);
3319 return 0;
3320}
3321
3322
3323/**
3324 * Loads the image bits.
3325 *
3326 * This is the 2nd step of the loading.
3327 *
3328 * @returns 0 on success.
3329 * @returns SUPDRV_ERR_* on failure.
3330 * @param pDevExt Device globals.
3331 * @param pSession Session data.
3332 * @param pIn Input.
3333 */
3334static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD_IN pIn)
3335{
3336 PSUPDRVLDRUSAGE pUsage;
3337 PSUPDRVLDRIMAGE pImage;
3338 int rc;
3339 dprintf(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pIn->pvImageBase, pIn->cbImage));
3340
3341 /*
3342 * Find the ldr image.
3343 */
3344 RTSemFastMutexRequest(pDevExt->mtxLdr);
3345 pUsage = pSession->pLdrUsage;
3346 while (pUsage && pUsage->pImage->pvImage != pIn->pvImageBase)
3347 pUsage = pUsage->pNext;
3348 if (!pUsage)
3349 {
3350 RTSemFastMutexRelease(pDevExt->mtxLdr);
3351 dprintf(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3352 return SUPDRV_ERR_INVALID_HANDLE;
3353 }
3354 pImage = pUsage->pImage;
3355 if (pImage->cbImage != pIn->cbImage)
3356 {
3357 RTSemFastMutexRelease(pDevExt->mtxLdr);
3358 dprintf(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pIn->cbImage));
3359 return SUPDRV_ERR_INVALID_HANDLE;
3360 }
3361 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3362 {
3363 unsigned uState = pImage->uState;
3364 RTSemFastMutexRelease(pDevExt->mtxLdr);
3365 if (uState != SUP_IOCTL_LDR_LOAD)
3366 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3367 return SUPDRV_ERR_ALREADY_LOADED;
3368 }
3369 switch (pIn->eEPType)
3370 {
3371 case EP_NOTHING:
3372 break;
3373 case EP_VMMR0:
3374 if (!pIn->EP.VMMR0.pvVMMR0 || !pIn->EP.VMMR0.pvVMMR0Entry)
3375 {
3376 RTSemFastMutexRelease(pDevExt->mtxLdr);
3377 dprintf(("pvVMMR0=%p or pIn->EP.VMMR0.pvVMMR0Entry=%p is NULL!\n",
3378 pIn->EP.VMMR0.pvVMMR0, pIn->EP.VMMR0.pvVMMR0Entry));
3379 return SUPDRV_ERR_INVALID_PARAM;
3380 }
3381 if ((uintptr_t)pIn->EP.VMMR0.pvVMMR0Entry - (uintptr_t)pImage->pvImage >= pIn->cbImage)
3382 {
3383 RTSemFastMutexRelease(pDevExt->mtxLdr);
3384 dprintf(("SUP_IOCTL_LDR_LOAD: pvVMMR0Entry=%p is outside the image (%p %d bytes)\n",
3385 pIn->EP.VMMR0.pvVMMR0Entry, pImage->pvImage, pIn->cbImage));
3386 return SUPDRV_ERR_INVALID_PARAM;
3387 }
3388 break;
3389 default:
3390 RTSemFastMutexRelease(pDevExt->mtxLdr);
3391 dprintf(("Invalid eEPType=%d\n", pIn->eEPType));
3392 return SUPDRV_ERR_INVALID_PARAM;
3393 }
3394 if ( pIn->pfnModuleInit
3395 && (uintptr_t)pIn->pfnModuleInit - (uintptr_t)pImage->pvImage >= pIn->cbImage)
3396 {
3397 RTSemFastMutexRelease(pDevExt->mtxLdr);
3398 dprintf(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
3399 pIn->pfnModuleInit, pImage->pvImage, pIn->cbImage));
3400 return SUPDRV_ERR_INVALID_PARAM;
3401 }
3402 if ( pIn->pfnModuleTerm
3403 && (uintptr_t)pIn->pfnModuleTerm - (uintptr_t)pImage->pvImage >= pIn->cbImage)
3404 {
3405 RTSemFastMutexRelease(pDevExt->mtxLdr);
3406 dprintf(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
3407 pIn->pfnModuleTerm, pImage->pvImage, pIn->cbImage));
3408 return SUPDRV_ERR_INVALID_PARAM;
3409 }
3410
3411 /*
3412 * Copy the memory.
3413 */
3414 /* no need to do try/except as this is a buffered request. */
3415 memcpy(pImage->pvImage, &pIn->achImage[0], pImage->cbImage);
3416 pImage->uState = SUP_IOCTL_LDR_LOAD;
3417 pImage->pfnModuleInit = pIn->pfnModuleInit;
3418 pImage->pfnModuleTerm = pIn->pfnModuleTerm;
3419 pImage->offSymbols = pIn->offSymbols;
3420 pImage->cSymbols = pIn->cSymbols;
3421 pImage->offStrTab = pIn->offStrTab;
3422 pImage->cbStrTab = pIn->cbStrTab;
3423
3424 /*
3425 * Update any entry points.
3426 */
3427 switch (pIn->eEPType)
3428 {
3429 default:
3430 case EP_NOTHING:
3431 rc = 0;
3432 break;
3433 case EP_VMMR0:
3434 rc = supdrvLdrSetR0EP(pDevExt, pIn->EP.VMMR0.pvVMMR0, pIn->EP.VMMR0.pvVMMR0Entry);
3435 break;
3436 }
3437
3438 /*
3439 * On success call the module initialization.
3440 */
3441 dprintf(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3442 if (!rc && pImage->pfnModuleInit)
3443 {
3444 dprintf(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3445 rc = pImage->pfnModuleInit();
3446 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3447 supdrvLdrUnsetR0EP(pDevExt);
3448 }
3449
3450 if (rc)
3451 pImage->uState = SUP_IOCTL_LDR_OPEN;
3452
3453 RTSemFastMutexRelease(pDevExt->mtxLdr);
3454 return rc;
3455}
3456
3457
3458/**
3459 * Frees a previously loaded (prep'ed) image.
3460 *
3461 * @returns 0 on success.
3462 * @returns SUPDRV_ERR_* on failure.
3463 * @param pDevExt Device globals.
3464 * @param pSession Session data.
3465 * @param pIn Input.
3466 */
3467static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE_IN pIn)
3468{
3469 PSUPDRVLDRUSAGE pUsagePrev;
3470 PSUPDRVLDRUSAGE pUsage;
3471 PSUPDRVLDRIMAGE pImage;
3472 dprintf(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pIn->pvImageBase));
3473
3474 /*
3475 * Find the ldr image.
3476 */
3477 RTSemFastMutexRequest(pDevExt->mtxLdr);
3478 pUsagePrev = NULL;
3479 pUsage = pSession->pLdrUsage;
3480 while (pUsage && pUsage->pImage->pvImage != pIn->pvImageBase)
3481 {
3482 pUsagePrev = pUsage;
3483 pUsage = pUsage->pNext;
3484 }
3485 if (!pUsage)
3486 {
3487 RTSemFastMutexRelease(pDevExt->mtxLdr);
3488 dprintf(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
3489 return SUPDRV_ERR_INVALID_HANDLE;
3490 }
3491
3492 /*
3493 * Check if we can remove anything.
3494 */
3495 pImage = pUsage->pImage;
3496 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
3497 {
3498 /* unlink it */
3499 if (pUsagePrev)
3500 pUsagePrev->pNext = pUsage->pNext;
3501 else
3502 pSession->pLdrUsage = pUsage->pNext;
3503 /* free it */
3504 pUsage->pImage = NULL;
3505 pUsage->pNext = NULL;
3506 RTMemFree(pUsage);
3507
3508 /*
3509         * Dereference the image.
3510 */
3511 if (pImage->cUsage <= 1)
3512 supdrvLdrFree(pDevExt, pImage);
3513 else
3514 pImage->cUsage--;
3515 }
3516 else
3517 {
3518 /*
3519 * Dereference both image and usage.
3520 */
3521 pImage->cUsage--;
3522 pUsage->cUsage--;
3523 }
3524
3525 RTSemFastMutexRelease(pDevExt->mtxLdr);
3526 return 0;
3527}
3528
3529
3530/**
3531 * Gets the address of a symbol in an open image.
3532 *
3533 * @returns 0 on success.
3534 * @returns SUPDRV_ERR_* on failure.
3535 * @param pDevExt Device globals.
3536 * @param pSession Session data.
3537 * @param pIn Input.
3538 * @param pOut Output. (May overlap pIn.)
3539 */
3540static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL_IN pIn, PSUPLDRGETSYMBOL_OUT pOut)
3541{
3542 PSUPDRVLDRIMAGE pImage;
3543 PSUPDRVLDRUSAGE pUsage;
3544 uint32_t i;
3545 PSUPLDRSYM paSyms;
3546 const char *pchStrings;
3547 const size_t cbSymbol = strlen(pIn->szSymbol) + 1;
3548 void *pvSymbol = NULL;
3549 int rc = SUPDRV_ERR_GENERAL_FAILURE; /** @todo better error code. */
3550 dprintf2(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pIn->pvImageBase, pIn->szSymbol));
3551
3552 /*
3553 * Find the ldr image.
3554 */
3555 RTSemFastMutexRequest(pDevExt->mtxLdr);
3556 pUsage = pSession->pLdrUsage;
3557 while (pUsage && pUsage->pImage->pvImage != pIn->pvImageBase)
3558 pUsage = pUsage->pNext;
3559 if (!pUsage)
3560 {
3561 RTSemFastMutexRelease(pDevExt->mtxLdr);
3562 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3563 return SUPDRV_ERR_INVALID_HANDLE;
3564 }
3565 pImage = pUsage->pImage;
3566 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3567 {
3568 unsigned uState = pImage->uState;
3569 RTSemFastMutexRelease(pDevExt->mtxLdr);
3570 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3571 return SUPDRV_ERR_ALREADY_LOADED;
3572 }
3573
3574 /*
3575 * Search the symbol string.
3576 */
3577 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3578 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3579 for (i = 0; i < pImage->cSymbols; i++)
3580 {
3581 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3582 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3583 && !memcmp(pchStrings + paSyms[i].offName, pIn->szSymbol, cbSymbol))
3584 {
3585 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3586 rc = 0;
3587 break;
3588 }
3589 }
3590 RTSemFastMutexRelease(pDevExt->mtxLdr);
3591 pOut->pvSymbol = pvSymbol;
3592 return rc;
3593}
3594
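/*
 * Illustrative sketch (not built) of the table layout the lookup above relies
 * on: the symbol table holds offsets into the image (offSymbol) and into the
 * string table (offName), so resolving a name is a linear scan plus two offset
 * additions. "ExampleFindSymbol" is a hypothetical helper mirroring that loop.
 */
#if 0
static void *ExampleFindSymbol(PSUPDRVLDRIMAGE pImage, const char *pszSymbol)
{
    const char  *pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
    PSUPLDRSYM   paSyms     = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
    const size_t cbSymbol   = strlen(pszSymbol) + 1;
    uint32_t     i;
    for (i = 0; i < pImage->cSymbols; i++)
        if (   paSyms[i].offName + cbSymbol <= pImage->cbStrTab
            && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
            return (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
    return NULL;
}
#endif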
3595
3596/**
3597 * Updates the IDT patches to point to the specified VMM R0 entry
3598 * point (i.e. VMMR0Enter()).
3599 *
3600 * @returns 0 on success.
3601 * @returns SUPDRV_ERR_* on failure.
3602 * @param pDevExt Device globals.
3603 * @param pvVMMR0 VMMR0 image handle.
3604 * @param pvVMMR0Entry VMMR0Entry address.
3606 * @remark Caller must own the loader mutex.
3607 */
3608static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry)
3609{
3610 int rc;
3611 dprintf(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0Entry=%p\n", pvVMMR0, pvVMMR0Entry));
3612
3613
3614 /*
3615 * Check if not yet set.
3616 */
3617 rc = 0;
3618 if (!pDevExt->pvVMMR0)
3619 {
3620#ifndef VBOX_WITHOUT_IDT_PATCHING
3621 PSUPDRVPATCH pPatch;
3622#endif
3623
3624 /*
3625 * Set it and update IDT patch code.
3626 */
3627 pDevExt->pvVMMR0 = pvVMMR0;
3628 pDevExt->pfnVMMR0Entry = pvVMMR0Entry;
3629#ifndef VBOX_WITHOUT_IDT_PATCHING
3630 for (pPatch = pDevExt->pIdtPatches; pPatch; pPatch = pPatch->pNext)
3631 {
3632# ifdef __AMD64__
3633 ASMAtomicXchgU64((volatile uint64_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup], (uint64_t)pvVMMR0);
3634# else /* __X86__ */
3635 ASMAtomicXchgU32((volatile uint32_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
3636 (uint32_t)pvVMMR0 - (uint32_t)&pPatch->auCode[pPatch->offVMMR0EntryFixup + 4]);
3637# endif
3638 }
3639#endif /* !VBOX_WITHOUT_IDT_PATCHING */
3640 }
3641 else
3642 {
3643 /*
3644 * Return failure or success depending on whether the
3645 * values match or not.
3646 */
3647 if ( pDevExt->pvVMMR0 != pvVMMR0
3648 || (void *)pDevExt->pfnVMMR0Entry != pvVMMR0Entry)
3649 {
3650 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3651 rc = SUPDRV_ERR_INVALID_PARAM;
3652 }
3653 }
3654 return rc;
3655}
3656
3657
3658/**
3659 * Unsets the R0 entry point installed by supdrvLdrSetR0EP.
3660 *
3661 * @param pDevExt Device globals.
3662 */
3663static void supdrvLdrUnsetR0EP(PSUPDRVDEVEXT pDevExt)
3664{
3665#ifndef VBOX_WITHOUT_IDT_PATCHING
3666 PSUPDRVPATCH pPatch;
3667#endif
3668
3669 pDevExt->pvVMMR0 = NULL;
3670 pDevExt->pfnVMMR0Entry = NULL;
3671
3672#ifndef VBOX_WITHOUT_IDT_PATCHING
3673 for (pPatch = pDevExt->pIdtPatches; pPatch; pPatch = pPatch->pNext)
3674 {
3675# ifdef __AMD64__
3676 ASMAtomicXchgU64((volatile uint64_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
3677 (uint64_t)&pPatch->auCode[pPatch->offStub]);
3678# else /* __X86__ */
3679 ASMAtomicXchgU32((volatile uint32_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
3680 (uint32_t)&pPatch->auCode[pPatch->offStub] - (uint32_t)&pPatch->auCode[pPatch->offVMMR0EntryFixup + 4]);
3681# endif
3682 }
3683#endif /* !VBOX_WITHOUT_IDT_PATCHING */
3684}
3685
3686
3687/**
3688 * Adds a usage reference in the specified session of an image.
3689 *
3690 * @param pSession Session in question.
3691 * @param pImage Image which the session is using.
3692 */
3693static void supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3694{
3695 PSUPDRVLDRUSAGE pUsage;
3696 dprintf(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3697
3698 /*
3699 * Referenced it already?
3700 */
3701 pUsage = pSession->pLdrUsage;
3702 while (pUsage)
3703 {
3704 if (pUsage->pImage == pImage)
3705 {
3706 pUsage->cUsage++;
3707 return;
3708 }
3709 pUsage = pUsage->pNext;
3710 }
3711
3712 /*
3713 * Allocate new usage record.
3714 */
3715 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3716 Assert(pUsage);
3717 if (pUsage)
3718 {
3719 pUsage->cUsage = 1;
3720 pUsage->pImage = pImage;
3721 pUsage->pNext = pSession->pLdrUsage;
3722 pSession->pLdrUsage = pUsage;
3723 }
3724 /* ignore errors... */
3725}
3726
3727
3728/**
3729 * Frees a load image.
3730 *
3731 * @param pDevExt Pointer to device extension.
3732 * @param pImage Pointer to the image we're going to free.
3733 * This image must exist!
3734 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
3735 */
3736static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
3737{
3738 PSUPDRVLDRIMAGE pImagePrev;
3739 dprintf(("supdrvLdrFree: pImage=%p\n", pImage));
3740
3741 /* find it - argh, should've used a doubly linked list. */
3742 Assert(pDevExt->pLdrImages);
3743 pImagePrev = NULL;
3744 if (pDevExt->pLdrImages != pImage)
3745 {
3746 pImagePrev = pDevExt->pLdrImages;
3747 while (pImagePrev->pNext != pImage)
3748 pImagePrev = pImagePrev->pNext;
3749 Assert(pImagePrev->pNext == pImage);
3750 }
3751
3752 /* unlink */
3753 if (pImagePrev)
3754 pImagePrev->pNext = pImage->pNext;
3755 else
3756 pDevExt->pLdrImages = pImage->pNext;
3757
3758 /* check if this is VMMR0.r0 and fix the Idt patches if it is. */
3759 if (pDevExt->pvVMMR0 == pImage->pvImage)
3760 supdrvLdrUnsetR0EP(pDevExt);
3761
3762 /* call termination function if fully loaded. */
3763 if ( pImage->pfnModuleTerm
3764 && pImage->uState == SUP_IOCTL_LDR_LOAD)
3765 {
3766 dprintf(("supdrvLdrFree: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
3767 pImage->pfnModuleTerm();
3768 }
3769
3770 /* free the image */
3771 pImage->cUsage = 0;
3772 pImage->pNext = 0;
3773 pImage->uState = SUP_IOCTL_LDR_FREE;
3774 RTMemExecFree(pImage);
3775}
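
The comment at the top of supdrvLdrFree notes that a doubly linked list would have avoided the linear search for the predecessor. Purely to illustrate that remark, a hypothetical node with a pPrev pointer (none of these names exist in the driver) could be unlinked in constant time:

    /* Hypothetical doubly-linked image node, only to illustrate the remark above. */
    #include <stddef.h>

    typedef struct SKETCHIMAGE
    {
        struct SKETCHIMAGE *pNext;
        struct SKETCHIMAGE *pPrev;
    } SKETCHIMAGE;

    /* Unlink pImage from the list anchored at *ppHead without walking the list. */
    static void sketchUnlinkImage(SKETCHIMAGE **ppHead, SKETCHIMAGE *pImage)
    {
        if (pImage->pPrev)
            pImage->pPrev->pNext = pImage->pNext;
        else
            *ppHead = pImage->pNext;
        if (pImage->pNext)
            pImage->pNext->pPrev = pImage->pPrev;
        pImage->pNext = pImage->pPrev = NULL;
    }
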
3776
3777
3778/**
3779 * Gets the current paging mode of the CPU and stores it in pOut.
3780 */
3781static int supdrvIOCtl_GetPagingMode(PSUPGETPAGINGMODE_OUT pOut)
3782{
3783 RTUINTREG cr0 = ASMGetCR0();
3784 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3785 pOut->enmMode = SUPPAGINGMODE_INVALID;
3786 else
3787 {
3788 RTUINTREG cr4 = ASMGetCR4();
3789 uint32_t fNXEPlusLMA = 0;
3790 if (cr4 & X86_CR4_PAE)
3791 {
3792 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
3793 if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
3794 {
3795 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
3796 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
3797 fNXEPlusLMA |= BIT(0);
3798 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
3799 fNXEPlusLMA |= BIT(1);
3800 }
3801 }
3802
3803 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
3804 {
3805 case 0:
3806 pOut->enmMode = SUPPAGINGMODE_32_BIT;
3807 break;
3808
3809 case X86_CR4_PGE:
3810 pOut->enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
3811 break;
3812
3813 case X86_CR4_PAE:
3814 pOut->enmMode = SUPPAGINGMODE_PAE;
3815 break;
3816
3817 case X86_CR4_PAE | BIT(0):
3818 pOut->enmMode = SUPPAGINGMODE_PAE_NX;
3819 break;
3820
3821 case X86_CR4_PAE | X86_CR4_PGE:
3822 pOut->enmMode = SUPPAGINGMODE_PAE_GLOBAL;
3823 break;
3824
3825 case X86_CR4_PAE | X86_CR4_PGE | BIT(0):
3826 pOut->enmMode = SUPPAGINGMODE_PAE_GLOBAL_NX;
3827 break;
3828
3829 case BIT(1) | X86_CR4_PAE:
3830 pOut->enmMode = SUPPAGINGMODE_AMD64;
3831 break;
3832
3833 case BIT(1) | X86_CR4_PAE | BIT(0):
3834 pOut->enmMode = SUPPAGINGMODE_AMD64_NX;
3835 break;
3836
3837 case BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
3838 pOut->enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
3839 break;
3840
3841 case BIT(1) | X86_CR4_PAE | X86_CR4_PGE | BIT(0):
3842 pOut->enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
3843 break;
3844
3845 default:
3846 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
3847 pOut->enmMode = SUPPAGINGMODE_INVALID;
3848 break;
3849 }
3850 }
3851 return 0;
3852}
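
The switch above keys on CR4.PAE and CR4.PGE combined with two synthesized bits: BIT(0) is set when EFER.NXE is active and BIT(1) when EFER.LMA is active. As a worked example with made-up register values and local stand-in constants, a 64-bit host with global pages, NX and long mode enabled yields the key 0xA3 and therefore SUPPAGINGMODE_AMD64_GLOBAL_NX:

    /* Illustrative sketch (not part of the driver): the same CR4/EFER bit
     * composition as supdrvIOCtl_GetPagingMode, applied to assumed values. */
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_CR4_PAE  (1u << 5)
    #define SKETCH_CR4_PGE  (1u << 7)

    int main(void)
    {
        uint32_t cr4         = SKETCH_CR4_PAE | SKETCH_CR4_PGE;        /* assumed host CR4 */
        uint32_t fNXEPlusLMA = (1u << 0) /* NXE */ | (1u << 1) /* LMA */;
        uint32_t uKey        = (cr4 & (SKETCH_CR4_PAE | SKETCH_CR4_PGE)) | fNXEPlusLMA;

        /* 0xa0 | 0x03 = 0xa3 -> the "BIT(1) | X86_CR4_PAE | X86_CR4_PGE | BIT(0)"
         * case above, i.e. SUPPAGINGMODE_AMD64_GLOBAL_NX. */
        printf("switch key = %#x\n", uKey);
        return 0;
    }
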
3853
3854
3855#if !defined(SUPDRV_OS_HAVE_LOW) && !defined(USE_NEW_OS_INTERFACE) /* Use same backend as the contiguous stuff */
3856/**
3857 * OS Specific code for allocating page aligned memory with fixed
3858 * physical backing below 4GB.
3859 *
3860 * @returns 0 on success.
3861 * @returns SUPDRV_ERR_* on failure.
3862 * @param pMem Memory reference record of the memory to be allocated.
3863 * (This is not linked in anywhere.)
3864 * @param ppvR0 Where to store the Ring-0 mapping of the allocated memory.
3865 * @param ppvR3 Where to store the Ring-3 mapping of the allocated memory.
3866 * @param paPagesOut Where to store the physical addresses.
3867 */
3868int VBOXCALL supdrvOSLowAllocOne(PSUPDRVMEMREF pMem, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PSUPPAGE paPagesOut)
3869{
3870 RTHCPHYS HCPhys;
3871 int rc = supdrvOSContAllocOne(pMem, ppvR0, ppvR3, &HCPhys);
3872 if (!rc)
3873 {
3874 unsigned iPage = pMem->cb >> PAGE_SHIFT;
3875 while (iPage-- > 0)
3876 {
3877 paPagesOut[iPage].Phys = HCPhys + (iPage << PAGE_SHIFT);
3878 paPagesOut[iPage].uReserved = 0;
3879 }
3880 }
3881 return rc;
3882}
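
supdrvOSLowAllocOne derives the physical address of page i as HCPhys + (i << PAGE_SHIFT) from the contiguous base returned by supdrvOSContAllocOne. A tiny standalone sketch of that arithmetic, assuming 4 KB pages and a made-up base address:

    /* Illustrative sketch (not part of the driver): per-page physical
     * addresses of a contiguous range, as filled in above. */
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SHIFT 12   /* 4 KB pages assumed for the example */

    int main(void)
    {
        uint64_t HCPhys = 0x01230000;   /* made-up contiguous base */
        unsigned iPage  = 4;            /* made-up page count */
        while (iPage-- > 0)
            printf("page %u -> %#llx\n", iPage,
                   (unsigned long long)(HCPhys + ((uint64_t)iPage << SKETCH_PAGE_SHIFT)));
        return 0;
    }
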
3883
3884
3885/**
3886 * Frees low memory.
3887 *
3888 * @param pMem Memory reference record of the memory to be freed.
3889 */
3890void VBOXCALL supdrvOSLowFreeOne(PSUPDRVMEMREF pMem)
3891{
3892 supdrvOSContFreeOne(pMem);
3893}
3894#endif /* !SUPDRV_OS_HAVE_LOW && !USE_NEW_OS_INTERFACE */
3895
3896
3897#ifdef USE_NEW_OS_INTERFACE
3898/**
3899 * Creates the GIP.
3900 *
3901 * @returns negative errno.
3902 * @param pDevExt Instance data. GIP stuff may be updated.
3903 */
3904static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
3905{
3906 PSUPGLOBALINFOPAGE pGip;
3907 RTHCPHYS HCPhysGip;
3908 uint32_t u32SystemResolution;
3909 uint32_t u32Interval;
3910 int rc;
3911
3912 dprintf(("supdrvGipCreate:\n"));
3913
3914 /* assert order */
3915 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
3916 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
3917 Assert(!pDevExt->pGipTimer);
3918
3919 /*
3920 * Allocate a suitable page with a default kernel mapping.
3921 */
3922 rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
3923 if (RT_FAILURE(rc))
3924 {
3925 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
3926 return rc;
3927 }
3928 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
3929 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
3930
3931 /*
3932 * Try to bump up the system timer resolution.
3933 * The more interrupts the better...
3934 */
3935 if ( RT_SUCCESS(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution))
3936 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
3937 || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /* 256 HZ */, &u32SystemResolution))
3938 || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /* 250 HZ */, &u32SystemResolution))
3939 || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /* 128 HZ */, &u32SystemResolution))
3940 || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /* 100 HZ */, &u32SystemResolution))
3941 || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /* 64 HZ */, &u32SystemResolution))
3942 || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /* 32 HZ */, &u32SystemResolution))
3943 )
3944 {
3945 Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
3946 pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
3947 }
3948
3949 /*
3950 * Find a reasonable update interval, something close to 10ms would be nice,
3951 * and create a recurring timer.
3952 */
3953 u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
3954 while (u32Interval < 10000000 /* 10 ms */)
3955 u32Interval += u32SystemResolution;
3956
3957 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipTimer, pDevExt);
3958 if (RT_FAILURE(rc))
3959 {
3960 OSDBGPRINT(("supdrvGipCreate: failed to create the GIP timer at %RU32 ns interval. rc=%d\n", u32Interval, rc));
3961 Assert(!pDevExt->pGipTimer);
3962 supdrvGipDestroy(pDevExt);
3963 return rc;
3964 }
3965
3966 /*
3967 * We're good.
3968 */
3969 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
3970 return 0;
3971}
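
The interval loop in supdrvGipCreate picks the smallest multiple of the system timer granularity that is at least 10 ms. Run against an assumed granularity of 976563 ns (1024 Hz), it settles on 10742193 ns, which the supdrvGipInit call above turns into an update frequency of 93 Hz. A standalone sketch of just that loop:

    /* Illustrative sketch (not part of the driver): the interval selection
     * loop from supdrvGipCreate, run against an assumed 1024 Hz granularity. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t u32SystemResolution = 976563;  /* ns, assumed granularity (1024 Hz) */
        uint32_t u32Interval = u32SystemResolution;
        while (u32Interval < 10000000 /* 10 ms */)
            u32Interval += u32SystemResolution;

        /* 11 * 976563 = 10742193 ns, i.e. roughly 93 updates per second. */
        printf("interval=%u ns, update Hz=%u\n", u32Interval, 1000000000 / u32Interval);
        return 0;
    }
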
3972
3973
3974/**
3975 * Terminates the GIP.
3976 *
3977 * @returns negative errno.
3978 * @param pDevExt Instance data. GIP stuff may be updated.
3979 */
3980static int supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
3981{
3982 int rc;
3983#ifdef DEBUG_DARWIN_GIP
3984 OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
3985 pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
3986 pDevExt->pGipTimer, pDevExt->GipMemObj));
3987#endif
3988
3989 /*
3990 * Invalidate the GIP data.
3991 */
3992 if (pDevExt->pGip)
3993 {
3994 supdrvGipTerm(pDevExt->pGip);
3995 pDevExt->pGip = 0;
3996 }
3997
3998 /*
3999 * Destroy the timer and free the GIP memory object.
4000 */
4001 if (pDevExt->pGipTimer)
4002 {
4003 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
4004 pDevExt->pGipTimer = NULL;
4005 }
4006
4007 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
4008 {
4009 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
4010 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
4011 }
4012
4013 /*
4014 * Finally, release the system timer resolution request if one succeeded.
4015 */
4016 if (pDevExt->u32SystemTimerGranularityGrant)
4017 {
4018 rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
4019 pDevExt->u32SystemTimerGranularityGrant = 0;
4020 }
4021
4022 return 0;
4023}
4024
4025
4026/**
4027 * Timer callback function.
4028 * @param pTimer The timer.
4029 * @param pvUser The device extension.
4030 */
4031static DECLCALLBACK(void) supdrvGipTimer(PRTTIMER pTimer, void *pvUser)
4032{
4033 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4034 supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
4035}
4036#endif /* USE_NEW_OS_INTERFACE */
4037
4038
4039/**
4040 * Initializes the GIP data.
4041 *
4042 * @returns VBox status code.
4043 * @param pDevExt Pointer to the device instance data.
4044 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4045 * @param HCPhys The physical address of the GIP.
4046 * @param u64NanoTS The current nanosecond timestamp.
4047 * @param uUpdateHz The update frequency.
4048 */
4049int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
4050{
4051 unsigned i;
4052#ifdef DEBUG_DARWIN_GIP
4053 OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4054#else
4055 dprintf(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4056#endif
4057
4058 /*
4059 * Initialize the structure.
4060 */
4061 memset(pGip, 0, PAGE_SIZE);
4062 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
4063 pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
4064 pGip->u32Mode = supdrvGipDeterminTscMode();
4065 pGip->u32UpdateHz = uUpdateHz;
4066 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
4067 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
4068
4069 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4070 {
4071 pGip->aCPUs[i].u32TransactionId = 2;
4072 pGip->aCPUs[i].u64NanoTS = u64NanoTS;
4073 pGip->aCPUs[i].u64TSC = ASMReadTSC();
4074
4075 /*
4076 * We don't know the following values until we've executed updates.
4077 * So, we'll just insert very high values.
4078 */
4079 pGip->aCPUs[i].u64CpuHz = _4G + 1;
4080 pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
4081 pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
4082 pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
4083 pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
4084 pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
4085 pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
4086 pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
4087 pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
4088 pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
4089 }
4090
4091 /*
4092 * Link it to the device extension.
4093 */
4094 pDevExt->pGip = pGip;
4095 pDevExt->HCPhysGip = HCPhys;
4096 pDevExt->cGipUsers = 0;
4097
4098 return 0;
4099}
4100
4101
4102/**
4103 * Determine the GIP TSC mode.
4104 *
4105 * @returns The most suitable TSC mode.
4106 */
4107static SUPGIPMODE supdrvGipDeterminTscMode(void)
4108{
4109#ifndef USE_NEW_OS_INTERFACE
4110 /*
4111 * The problem here is that AMD processors with power management features
4112 * may easily end up with different TSCs because the CPUs or even cores
4113 * on the same physical chip run at different frequencies to save power.
4114 *
4115 * It is rumoured that this will be corrected with Barcelona and it's
4116 * expected that this will be indicated by the TscInvariant bit in
4117 * cpuid(0x80000007). So, the "difficult" bit here is to correctly
4118 * identify the older CPUs which don't do different frequency and
4119 * can be relied upon to have somewhat uniform TSC between the cpus.
4120 */
4121 if (supdrvOSGetCPUCount() > 1)
4122 {
4123 uint32_t uEAX, uEBX, uECX, uEDX;
4124
4125 /* Permit the user to override. */
4126 if (supdrvOSGetForcedAsyncTscMode())
4127 return SUPGIPMODE_ASYNC_TSC;
4128
4129 /* Check for "AuthenticAMD" */
4130 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
4131 if (uEAX >= 1 && uEBX == 0x68747541 && uECX == 0x444d4163 && uEDX == 0x69746e65)
4132 {
4133 /* Check for APM support and that TscInvariant is cleared. */
4134 ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
4135 if (uEAX >= 0x80000007)
4136 {
4137 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
4138 if ( !(uEDX & BIT(8))/* TscInvariant */
4139 && (uEDX & 0x3e)) /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
4140 return SUPGIPMODE_ASYNC_TSC;
4141 }
4142 }
4143 }
4144#endif
4145 return SUPGIPMODE_SYNC_TSC;
4146}
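
The vendor test in supdrvGipDeterminTscMode compares raw CPUID register values; those constants are the little-endian bytes of "AuthenticAMD" in the EBX, EDX, ECX order that CPUID leaf 0 uses. A standalone sketch that reassembles the string from the same constants:

    /* Standalone illustration (not part of the driver) of the vendor
     * constants used in supdrvGipDeterminTscMode above. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint32_t uEBX = 0x68747541, uEDX = 0x69746e65, uECX = 0x444d4163;
        char szVendor[13];

        /* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order. */
        memcpy(&szVendor[0], &uEBX, 4);
        memcpy(&szVendor[4], &uEDX, 4);
        memcpy(&szVendor[8], &uECX, 4);
        szVendor[12] = '\0';
        printf("%s\n", szVendor);   /* "AuthenticAMD" on a little-endian host */
        return 0;
    }
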
4147
4148
4149/**
4150 * Invalidates the GIP data upon termination.
4151 *
4152 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4153 */
4154void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4155{
4156 unsigned i;
4157 pGip->u32Magic = 0;
4158 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4159 {
4160 pGip->aCPUs[i].u64NanoTS = 0;
4161 pGip->aCPUs[i].u64TSC = 0;
4162 pGip->aCPUs[i].iTSCHistoryHead = 0;
4163 }
4164}
4165
4166
4167/**
4168 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
4169 * updates all the per cpu data except the transaction id.
4170 *
4171 * @param pGip The GIP.
4172 * @param pGipCpu Pointer to the per cpu data.
4173 * @param u64NanoTS The current time stamp.
4174 */
4175static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
4176{
4177 uint64_t u64TSC;
4178 uint64_t u64TSCDelta;
4179 uint32_t u32UpdateIntervalTSC;
4180 uint32_t u32UpdateIntervalTSCSlack;
4181 unsigned iTSCHistoryHead;
4182 uint64_t u64CpuHz;
4183
4184 /*
4185 * Update the NanoTS.
4186 */
4187 ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);
4188
4189 /*
4190 * Calc TSC delta.
4191 */
4192 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
4193 u64TSC = ASMReadTSC();
4194 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
4195 ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);
4196
4197 if (u64TSCDelta >> 32)
4198 {
4199 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
4200 pGipCpu->cErrors++;
4201 }
4202
4203 /*
4204 * TSC History.
4205 */
4206 Assert(ELEMENTS(pGipCpu->au32TSCHistory) == 8);
4207
4208 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
4209 ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
4210 ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
4211
4212 /*
4213 * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
4214 */
4215 if (pGip->u32UpdateHz >= 1000)
4216 {
4217 uint32_t u32;
4218 u32 = pGipCpu->au32TSCHistory[0];
4219 u32 += pGipCpu->au32TSCHistory[1];
4220 u32 += pGipCpu->au32TSCHistory[2];
4221 u32 += pGipCpu->au32TSCHistory[3];
4222 u32 >>= 2;
4223 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
4224 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
4225 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
4226 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
4227 u32UpdateIntervalTSC >>= 2;
4228 u32UpdateIntervalTSC += u32;
4229 u32UpdateIntervalTSC >>= 1;
4230
4231 /* Value chosen for a 2GHz Athlon64 running Linux 2.6.10/11. */
4232 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
4233 }
4234 else if (pGip->u32UpdateHz >= 90)
4235 {
4236 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4237 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
4238 u32UpdateIntervalTSC >>= 1;
4239
4240 /* Value chosen on a 2GHz ThinkPad running Windows. */
4241 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
4242 }
4243 else
4244 {
4245 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4246
4247 /* This value hasn't been checked yet; waiting for OS/2 and 33Hz timers. :-) */
4248 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
4249 }
4250 ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
4251
4252 /*
4253 * CpuHz.
4254 */
4255 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
4256 ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
4257}
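
The final multiplication in supdrvGipDoUpdateCpu converts ticks-per-update-interval into ticks-per-second: u64CpuHz = u32UpdateIntervalTSC * u32UpdateHz. As a worked example with assumed numbers, roughly 21.5 million TSC ticks per interval at a 93 Hz update rate corresponds to a CPU running just under 2 GHz:

    /* Standalone illustration (not part of the driver) of the CpuHz estimate
     * computed at the end of supdrvGipDoUpdateCpu. Numbers are made up. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t u32UpdateIntervalTSC = 21500000;  /* assumed TSC ticks per interval */
        uint32_t u32UpdateHz          = 93;        /* assumed GIP update frequency   */
        uint64_t u64CpuHz = (uint64_t)u32UpdateIntervalTSC * u32UpdateHz;

        printf("estimated CPU frequency: %llu Hz\n", (unsigned long long)u64CpuHz);
        return 0;   /* prints 1999500000, i.e. just under 2 GHz */
    }
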
4258
4259
4260/**
4261 * Updates the GIP.
4262 *
4263 * @param pGip Pointer to the GIP.
4264 * @param u64NanoTS The current nanosecond timestamp.
4265 */
4266void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
4267{
4268 /*
4269 * Determine the relevant CPU data.
4270 */
4271 PSUPGIPCPU pGipCpu;
4272 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4273 pGipCpu = &pGip->aCPUs[0];
4274 else
4275 {
4276 unsigned iCpu = ASMGetApicId();
4277 if (RT_UNLIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4278 return;
4279 pGipCpu = &pGip->aCPUs[iCpu];
4280 }
4281
4282 /*
4283 * Start update transaction.
4284 */
4285 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4286 {
4287 /* this can happen on win32 if we're taking too long and there are more CPUs around. shouldn't happen though. */
4288 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4289 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4290 pGipCpu->cErrors++;
4291 return;
4292 }
4293
4294 /*
4295 * Recalc the update frequency every 0x800th time.
4296 */
4297 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4298 {
4299 if (pGip->u64NanoTSLastUpdateHz)
4300 {
4301#ifdef __AMD64__ /** @todo fix 64-bit div here to work on x86 linux. */
4302 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4303 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4304 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4305 {
4306 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4307 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4308 }
4309#endif
4310 }
4311 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4312 }
4313
4314 /*
4315 * Update the data.
4316 */
4317 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4318
4319 /*
4320 * Complete transaction.
4321 */
4322 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4323}
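
The odd/even transaction id incremented above is the writer half of a seqlock-style protocol: the id is odd while an update is in flight and even when the per-CPU record is stable. A reader would typically sample the id before and after copying the fields and retry on an odd or changed value. A minimal reader sketch under those assumptions (the structure below is a simplified stand-in, not the real SUPGIPCPU layout, and a real reader would also need memory barriers):

    /* Minimal seqlock-style reader sketch matching the writer protocol above.
     * SKETCHGIPCPU is a simplified stand-in, not the real SUPGIPCPU structure. */
    #include <stdint.h>

    typedef struct SKETCHGIPCPU
    {
        volatile uint32_t u32TransactionId;
        volatile uint64_t u64NanoTS;
        volatile uint64_t u64TSC;
    } SKETCHGIPCPU;

    static void sketchGipRead(const SKETCHGIPCPU *pCpu, uint64_t *pu64NanoTS, uint64_t *pu64TSC)
    {
        uint32_t idBefore, idAfter;
        do
        {
            idBefore    = pCpu->u32TransactionId;
            *pu64NanoTS = pCpu->u64NanoTS;
            *pu64TSC    = pCpu->u64TSC;
            idAfter     = pCpu->u32TransactionId;
            /* Retry while an update was in flight (odd id) or completed in between. */
        } while ((idBefore & 1) || idBefore != idAfter);
    }
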
4324
4325
4326/**
4327 * Updates the per cpu GIP data for the calling cpu.
4328 *
4329 * @param pGip Pointer to the GIP.
4330 * @param u64NanoTS The current nanosecond timestamp.
4331 * @param iCpu The CPU index.
4332 */
4333void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, unsigned iCpu)
4334{
4335 PSUPGIPCPU pGipCpu;
4336
4337 if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
4338 {
4339 pGipCpu = &pGip->aCPUs[iCpu];
4340
4341 /*
4342 * Start update transaction.
4343 */
4344 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4345 {
4346 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4347 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4348 pGipCpu->cErrors++;
4349 return;
4350 }
4351
4352 /*
4353 * Update the data.
4354 */
4355 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4356
4357 /*
4358 * Complete transaction.
4359 */
4360 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4361 }
4362}
4363
4364
4365#ifndef DEBUG /** @todo change #ifndef DEBUG -> #ifdef LOG_ENABLED */
4366/**
4367 * Stub function for non-debug builds.
4368 */
4369RTDECL(PRTLOGGER) RTLogDefaultInstance(void)
4370{
4371 return NULL;
4372}
4373
4374RTDECL(PRTLOGGER) RTLogRelDefaultInstance(void)
4375{
4376 return NULL;
4377}
4378
4379/**
4380 * Stub function for non-debug builds.
4381 */
4382RTDECL(int) RTLogSetDefaultInstanceThread(PRTLOGGER pLogger, uintptr_t uKey)
4383{
4384 return 0;
4385}
4386
4387/**
4388 * Stub function for non-debug builds.
4389 */
4390RTDECL(void) RTLogLogger(PRTLOGGER pLogger, void *pvCallerRet, const char *pszFormat, ...)
4391{
4392}
4393
4394/**
4395 * Stub function for non-debug builds.
4396 */
4397RTDECL(void) RTLogLoggerEx(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, ...)
4398{
4399}
4400
4401/**
4402 * Stub function for non-debug builds.
4403 */
4404RTDECL(void) RTLogLoggerExV(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args)
4405{
4406}
4407#endif /* !DEBUG */
4408