VirtualBox

source: vbox/trunk/src/VBox/ExtPacks/VBoxDTrace/VBoxDTraceR0.cpp

Last change on this file was r98103, checked in by vboxsync on 2023-01-17

Copyright year updates by scm.

1/* $Id: VBoxDTraceR0.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * VBoxDTraceR0.
4 *
5 * Contributed by: bird
6 */
7
8/*
9 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
10 *
11 * This file is part of VirtualBox base platform packages, as
12 * available from http://www.virtualbox.org.
13 *
14 * The contents of this file are subject to the terms of the Common
15 * Development and Distribution License Version 1.0 (CDDL) only, as it
16 * comes in the "COPYING.CDDL" file of the VirtualBox distribution.
17 *
18 * SPDX-License-Identifier: CDDL-1.0
19 */
20
21
22/*********************************************************************************************************************************
23* Header Files *
24*********************************************************************************************************************************/
25#include <VBox/sup.h>
26#include <VBox/log.h>
27
28#include <iprt/asm-amd64-x86.h>
29#include <iprt/assert.h>
30#include <iprt/ctype.h>
31#include <iprt/err.h>
32#include <iprt/mem.h>
33#include <iprt/mp.h>
34#include <iprt/process.h>
35#include <iprt/semaphore.h>
36#include <iprt/spinlock.h>
37#include <iprt/string.h>
38#include <iprt/thread.h>
39#include <iprt/time.h>
40
41#include <sys/dtrace_impl.h>
42
43#include <VBox/VBoxTpG.h>
44
45
46/*********************************************************************************************************************************
47* Defined Constants And Macros *
48*********************************************************************************************************************************/
49//#if !defined(RT_OS_WINDOWS) && !defined(RT_OS_OS2)
50//# define HAVE_RTMEMALLOCEX_FEATURES
51//#endif
52
53
54/*********************************************************************************************************************************
55* Structures and Typedefs *
56*********************************************************************************************************************************/
57
58/** Caller indicator. */
59typedef enum VBOXDTCALLER
60{
61 kVBoxDtCaller_Invalid = 0,
62 kVBoxDtCaller_Generic,
63 kVBoxDtCaller_ProbeFireUser,
64 kVBoxDtCaller_ProbeFireKernel
65} VBOXDTCALLER;
66
67/**
68 * Stack data used for thread structure and such.
69 *
70 * This is planted in every external entry point and used to emulate Solaris
71 * curthread, CRED, curproc and similar. It is also used to get at the
72 * uncached probe arguments.
73 */
74typedef struct VBoxDtStackData
75{
76 /** Eyecatcher no. 1 (VBDT_STACK_DATA_MAGIC1). */
77 uint32_t u32Magic1;
78 /** Eyecatcher no. 2 (VBDT_STACK_DATA_MAGIC2). */
79 uint32_t u32Magic2;
80 /** The format of the caller specific data. */
81 VBOXDTCALLER enmCaller;
82 /** Caller specific data. */
83 union
84 {
85 /** kVBoxDtCaller_ProbeFireKernel. */
86 struct
87 {
88 /** The caller. */
89 uintptr_t uCaller;
90 /** Pointer to the stack arguments of a probe function call. */
91 uintptr_t *pauStackArgs;
92 } ProbeFireKernel;
93 /** kVBoxDtCaller_ProbeFireUser. */
94 struct
95 {
96 /** The user context. */
97 PCSUPDRVTRACERUSRCTX pCtx;
98 /** The argument displacement caused by 64-bit arguments passed directly to
99 * dtrace_probe. */
100 int offArg;
101 } ProbeFireUser;
102 } u;
103 /** Credentials allocated by VBoxDtGetCurrentCreds. */
104 struct VBoxDtCred *pCred;
105 /** Thread structure currently being held by this thread. */
106 struct VBoxDtThread *pThread;
107 /** Pointer to this structure.
108 * This is the final bit of integrity checking. */
109 struct VBoxDtStackData *pSelf;
110} VBDTSTACKDATA;
111/** Pointer to the on-stack thread specific data. */
112typedef VBDTSTACKDATA *PVBDTSTACKDATA;
113
114/** The first magic value. */
115#define VBDT_STACK_DATA_MAGIC1 RT_MAKE_U32_FROM_U8('V', 'B', 'o', 'x')
116/** The second magic value. */
117#define VBDT_STACK_DATA_MAGIC2 RT_MAKE_U32_FROM_U8('D', 'T', 'r', 'c')
118
119/** The alignment of the stack data.
120 * The data doesn't require more than sizeof(uintptr_t) alignment, but the
121 * greater the alignment, the quicker the lookup. */
122#define VBDT_STACK_DATA_ALIGN 32
123
124/** Plants the stack data. */
125#define VBDT_SETUP_STACK_DATA(a_enmCaller) \
126 uint8_t abBlob[sizeof(VBDTSTACKDATA) + VBDT_STACK_DATA_ALIGN - 1]; \
127 PVBDTSTACKDATA pStackData = (PVBDTSTACKDATA)( (uintptr_t)&abBlob[VBDT_STACK_DATA_ALIGN - 1] \
128 & ~(uintptr_t)(VBDT_STACK_DATA_ALIGN - 1)); \
129 pStackData->u32Magic1 = VBDT_STACK_DATA_MAGIC1; \
130 pStackData->u32Magic2 = VBDT_STACK_DATA_MAGIC2; \
131 pStackData->enmCaller = a_enmCaller; \
132 pStackData->pCred = NULL; \
133 pStackData->pThread = NULL; \
134 pStackData->pSelf = pStackData
135
136/** Pacifies the stack data and frees up resources held within it. */
137#define VBDT_CLEAR_STACK_DATA() \
138 do \
139 { \
140 pStackData->u32Magic1 = 0; \
141 pStackData->u32Magic2 = 0; \
142 pStackData->pSelf = NULL; \
143 if (pStackData->pCred) \
144 crfree(pStackData->pCred); \
145 if (pStackData->pThread) \
146 VBoxDtReleaseThread(pStackData->pThread); \
147 } while (0)
148
149
150/** Simple SUPR0Printf-style logging. */
151#if 0 /*def DEBUG_bird*/
152# define LOG_DTRACE(a) SUPR0Printf a
153#else
154# define LOG_DTRACE(a) do { } while (0)
155#endif
156
157
158/*********************************************************************************************************************************
159* Global Variables *
160*********************************************************************************************************************************/
161/** Per CPU information */
162cpucore_t g_aVBoxDtCpuCores[RTCPUSET_MAX_CPUS];
163/** Dummy mutex. */
164struct VBoxDtMutex g_DummyMtx;
165/** Pointer to the tracer helpers provided by VBoxDrv. */
166static PCSUPDRVTRACERHLP g_pVBoxDTraceHlp;
167
168dtrace_cacheid_t dtrace_predcache_id = DTRACE_CACHEIDNONE + 1;
169
170#if 0
171void (*dtrace_cpu_init)(processorid_t);
172void (*dtrace_modload)(struct modctl *);
173void (*dtrace_modunload)(struct modctl *);
174void (*dtrace_helpers_cleanup)(void);
175void (*dtrace_helpers_fork)(proc_t *, proc_t *);
176void (*dtrace_cpustart_init)(void);
177void (*dtrace_cpustart_fini)(void);
178void (*dtrace_cpc_fire)(uint64_t);
179void (*dtrace_debugger_init)(void);
180void (*dtrace_debugger_fini)(void);
181#endif
182
183
184/**
185 * Gets the stack data.
186 *
187 * @returns Pointer to the stack data. Never NULL.
188 */
189static PVBDTSTACKDATA vboxDtGetStackData(void)
190{
191 int volatile iDummy = 1; /* use this to get the stack address. */
192 PVBDTSTACKDATA pData = (PVBDTSTACKDATA)( ((uintptr_t)&iDummy + VBDT_STACK_DATA_ALIGN - 1)
193 & ~(uintptr_t)(VBDT_STACK_DATA_ALIGN - 1));
194 for (;;)
195 {
196 if ( pData->u32Magic1 == VBDT_STACK_DATA_MAGIC1
197 && pData->u32Magic2 == VBDT_STACK_DATA_MAGIC2
198 && pData->pSelf == pData)
199 return pData;
200 pData = (PVBDTSTACKDATA)((uintptr_t)pData + VBDT_STACK_DATA_ALIGN);
201 }
202}
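/*
 * Editor's illustrative sketch (not part of this file's build): the on-stack
 * data trick used by VBDT_SETUP_STACK_DATA and vboxDtGetStackData, reduced to
 * a standalone program.  All names, magic values and the payload below are
 * invented for the example; only the align-and-scan-upwards idea mirrors the
 * real code.  It assumes the usual downward-growing stack and that the inner
 * call is not inlined (hence the volatile function pointer).
 */
#if 0
# include <cstdint>
# include <cstddef>
# include <cstdio>

struct DemoStackData
{
    uint32_t       u32Magic1;   /* first eyecatcher */
    uint32_t       u32Magic2;   /* second eyecatcher */
    int            iPayload;    /* whatever the entry point wants to pass down */
    DemoStackData *pSelf;       /* final integrity check */
};

static const uint32_t g_uDemoMagic1 = UINT32_C(0x600dcafe);  /* arbitrary demo values */
static const uint32_t g_uDemoMagic2 = UINT32_C(0xfeedf00d);
static const size_t   g_cbDemoAlign = 32;                    /* cf. VBDT_STACK_DATA_ALIGN */

/* Counterpart of vboxDtGetStackData(): align the address of a local upwards,
   then walk up the stack in g_cbDemoAlign steps until the eyecatchers match. */
static DemoStackData *demoFindStackData(void)
{
    int volatile iDummy = 1;
    uintptr_t uCur = ((uintptr_t)&iDummy + g_cbDemoAlign - 1) & ~(uintptr_t)(g_cbDemoAlign - 1);
    for (;;)
    {
        DemoStackData *pData = (DemoStackData *)uCur;
        if (   pData->u32Magic1 == g_uDemoMagic1
            && pData->u32Magic2 == g_uDemoMagic2
            && pData->pSelf     == pData)
            return pData;
        uCur += g_cbDemoAlign;  /* the entry point planted it in an outer (higher) frame */
    }
}

static void demoInnerWork(void)
{
    DemoStackData *pData = demoFindStackData();
    std::printf("payload planted by the entry point: %d\n", pData->iPayload);
}

int main()
{
    /* Counterpart of VBDT_SETUP_STACK_DATA: carve an aligned blob out of the
       current stack frame and tag it with the eyecatchers. */
    uint8_t abBlob[sizeof(DemoStackData) + g_cbDemoAlign - 1];
    DemoStackData *pData = (DemoStackData *)(  (uintptr_t)&abBlob[g_cbDemoAlign - 1]
                                             & ~(uintptr_t)(g_cbDemoAlign - 1));
    pData->u32Magic1 = g_uDemoMagic1;
    pData->u32Magic2 = g_uDemoMagic2;
    pData->iPayload  = 42;
    pData->pSelf     = pData;

    void (*volatile pfnInner)(void) = demoInnerWork;
    pfnInner();                 /* the deeper frame finds the blob by scanning upwards */

    /* Counterpart of VBDT_CLEAR_STACK_DATA. */
    pData->u32Magic1 = 0;
    pData->u32Magic2 = 0;
    pData->pSelf     = NULL;
    return 0;
}
#endif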
203
204
205void dtrace_toxic_ranges(void (*pfnAddOne)(uintptr_t uBase, uintptr_t cbRange))
206{
207 /** @todo ? */
208 RT_NOREF_PV(pfnAddOne);
209}
210
211
212
213/**
214 * Dummy callback used by dtrace_sync.
215 */
216static DECLCALLBACK(void) vboxDtSyncCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
217{
218 NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
219}
220
221
222/**
223 * Synchronize across all CPUs (expensive).
224 */
225void dtrace_sync(void)
226{
227 int rc = RTMpOnAll(vboxDtSyncCallback, NULL, NULL);
228 AssertRC(rc);
229}
230
231
232/**
233 * Fetch an 8-bit "word" from userland.
234 *
235 * @return The byte value.
236 * @param pvUserAddr The userland address.
237 */
238uint8_t dtrace_fuword8( void *pvUserAddr)
239{
240 uint8_t u8;
241 int rc = RTR0MemUserCopyFrom(&u8, (uintptr_t)pvUserAddr, sizeof(u8));
242 if (RT_FAILURE(rc))
243 {
244 RTCPUID iCpu = VBDT_GET_CPUID();
245 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
246 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
247 u8 = 0;
248 }
249 return u8;
250}
251
252
253/**
254 * Fetch a 16-bit word from userland.
255 *
256 * @return The word value.
257 * @param pvUserAddr The userland address.
258 */
259uint16_t dtrace_fuword16(void *pvUserAddr)
260{
261 uint16_t u16;
262 int rc = RTR0MemUserCopyFrom(&u16, (uintptr_t)pvUserAddr, sizeof(u16));
263 if (RT_FAILURE(rc))
264 {
265 RTCPUID iCpu = VBDT_GET_CPUID();
266 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
267 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
268 u16 = 0;
269 }
270 return u16;
271}
272
273
274/**
275 * Fetch a 32-bit word from userland.
276 *
277 * @return The dword value.
278 * @param pvUserAddr The userland address.
279 */
280uint32_t dtrace_fuword32(void *pvUserAddr)
281{
282 uint32_t u32;
283 int rc = RTR0MemUserCopyFrom(&u32, (uintptr_t)pvUserAddr, sizeof(u32));
284 if (RT_FAILURE(rc))
285 {
286 RTCPUID iCpu = VBDT_GET_CPUID();
287 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
288 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
289 u32 = 0;
290 }
291 return u32;
292}
293
294
295/**
296 * Fetch a 64-bit word from userland.
297 *
298 * @return The qword value.
299 * @param pvUserAddr The userland address.
300 */
301uint64_t dtrace_fuword64(void *pvUserAddr)
302{
303 uint64_t u64;
304 int rc = RTR0MemUserCopyFrom(&u64, (uintptr_t)pvUserAddr, sizeof(u64));
305 if (RT_FAILURE(rc))
306 {
307 RTCPUID iCpu = VBDT_GET_CPUID();
308 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
309 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
310 u64 = 0;
311 }
312 return u64;
313}
314
315
316/** copyin implementation */
317int VBoxDtCopyIn(void const *pvUser, void *pvDst, size_t cb)
318{
319 int rc = RTR0MemUserCopyFrom(pvDst, (uintptr_t)pvUser, cb);
320 return RT_SUCCESS(rc) ? 0 : -1;
321}
322
323
324/** copyout implementation */
325int VBoxDtCopyOut(void const *pvSrc, void *pvUser, size_t cb)
326{
327 int rc = RTR0MemUserCopyTo((uintptr_t)pvUser, pvSrc, cb);
328 return RT_SUCCESS(rc) ? 0 : -1;
329}
330
331
332/**
333 * Copy data from userland into the kernel.
334 *
335 * @param uUserAddr The userland address.
336 * @param uKrnlAddr The kernel buffer address.
337 * @param cb The number of bytes to copy.
338 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
339 */
340void dtrace_copyin( uintptr_t uUserAddr, uintptr_t uKrnlAddr, size_t cb, volatile uint16_t *pfFlags)
341{
342 int rc = RTR0MemUserCopyFrom((void *)uKrnlAddr, uUserAddr, cb);
343 if (RT_FAILURE(rc))
344 {
345 *pfFlags |= CPU_DTRACE_BADADDR;
346 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
347 }
348}
349
350
351/**
352 * Copy data from the kernel into userland.
353 *
354 * @param uKrnlAddr The kernel buffer address.
355 * @param uUserAddr The userland address.
356 * @param cb The number of bytes to copy.
357 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
358 */
359void dtrace_copyout( uintptr_t uKrnlAddr, uintptr_t uUserAddr, size_t cb, volatile uint16_t *pfFlags)
360{
361 int rc = RTR0MemUserCopyTo(uUserAddr, (void const *)uKrnlAddr, cb);
362 if (RT_FAILURE(rc))
363 {
364 *pfFlags |= CPU_DTRACE_BADADDR;
365 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
366 }
367}
368
369
370/**
371 * Copy a string from userland into the kernel.
372 *
373 * @param uUserAddr The userland address.
374 * @param uKrnlAddr The kernel buffer address.
375 * @param cbMax The maximum number of bytes to copy. May stop
376 * earlier if zero byte is encountered.
377 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
378 */
379void dtrace_copyinstr( uintptr_t uUserAddr, uintptr_t uKrnlAddr, size_t cbMax, volatile uint16_t *pfFlags)
380{
381 if (!cbMax)
382 return;
383
384 char *pszDst = (char *)uKrnlAddr;
385 int rc = RTR0MemUserCopyFrom(pszDst, uUserAddr, cbMax);
386 if (RT_FAILURE(rc))
387 {
388 /* Byte by byte - lazy bird! */
389 size_t off = 0;
390 while (off < cbMax)
391 {
392 rc = RTR0MemUserCopyFrom(&pszDst[off], uUserAddr + off, 1);
393 if (RT_FAILURE(rc))
394 {
395 *pfFlags |= CPU_DTRACE_BADADDR;
396 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
397 pszDst[off] = '\0';
398 return;
399 }
400 if (!pszDst[off])
401 return;
402 off++;
403 }
404 }
405
406 pszDst[cbMax - 1] = '\0';
407}
408
409
410/**
411 * Copy a string from the kernel into userland.
412 *
413 * @param uKrnlAddr The kernel string address.
414 * @param uUserAddr The userland address.
415 * @param cbMax The maximum number of bytes to copy. Will stop
416 * earlier if zero byte is encountered.
417 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
418 */
419void dtrace_copyoutstr(uintptr_t uKrnlAddr, uintptr_t uUserAddr, size_t cbMax, volatile uint16_t *pfFlags)
420{
421 const char *pszSrc = (const char *)uKrnlAddr;
422 size_t cbActual = RTStrNLen(pszSrc, cbMax);
423 cbActual += cbActual < cbMax;
424 dtrace_copyout(uKrnlAddr, uUserAddr, cbActual, pfFlags);
425}
426
427
428/**
429 * Get the caller @a cCallFrames call frames up the stack.
430 *
431 * @returns The caller's return address or ~(uintptr_t)0.
432 * @param cCallFrames The number of frames.
433 */
434uintptr_t dtrace_caller(int cCallFrames)
435{
436 PVBDTSTACKDATA pData = vboxDtGetStackData();
437 if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
438 return pData->u.ProbeFireKernel.uCaller;
439 RT_NOREF_PV(cCallFrames);
440 return ~(uintptr_t)0;
441}
442
443
444/**
445 * Get argument number @a iArg @a cCallFrames call frames up the stack.
446 *
447 * @returns The argument value, or UINT64_MAX if unavailable.
448 * @param iArg The argument to get.
449 * @param cCallFrames The number of frames.
450 */
451uint64_t dtrace_getarg(int iArg, int cCallFrames)
452{
453 PVBDTSTACKDATA pData = vboxDtGetStackData();
454 AssertReturn(iArg >= 5, UINT64_MAX);
455
456 if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
457 return pData->u.ProbeFireKernel.pauStackArgs[iArg - 5];
458 RT_NOREF_PV(cCallFrames);
459 return UINT64_MAX;
460}
461
462
463/**
464 * Produce a traceback of the kernel stack.
465 *
466 * @param paPcStack Where to return the program counters.
467 * @param cMaxFrames The maximum number of PCs to return.
468 * @param cSkipFrames The number of artificial callstack frames to
469 * skip at the top.
470 * @param pIntr Not sure what this is...
471 */
472void dtrace_getpcstack(pc_t *paPcStack, int cMaxFrames, int cSkipFrames, uint32_t *pIntr)
473{
474 int iFrame = 0;
475 while (iFrame < cMaxFrames)
476 {
477 paPcStack[iFrame] = NULL;
478 iFrame++;
479 }
480 RT_NOREF_PV(pIntr);
481 RT_NOREF_PV(cSkipFrames);
482}
483
484
485/**
486 * Get the number of call frames on the stack.
487 *
488 * @returns The stack depth.
489 * @param cSkipFrames The number of artificial callstack frames to
490 * skip at the top.
491 */
492int dtrace_getstackdepth(int cSkipFrames)
493{
494 RT_NOREF_PV(cSkipFrames);
495 return 1;
496}
497
498
499/**
500 * Produce a traceback of the userland stack.
501 *
502 * @param paPcStack Where to return the program counters.
503 * @param paFpStack Where to return the frame pointers.
504 * @param cMaxFrames The maximum number of frames to return.
505 */
506void dtrace_getufpstack(uint64_t *paPcStack, uint64_t *paFpStack, int cMaxFrames)
507{
508 int iFrame = 0;
509 while (iFrame < cMaxFrames)
510 {
511 paPcStack[iFrame] = 0;
512 paFpStack[iFrame] = 0;
513 iFrame++;
514 }
515}
516
517
518/**
519 * Produce a traceback of the userland stack.
520 *
521 * @param paPcStack Where to return the program counters.
522 * @param cMaxFrames The maximum number of frames to return.
523 */
524void dtrace_getupcstack(uint64_t *paPcStack, int cMaxFrames)
525{
526 int iFrame = 0;
527 while (iFrame < cMaxFrames)
528 {
529 paPcStack[iFrame] = 0;
530 iFrame++;
531 }
532}
533
534
535/**
536 * Computes the depth of the userland stack.
537 */
538int dtrace_getustackdepth(void)
539{
540 return 0;
541}
542
543
544/**
545 * Get the current IPL/IRQL.
546 *
547 * @returns Current level.
548 */
549int dtrace_getipl(void)
550{
551#ifdef RT_ARCH_AMD64
552 /* CR8 is normally the same as IRQL / IPL on AMD64. */
553 return ASMGetCR8();
554#else
555 /* Just fake it on x86. */
556 return !ASMIntAreEnabled();
557#endif
558}
559
560
561/**
562 * Get current monotonic timestamp.
563 *
564 * @returns Timestamp in nanoseconds.
565 */
566hrtime_t dtrace_gethrtime(void)
567{
568 return RTTimeNanoTS();
569}
570
571
572/**
573 * Get current walltime.
574 *
575 * @returns Timestamp in nanoseconds.
576 */
577hrtime_t dtrace_gethrestime(void)
578{
579 /** @todo try get better resolution here somehow ... */
580 RTTIMESPEC Now;
581 return RTTimeSpecGetNano(RTTimeNow(&Now));
582}
583
584
585/**
586 * DTrace panic routine.
587 *
588 * @param pszFormat Panic message.
589 * @param va Arguments to the panic message.
590 */
591void dtrace_vpanic(const char *pszFormat, va_list va)
592{
593 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
594 RTAssertMsg2WeakV(pszFormat, va);
595 RTR0AssertPanicSystem();
596 for (;;)
597 {
598 ASMBreakpoint();
599 volatile char *pchCrash = (volatile char *)~(uintptr_t)0;
600 *pchCrash = '\0';
601 }
602}
603
604
605/**
606 * DTrace panic routine.
607 *
608 * @param pszFormat Panic message.
609 * @param ... Arguments to the panic message.
610 */
611void VBoxDtPanic(const char *pszFormat, ...)
612{
613 va_list va;
614 va_start(va, pszFormat);
615 dtrace_vpanic(pszFormat, va);
616 /*va_end(va); - unreachable */
617}
618
619
620/**
621 * DTrace kernel message routine.
622 *
623 * @param pszFormat Kernel message.
624 * @param ... Arguments to the message.
625 */
626void VBoxDtCmnErr(int iLevel, const char *pszFormat, ...)
627{
628 va_list va;
629 va_start(va, pszFormat);
630 SUPR0Printf("%N", pszFormat, va);
631 va_end(va);
632 RT_NOREF_PV(iLevel);
633}
634
635
636/** uprintf implementation */
637void VBoxDtUPrintf(const char *pszFormat, ...)
638{
639 va_list va;
640 va_start(va, pszFormat);
641 VBoxDtUPrintfV(pszFormat, va);
642 va_end(va);
643}
644
645
646/** vuprintf implementation */
647void VBoxDtUPrintfV(const char *pszFormat, va_list va)
648{
649 SUPR0Printf("%N", pszFormat, va);
650}
651
652
653/* CRED implementation. */
654cred_t *VBoxDtGetCurrentCreds(void)
655{
656 PVBDTSTACKDATA pData = vboxDtGetStackData();
657 if (!pData->pCred)
658 {
659 struct VBoxDtCred *pCred;
660#ifdef HAVE_RTMEMALLOCEX_FEATURES
661 int rc = RTMemAllocEx(sizeof(*pCred), 0, RTMEMALLOCEX_FLAGS_ANY_CTX, (void **)&pCred);
662#else
663 int rc = RTMemAllocEx(sizeof(*pCred), 0, 0, (void **)&pCred);
664#endif
665 AssertFatalRC(rc);
666 pCred->cr_refs = 1;
667 /** @todo get the right creds on unix systems. */
668 pCred->cr_uid = 0;
669 pCred->cr_ruid = 0;
670 pCred->cr_suid = 0;
671 pCred->cr_gid = 0;
672 pCred->cr_rgid = 0;
673 pCred->cr_sgid = 0;
674 pCred->cr_zone = 0;
675 pData->pCred = pCred;
676 }
677
678 return pData->pCred;
679}
680
681
682/* crhold implementation */
683void VBoxDtCredHold(struct VBoxDtCred *pCred)
684{
685 int32_t cRefs = ASMAtomicIncS32(&pCred->cr_refs);
686 Assert(cRefs > 1); NOREF(cRefs);
687}
688
689
690/* crfree implementation */
691void VBoxDtCredFree(struct VBoxDtCred *pCred)
692{
693 int32_t cRefs = ASMAtomicDecS32(&pCred->cr_refs);
694 Assert(cRefs >= 0);
695 if (!cRefs)
696 RTMemFreeEx(pCred, sizeof(*pCred));
697}
698
699/** Spinlock protecting the thread structures. */
700static RTSPINLOCK g_hThreadSpinlock = NIL_RTSPINLOCK;
701/** List of threads by usage age. */
702static RTLISTANCHOR g_ThreadAgeList;
703/** Hash table for looking up thread structures. */
704static struct VBoxDtThread *g_apThreadsHash[16384];
705/** Fake kthread_t structures.
706 * The size of this array is making horrible ASSUMPTIONS about the number of
707 * threads in the system that will be subjected to DTracing. */
708static struct VBoxDtThread g_aThreads[8192];
709
710
711static int vboxDtInitThreadDb(void)
712{
713 int rc = RTSpinlockCreate(&g_hThreadSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxDtThreadDb");
714 if (RT_FAILURE(rc))
715 return rc;
716
717 RTListInit(&g_ThreadAgeList);
718 for (uint32_t i = 0; i < RT_ELEMENTS(g_aThreads); i++)
719 {
720 g_aThreads[i].hNative = NIL_RTNATIVETHREAD;
721 g_aThreads[i].uPid = NIL_RTPROCESS;
722 RTListPrepend(&g_ThreadAgeList, &g_aThreads[i].AgeEntry);
723 }
724
725 return VINF_SUCCESS;
726}
727
728
729static void vboxDtTermThreadDb(void)
730{
731 RTSpinlockDestroy(g_hThreadSpinlock);
732 g_hThreadSpinlock = NIL_RTSPINLOCK;
733 RTListInit(&g_ThreadAgeList);
734}
735
736
737/* curthread implementation, providing a fake kthread_t. */
738struct VBoxDtThread *VBoxDtGetCurrentThread(void)
739{
740 /*
741 * Once we've retrieved a thread, we hold on to it until the thread exits
742 * the VBoxDTrace module.
743 */
744 PVBDTSTACKDATA pData = vboxDtGetStackData();
745 if (pData->pThread)
746 {
747 AssertPtr(pData->pThread);
748 Assert(pData->pThread->hNative == RTThreadNativeSelf());
749 Assert(pData->pThread->uPid == RTProcSelf());
750 Assert(RTListIsEmpty(&pData->pThread->AgeEntry));
751 return pData->pThread;
752 }
753
754 /*
755 * Lookup the thread in the hash table.
756 */
757 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
758 RTPROCESS uPid = RTProcSelf();
759 uintptr_t iHash = (hNativeSelf * 2654435761U) % RT_ELEMENTS(g_apThreadsHash);
760
761 RTSpinlockAcquire(g_hThreadSpinlock);
762
763 struct VBoxDtThread *pThread = g_apThreadsHash[iHash];
764 while (pThread)
765 {
766 if (pThread->hNative == hNativeSelf)
767 {
768 if (pThread->uPid != uPid)
769 {
770 /* Re-initialize the reused thread. */
771 pThread->uPid = uPid;
772 pThread->t_dtrace_vtime = 0;
773 pThread->t_dtrace_start = 0;
774 pThread->t_dtrace_stop = 0;
775 pThread->t_dtrace_scrpc = 0;
776 pThread->t_dtrace_astpc = 0;
777 pThread->t_predcache = 0;
778 }
779
780 /* Hold the thread in the on-stack data, making sure it does not
781 get reused till the thread leaves VBoxDTrace. */
782 RTListNodeRemove(&pThread->AgeEntry);
783 pData->pThread = pThread;
784
785 RTSpinlockRelease(g_hThreadSpinlock);
786 return pThread;
787 }
788
789 pThread = pThread->pNext;
790 }
791
792 /*
793 * Unknown thread. Allocate a new entry, recycling unused or old ones.
794 */
795 pThread = RTListGetLast(&g_ThreadAgeList, struct VBoxDtThread, AgeEntry);
796 AssertFatal(pThread);
797 RTListNodeRemove(&pThread->AgeEntry);
798 if (pThread->hNative != NIL_RTNATIVETHREAD)
799 {
800 uintptr_t iHash2 = (pThread->hNative * 2654435761U) % RT_ELEMENTS(g_apThreadsHash);
801 if (g_apThreadsHash[iHash2] == pThread)
802 g_apThreadsHash[iHash2] = pThread->pNext;
803 else
804 {
805 for (struct VBoxDtThread *pPrev = g_apThreadsHash[iHash2]; ; pPrev = pPrev->pNext)
806 {
807 AssertPtr(pPrev);
808 if (pPrev->pNext == pThread)
809 {
810 pPrev->pNext = pThread->pNext;
811 break;
812 }
813 }
814 }
815 }
816
817 /*
818 * Initialize the data.
819 */
820 pThread->t_dtrace_vtime = 0;
821 pThread->t_dtrace_start = 0;
822 pThread->t_dtrace_stop = 0;
823 pThread->t_dtrace_scrpc = 0;
824 pThread->t_dtrace_astpc = 0;
825 pThread->t_predcache = 0;
826 pThread->hNative = hNativeSelf;
827 pThread->uPid = uPid;
828
829 /*
830 * Add it to the hash as well as the on-stack data.
831 */
832 pThread->pNext = g_apThreadsHash[iHash];
833 g_apThreadsHash[iHash] = pThread; /* insert the new entry at the head of the chain */
834
835 pData->pThread = pThread;
836
837 RTSpinlockRelease(g_hThreadSpinlock);
838 return pThread;
839}
840
841
842/**
843 * Called by the stack data destructor.
844 *
845 * @param pThread The thread to release.
846 *
847 */
848static void VBoxDtReleaseThread(struct VBoxDtThread *pThread)
849{
850 RTSpinlockAcquire(g_hThreadSpinlock);
851
852 RTListAppend(&g_ThreadAgeList, &pThread->AgeEntry);
853
854 RTSpinlockRelease(g_hThreadSpinlock);
855}
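/*
 * Editor's illustrative sketch (not part of this file's build): the thread
 * lookup scheme above in miniature - a fixed pool of entries, a hash over the
 * native thread handle using the Knuth multiplicative constant, and recycling
 * of the least recently used entry on a miss.  All names and sizes are
 * invented for the example, the age list is represented only by the pOldest
 * parameter, and the real code additionally guards everything with an
 * interrupt-safe spinlock.
 */
#if 0
# include <cstdint>
# include <cstddef>

struct DemoThread
{
    uintptr_t   hNative;    /* key: native thread handle (0 = never used) */
    DemoThread *pNext;      /* hash chain */
};

static DemoThread  g_aDemoPool[64];     /* fixed pool, like g_aThreads; the caller picks pOldest from here */
static DemoThread *g_apDemoHash[16];    /* hash buckets, like g_apThreadsHash */

static size_t demoHashThread(uintptr_t hNative)
{
    /* Same idea as (hNativeSelf * 2654435761U) % RT_ELEMENTS(g_apThreadsHash). */
    return (size_t)((hNative * 2654435761U) % (sizeof(g_apDemoHash) / sizeof(g_apDemoHash[0])));
}

/* Looks up hNative; on a miss, recycles pOldest (the tail of the age list in
   the real code), moving it from its old hash chain to the new one. */
static DemoThread *demoLookupOrRecycle(uintptr_t hNative, DemoThread *pOldest)
{
    size_t iHash = demoHashThread(hNative);
    for (DemoThread *p = g_apDemoHash[iHash]; p; p = p->pNext)
        if (p->hNative == hNative)
            return p;

    if (pOldest->hNative != 0)
    {
        /* Unlink the recycled entry from the chain it currently lives in. */
        DemoThread **ppNext = &g_apDemoHash[demoHashThread(pOldest->hNative)];
        while (*ppNext != pOldest)
            ppNext = &(*ppNext)->pNext;
        *ppNext = pOldest->pNext;
    }

    /* Re-key the entry and insert it (the entry itself, not its pNext) at the
       head of the new chain. */
    pOldest->hNative = hNative;
    pOldest->pNext   = g_apDemoHash[iHash];
    g_apDemoHash[iHash] = pOldest;
    return pOldest;
}
#endif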
856
857
858
859
860/*
861 *
862 * Virtual Memory / Resource Allocator.
863 * Virtual Memory / Resource Allocator.
864 * Virtual Memory / Resource Allocator.
865 *
866 */
867
868
869/** The number of bits per chunk.
870 * @remarks The 32 bytes are for heap headers and the like. */
871#define VBOXDTVMEMCHUNK_BITS ( ((_64K - 32 - sizeof(uint32_t) * 2) / sizeof(uint32_t)) * 32)
872
873/**
874 * Resource allocator chunk.
875 */
876typedef struct VBoxDtVMemChunk
877{
878 /** The ordinal (unbased) of the first item. */
879 uint32_t iFirst;
880 /** The current number of free items in this chunk. */
881 uint32_t cCurFree;
882 /** The allocation bitmap. */
883 uint32_t bm[VBOXDTVMEMCHUNK_BITS / 32];
884} VBOXDTVMEMCHUNK;
885/** Pointer to a resource allocator chunk. */
886typedef VBOXDTVMEMCHUNK *PVBOXDTVMEMCHUNK;
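/*
 * Editor's note (illustrative): spelling out the arithmetic behind
 * VBOXDTVMEMCHUNK_BITS.  With _64K = 65536 and uint32_t bitmap words it is
 *     ((65536 - 32 - 2 * 4) / 4) * 32 = 16374 * 32 = 523968 bits,
 * so the structure weighs in at 4 + 4 + 523968 / 8 = 65504 bytes, i.e.
 * exactly 64 KiB minus the 32 bytes reserved for heap bookkeeping.  Assuming
 * 4-byte uint32_t and no padding, that could be checked at compile time with:
 *     AssertCompile(VBOXDTVMEMCHUNK_BITS == 523968);
 *     AssertCompile(sizeof(VBOXDTVMEMCHUNK) == _64K - 32);
 */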
887
888
889
890/**
891 * Resource allocator instance.
892 */
893typedef struct VBoxDtVMem
894{
895 /** Spinlock protecting the data (interrupt safe). */
896 RTSPINLOCK hSpinlock;
897 /** Magic value. */
898 uint32_t u32Magic;
899 /** The current number of free items in the chunks. */
900 uint32_t cCurFree;
901 /** The current number of chunks that we have allocated. */
902 uint32_t cCurChunks;
903 /** The configured resource base. */
904 uint32_t uBase;
905 /** The configured max number of items. */
906 uint32_t cMaxItems;
907 /** The size of the apChunks array. */
908 uint32_t cMaxChunks;
909 /** Array of chunk pointers.
910 * (The size is determined at creation.) */
911 PVBOXDTVMEMCHUNK apChunks[1];
912} VBOXDTVMEM;
913/** Pointer to a resource allocator instance. */
914typedef VBOXDTVMEM *PVBOXDTVMEM;
915
916/** Magic value for the VBOXDTVMEM structure. */
917#define VBOXDTVMEM_MAGIC RT_MAKE_U32_FROM_U8('V', 'M', 'e', 'm')
918
919
920/* vmem_create implementation */
921struct VBoxDtVMem *VBoxDtVMemCreate(const char *pszName, void *pvBase, size_t cb, size_t cbUnit,
922 PFNRT pfnAlloc, PFNRT pfnFree, struct VBoxDtVMem *pSrc,
923 size_t cbQCacheMax, uint32_t fFlags)
924{
925 /*
926 * Assert preconditions of this implementation.
927 */
928 AssertMsgReturn((uintptr_t)pvBase <= UINT32_MAX, ("%p\n", pvBase), NULL);
929 AssertMsgReturn(cb <= UINT32_MAX, ("%zu\n", cb), NULL);
930 AssertMsgReturn((uintptr_t)pvBase + cb - 1 <= UINT32_MAX, ("%p %zu\n", pvBase, cb), NULL);
931 AssertMsgReturn(cbUnit == 1, ("%zu\n", cbUnit), NULL);
932 AssertReturn(!pfnAlloc, NULL);
933 AssertReturn(!pfnFree, NULL);
934 AssertReturn(!pSrc, NULL);
935 AssertReturn(!cbQCacheMax, NULL);
936 AssertReturn(fFlags & VM_SLEEP, NULL);
937 AssertReturn(fFlags & VMC_IDENTIFIER, NULL);
938 RT_NOREF_PV(pszName);
939
940 /*
941 * Allocate the instance.
942 */
943 uint32_t cChunks = (uint32_t)cb / VBOXDTVMEMCHUNK_BITS;
944 if (cb % VBOXDTVMEMCHUNK_BITS)
945 cChunks++;
946 PVBOXDTVMEM pThis = (PVBOXDTVMEM)RTMemAllocZ(RT_UOFFSETOF_DYN(VBOXDTVMEM, apChunks[cChunks]));
947 if (!pThis)
948 return NULL;
949 int rc = RTSpinlockCreate(&pThis->hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxDtVMem");
950 if (RT_FAILURE(rc))
951 {
952 RTMemFree(pThis);
953 return NULL;
954 }
955 pThis->u32Magic = VBOXDTVMEM_MAGIC;
956 pThis->cCurFree = 0;
957 pThis->cCurChunks = 0;
958 pThis->uBase = (uint32_t)(uintptr_t)pvBase;
959 pThis->cMaxItems = (uint32_t)cb;
960 pThis->cMaxChunks = cChunks;
961
962 return pThis;
963}
964
965
966/* vmem_destroy implementation */
967void VBoxDtVMemDestroy(struct VBoxDtVMem *pThis)
968{
969 if (!pThis)
970 return;
971 AssertPtrReturnVoid(pThis);
972 AssertReturnVoid(pThis->u32Magic == VBOXDTVMEM_MAGIC);
973
974 /*
975 * Invalidate the instance.
976 */
977 RTSpinlockAcquire(pThis->hSpinlock); /* paranoia */
978 pThis->u32Magic = 0;
979 RTSpinlockRelease(pThis->hSpinlock);
980 RTSpinlockDestroy(pThis->hSpinlock);
981
982 /*
983 * Free the chunks, then the instance.
984 */
985 uint32_t iChunk = pThis->cCurChunks;
986 while (iChunk-- > 0)
987 {
988 RTMemFree(pThis->apChunks[iChunk]);
989 pThis->apChunks[iChunk] = NULL;
990 }
991 RTMemFree(pThis);
992}
993
994
995/* vmem_alloc implementation */
996void *VBoxDtVMemAlloc(struct VBoxDtVMem *pThis, size_t cbMem, uint32_t fFlags)
997{
998 /*
999 * Validate input.
1000 */
1001 AssertReturn(fFlags & VM_BESTFIT, NULL);
1002 AssertReturn(fFlags & VM_SLEEP, NULL);
1003 AssertReturn(cbMem == 1, NULL);
1004 AssertPtrReturn(pThis, NULL);
1005 AssertReturn(pThis->u32Magic == VBOXDTVMEM_MAGIC, NULL);
1006
1007 /*
1008 * Allocation loop.
1009 */
1010 RTSpinlockAcquire(pThis->hSpinlock);
1011 for (;;)
1012 {
1013 PVBOXDTVMEMCHUNK pChunk;
1014 uint32_t const cChunks = pThis->cCurChunks;
1015
1016 if (RT_LIKELY(pThis->cCurFree > 0))
1017 {
1018 for (uint32_t iChunk = 0; iChunk < cChunks; iChunk++)
1019 {
1020 pChunk = pThis->apChunks[iChunk];
1021 if (pChunk->cCurFree > 0)
1022 {
1023 int iBit = ASMBitFirstClear(pChunk->bm, VBOXDTVMEMCHUNK_BITS);
1024 AssertMsgReturnStmt(iBit >= 0 && (unsigned)iBit < VBOXDTVMEMCHUNK_BITS, ("%d\n", iBit),
1025 RTSpinlockRelease(pThis->hSpinlock),
1026 NULL);
1027
1028 ASMBitSet(pChunk->bm, iBit);
1029 pChunk->cCurFree--;
1030 pThis->cCurFree--;
1031
1032 uint32_t iRet = (uint32_t)iBit + pChunk->iFirst + pThis->uBase;
1033 RTSpinlockRelease(pThis->hSpinlock);
1034 return (void *)(uintptr_t)iRet;
1035 }
1036 }
1037 AssertFailedBreak();
1038 }
1039
1040 /* Out of resources? */
1041 if (cChunks >= pThis->cMaxChunks)
1042 break;
1043
1044 /*
1045 * Allocate another chunk.
1046 */
1047 uint32_t const iFirstBit = cChunks > 0 ? pThis->apChunks[cChunks - 1]->iFirst + VBOXDTVMEMCHUNK_BITS : 0;
1048 uint32_t const cFreeBits = cChunks + 1 == pThis->cMaxChunks
1049 ? pThis->cMaxItems - (iFirstBit - pThis->uBase)
1050 : VBOXDTVMEMCHUNK_BITS;
1051 Assert(cFreeBits <= VBOXDTVMEMCHUNK_BITS);
1052
1053 RTSpinlockRelease(pThis->hSpinlock);
1054
1055 pChunk = (PVBOXDTVMEMCHUNK)RTMemAllocZ(sizeof(*pChunk));
1056 if (!pChunk)
1057 return NULL;
1058
1059 pChunk->iFirst = iFirstBit;
1060 pChunk->cCurFree = cFreeBits;
1061 if (cFreeBits != VBOXDTVMEMCHUNK_BITS)
1062 {
1063 /* lazy bird. */
1064 uint32_t iBit = cFreeBits;
1065 while (iBit < VBOXDTVMEMCHUNK_BITS)
1066 {
1067 ASMBitSet(pChunk->bm, iBit);
1068 iBit++;
1069 }
1070 }
1071
1072 RTSpinlockAcquire(pThis->hSpinlock);
1073
1074 /*
1075 * Insert the new chunk. If someone raced us here, we'll drop it to
1076 * avoid wasting resources.
1077 */
1078 if (pThis->cCurChunks == cChunks)
1079 {
1080 pThis->apChunks[cChunks] = pChunk;
1081 pThis->cCurFree += pChunk->cCurFree;
1082 pThis->cCurChunks += 1;
1083 }
1084 else
1085 {
1086 RTSpinlockRelease(pThis->hSpinlock);
1087 RTMemFree(pChunk);
1088 RTSpinlockAcquire(pThis->hSpinlock);
1089 }
1090 }
1091 RTSpinlockRelease(pThis->hSpinlock);
1092
1093 return NULL;
1094}
1095
1096/* vmem_free implementation */
1097void VBoxDtVMemFree(struct VBoxDtVMem *pThis, void *pvMem, size_t cbMem)
1098{
1099 /*
1100 * Validate input.
1101 */
1102 AssertReturnVoid(cbMem == 1);
1103 AssertPtrReturnVoid(pThis);
1104 AssertReturnVoid(pThis->u32Magic == VBOXDTVMEM_MAGIC);
1105
1106 AssertReturnVoid((uintptr_t)pvMem < UINT32_MAX);
1107 uint32_t uMem = (uint32_t)(uintptr_t)pvMem;
1108 AssertReturnVoid(uMem >= pThis->uBase);
1109 uMem -= pThis->uBase;
1110 AssertReturnVoid(uMem < pThis->cMaxItems);
1111
1112
1113 /*
1114 * Free it.
1115 */
1116 RTSpinlockAcquire(pThis->hSpinlock);
1117 uint32_t const iChunk = uMem / VBOXDTVMEMCHUNK_BITS;
1118 if (iChunk < pThis->cCurChunks)
1119 {
1120 PVBOXDTVMEMCHUNK pChunk = pThis->apChunks[iChunk];
1121 uint32_t iBit = uMem - pChunk->iFirst;
1122 AssertReturnVoidStmt(iBit < VBOXDTVMEMCHUNK_BITS, RTSpinlockRelease(pThis->hSpinlock));
1123 AssertReturnVoidStmt(ASMBitTestAndClear(pChunk->bm, iBit), RTSpinlockRelease(pThis->hSpinlock));
1124
1125 pChunk->cCurFree++;
1126 pThis->cCurFree++;
1127 }
1128
1129 RTSpinlockRelease(pThis->hSpinlock);
1130}
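/*
 * Editor's illustrative sketch (not part of this file's build): how the
 * vmem_* replacement above is meant to be used - purely as a 32-bit ID
 * allocator (unit size 1, identifier arena), never as a real memory
 * allocator.  The flag names come from the Solaris vmem interface pulled in
 * via the compatibility headers; the arena name and base/limit values are
 * invented for the example.
 */
#if 0
static void demoVMemUsage(void)
{
    /* IDs 1..999999, handed out one at a time. */
    struct VBoxDtVMem *pArena = VBoxDtVMemCreate("demo-ids", (void *)(uintptr_t)1 /*pvBase*/, 999999 /*cb*/,
                                                 1 /*cbUnit*/, NULL /*pfnAlloc*/, NULL /*pfnFree*/,
                                                 NULL /*pSrc*/, 0 /*cbQCacheMax*/, VM_SLEEP | VMC_IDENTIFIER);
    if (pArena)
    {
        /* The returned "pointer" is really the allocated ID (1 on the first call here). */
        void *pvId = VBoxDtVMemAlloc(pArena, 1 /*cbMem*/, VM_BESTFIT | VM_SLEEP);
        if (pvId)
        {
            uint32_t uId = (uint32_t)(uintptr_t)pvId;
            RT_NOREF_PV(uId);
            VBoxDtVMemFree(pArena, pvId, 1 /*cbMem*/);
        }
        VBoxDtVMemDestroy(pArena);
    }
}
#endif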
1131
1132
1133/*
1134 *
1135 * Memory Allocators.
1136 * Memory Allocators.
1137 * Memory Allocators.
1138 *
1139 */
1140
1141
1142/* kmem_alloc implementation */
1143void *VBoxDtKMemAlloc(size_t cbMem, uint32_t fFlags)
1144{
1145 void *pvMem;
1146#ifdef HAVE_RTMEMALLOCEX_FEATURES
1147 uint32_t fMemAllocFlags = fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0;
1148#else
1149 uint32_t fMemAllocFlags = 0;
1150 RT_NOREF_PV(fFlags);
1151#endif
1152 int rc = RTMemAllocEx(cbMem, 0, fMemAllocFlags, &pvMem);
1153 AssertRCReturn(rc, NULL);
1154 AssertPtr(pvMem);
1155 return pvMem;
1156}
1157
1158
1159/* kmem_zalloc implementation */
1160void *VBoxDtKMemAllocZ(size_t cbMem, uint32_t fFlags)
1161{
1162 void *pvMem;
1163#ifdef HAVE_RTMEMALLOCEX_FEATURES
1164 uint32_t fMemAllocFlags = (fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0) | RTMEMALLOCEX_FLAGS_ZEROED;
1165#else
1166 uint32_t fMemAllocFlags = RTMEMALLOCEX_FLAGS_ZEROED;
1167 RT_NOREF_PV(fFlags);
1168#endif
1169 int rc = RTMemAllocEx(cbMem, 0, fMemAllocFlags, &pvMem);
1170 AssertRCReturn(rc, NULL);
1171 AssertPtr(pvMem);
1172 return pvMem;
1173}
1174
1175
1176/* kmem_free implementation */
1177void VBoxDtKMemFree(void *pvMem, size_t cbMem)
1178{
1179 RTMemFreeEx(pvMem, cbMem);
1180}
1181
1182
1183/**
1184 * Memory cache mockup structure.
1185 * No slab allocator here!
1186 */
1187struct VBoxDtMemCache
1188{
1189 uint32_t u32Magic;
1190 size_t cbBuf;
1191 size_t cbAlign;
1192};
1193
1194
1195/* Limited kmem_cache_create implementation. */
1196struct VBoxDtMemCache *VBoxDtKMemCacheCreate(const char *pszName, size_t cbBuf, size_t cbAlign,
1197 PFNRT pfnCtor, PFNRT pfnDtor, PFNRT pfnReclaim,
1198 void *pvUser, void *pvVM, uint32_t fFlags)
1199{
1200 /*
1201 * Check the input.
1202 */
1203 AssertReturn(cbBuf > 0 && cbBuf < _1G, NULL);
1204 AssertReturn(RT_IS_POWER_OF_TWO(cbAlign), NULL);
1205 AssertReturn(!pfnCtor, NULL);
1206 AssertReturn(!pfnDtor, NULL);
1207 AssertReturn(!pfnReclaim, NULL);
1208 AssertReturn(!pvUser, NULL);
1209 AssertReturn(!pvVM, NULL);
1210 AssertReturn(!fFlags, NULL);
1211 RT_NOREF_PV(pszName);
1212
1213 /*
1214 * Create a parameter container. Don't bother with anything fancy here yet,
1215 * just get something working.
1216 */
1217 struct VBoxDtMemCache *pThis = (struct VBoxDtMemCache *)RTMemAlloc(sizeof(*pThis));
1218 if (!pThis)
1219 return NULL;
1220
1221 pThis->cbAlign = cbAlign;
1222 pThis->cbBuf = cbBuf;
1223 return pThis;
1224}
1225
1226
1227/* Limited kmem_cache_destroy implementation. */
1228void VBoxDtKMemCacheDestroy(struct VBoxDtMemCache *pThis)
1229{
1230 RTMemFree(pThis);
1231}
1232
1233
1234/* kmem_cache_alloc implementation. */
1235void *VBoxDtKMemCacheAlloc(struct VBoxDtMemCache *pThis, uint32_t fFlags)
1236{
1237 void *pvMem;
1238#ifdef HAVE_RTMEMALLOCEX_FEATURES
1239 uint32_t fMemAllocFlags = (fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0) | RTMEMALLOCEX_FLAGS_ZEROED;
1240#else
1241 uint32_t fMemAllocFlags = RTMEMALLOCEX_FLAGS_ZEROED;
1242 RT_NOREF_PV(fFlags);
1243#endif
1244 int rc = RTMemAllocEx(pThis->cbBuf, /*pThis->cbAlign*/0, fMemAllocFlags, &pvMem);
1245 AssertRCReturn(rc, NULL);
1246 AssertPtr(pvMem);
1247 return pvMem;
1248}
1249
1250
1251/* kmem_cache_free implementation. */
1252void VBoxDtKMemCacheFree(struct VBoxDtMemCache *pThis, void *pvMem)
1253{
1254 RTMemFreeEx(pvMem, pThis->cbBuf);
1255}
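/*
 * Editor's illustrative sketch (not part of this file's build): since the
 * mock kmem_cache above is just a size + alignment record, usage boils down
 * to fixed-size, zeroed allocations.  The cache name and sizes are invented;
 * note that the alignment is currently not honoured by the allocation call.
 */
#if 0
static void demoKMemCacheUsage(void)
{
    struct VBoxDtMemCache *pCache = VBoxDtKMemCacheCreate("demo-cache", 256 /*cbBuf*/, 16 /*cbAlign*/,
                                                          NULL /*pfnCtor*/, NULL /*pfnDtor*/, NULL /*pfnReclaim*/,
                                                          NULL /*pvUser*/, NULL /*pvVM*/, 0 /*fFlags*/);
    if (pCache)
    {
        void *pvObj = VBoxDtKMemCacheAlloc(pCache, KM_NOSLEEP);  /* zeroed 256-byte block */
        if (pvObj)
            VBoxDtKMemCacheFree(pCache, pvObj);
        VBoxDtKMemCacheDestroy(pCache);
    }
}
#endif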
1256
1257
1258/*
1259 *
1260 * Mutex Semaphore Wrappers.
1261 *
1262 */
1263
1264
1265/** Initializes a mutex. */
1266int VBoxDtMutexInit(struct VBoxDtMutex *pMtx)
1267{
1268 AssertReturn(pMtx != &g_DummyMtx, -1);
1269 AssertPtr(pMtx);
1270
1271 pMtx->hOwner = NIL_RTNATIVETHREAD;
1272 pMtx->hMtx = NIL_RTSEMMUTEX;
1273 int rc = RTSemMutexCreate(&pMtx->hMtx);
1274 if (RT_SUCCESS(rc))
1275 return 0;
1276 return -1;
1277}
1278
1279
1280/** Deletes a mutex. */
1281void VBoxDtMutexDelete(struct VBoxDtMutex *pMtx)
1282{
1283 AssertReturnVoid(pMtx != &g_DummyMtx);
1284 AssertPtr(pMtx);
1285 if (pMtx->hMtx == NIL_RTSEMMUTEX)
1286 return;
1287
1288 Assert(pMtx->hOwner == NIL_RTNATIVETHREAD);
1289 int rc = RTSemMutexDestroy(pMtx->hMtx); AssertRC(rc);
1290 pMtx->hMtx = NIL_RTSEMMUTEX;
1291}
1292
1293
1294/* mutex_enter implementation */
1295void VBoxDtMutexEnter(struct VBoxDtMutex *pMtx)
1296{
1297 AssertPtr(pMtx);
1298 if (pMtx == &g_DummyMtx)
1299 return;
1300
1301 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
1302
1303 int rc = RTSemMutexRequest(pMtx->hMtx, RT_INDEFINITE_WAIT);
1304 AssertFatalRC(rc);
1305
1306 Assert(pMtx->hOwner == NIL_RTNATIVETHREAD);
1307 pMtx->hOwner = hSelf;
1308}
1309
1310
1311/* mutex_exit implementation */
1312void VBoxDtMutexExit(struct VBoxDtMutex *pMtx)
1313{
1314 AssertPtr(pMtx);
1315 if (pMtx == &g_DummyMtx)
1316 return;
1317
1318 Assert(pMtx->hOwner == RTThreadNativeSelf());
1319
1320 pMtx->hOwner = NIL_RTNATIVETHREAD;
1321 int rc = RTSemMutexRelease(pMtx->hMtx);
1322 AssertFatalRC(rc);
1323}
1324
1325
1326/* MUTEX_HELD implementation */
1327bool VBoxDtMutexIsOwner(struct VBoxDtMutex *pMtx)
1328{
1329 AssertPtrReturn(pMtx, false);
1330 if (pMtx == &g_DummyMtx)
1331 return true;
1332 return pMtx->hOwner == RTThreadNativeSelf();
1333}
1334
1335
1336
1337/*
1338 *
1339 * Helpers for handling VTG structures.
1340 * Helpers for handling VTG structures.
1341 * Helpers for handling VTG structures.
1342 *
1343 */
1344
1345
1346
1347/**
1348 * Converts an attribute from VTG description speak to DTrace.
1349 *
1350 * @param pDtAttr The DTrace attribute (dst).
1351 * @param pVtgAttr The VTG attribute descriptor (src).
1352 */
1353static void vboxDtVtgConvAttr(dtrace_attribute_t *pDtAttr, PCVTGDESCATTR pVtgAttr)
1354{
1355 pDtAttr->dtat_name = pVtgAttr->u8Code - 1;
1356 pDtAttr->dtat_data = pVtgAttr->u8Data - 1;
1357 pDtAttr->dtat_class = pVtgAttr->u8DataDep - 1;
1358}
1359
1360/**
1361 * Gets a string from the string table.
1362 *
1363 * @returns Pointer to the string.
1364 * @param pVtgHdr The VTG object header.
1365 * @param offStrTab The string table offset.
1366 */
1367static const char *vboxDtVtgGetString(PVTGOBJHDR pVtgHdr, uint32_t offStrTab)
1368{
1369 Assert(offStrTab < pVtgHdr->cbStrTab);
1370 return (const char *)pVtgHdr + pVtgHdr->offStrTab + offStrTab;
1371}
1372
1373
1374
1375/*
1376 *
1377 * DTrace Provider Interface.
1378 * DTrace Provider Interface.
1379 * DTrace Provider Interface.
1380 *
1381 */
1382
1383
1384/**
1385 * @callback_method_impl{dtrace_pops_t,dtps_provide}
1386 */
1387static void vboxDtPOps_Provide(void *pvProv, const dtrace_probedesc_t *pDtProbeDesc)
1388{
1389 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1390 AssertPtrReturnVoid(pProv);
1391 LOG_DTRACE(("%s: %p / %p pDtProbeDesc=%p\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, pDtProbeDesc));
1392
1393 if (pDtProbeDesc)
1394 return; /* We don't generate probes, so never mind these requests. */
1395
1396 if (pProv->TracerData.DTrace.fZombie)
1397 return;
1398
1399 dtrace_provider_id_t const idProvider = pProv->TracerData.DTrace.idProvider;
1400 AssertPtrReturnVoid(idProvider);
1401
1402 AssertPtrReturnVoid(pProv->pHdr);
1403 AssertReturnVoid(pProv->pHdr->offProbeLocs != 0);
1404 uint32_t const cProbeLocs = pProv->pHdr->cbProbeLocs / sizeof(VTGPROBELOC);
1405
1406 /* Need a buffer for extracting the function names and mangling them in
1407 case of collision. */
1408 size_t const cbFnNmBuf = _4K + _1K;
1409 char *pszFnNmBuf = (char *)RTMemAlloc(cbFnNmBuf);
1410 if (!pszFnNmBuf)
1411 return;
1412
1413 /*
1414 * Iterate the probe location list and register all probes related to
1415 * this provider.
1416 */
1417 uint16_t const idxProv = (uint16_t)((PVTGDESCPROVIDER)((uintptr_t)pProv->pHdr + pProv->pHdr->offProviders) - pProv->pDesc);
1418 for (uint32_t idxProbeLoc = 0; idxProbeLoc < cProbeLocs; idxProbeLoc++)
1419 {
1420 /* Skip probe locations belonging to other providers or ones that
1421 we've already reported. */
1422 PCVTGPROBELOC pProbeLocRO = &pProv->paProbeLocsRO[idxProbeLoc];
1423 PVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1424 if (pProbeDesc->idxProvider != idxProv)
1425 continue;
1426
1427 uint32_t *pidProbe;
1428 if (!pProv->fUmod)
1429 pidProbe = (uint32_t *)&pProbeLocRO->idProbe;
1430 else
1431 pidProbe = &pProv->paR0ProbeLocs[idxProbeLoc].idProbe;
1432 if (*pidProbe != 0)
1433 continue;
1434
1435 /* The function name may need to be stripped since we're using C++
1436 compilers for most of the code. ASSUMES nobody is brave/stupid
1437 enough to use function pointer returns without typedef'ing
1438 them properly (e.g. signal). */
1439 const char *pszPrbName = vboxDtVtgGetString(pProv->pHdr, pProbeDesc->offName);
1440 const char *pszFunc = pProbeLocRO->pszFunction;
1441 const char *psz = strchr(pProbeLocRO->pszFunction, '(');
1442 size_t cch;
1443 if (psz)
1444 {
1445 /* Skip blanks preceding the parameter parenthesis. */
1446 while ( (uintptr_t)psz > (uintptr_t)pProbeLocRO->pszFunction
1447 && RT_C_IS_BLANK(psz[-1]))
1448 psz--;
1449
1450 /* Find the start of the function name. */
1451 pszFunc = psz - 1;
1452 while ((uintptr_t)pszFunc > (uintptr_t)pProbeLocRO->pszFunction)
1453 {
1454 char ch = pszFunc[-1];
1455 if (!RT_C_IS_ALNUM(ch) && ch != '_' && ch != ':')
1456 break;
1457 pszFunc--;
1458 }
1459 cch = psz - pszFunc;
1460 }
1461 else
1462 cch = strlen(pszFunc);
1463 RTStrCopyEx(pszFnNmBuf, cbFnNmBuf, pszFunc, cch);
1464
1465 /* Look up the probe, if we have one in the same function, mangle
1466 the function name a little to avoid having to deal with having
1467 multiple location entries with the same probe ID. (lazy bird) */
1468 Assert(!*pidProbe);
1469 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) != DTRACE_IDNONE)
1470 {
1471 RTStrPrintf(pszFnNmBuf+cch, cbFnNmBuf - cch, "-%u", pProbeLocRO->uLine);
1472 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) != DTRACE_IDNONE)
1473 {
1474 unsigned iOrd = 2;
1475 while (iOrd < 128)
1476 {
1477 RTStrPrintf(pszFnNmBuf+cch, cbFnNmBuf - cch, "-%u-%u", pProbeLocRO->uLine, iOrd);
1478 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) == DTRACE_IDNONE)
1479 break;
1480 iOrd++;
1481 }
1482 if (iOrd >= 128)
1483 {
1484 LogRel(("VBoxDrv: More than 128 duplicate probe location instances at line %u in function %s [%s], probe %s\n",
1485 pProbeLocRO->uLine, pProbeLocRO->pszFunction, pszFnNmBuf, pszPrbName));
1486 continue;
1487 }
1488 }
1489 }
1490
1491 /* Create the probe. */
1492 AssertCompile(sizeof(*pidProbe) == sizeof(dtrace_id_t));
1493 *pidProbe = dtrace_probe_create(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName,
1494 1 /*aframes*/, (void *)(uintptr_t)idxProbeLoc);
1495 pProv->TracerData.DTrace.cProvidedProbes++;
1496 }
1497
1498 RTMemFree(pszFnNmBuf);
1499 LOG_DTRACE(("%s: returns\n", __FUNCTION__));
1500}
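/*
 * Editor's illustrative sketch (not part of this file's build): the function
 * name extraction performed in vboxDtPOps_Provide above, on its own.  Given a
 * C++ style "pretty function" string it locates the parameter parenthesis and
 * scans back over identifier characters (and '::') to isolate the name.  The
 * sample strings are invented and plain libc is used instead of the IPRT
 * RT_C_IS_* helpers.
 */
#if 0
# include <cctype>
# include <cstdio>
# include <cstring>

static void demoExtractFunctionName(const char *pszPretty, char *pszDst, size_t cbDst)
{
    const char *pszFunc = pszPretty;
    const char *psz     = std::strchr(pszPretty, '(');
    size_t      cch;
    if (psz)
    {
        while (psz > pszPretty && (psz[-1] == ' ' || psz[-1] == '\t'))
            psz--;                          /* drop blanks before the '(' */
        pszFunc = psz;
        while (   pszFunc > pszPretty
               && (   std::isalnum((unsigned char)pszFunc[-1])
                   || pszFunc[-1] == '_'
                   || pszFunc[-1] == ':'))
            pszFunc--;                      /* back up over the identifier and any Class:: prefix */
        cch = (size_t)(psz - pszFunc);
    }
    else
        cch = std::strlen(pszFunc);         /* no parenthesis, take the whole thing */
    if (cch >= cbDst)
        cch = cbDst - 1;
    std::memcpy(pszDst, pszFunc, cch);
    pszDst[cch] = '\0';
}

int main()
{
    char szName[64];
    demoExtractFunctionName("static int MyNamespace::Widget::refresh(void *pvUser, int cTries)", szName, sizeof(szName));
    std::printf("%s\n", szName);    /* prints: MyNamespace::Widget::refresh */
    demoExtractFunctionName("plainCFunction", szName, sizeof(szName));
    std::printf("%s\n", szName);    /* prints: plainCFunction */
    return 0;
}
#endif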
1501
1502
1503/**
1504 * @callback_method_impl{dtrace_pops_t,dtps_enable}
1505 */
1506static int vboxDtPOps_Enable(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1507{
1508 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1509 LOG_DTRACE(("%s: %p / %p - %#x / %p\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, idProbe, pvProbe));
1510 AssertPtrReturn(pProv->TracerData.DTrace.idProvider, EINVAL);
1511 RT_NOREF_PV(idProbe);
1512
1513 if (!pProv->TracerData.DTrace.fZombie)
1514 {
1515 uint32_t idxProbeLoc = (uint32_t)(uintptr_t)pvProbe;
1516 PVTGPROBELOC32 pProbeLocEn = (PVTGPROBELOC32)( (uintptr_t)pProv->pvProbeLocsEn + idxProbeLoc * pProv->cbProbeLocsEn);
1517 PCVTGPROBELOC pProbeLocRO = (PVTGPROBELOC)&pProv->paProbeLocsRO[idxProbeLoc];
1518 PCVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1519 uint32_t const idxProbe = pProbeDesc->idxEnabled;
1520
1521 if (!pProv->fUmod)
1522 {
1523 if (!pProbeLocEn->fEnabled)
1524 {
1525 pProbeLocEn->fEnabled = 1;
1526 ASMAtomicIncU32(&pProv->pacProbeEnabled[idxProbe]);
1527 ASMAtomicIncU32(&pProv->pDesc->cProbesEnabled);
1528 ASMAtomicIncU32(&pProv->pDesc->uSettingsSerialNo);
1529 }
1530 }
1531 else
1532 {
1533 /* Update kernel mode structure */
1534 if (!pProv->paR0ProbeLocs[idxProbeLoc].fEnabled)
1535 {
1536 pProv->paR0ProbeLocs[idxProbeLoc].fEnabled = 1;
1537 ASMAtomicIncU32(&pProv->paR0Probes[idxProbe].cEnabled);
1538 ASMAtomicIncU32(&pProv->pDesc->cProbesEnabled);
1539 ASMAtomicIncU32(&pProv->pDesc->uSettingsSerialNo);
1540 }
1541
1542 /* Update user mode structure. */
1543 pProbeLocEn->fEnabled = 1;
1544 pProv->pacProbeEnabled[idxProbe] = pProv->paR0Probes[idxProbe].cEnabled;
1545 }
1546 }
1547
1548 return 0;
1549}
1550
1551
1552/**
1553 * @callback_method_impl{dtrace_pops_t,dtps_disable}
1554 */
1555static void vboxDtPOps_Disable(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1556{
1557 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1558 AssertPtrReturnVoid(pProv);
1559 LOG_DTRACE(("%s: %p / %p - %#x / %p\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, idProbe, pvProbe));
1560 AssertPtrReturnVoid(pProv->TracerData.DTrace.idProvider);
1561 RT_NOREF_PV(idProbe);
1562
1563 if (!pProv->TracerData.DTrace.fZombie)
1564 {
1565 uint32_t idxProbeLoc = (uint32_t)(uintptr_t)pvProbe;
1566 PVTGPROBELOC32 pProbeLocEn = (PVTGPROBELOC32)( (uintptr_t)pProv->pvProbeLocsEn + idxProbeLoc * pProv->cbProbeLocsEn);
1567 PCVTGPROBELOC pProbeLocRO = (PVTGPROBELOC)&pProv->paProbeLocsRO[idxProbeLoc];
1568 PCVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1569 uint32_t const idxProbe = pProbeDesc->idxEnabled;
1570
1571 if (!pProv->fUmod)
1572 {
1573 if (pProbeLocEn->fEnabled)
1574 {
1575 pProbeLocEn->fEnabled = 0;
1576 ASMAtomicDecU32(&pProv->pacProbeEnabled[idxProbe]);
1577 ASMAtomicDecU32(&pProv->pDesc->cProbesEnabled);
1578 ASMAtomicIncU32(&pProv->pDesc->uSettingsSerialNo);
1579 }
1580 }
1581 else
1582 {
1583 /* Update kernel mode structure */
1584 if (pProv->paR0ProbeLocs[idxProbeLoc].fEnabled)
1585 {
1586 pProv->paR0ProbeLocs[idxProbeLoc].fEnabled = 0;
1587 ASMAtomicDecU32(&pProv->paR0Probes[idxProbe].cEnabled);
1588 ASMAtomicDecU32(&pProv->pDesc->cProbesEnabled);
1589 ASMAtomicIncU32(&pProv->pDesc->uSettingsSerialNo);
1590 }
1591
1592 /* Update user mode structure. */
1593 pProbeLocEn->fEnabled = 0;
1594 pProv->pacProbeEnabled[idxProbe] = pProv->paR0Probes[idxProbe].cEnabled;
1595 }
1596 }
1597}
1598
1599
1600/**
1601 * @callback_method_impl{dtrace_pops_t,dtps_getargdesc}
1602 */
1603static void vboxDtPOps_GetArgDesc(void *pvProv, dtrace_id_t idProbe, void *pvProbe,
1604 dtrace_argdesc_t *pArgDesc)
1605{
1606 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1607 unsigned uArg = pArgDesc->dtargd_ndx;
1608 RT_NOREF_PV(idProbe);
1609
1610 pArgDesc->dtargd_ndx = DTRACE_ARGNONE;
1611 AssertPtrReturnVoid(pProv);
1612 LOG_DTRACE(("%s: %p / %p - %#x / %p uArg=%d\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, idProbe, pvProbe, uArg));
1613 AssertPtrReturnVoid(pProv->TracerData.DTrace.idProvider);
1614
1615 if (!pProv->TracerData.DTrace.fZombie)
1616 {
1617 uint32_t idxProbeLoc = (uint32_t)(uintptr_t)pvProbe;
1618 PCVTGPROBELOC pProbeLocRO = (PVTGPROBELOC)&pProv->paProbeLocsRO[idxProbeLoc];
1619 PCVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1620 PCVTGDESCARGLIST pArgList = (PCVTGDESCARGLIST)( (uintptr_t)pProv->pHdr
1621 + pProv->pHdr->offArgLists
1622 + pProbeDesc->offArgList);
1623 AssertReturnVoid(pProbeDesc->offArgList < pProv->pHdr->cbArgLists);
1624
1625 if (uArg < pArgList->cArgs)
1626 {
1627 const char *pszType = vboxDtVtgGetString(pProv->pHdr, pArgList->aArgs[uArg].offType);
1628 size_t cchType = strlen(pszType);
1629 if (cchType < sizeof(pArgDesc->dtargd_native))
1630 {
1631 memcpy(pArgDesc->dtargd_native, pszType, cchType + 1);
1632 /** @todo mapping? */
1633 pArgDesc->dtargd_ndx = uArg;
1634 LOG_DTRACE(("%s: returns dtargd_native = %s\n", __FUNCTION__, pArgDesc->dtargd_native));
1635 return;
1636 }
1637 }
1638 }
1639}
1640
1641
1642/**
1643 * @callback_method_impl{dtrace_pops_t,dtps_getargval}
1644 */
1645static uint64_t vboxDtPOps_GetArgVal(void *pvProv, dtrace_id_t idProbe, void *pvProbe,
1646 int iArg, int cFrames)
1647{
1648 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1649 AssertPtrReturn(pProv, UINT64_MAX);
1650 LOG_DTRACE(("%s: %p / %p - %#x / %p iArg=%d cFrames=%u\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, idProbe, pvProbe, iArg, cFrames));
1651 AssertReturn(iArg >= 5, UINT64_MAX);
1652 RT_NOREF_PV(idProbe); RT_NOREF_PV(cFrames);
1653
1654 if (pProv->TracerData.DTrace.fZombie)
1655 return UINT64_MAX;
1656
1657 uint32_t idxProbeLoc = (uint32_t)(uintptr_t)pvProbe;
1658 PCVTGPROBELOC pProbeLocRO = (PVTGPROBELOC)&pProv->paProbeLocsRO[idxProbeLoc];
1659 PCVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1660 PCVTGDESCARGLIST pArgList = (PCVTGDESCARGLIST)( (uintptr_t)pProv->pHdr
1661 + pProv->pHdr->offArgLists
1662 + pProbeDesc->offArgList);
1663 AssertReturn(pProbeDesc->offArgList < pProv->pHdr->cbArgLists, UINT64_MAX);
1664
1665 PVBDTSTACKDATA pData = vboxDtGetStackData();
1666
1667 /*
1668 * Get the stack data. This is a wee bit complicated on 32-bit systems
1669 * since we want to support 64-bit integer arguments.
1670 */
1671 uint64_t u64Ret;
1672 if (iArg >= 20)
1673 u64Ret = UINT64_MAX;
1674 else if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
1675 {
1676#if ARCH_BITS == 64
1677 u64Ret = pData->u.ProbeFireKernel.pauStackArgs[iArg - 5];
1678#else
1679 if ( !pArgList->fHaveLargeArgs
1680 || iArg >= pArgList->cArgs)
1681 u64Ret = pData->u.ProbeFireKernel.pauStackArgs[iArg - 5];
1682 else
1683 {
1684 /* Similar to what we did for the Mac when calling dtrace_probe(). */
1685 uint32_t offArg = 0;
1686 for (int i = 5; i < iArg; i++)
1687 if (VTG_TYPE_IS_LARGE(pArgList->aArgs[i].fType))
1688 offArg++;
1689 u64Ret = pData->u.ProbeFireKernel.pauStackArgs[iArg - 5 + offArg];
1690 if (VTG_TYPE_IS_LARGE(pArgList->aArgs[iArg].fType))
1691 u64Ret |= (uint64_t)pData->u.ProbeFireKernel.pauStackArgs[iArg - 5 + offArg + 1] << 32;
1692 }
1693#endif
1694 }
1695 else if (pData->enmCaller == kVBoxDtCaller_ProbeFireUser)
1696 {
1697 int offArg = pData->u.ProbeFireUser.offArg;
1698 PCSUPDRVTRACERUSRCTX pCtx = pData->u.ProbeFireUser.pCtx;
1699 AssertPtrReturn(pCtx, UINT64_MAX);
1700
1701 if (pCtx->cBits == 32)
1702 {
1703 if ( !pArgList->fHaveLargeArgs
1704 || iArg >= pArgList->cArgs)
1705 {
1706 if (iArg + offArg < (int)RT_ELEMENTS(pCtx->u.X86.aArgs))
1707 u64Ret = pCtx->u.X86.aArgs[iArg + offArg];
1708 else
1709 u64Ret = UINT64_MAX;
1710 }
1711 else
1712 {
1713 for (int i = 5; i < iArg; i++)
1714 if (VTG_TYPE_IS_LARGE(pArgList->aArgs[i].fType))
1715 offArg++;
1716 if (offArg + iArg < (int)RT_ELEMENTS(pCtx->u.X86.aArgs))
1717 {
1718 u64Ret = pCtx->u.X86.aArgs[iArg + offArg];
1719 if ( VTG_TYPE_IS_LARGE(pArgList->aArgs[iArg].fType)
1720 && offArg + iArg + 1 < (int)RT_ELEMENTS(pCtx->u.X86.aArgs))
1721 u64Ret |= (uint64_t)pCtx->u.X86.aArgs[iArg + offArg + 1] << 32;
1722 }
1723 else
1724 u64Ret = UINT64_MAX;
1725 }
1726 }
1727 else
1728 {
1729 if (iArg + offArg < (int)RT_ELEMENTS(pCtx->u.Amd64.aArgs))
1730 u64Ret = pCtx->u.Amd64.aArgs[iArg + offArg];
1731 else
1732 u64Ret = UINT64_MAX;
1733 }
1734 }
1735 else
1736 AssertFailedReturn(UINT64_MAX);
1737
1738 LOG_DTRACE(("%s: returns %#llx\n", __FUNCTION__, u64Ret));
1739 return u64Ret;
1740}
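/*
 * Editor's illustrative sketch (not part of this file's build): the 32-bit
 * "large argument" displacement logic used above, reduced to plain arrays.
 * Arguments 0..4 are passed to dtrace_probe() directly; later arguments sit
 * in 32-bit slots, and every 64-bit argument occupies two consecutive slots,
 * shifting everything after it by one.  The types and values are invented.
 */
#if 0
# include <cstdint>
# include <cstdio>

/* Reads argument iArg (iArg >= 5) out of packed 32-bit slots. */
static uint64_t demoGetTailArg(const uint32_t *pau32Slots, const bool *pafIs64Bit, int iArg)
{
    unsigned offArg = 0;
    for (int i = 5; i < iArg; i++)          /* each preceding 64-bit argument adds one extra slot */
        if (pafIs64Bit[i])
            offArg++;
    uint64_t uVal = pau32Slots[iArg - 5 + offArg];
    if (pafIs64Bit[iArg])
        uVal |= (uint64_t)pau32Slots[iArg - 5 + offArg + 1] << 32;
    return uVal;
}

int main()
{
    /* Probe tail arguments 5..8: uint32_t, uint64_t, uint32_t, uint64_t. */
    static const bool     s_afIs64Bit[9] = { false, false, false, false, false,
                                             false, true,  false, true };
    static const uint32_t s_au32Slots[6] = { 0x11111111,                   /* arg 5          */
                                             0x22222222, 0x33333333,       /* arg 6 (64-bit) */
                                             0x44444444,                   /* arg 7          */
                                             0x55555555, 0x66666666 };     /* arg 8 (64-bit) */
    std::printf("arg6=%#llx arg8=%#llx\n",
                (unsigned long long)demoGetTailArg(s_au32Slots, s_afIs64Bit, 6),
                (unsigned long long)demoGetTailArg(s_au32Slots, s_afIs64Bit, 8));
    /* prints: arg6=0x3333333322222222 arg8=0x6666666655555555 */
    return 0;
}
#endif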
1741
1742
1743/**
1744 * @callback_method_impl{dtrace_pops_t,dtps_destroy}
1745 */
1746static void vboxDtPOps_Destroy(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1747{
1748 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1749 AssertPtrReturnVoid(pProv);
1750 LOG_DTRACE(("%s: %p / %p - %#x / %p\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, idProbe, pvProbe));
1751 AssertReturnVoid(pProv->TracerData.DTrace.cProvidedProbes > 0);
1752 AssertPtrReturnVoid(pProv->TracerData.DTrace.idProvider);
1753
1754 if (!pProv->TracerData.DTrace.fZombie)
1755 {
1756 uint32_t idxProbeLoc = (uint32_t)(uintptr_t)pvProbe;
1757 PCVTGPROBELOC pProbeLocRO = (PVTGPROBELOC)&pProv->paProbeLocsRO[idxProbeLoc];
1758 uint32_t *pidProbe;
1759 if (!pProv->fUmod)
1760 {
1761 pidProbe = (uint32_t *)&pProbeLocRO->idProbe;
1762 Assert(!pProbeLocRO->fEnabled);
1763 Assert(*pidProbe == idProbe);
1764 }
1765 else
1766 {
1767 pidProbe = &pProv->paR0ProbeLocs[idxProbeLoc].idProbe;
1768 Assert(!pProv->paR0ProbeLocs[idxProbeLoc].fEnabled);
1769 Assert(*pidProbe == idProbe); NOREF(idProbe);
1770 }
1771 *pidProbe = 0;
1772 }
1773 pProv->TracerData.DTrace.cProvidedProbes--;
1774}
1775
1776
1777
1778/**
1779 * DTrace provider method table.
1780 */
1781static const dtrace_pops_t g_vboxDtVtgProvOps =
1782{
1783 /* .dtps_provide = */ vboxDtPOps_Provide,
1784 /* .dtps_provide_module = */ NULL,
1785 /* .dtps_enable = */ vboxDtPOps_Enable,
1786 /* .dtps_disable = */ vboxDtPOps_Disable,
1787 /* .dtps_suspend = */ NULL,
1788 /* .dtps_resume = */ NULL,
1789 /* .dtps_getargdesc = */ vboxDtPOps_GetArgDesc,
1790 /* .dtps_getargval = */ vboxDtPOps_GetArgVal,
1791 /* .dtps_usermode = */ NULL,
1792 /* .dtps_destroy = */ vboxDtPOps_Destroy
1793};
1794
1795
1796
1797
1798/*
1799 *
1800 * Support Driver Tracer Interface.
1801 * Support Driver Tracer Interface.
1802 * Support Driver Tracer Interface.
1803 *
1804 */
1805
1806
1807
1808/**
1809 * interface_method_impl{SUPDRVTRACERREG,pfnProbeFireKernel}
1810 */
1811static DECLCALLBACK(void) vboxDtTOps_ProbeFireKernel(struct VTGPROBELOC *pVtgProbeLoc, uintptr_t uArg0, uintptr_t uArg1, uintptr_t uArg2,
1812 uintptr_t uArg3, uintptr_t uArg4)
1813{
1814 AssertPtrReturnVoid(pVtgProbeLoc);
1815 LOG_DTRACE(("%s: %p / %p\n", __FUNCTION__, pVtgProbeLoc, pVtgProbeLoc->idProbe));
1816 AssertPtrReturnVoid(pVtgProbeLoc->pProbe);
1817 AssertPtrReturnVoid(pVtgProbeLoc->pszFunction);
1818
1819 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_ProbeFireKernel);
1820
1821 pStackData->u.ProbeFireKernel.pauStackArgs = &uArg4 + 1;
1822
1823#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
1824 /*
1825 * Convert arguments from uintptr_t to uint64_t.
1826 */
1827 PVTGDESCPROBE pProbe = pVtgProbeLoc->pProbe;
1828 AssertPtrReturnVoid(pProbe);
1829 PVTGOBJHDR pVtgHdr = (PVTGOBJHDR)((uintptr_t)pProbe + pProbe->offObjHdr);
1830 AssertPtrReturnVoid(pVtgHdr);
1831 PVTGDESCARGLIST pArgList = (PVTGDESCARGLIST)((uintptr_t)pVtgHdr + pVtgHdr->offArgLists + pProbe->offArgList);
1832 AssertPtrReturnVoid(pArgList);
1833 if (!pArgList->fHaveLargeArgs)
1834 dtrace_probe(pVtgProbeLoc->idProbe, uArg0, uArg1, uArg2, uArg3, uArg4);
1835 else
1836 {
1837 uintptr_t *auSrcArgs = &uArg0;
1838 uint32_t iSrcArg = 0;
1839 uint32_t iDstArg = 0;
1840 uint64_t au64DstArgs[5];
1841
1842 while ( iDstArg < RT_ELEMENTS(au64DstArgs)
1843 && iSrcArg < pArgList->cArgs)
1844 {
1845 au64DstArgs[iDstArg] = auSrcArgs[iSrcArg];
1846 if (VTG_TYPE_IS_LARGE(pArgList->aArgs[iDstArg].fType))
1847 au64DstArgs[iDstArg] |= (uint64_t)auSrcArgs[++iSrcArg] << 32;
1848 iSrcArg++;
1849 iDstArg++;
1850 }
1851 while (iDstArg < RT_ELEMENTS(au64DstArgs))
1852 au64DstArgs[iDstArg++] = auSrcArgs[iSrcArg++];
1853
1854 pStackData->u.ProbeFireKernel.pauStackArgs = &auSrcArgs[iSrcArg];
1855 dtrace_probe(pVtgProbeLoc->idProbe, au64DstArgs[0], au64DstArgs[1], au64DstArgs[2], au64DstArgs[3], au64DstArgs[4]);
1856 }
1857#else
1858 dtrace_probe(pVtgProbeLoc->idProbe, uArg0, uArg1, uArg2, uArg3, uArg4);
1859#endif
1860
1861 VBDT_CLEAR_STACK_DATA();
1862 LOG_DTRACE(("%s: returns\n", __FUNCTION__));
1863}
1864
1865
1866/**
1867 * interface_method_impl{SUPDRVTRACERREG,pfnProbeFireUser}
1868 */
1869static DECLCALLBACK(void) vboxDtTOps_ProbeFireUser(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, PCSUPDRVTRACERUSRCTX pCtx,
1870 PCVTGOBJHDR pVtgHdr, PCVTGPROBELOC pProbeLocRO)
1871{
1872 LOG_DTRACE(("%s: %p / %p\n", __FUNCTION__, pCtx, pCtx->idProbe));
1873 AssertPtrReturnVoid(pProbeLocRO);
1874 AssertPtrReturnVoid(pVtgHdr);
1875 RT_NOREF_PV(pThis);
1876 RT_NOREF_PV(pSession);
1877 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_ProbeFireUser);
1878
1879 if (pCtx->cBits == 32)
1880 {
1881 pStackData->u.ProbeFireUser.pCtx = pCtx;
1882 pStackData->u.ProbeFireUser.offArg = 0;
1883
1884#if ARCH_BITS == 64 || defined(RT_OS_DARWIN)
1885 /*
1886 * Combine two 32-bit arguments into one 64-bit argument where needed.
1887 */
1888 PVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1889 AssertPtrReturnVoid(pProbeDesc);
1890 PVTGDESCARGLIST pArgList = (PVTGDESCARGLIST)((uintptr_t)pVtgHdr + pVtgHdr->offArgLists + pProbeDesc->offArgList);
1891 AssertPtrReturnVoid(pArgList);
1892
1893 if (!pArgList->fHaveLargeArgs)
1894 dtrace_probe(pCtx->idProbe,
1895 pCtx->u.X86.aArgs[0],
1896 pCtx->u.X86.aArgs[1],
1897 pCtx->u.X86.aArgs[2],
1898 pCtx->u.X86.aArgs[3],
1899 pCtx->u.X86.aArgs[4]);
1900 else
1901 {
1902 uint32_t const *auSrcArgs = &pCtx->u.X86.aArgs[0];
1903 uint32_t iSrcArg = 0;
1904 uint32_t iDstArg = 0;
1905 uint64_t au64DstArgs[5];
1906
1907 while ( iDstArg < RT_ELEMENTS(au64DstArgs)
1908 && iSrcArg < pArgList->cArgs)
1909 {
1910 au64DstArgs[iDstArg] = auSrcArgs[iSrcArg];
1911 if (VTG_TYPE_IS_LARGE(pArgList->aArgs[iDstArg].fType))
1912 au64DstArgs[iDstArg] |= (uint64_t)auSrcArgs[++iSrcArg] << 32;
1913 iSrcArg++;
1914 iDstArg++;
1915 }
1916 while (iDstArg < RT_ELEMENTS(au64DstArgs))
1917 au64DstArgs[iDstArg++] = auSrcArgs[iSrcArg++];
1918
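            /* Record how many extra 32-bit slots were consumed so that later
               argument lookups apply the same displacement to the user context. */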
1919 pStackData->u.ProbeFireUser.offArg = iSrcArg - RT_ELEMENTS(au64DstArgs);
1920 dtrace_probe(pCtx->idProbe, au64DstArgs[0], au64DstArgs[1], au64DstArgs[2], au64DstArgs[3], au64DstArgs[4]);
1921 }
1922#else
1923 dtrace_probe(pCtx->idProbe,
1924 pCtx->u.X86.aArgs[0],
1925 pCtx->u.X86.aArgs[1],
1926 pCtx->u.X86.aArgs[2],
1927 pCtx->u.X86.aArgs[3],
1928 pCtx->u.X86.aArgs[4]);
1929#endif
1930 }
1931 else if (pCtx->cBits == 64)
1932 {
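        /* 64-bit user context: the arguments are already full width, so no
           merging or displacement is needed. */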
1933 pStackData->u.ProbeFireUser.pCtx = pCtx;
1934 pStackData->u.ProbeFireUser.offArg = 0;
1935 dtrace_probe(pCtx->idProbe,
1936 pCtx->u.Amd64.aArgs[0],
1937 pCtx->u.Amd64.aArgs[1],
1938 pCtx->u.Amd64.aArgs[2],
1939 pCtx->u.Amd64.aArgs[3],
1940 pCtx->u.Amd64.aArgs[4]);
1941 }
1942 else
1943 AssertFailed();
1944
1945 VBDT_CLEAR_STACK_DATA();
1946 LOG_DTRACE(("%s: returns\n", __FUNCTION__));
1947}
1948
1949
1950/**
1951 * interface_method_impl{SUPDRVTRACERREG,pfnTracerOpen}
1952 */
1953static DECLCALLBACK(int) vboxDtTOps_TracerOpen(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uint32_t uCookie,
1954 uintptr_t uArg, uintptr_t *puSessionData)
1955{
1956 if (uCookie != RT_MAKE_U32_FROM_U8('V', 'B', 'D', 'T'))
1957 return VERR_INVALID_MAGIC;
1958 if (uArg)
1959 return VERR_INVALID_PARAMETER;
1960 RT_NOREF_PV(pThis); RT_NOREF_PV(pSession);
1961 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1962
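    /* dtrace_open creates the DTrace state for this session; the state pointer
       doubles as the session data handle. */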
1963 int rc = dtrace_open((dtrace_state_t **)puSessionData, VBoxDtGetCurrentCreds());
1964
1965 VBDT_CLEAR_STACK_DATA();
1966 return RTErrConvertFromErrno(rc);
1967}
1968
1969
1970/**
1971 * interface_method_impl{SUPDRVTRACERREG,pfnTracerIoCtl}
1972 */
1973static DECLCALLBACK(int) vboxDtTOps_TracerIoCtl(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uintptr_t uSessionData,
1974 uintptr_t uCmd, uintptr_t uArg, int32_t *piRetVal)
1975{
1976 AssertPtrReturn(uSessionData, VERR_INVALID_POINTER);
1977 RT_NOREF_PV(pThis); RT_NOREF_PV(pSession);
1978 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1979
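    /* Hand the request to the DTrace ioctl dispatcher; *piRetVal carries the
       DTrace result while the errno-style rc is converted to an IPRT status. */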
1980 int rc = dtrace_ioctl((dtrace_state_t *)uSessionData, (intptr_t)uCmd, (intptr_t)uArg, piRetVal);
1981
1982 VBDT_CLEAR_STACK_DATA();
1983 return RTErrConvertFromErrno(rc);
1984}
1985
1986
1987/**
1988 * interface_method_impl{SUPDRVTRACERREG,pfnTracerClose}
1989 */
1990static DECLCALLBACK(void) vboxDtTOps_TracerClose(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uintptr_t uSessionData)
1991{
1992 AssertPtrReturnVoid(uSessionData);
1993 RT_NOREF_PV(pThis); RT_NOREF_PV(pSession);
1994 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1995
1996 dtrace_close((dtrace_state_t *)uSessionData);
1997
1998 VBDT_CLEAR_STACK_DATA();
1999}
2000
2001
2002/**
2003 * interface_method_impl{SUPDRVTRACERREG,pfnProviderRegister}
2004 */
2005static DECLCALLBACK(int) vboxDtTOps_ProviderRegister(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
2006{
2007 LOG_DTRACE(("%s: %p %s/%s\n", __FUNCTION__, pThis, pCore->pszModName, pCore->pszName));
2008 AssertReturn(pCore->TracerData.DTrace.idProvider == 0, VERR_INTERNAL_ERROR_3);
2009 RT_NOREF_PV(pThis);
2010 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
2011
2012 PVTGDESCPROVIDER pDesc = pCore->pDesc;
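    /* Translate the VTG stability attributes into the DTrace attribute set
       used when registering the provider. */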
2013 dtrace_pattr_t DtAttrs;
2014 vboxDtVtgConvAttr(&DtAttrs.dtpa_provider, &pDesc->AttrSelf);
2015 vboxDtVtgConvAttr(&DtAttrs.dtpa_mod, &pDesc->AttrModules);
2016 vboxDtVtgConvAttr(&DtAttrs.dtpa_func, &pDesc->AttrFunctions);
2017 vboxDtVtgConvAttr(&DtAttrs.dtpa_name, &pDesc->AttrNames);
2018 vboxDtVtgConvAttr(&DtAttrs.dtpa_args, &pDesc->AttrArguments);
2019
2020 /* Note! DTrace may call us back before dtrace_register returns, so we
2021 have to point it to pCore->TracerData.DTrace.idProvider. */
2022 AssertCompile(sizeof(dtrace_provider_id_t) == sizeof(pCore->TracerData.DTrace.idProvider));
2023 int rc = dtrace_register(pCore->pszName,
2024 &DtAttrs,
2025 DTRACE_PRIV_KERNEL,
2026 NULL /* cred */,
2027 &g_vboxDtVtgProvOps,
2028 pCore,
2029 &pCore->TracerData.DTrace.idProvider);
2030 if (!rc)
2031 {
2032 LOG_DTRACE(("%s: idProvider=%p\n", __FUNCTION__, pCore->TracerData.DTrace.idProvider));
2033 AssertPtr(pCore->TracerData.DTrace.idProvider);
2034 rc = VINF_SUCCESS;
2035 }
2036 else
2037 {
2038 pCore->TracerData.DTrace.idProvider = 0;
2039 rc = RTErrConvertFromErrno(rc);
2040 }
2041
2042 VBDT_CLEAR_STACK_DATA();
2043 LOG_DTRACE(("%s: returns %Rrc\n", __FUNCTION__, rc));
2044 return rc;
2045}
2046
2047
2048/**
2049 * interface_method_impl{SUPDRVTRACERREG,pfnProviderDeregister}
2050 */
2051static DECLCALLBACK(int) vboxDtTOps_ProviderDeregister(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
2052{
2053 uintptr_t idProvider = pCore->TracerData.DTrace.idProvider;
2054 LOG_DTRACE(("%s: %p / %p\n", __FUNCTION__, pThis, idProvider));
2055 AssertPtrReturn(idProvider, VERR_INTERNAL_ERROR_3);
2056 RT_NOREF_PV(pThis);
2057 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
2058
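    /* Invalidate the provider first so no new probes can be enabled, then try
       to unregister it; EBUSY means it is still in use and must be retired
       later via the zombie path. */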
2059 dtrace_invalidate(idProvider);
2060 int rc = dtrace_unregister(idProvider);
2061 if (!rc)
2062 {
2063 pCore->TracerData.DTrace.idProvider = 0;
2064 rc = VINF_SUCCESS;
2065 }
2066 else
2067 {
2068 AssertMsg(rc == EBUSY, ("%d\n", rc));
2069 pCore->TracerData.DTrace.fZombie = true;
2070 rc = VERR_TRY_AGAIN;
2071 }
2072
2073 VBDT_CLEAR_STACK_DATA();
2074 LOG_DTRACE(("%s: returns %Rrc\n", __FUNCTION__, rc));
2075 return rc;
2076}
2077
2078
2079/**
2080 * interface_method_impl{SUPDRVTRACERREG,pfnProviderDeregisterZombie}
2081 */
2082static DECLCALLBACK(int) vboxDtTOps_ProviderDeregisterZombie(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
2083{
2084 uintptr_t idProvider = pCore->TracerData.DTrace.idProvider;
2085 LOG_DTRACE(("%s: %p / %p\n", __FUNCTION__, pThis, idProvider));
2086 AssertPtrReturn(idProvider, VERR_INTERNAL_ERROR_3);
2087 Assert(pCore->TracerData.DTrace.fZombie);
2088 RT_NOREF_PV(pThis);
2089 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
2090
2091 int rc = dtrace_unregister(idProvider);
2092 if (!rc)
2093 {
2094 pCore->TracerData.DTrace.idProvider = 0;
2095 rc = VINF_SUCCESS;
2096 }
2097 else
2098 {
2099 AssertMsg(rc == EBUSY, ("%d\n", rc));
2100 rc = VERR_TRY_AGAIN;
2101 }
2102
2103 VBDT_CLEAR_STACK_DATA();
2104 LOG_DTRACE(("%s: returns %Rrc\n", __FUNCTION__, rc));
2105 return rc;
2106}
2107
2108
2109
2110/**
2111 * The tracer registration record of the VBox DTrace implementation.
2112 */
2113static SUPDRVTRACERREG g_VBoxDTraceReg =
2114{
2115 SUPDRVTRACERREG_MAGIC,
2116 SUPDRVTRACERREG_VERSION,
2117 vboxDtTOps_ProbeFireKernel,
2118 vboxDtTOps_ProbeFireUser,
2119 vboxDtTOps_TracerOpen,
2120 vboxDtTOps_TracerIoCtl,
2121 vboxDtTOps_TracerClose,
2122 vboxDtTOps_ProviderRegister,
2123 vboxDtTOps_ProviderDeregister,
2124 vboxDtTOps_ProviderDeregisterZombie,
2125 SUPDRVTRACERREG_MAGIC
2126};
2127
2128
2129
2130/**
2131 * Module termination code.
2132 *
2133 * @param hMod Opaque module handle.
2134 */
2135DECLEXPORT(void) ModuleTerm(void *hMod)
2136{
2137 SUPR0TracerDeregisterImpl(hMod, NULL);
2138 dtrace_detach();
2139 vboxDtTermThreadDb();
2140}
2141
2142
2143/**
2144 * Module initialization code.
2145 *
2146 * @param hMod Opaque module handle.
2147 */
2148DECLEXPORT(int) ModuleInit(void *hMod)
2149{
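    /* Bring the pieces up in dependency order: the thread database, the DTrace
       core, and finally the tracer registration with the support driver;
       failures unwind in reverse order. */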
2150 int rc = vboxDtInitThreadDb();
2151 if (RT_SUCCESS(rc))
2152 {
2153 rc = dtrace_attach();
2154 if (rc == DDI_SUCCESS)
2155 {
2156 rc = SUPR0TracerRegisterImpl(hMod, NULL, &g_VBoxDTraceReg, &g_pVBoxDTraceHlp);
2157 if (RT_SUCCESS(rc))
2158 return rc;
2159
2160 dtrace_detach();
2161 }
2162 else
2163 {
2164 SUPR0Printf("dtrace_attach -> %d\n", rc);
2165 rc = VERR_INTERNAL_ERROR_5;
2166 }
2167 vboxDtTermThreadDb();
2168 }
2169 else
2170 SUPR0Printf("vboxDtInitThreadDb -> %d\n", rc);
2171
2172 return rc;
2173}
2174