VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 98103

Last change on this file since 98103 was 98103, checked in by vboxsync, 17 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 163.6 KB
Line 
1/* $Id: VBoxGuest.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37/** @page pg_vbdrv VBoxGuest
38 *
39 * VBoxGuest is the device driver for VMMDev.
40 *
41 * The device driver is shipped as part of the guest additions. It has roots in
42 * the host VMM support driver (usually known as VBoxDrv), so fixes in platform
43 * specific code may apply to both drivers.
44 *
45 * The common code lives in VBoxGuest.cpp and is compiled both as C++ and C.
46 * The VBoxGuest.cpp source file shall not contain platform specific code,
 * though it must occasionally do a few \#ifdef RT_OS_XXX tests to cater for
48 * platform differences. Though, in those cases, it is common that more than
49 * one platform needs special handling.
50 *
51 * On most platforms the device driver should create two device nodes, one for
52 * full (unrestricted) access to the feature set, and one which only provides a
 * restricted set of functions. These are generally referred to as 'vboxguest'
54 * and 'vboxuser' respectively. Currently, this two device approach is only
55 * implemented on Linux!
56 *
57 */
58
59
60/*********************************************************************************************************************************
61* Header Files *
62*********************************************************************************************************************************/
63#define LOG_GROUP LOG_GROUP_DEFAULT
64#include "VBoxGuestInternal.h"
65#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
66#include <VBox/err.h>
67#include <VBox/log.h>
68#include <VBox/HostServices/GuestPropertySvc.h>
69#include <iprt/ctype.h>
70#include <iprt/mem.h>
71#include <iprt/time.h>
72#include <iprt/memobj.h>
73#include <iprt/asm.h>
74#include <iprt/asm-amd64-x86.h>
75#include <iprt/string.h>
76#include <iprt/process.h>
77#include <iprt/assert.h>
78#include <iprt/param.h>
79#include <iprt/timer.h>
80#ifdef VBOX_WITH_HGCM
81# include <iprt/thread.h>
82#endif
83#include "version-generated.h"
84#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
85# include "revision-generated.h"
86#endif
87#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
88# include <iprt/rand.h>
89#endif
90
91
92/*********************************************************************************************************************************
93* Defined Constants And Macros *
94*********************************************************************************************************************************/
95#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
96
97
98/*********************************************************************************************************************************
99* Internal Functions *
100*********************************************************************************************************************************/
101#ifdef VBOX_WITH_HGCM
102static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
103#endif
104static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
105static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
106static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
107static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents);
108static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt);
109static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt);
110static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
111 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
112static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
113 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
114static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
115 uint32_t fOrMask, uint32_t fNoMask,
116 uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps, bool fSessionTermination);
117static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
118 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags, bool fSessionTermination);
119static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents);
120
121
122/*********************************************************************************************************************************
123* Global Variables *
124*********************************************************************************************************************************/
/** Size of a VMMDevChangeMemBalloon request, including the trailing array of
 *  physical page addresses for one full balloon chunk. */
static const uint32_t g_cbChangeMemBalloonReq = RT_UOFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);

#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 * (NOTE(review): the \#if also covers Darwin — confirm the comment's
 * Solaris-only wording is just historical.)
 */
struct CLANG11WEIRDNESS { PFNRT pfn; } g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    { (PFNRT)RTRandBytes },
    /* RTSemMutex* */
    { (PFNRT)RTSemMutexCreate },
    { (PFNRT)RTSemMutexDestroy },
    { (PFNRT)RTSemMutexRequest },
    { (PFNRT)RTSemMutexRequestNoResume },
    { (PFNRT)RTSemMutexRequestDebug },
    { (PFNRT)RTSemMutexRequestNoResumeDebug },
    { (PFNRT)RTSemMutexRelease },
    { (PFNRT)RTSemMutexIsOwned },
    { NULL } /* terminator */
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
148
149
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we retry with a small stack of attempts (ahTries)
 * and clean up the rejected ones afterwards.
 *
 * @returns VBox status code (ignored).
 * @param   pDevExt     The device extension.
 */
static int vgdrvInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Query the required space.
     */
    VMMDevReqHypervisorInfo *pReq;
    int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
    if (RT_FAILURE(rc))
        return rc;
    pReq->hypervisorStart = 0;
    pReq->hypervisorSize  = 0;
    rc = VbglR0GRPerform(&pReq->header);
    if (RT_FAILURE(rc)) /* this shouldn't happen! */
    {
        VbglR0GRFree(&pReq->header);
        return rc;
    }

    /*
     * The VMM will report back if there is nothing it wants to map, like for
     * instance in VT-x and AMD-V mode.
     */
    if (pReq->hypervisorSize == 0)
        Log(("vgdrvInitFixateGuestMappings: nothing to do\n"));
    else
    {
        /*
         * We have to try several times since the host can be picky
         * about certain addresses.
         */
        RTR0MEMOBJ  hFictive     = NIL_RTR0MEMOBJ;
        uint32_t    cbHypervisor = pReq->hypervisorSize;
        RTR0MEMOBJ  ahTries[5];
        uint32_t    iTry;
        bool        fBitched = false;
        Log(("vgdrvInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
        for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
        {
            /*
             * Reserve space, or if that isn't supported, create a object for
             * some fictive physical memory and map that in to kernel space.
             *
             * To make the code a bit uglier, most systems cannot help with
             * 4MB alignment, so we have to deal with that in addition to
             * having two ways of getting the memory.
             */
            uint32_t    uAlignment = _4M;
            RTR0MEMOBJ  hObj;
            rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
            if (rc == VERR_NOT_SUPPORTED)
            {
                /* Fall back to page alignment; over-reserve by 4MB so a 4MB
                   aligned start address can be picked inside the block below. */
                uAlignment = PAGE_SIZE;
                rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
            }
            /*
             * If both RTR0MemObjReserveKernel calls above failed because either not supported or
             * not implemented at all at the current platform, try to map the memory object into the
             * virtual kernel space.
             */
            if (rc == VERR_NOT_SUPPORTED)
            {
                if (hFictive == NIL_RTR0MEMOBJ)
                {
                    /* Enter the fictive physical range only once; it is reused
                       by subsequent retries and freed in the cleanup below. */
                    rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
                    if (RT_FAILURE(rc))
                        break;
                    hFictive = hObj;
                }
                uAlignment = _4M;
                rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (rc == VERR_NOT_SUPPORTED)
                {
                    uAlignment = PAGE_SIZE;
                    rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                }
            }
            if (RT_FAILURE(rc))
            {
                LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
                        rc, cbHypervisor, uAlignment, iTry));
                fBitched = true;
                break;
            }

            /*
             * Try set it.
             */
            pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
            pReq->header.rc          = VERR_INTERNAL_ERROR;
            pReq->hypervisorSize     = cbHypervisor;
            pReq->hypervisorStart    = (RTGCPTR32)(uintptr_t)RTR0MemObjAddress(hObj);
            if (    uAlignment == PAGE_SIZE
                &&  pReq->hypervisorStart & (_4M - 1))
                pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
            AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));

            rc = VbglR0GRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
                Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
                     RTR0MemObjAddress(pDevExt->hGuestMappings),
                     RTR0MemObjSize(pDevExt->hGuestMappings),
                     uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
                break;
            }
            /* Host rejected this address: remember the attempt for cleanup and retry. */
            ahTries[iTry] = hObj;
        }

        /*
         * Cleanup failed attempts.  (The successful hObj, if any, was never
         * pushed onto ahTries, so it survives this loop.)
         */
        while (iTry-- > 0)
            RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
        if (    RT_FAILURE(rc)
            &&  hFictive != NIL_RTR0PTR)
            RTR0MemObjFree(hFictive, false /* fFreeMappings */);
        if (RT_FAILURE(rc) && !fBitched)
            LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
    }
    VbglR0GRFree(&pReq->header);

    /*
     * We ignore failed attempts for now.
     */
    return VINF_SUCCESS;
}
288
289
290/**
291 * Undo what vgdrvInitFixateGuestMappings did.
292 *
293 * @param pDevExt The device extension.
294 */
295static void vgdrvTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
296{
297 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
298 {
299 /*
300 * Tell the host that we're going to free the memory we reserved for
301 * it, the free it up. (Leak the memory if anything goes wrong here.)
302 */
303 VMMDevReqHypervisorInfo *pReq;
304 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
305 if (RT_SUCCESS(rc))
306 {
307 pReq->hypervisorStart = 0;
308 pReq->hypervisorSize = 0;
309 rc = VbglR0GRPerform(&pReq->header);
310 VbglR0GRFree(&pReq->header);
311 }
312 if (RT_SUCCESS(rc))
313 {
314 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
315 AssertRC(rc);
316 }
317 else
318 LogRel(("vgdrvTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
319
320 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
321 }
322}
323
324
325
326/**
327 * Report the guest information to the host.
328 *
329 * @returns IPRT status code.
330 * @param enmOSType The OS type to report.
331 */
332static int vgdrvReportGuestInfo(VBOXOSTYPE enmOSType)
333{
334 /*
335 * Allocate and fill in the two guest info reports.
336 */
337 VMMDevReportGuestInfo2 *pReqInfo2 = NULL;
338 VMMDevReportGuestInfo *pReqInfo1 = NULL;
339 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReqInfo2, sizeof (VMMDevReportGuestInfo2), VMMDevReq_ReportGuestInfo2);
340 Log(("vgdrvReportGuestInfo: VbglR0GRAlloc VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
341 if (RT_SUCCESS(rc))
342 {
343 pReqInfo2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
344 pReqInfo2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
345 pReqInfo2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
346 pReqInfo2->guestInfo.additionsRevision = VBOX_SVN_REV;
347 pReqInfo2->guestInfo.additionsFeatures = VBOXGSTINFO2_F_REQUESTOR_INFO;
348 RTStrCopy(pReqInfo2->guestInfo.szName, sizeof(pReqInfo2->guestInfo.szName), VBOX_VERSION_STRING);
349
350 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReqInfo1, sizeof (VMMDevReportGuestInfo), VMMDevReq_ReportGuestInfo);
351 Log(("vgdrvReportGuestInfo: VbglR0GRAlloc VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
352 if (RT_SUCCESS(rc))
353 {
354 pReqInfo1->guestInfo.interfaceVersion = VMMDEV_VERSION;
355 pReqInfo1->guestInfo.osType = enmOSType;
356
357 /*
358 * There are two protocols here:
359 * 1. Info2 + Info1. Supported by >=3.2.51.
360 * 2. Info1 and optionally Info2. The old protocol.
361 *
362 * We try protocol 1 first. It will fail with VERR_NOT_SUPPORTED
363 * if not supported by the VMMDev (message ordering requirement).
364 */
365 rc = VbglR0GRPerform(&pReqInfo2->header);
366 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
367 if (RT_SUCCESS(rc))
368 {
369 rc = VbglR0GRPerform(&pReqInfo1->header);
370 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
371 }
372 else if ( rc == VERR_NOT_SUPPORTED
373 || rc == VERR_NOT_IMPLEMENTED)
374 {
375 rc = VbglR0GRPerform(&pReqInfo1->header);
376 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
377 if (RT_SUCCESS(rc))
378 {
379 rc = VbglR0GRPerform(&pReqInfo2->header);
380 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
381 if (rc == VERR_NOT_IMPLEMENTED)
382 rc = VINF_SUCCESS;
383 }
384 }
385 VbglR0GRFree(&pReqInfo1->header);
386 }
387 VbglR0GRFree(&pReqInfo2->header);
388 }
389
390 return rc;
391}
392
393
394/**
395 * Report the guest driver status to the host.
396 *
397 * @returns IPRT status code.
398 * @param fActive Flag whether the driver is now active or not.
399 */
400static int vgdrvReportDriverStatus(bool fActive)
401{
402 /*
403 * Report guest status of the VBox driver to the host.
404 */
405 VMMDevReportGuestStatus *pReq2 = NULL;
406 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestStatus);
407 Log(("vgdrvReportDriverStatus: VbglR0GRAlloc VMMDevReportGuestStatus completed with rc=%Rrc\n", rc));
408 if (RT_SUCCESS(rc))
409 {
410 pReq2->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
411 pReq2->guestStatus.status = fActive ?
412 VBoxGuestFacilityStatus_Active
413 : VBoxGuestFacilityStatus_Inactive;
414 pReq2->guestStatus.flags = 0;
415 rc = VbglR0GRPerform(&pReq2->header);
416 Log(("vgdrvReportDriverStatus: VbglR0GRPerform VMMDevReportGuestStatus completed with fActive=%d, rc=%Rrc\n",
417 fActive ? 1 : 0, rc));
418 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
419 rc = VINF_SUCCESS;
420 VbglR0GRFree(&pReq2->header);
421 }
422
423 return rc;
424}
425
426
427/** @name Memory Ballooning
428 * @{
429 */
430
431/**
432 * Inflate the balloon by one chunk represented by an R0 memory object.
433 *
434 * The caller owns the balloon mutex.
435 *
436 * @returns IPRT status code.
437 * @param pMemObj Pointer to the R0 memory object.
438 * @param pReq The pre-allocated request for performing the VMMDev call.
439 */
440static int vgdrvBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
441{
442 uint32_t iPage;
443 int rc;
444
445 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
446 {
447 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
448 pReq->aPhysPage[iPage] = phys;
449 }
450
451 pReq->fInflate = true;
452 pReq->header.size = g_cbChangeMemBalloonReq;
453 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
454
455 rc = VbglR0GRPerform(&pReq->header);
456 if (RT_FAILURE(rc))
457 LogRel(("vgdrvBalloonInflate: VbglR0GRPerform failed. rc=%Rrc\n", rc));
458 return rc;
459}
460
461
462/**
463 * Deflate the balloon by one chunk - info the host and free the memory object.
464 *
465 * The caller owns the balloon mutex.
466 *
467 * @returns IPRT status code.
468 * @param pMemObj Pointer to the R0 memory object.
469 * The memory object will be freed afterwards.
470 * @param pReq The pre-allocated request for performing the VMMDev call.
471 */
472static int vgdrvBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
473{
474 uint32_t iPage;
475 int rc;
476
477 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
478 {
479 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
480 pReq->aPhysPage[iPage] = phys;
481 }
482
483 pReq->fInflate = false;
484 pReq->header.size = g_cbChangeMemBalloonReq;
485 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
486
487 rc = VbglR0GRPerform(&pReq->header);
488 if (RT_FAILURE(rc))
489 {
490 LogRel(("vgdrvBalloonDeflate: VbglR0GRPerform failed. rc=%Rrc\n", rc));
491 return rc;
492 }
493
494 rc = RTR0MemObjFree(*pMemObj, true);
495 if (RT_FAILURE(rc))
496 {
497 LogRel(("vgdrvBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
498 return rc;
499 }
500
501 *pMemObj = NIL_RTR0MEMOBJ;
502 return VINF_SUCCESS;
503}
504
505
/**
 * Inflate/deflate the memory balloon and notify the host.
 *
 * This is a worker used by vgdrvIoCtl_CheckMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   cBalloonChunks  The new size of the balloon in chunks of 1MB.
 * @param   pfHandleInR3    Where to return the handle-in-ring3 indicator
 *                          (VINF_SUCCESS if set).
 */
static int vgdrvSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, bool *pfHandleInR3)
{
    int rc = VINF_SUCCESS;

    if (pDevExt->MemBalloon.fUseKernelAPI)
    {
        VMMDevChangeMemBalloon *pReq;
        uint32_t i;

        /* Reject targets the bookkeeping array cannot hold. */
        if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
        {
            LogRel(("vgdrvSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
                    cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this compares the target against cMaxChunks rather than
         * the current cChunks, so only a balloon-to-maximum request short
         * circuits here; other no-op requests fall through to the loops below
         * (which then do nothing) — confirm the cMaxChunks comparison is
         * intentional. */
        if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
            return VINF_SUCCESS;   /* nothing to do */

        /* Lazily allocate the (zero-initialized) chunk tracking array. */
        if (   cBalloonChunks > pDevExt->MemBalloon.cChunks
            && !pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("vgdrvSetBalloonSizeKernel: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
        }

        /* One request buffer is reused for every chunk transition below. */
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
        if (RT_FAILURE(rc))
            return rc;

        if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
        {
            /* inflate */
            for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
            {
                rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
                                           VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
                if (RT_FAILURE(rc))
                {
                    if (rc == VERR_NOT_SUPPORTED)
                    {
                        /* not supported -- fall back to the R3-allocated memory. */
                        rc = VINF_SUCCESS;
                        pDevExt->MemBalloon.fUseKernelAPI = false;
                        Assert(pDevExt->MemBalloon.cChunks == 0);
                        Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
                    }
                    /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
                     *      cannot allocate more memory => don't try further, just stop here */
                    /* else: XXX what else can fail?  VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
                    break;
                }

                rc = vgdrvBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                if (RT_FAILURE(rc))
                {
                    /* Undo the allocation for this chunk; cChunks stays at the
                       last successfully inflated count (partial success). */
                    Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                    RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    break;
                }
                pDevExt->MemBalloon.cChunks++;
            }
        }
        else
        {
            /* deflate -- release chunks from the top down. */
            for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
            {
                rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                if (RT_FAILURE(rc))
                {
                    Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
                    break;
                }
                pDevExt->MemBalloon.cChunks--;
            }
        }

        VbglR0GRFree(&pReq->header);
    }

    /*
     * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
     * the balloon changes via the other API.
     */
    *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;

    return rc;
}
611
612
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for vgdrvIoCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   pvChunk         The address of the chunk to add to / remove from the
 *                          balloon. (user space address)
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vgdrvSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, RTR3PTR pvChunk, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    PRTR0MEMOBJ pMemObj = NULL;
    int rc = VINF_SUCCESS;
    uint32_t i;
    RT_NOREF1(pSession);

    if (fInflate)
    {
        /* Refuse to grow past the maximum (or before the max was queried). */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vgdrvSetBalloonSizeFromUser: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the tracking array; every slot starts out NIL. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("vgdrvSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vgdrvSetBalloonSizeFromUser: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflation this also picks the first free slot; for deflation it
     * locates the object backing pvChunk.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == pvChunk)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;
    pReq->header.fRequestor = pSession->fRequestor;

    if (fInflate)
    {
        /* Pin the user pages for the lifetime of the ballooned chunk. */
        rc = RTR0MemObjLockUser(pMemObj, pvChunk, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vgdrvBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Unpin again on failure so the slot is reusable. */
                Log(("vgdrvSetBalloonSizeFromUser(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vgdrvBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vgdrvSetBalloonSizeFromUser(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglR0GRFree(&pReq->header);
    return rc;
}
730
731
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session. Can be NULL at unload.
 */
static void vgdrvCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session (or driver unload, pSession == NULL) may tear
       the balloon down; other sessions leave it untouched. */
    if (   pDevExt->MemBalloon.pOwner == pSession
        || pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                /* fRequestor is kernel here, as we're cleaning up. */

                /* Deflate from the top down; on failure the remaining chunks
                   are deliberately leaked (the host still owns those pages). */
                uint32_t i;
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vgdrvCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglR0GRFree(&pReq->header);
            }
            else
                LogRel(("vgdrvCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            /* The tracking array goes away regardless of deflate success. */
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
781
782/** @} */
783
784
785
786/** @name Heartbeat
787 * @{
788 */
789
790/**
791 * Sends heartbeat to host.
792 *
793 * @returns VBox status code.
794 */
795static int vgdrvHeartbeatSend(PVBOXGUESTDEVEXT pDevExt)
796{
797 int rc;
798 if (pDevExt->pReqGuestHeartbeat)
799 {
800 rc = VbglR0GRPerform(pDevExt->pReqGuestHeartbeat);
801 Log3(("vgdrvHeartbeatSend: VbglR0GRPerform vgdrvHeartbeatSend completed with rc=%Rrc\n", rc));
802 }
803 else
804 rc = VERR_INVALID_STATE;
805 return rc;
806}
807
808
809/**
810 * Callback for heartbeat timer.
811 */
812static DECLCALLBACK(void) vgdrvHeartbeatTimerHandler(PRTTIMER hTimer, void *pvUser, uint64_t iTick)
813{
814 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
815 int rc;
816 AssertReturnVoid(pDevExt);
817
818 rc = vgdrvHeartbeatSend(pDevExt);
819 if (RT_FAILURE(rc))
820 Log(("HB Timer: vgdrvHeartbeatSend failed: rc=%Rrc\n", rc));
821
822 NOREF(hTimer); NOREF(iTick);
823}
824
825
826/**
827 * Configure the host to check guest's heartbeat
828 * and get heartbeat interval from the host.
829 *
830 * @returns VBox status code.
831 * @param pDevExt The device extension.
832 * @param fEnabled Set true to enable guest heartbeat checks on host.
833 */
834static int vgdrvHeartbeatHostConfigure(PVBOXGUESTDEVEXT pDevExt, bool fEnabled)
835{
836 VMMDevReqHeartbeat *pReq;
837 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_HeartbeatConfigure);
838 Log(("vgdrvHeartbeatHostConfigure: VbglR0GRAlloc vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
839 if (RT_SUCCESS(rc))
840 {
841 pReq->fEnabled = fEnabled;
842 pReq->cNsInterval = 0;
843 rc = VbglR0GRPerform(&pReq->header);
844 Log(("vgdrvHeartbeatHostConfigure: VbglR0GRPerform vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
845 pDevExt->cNsHeartbeatInterval = pReq->cNsInterval;
846 VbglR0GRFree(&pReq->header);
847 }
848 return rc;
849}
850
851
/**
 * Initializes the heartbeat timer.
 *
 * This feature may be disabled by the host.
 *
 * @returns VBox status (ignored).
 * @param   pDevExt     The device extension.
 */
static int vgdrvHeartbeatInit(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Make sure that heartbeat checking is disabled.
     * (NOTE(review): presumably resets host-side state left over from a
     * previous driver instance before re-enabling below — confirm.)
     */
    int rc = vgdrvHeartbeatHostConfigure(pDevExt, false);
    if (RT_SUCCESS(rc))
    {
        /* Enabling also retrieves the heartbeat interval from the host
           (stored in pDevExt->cNsHeartbeatInterval by the configure call). */
        rc = vgdrvHeartbeatHostConfigure(pDevExt, true);
        if (RT_SUCCESS(rc))
        {
            /*
             * Preallocate the request to use it from the timer callback because:
             *    1) on Windows VbglR0GRAlloc must be called at IRQL <= APC_LEVEL
             *       and the timer callback runs at DISPATCH_LEVEL;
             *    2) avoid repeated allocations.
             */
            rc = VbglR0GRAlloc(&pDevExt->pReqGuestHeartbeat, sizeof(*pDevExt->pReqGuestHeartbeat), VMMDevReq_GuestHeartbeat);
            if (RT_SUCCESS(rc))
            {
                LogRel(("vgdrvHeartbeatInit: Setting up heartbeat to trigger every %RU64 milliseconds\n",
                        pDevExt->cNsHeartbeatInterval / RT_NS_1MS));
                rc = RTTimerCreateEx(&pDevExt->pHeartbeatTimer, pDevExt->cNsHeartbeatInterval, 0 /*fFlags*/,
                                     (PFNRTTIMER)vgdrvHeartbeatTimerHandler, pDevExt);
                if (RT_SUCCESS(rc))
                {
                    rc = RTTimerStart(pDevExt->pHeartbeatTimer, 0);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;

                    LogRel(("vgdrvHeartbeatInit: Heartbeat timer failed to start, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("vgdrvHeartbeatInit: Failed to create heartbeat timer: %Rrc\n", rc));

                /* Undo the request preallocation on any timer failure. */
                VbglR0GRFree(pDevExt->pReqGuestHeartbeat);
                pDevExt->pReqGuestHeartbeat = NULL;
            }
            else
                LogRel(("vgdrvHeartbeatInit: VbglR0GRAlloc(VMMDevReq_GuestHeartbeat): %Rrc\n", rc));

            /* Timer setup failed: tell the host to stop expecting heartbeats
               so it doesn't flag this guest as hung. */
            LogRel(("vgdrvHeartbeatInit: Failed to set up the timer, guest heartbeat is disabled\n"));
            vgdrvHeartbeatHostConfigure(pDevExt, false);
        }
        else
            LogRel(("vgdrvHeartbeatInit: Failed to configure host for heartbeat checking: rc=%Rrc\n", rc));
    }
    return rc;
}
909
910/** @} */
911
912
913/**
914 * Helper to reinit the VMMDev communication after hibernation.
915 *
916 * @returns VBox status code.
917 * @param pDevExt The device extension.
918 * @param enmOSType The OS type.
919 *
920 * @todo Call this on all platforms, not just windows.
921 */
922int VGDrvCommonReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
923{
924 int rc = vgdrvReportGuestInfo(enmOSType);
925 if (RT_SUCCESS(rc))
926 {
927 rc = vgdrvReportDriverStatus(true /* Driver is active */);
928 if (RT_FAILURE(rc))
929 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
930 }
931 else
932 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
933 LogFlow(("VGDrvCommonReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
934 RT_NOREF1(pDevExt);
935 return rc;
936}
937
938
939/**
940 * Initializes the release logger (debug is implicit), if configured.
941 *
942 * @returns IPRT status code.
943 */
944int VGDrvCommonInitLoggers(void)
945{
946#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
947 /*
948 * Create the release log.
949 */
950 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
951 PRTLOGGER pRelLogger;
952 int rc = RTLogCreate(&pRelLogger, 0 /*fFlags*/, "all", "VBOXGUEST_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
953 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
954 if (RT_SUCCESS(rc))
955 RTLogRelSetDefaultInstance(pRelLogger);
956 /** @todo Add native hook for getting logger config parameters and setting
957 * them. On linux we should use the module parameter stuff... */
958 return rc;
959#else
960 return VINF_SUCCESS;
961#endif
962}
963
964
965/**
966 * Destroys the loggers.
967 */
968void VGDrvCommonDestroyLoggers(void)
969{
970#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
971 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
972 RTLogDestroy(RTLogSetDefaultInstance(NULL));
973#endif
974}
975
976
977/**
978 * Initialize the device extension fundament.
979 *
980 * There are no device resources at this point, VGDrvCommonInitDevExtResources
981 * should be called when they are available.
982 *
983 * @returns VBox status code.
984 * @param pDevExt The device extension to init.
985 */
986int VGDrvCommonInitDevExtFundament(PVBOXGUESTDEVEXT pDevExt)
987{
988 int rc;
989 AssertMsg( pDevExt->uInitState != VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT
990 && pDevExt->uInitState != VBOXGUESTDEVEXT_INIT_STATE_RESOURCES, ("uInitState=%#x\n", pDevExt->uInitState));
991
992 /*
993 * Initialize the data.
994 */
995 pDevExt->IOPortBase = UINT16_MAX;
996 pDevExt->pVMMDevMemory = NULL;
997 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
998 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
999 pDevExt->fHostFeatures = 0;
1000 pDevExt->pIrqAckEvents = NULL;
1001 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
1002 RTListInit(&pDevExt->WaitList);
1003#ifdef VBOX_WITH_HGCM
1004 RTListInit(&pDevExt->HGCMWaitList);
1005#endif
1006#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1007 RTListInit(&pDevExt->WakeUpList);
1008#endif
1009 RTListInit(&pDevExt->WokenUpList);
1010 RTListInit(&pDevExt->FreeList);
1011 RTListInit(&pDevExt->SessionList);
1012 pDevExt->cSessions = 0;
1013 pDevExt->fLoggingEnabled = false;
1014 pDevExt->f32PendingEvents = 0;
1015 pDevExt->u32MousePosChangedSeq = 0;
1016 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
1017 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
1018 pDevExt->MemBalloon.cChunks = 0;
1019 pDevExt->MemBalloon.cMaxChunks = 0;
1020 pDevExt->MemBalloon.fUseKernelAPI = true;
1021 pDevExt->MemBalloon.paMemObj = NULL;
1022 pDevExt->MemBalloon.pOwner = NULL;
1023 pDevExt->pfnMouseNotifyCallback = NULL;
1024 pDevExt->pvMouseNotifyCallbackArg = NULL;
1025 pDevExt->pReqGuestHeartbeat = NULL;
1026
1027 pDevExt->fFixedEvents = 0;
1028 vgdrvBitUsageTrackerClear(&pDevExt->EventFilterTracker);
1029 pDevExt->fEventFilterHost = UINT32_MAX; /* forces a report */
1030
1031 vgdrvBitUsageTrackerClear(&pDevExt->MouseStatusTracker);
1032 pDevExt->fMouseStatusHost = UINT32_MAX; /* forces a report */
1033
1034 pDevExt->fAcquireModeGuestCaps = 0;
1035 pDevExt->fSetModeGuestCaps = 0;
1036 pDevExt->fAcquiredGuestCaps = 0;
1037 vgdrvBitUsageTrackerClear(&pDevExt->SetGuestCapsTracker);
1038 pDevExt->fGuestCapsHost = UINT32_MAX; /* forces a report */
1039
1040 /*
1041 * Create the wait and session spinlocks as well as the ballooning mutex.
1042 */
1043 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
1044 if (RT_SUCCESS(rc))
1045 {
1046 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
1047 if (RT_SUCCESS(rc))
1048 {
1049 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
1050 if (RT_SUCCESS(rc))
1051 {
1052 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT;
1053 return VINF_SUCCESS;
1054 }
1055
1056 LogRel(("VGDrvCommonInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
1057 RTSpinlockDestroy(pDevExt->SessionSpinlock);
1058 }
1059 else
1060 LogRel(("VGDrvCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
1061 RTSpinlockDestroy(pDevExt->EventSpinlock);
1062 }
1063 else
1064 LogRel(("VGDrvCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
1065
1066 pDevExt->uInitState = 0;
1067 return rc;
1068}
1069
1070
1071/**
1072 * Counter to VGDrvCommonInitDevExtFundament.
1073 *
1074 * @param pDevExt The device extension.
1075 */
1076void VGDrvCommonDeleteDevExtFundament(PVBOXGUESTDEVEXT pDevExt)
1077{
1078 int rc2;
1079 AssertMsgReturnVoid(pDevExt->uInitState == VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT, ("uInitState=%#x\n", pDevExt->uInitState));
1080 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_DELETED;
1081
1082 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1083 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1084 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1085}
1086
1087
1088/**
1089 * Initializes the VBoxGuest device extension resource parts.
1090 *
1091 * The native code locates the VMMDev on the PCI bus and retrieve the MMIO and
1092 * I/O port ranges, this function will take care of mapping the MMIO memory (if
1093 * present). Upon successful return the native code should set up the interrupt
1094 * handler.
1095 *
1096 * @returns VBox status code.
1097 *
1098 * @param pDevExt The device extension. Allocated by the native code.
1099 * @param IOPortBase The base of the I/O port range.
1100 * @param pvMMIOBase The base of the MMIO memory mapping.
1101 * This is optional, pass NULL if not present.
1102 * @param cbMMIO The size of the MMIO memory mapping.
1103 * This is optional, pass 0 if not present.
1104 * @param enmOSType The guest OS type to report to the VMMDev.
1105 * @param fFixedEvents Events that will be enabled upon init and no client
1106 * will ever be allowed to mask.
1107 */
1108int VGDrvCommonInitDevExtResources(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
1109 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
1110{
1111 int rc;
1112 AssertMsgReturn(pDevExt->uInitState == VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT, ("uInitState=%#x\n", pDevExt->uInitState),
1113 VERR_INVALID_STATE);
1114
1115 /*
1116 * If there is an MMIO region validate the version and size.
1117 */
1118 if (pvMMIOBase)
1119 {
1120 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
1121 Assert(cbMMIO);
1122 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
1123 && pVMMDev->u32Size >= 32
1124 && pVMMDev->u32Size <= cbMMIO)
1125 {
1126 pDevExt->pVMMDevMemory = pVMMDev;
1127 Log(("VGDrvCommonInitDevExtResources: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
1128 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
1129 }
1130 else /* try live without it. */
1131 LogRel(("VGDrvCommonInitDevExtResources: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
1132 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
1133 }
1134
1135 /*
1136 * Initialize the guest library and report the guest info back to VMMDev,
1137 * set the interrupt control filter mask, and fixate the guest mappings
1138 * made by the VMM.
1139 */
1140 pDevExt->IOPortBase = IOPortBase;
1141 rc = VbglR0InitPrimary(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory, &pDevExt->fHostFeatures);
1142 if (RT_SUCCESS(rc))
1143 {
1144 VMMDevRequestHeader *pAckReq = NULL;
1145 rc = VbglR0GRAlloc(&pAckReq, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
1146 if (RT_SUCCESS(rc))
1147 {
1148 pDevExt->PhysIrqAckEvents = VbglR0PhysHeapGetPhysAddr(pAckReq);
1149 Assert(pDevExt->PhysIrqAckEvents != 0);
1150 ASMCompilerBarrier(); /* linux + solaris already have IRQs hooked up at this point, so take care. */
1151 pDevExt->pIrqAckEvents = (VMMDevEvents *)pAckReq;
1152
1153 rc = vgdrvReportGuestInfo(enmOSType);
1154 if (RT_SUCCESS(rc))
1155 {
1156 /*
1157 * Set the fixed event and make sure the host doesn't have any lingering
1158 * the guest capabilities or mouse status bits set.
1159 */
1160#ifdef VBOX_WITH_HGCM
1161 fFixedEvents |= VMMDEV_EVENT_HGCM;
1162#endif
1163 pDevExt->fFixedEvents = fFixedEvents;
1164 rc = vgdrvResetEventFilterOnHost(pDevExt, fFixedEvents);
1165 if (RT_SUCCESS(rc))
1166 {
1167 rc = vgdrvResetCapabilitiesOnHost(pDevExt);
1168 if (RT_SUCCESS(rc))
1169 {
1170 rc = vgdrvResetMouseStatusOnHost(pDevExt);
1171 if (RT_SUCCESS(rc))
1172 {
1173 /*
1174 * Initialize stuff which may fail without requiring the driver init to fail.
1175 */
1176 vgdrvInitFixateGuestMappings(pDevExt);
1177 vgdrvHeartbeatInit(pDevExt);
1178
1179 /*
1180 * Done!
1181 */
1182 rc = vgdrvReportDriverStatus(true /* Driver is active */);
1183 if (RT_FAILURE(rc))
1184 LogRel(("VGDrvCommonInitDevExtResources: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
1185
1186 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_RESOURCES;
1187 LogFlowFunc(("VGDrvCommonInitDevExtResources: returns success\n"));
1188 return VINF_SUCCESS;
1189 }
1190 LogRel(("VGDrvCommonInitDevExtResources: failed to clear mouse status: rc=%Rrc\n", rc));
1191 }
1192 else
1193 LogRel(("VGDrvCommonInitDevExtResources: failed to clear guest capabilities: rc=%Rrc\n", rc));
1194 }
1195 else
1196 LogRel(("VGDrvCommonInitDevExtResources: failed to set fixed event filter: rc=%Rrc\n", rc));
1197 pDevExt->fFixedEvents = 0;
1198 }
1199 else
1200 LogRel(("VGDrvCommonInitDevExtResources: vgdrvReportGuestInfo failed: rc=%Rrc\n", rc));
1201 VbglR0GRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
1202 }
1203 else
1204 LogRel(("VGDrvCommonInitDevExtResources: VbglR0GRAlloc failed: rc=%Rrc\n", rc));
1205
1206 VbglR0TerminatePrimary();
1207 }
1208 else
1209 LogRel(("VGDrvCommonInitDevExtResources: VbglR0InitPrimary failed: rc=%Rrc\n", rc));
1210 pDevExt->IOPortBase = UINT16_MAX;
1211 return rc;
1212}
1213
1214
1215/**
1216 * Deletes all the items in a wait chain.
1217 * @param pList The head of the chain.
1218 */
1219static void vgdrvDeleteWaitList(PRTLISTNODE pList)
1220{
1221 while (!RTListIsEmpty(pList))
1222 {
1223 int rc2;
1224 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1225 RTListNodeRemove(&pWait->ListNode);
1226
1227 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1228 pWait->Event = NIL_RTSEMEVENTMULTI;
1229 pWait->pSession = NULL;
1230 RTMemFree(pWait);
1231 }
1232}
1233
1234
1235/**
1236 * Counter to VGDrvCommonInitDevExtResources.
1237 *
1238 * @param pDevExt The device extension.
1239 */
1240void VGDrvCommonDeleteDevExtResources(PVBOXGUESTDEVEXT pDevExt)
1241{
1242 Log(("VGDrvCommonDeleteDevExtResources:\n"));
1243 AssertMsgReturnVoid(pDevExt->uInitState == VBOXGUESTDEVEXT_INIT_STATE_RESOURCES, ("uInitState=%#x\n", pDevExt->uInitState));
1244 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT;
1245
1246 /*
1247 * Stop and destroy HB timer and disable host heartbeat checking.
1248 */
1249 if (pDevExt->pHeartbeatTimer)
1250 {
1251 RTTimerDestroy(pDevExt->pHeartbeatTimer);
1252 vgdrvHeartbeatHostConfigure(pDevExt, false);
1253 }
1254
1255 VbglR0GRFree(pDevExt->pReqGuestHeartbeat);
1256 pDevExt->pReqGuestHeartbeat = NULL;
1257
1258 /*
1259 * Clean up the bits that involves the host first.
1260 */
1261 vgdrvTermUnfixGuestMappings(pDevExt);
1262 if (!RTListIsEmpty(&pDevExt->SessionList))
1263 {
1264 LogRelFunc(("session list not empty!\n"));
1265 RTListInit(&pDevExt->SessionList);
1266 }
1267
1268 /*
1269 * Update the host flags (mouse status etc) not to reflect this session.
1270 */
1271 pDevExt->fFixedEvents = 0;
1272 vgdrvResetEventFilterOnHost(pDevExt, 0 /*fFixedEvents*/);
1273 vgdrvResetCapabilitiesOnHost(pDevExt);
1274 vgdrvResetMouseStatusOnHost(pDevExt);
1275
1276 vgdrvCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
1277
1278 /*
1279 * No more IRQs.
1280 */
1281 pDevExt->pIrqAckEvents = NULL; /* Will be freed by VbglR0TerminatePrimary. */
1282 ASMAtomicWriteU32(&pDevExt->fHostFeatures, 0);
1283
1284 /*
1285 * Cleanup all the other resources.
1286 */
1287 vgdrvDeleteWaitList(&pDevExt->WaitList);
1288#ifdef VBOX_WITH_HGCM
1289 vgdrvDeleteWaitList(&pDevExt->HGCMWaitList);
1290#endif
1291#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1292 vgdrvDeleteWaitList(&pDevExt->WakeUpList);
1293#endif
1294 vgdrvDeleteWaitList(&pDevExt->WokenUpList);
1295 vgdrvDeleteWaitList(&pDevExt->FreeList);
1296
1297 VbglR0TerminatePrimary();
1298
1299
1300 pDevExt->pVMMDevMemory = NULL;
1301 pDevExt->IOPortBase = 0;
1302}
1303
1304
1305/**
1306 * Initializes the VBoxGuest device extension when the device driver is loaded.
1307 *
1308 * The native code locates the VMMDev on the PCI bus and retrieve the MMIO and
1309 * I/O port ranges, this function will take care of mapping the MMIO memory (if
1310 * present). Upon successful return the native code should set up the interrupt
1311 * handler.
1312 *
1313 * Instead of calling this method, the host specific code choose to perform a
1314 * more granular initialization using:
1315 * 1. VGDrvCommonInitLoggers
1316 * 2. VGDrvCommonInitDevExtFundament
1317 * 3. VGDrvCommonInitDevExtResources
1318 *
1319 * @returns VBox status code.
1320 *
1321 * @param pDevExt The device extension. Allocated by the native code.
1322 * @param IOPortBase The base of the I/O port range.
1323 * @param pvMMIOBase The base of the MMIO memory mapping.
1324 * This is optional, pass NULL if not present.
1325 * @param cbMMIO The size of the MMIO memory mapping.
1326 * This is optional, pass 0 if not present.
1327 * @param enmOSType The guest OS type to report to the VMMDev.
1328 * @param fFixedEvents Events that will be enabled upon init and no client
1329 * will ever be allowed to mask.
1330 */
1331int VGDrvCommonInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
1332 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
1333{
1334 int rc;
1335 VGDrvCommonInitLoggers();
1336
1337 rc = VGDrvCommonInitDevExtFundament(pDevExt);
1338 if (RT_SUCCESS(rc))
1339 {
1340 rc = VGDrvCommonInitDevExtResources(pDevExt, IOPortBase, pvMMIOBase, cbMMIO, enmOSType, fFixedEvents);
1341 if (RT_SUCCESS(rc))
1342 return rc;
1343
1344 VGDrvCommonDeleteDevExtFundament(pDevExt);
1345 }
1346 VGDrvCommonDestroyLoggers();
1347 return rc; /* (failed) */
1348}
1349
1350
1351/**
1352 * Checks if the given option can be taken to not mean 'false'.
1353 *
1354 * @returns true or false accordingly.
1355 * @param pszValue The value to consider.
1356 */
1357bool VBDrvCommonIsOptionValueTrue(const char *pszValue)
1358{
1359 if (pszValue)
1360 {
1361 char ch;
1362 while ( (ch = *pszValue) != '\0'
1363 && RT_C_IS_SPACE(ch))
1364 pszValue++;
1365
1366 return ch != '\0'
1367 && ch != 'n' /* no */
1368 && ch != 'N' /* NO */
1369 && ch != 'd' /* disabled */
1370 && ch != 'f' /* false*/
1371 && ch != 'F' /* FALSE */
1372 && ch != 'D' /* DISABLED */
1373 && ( (ch != 'o' && ch != 'O') /* off, OFF, Off */
1374 || (pszValue[1] != 'f' && pszValue[1] != 'F') )
1375 && (ch != '0' || pszValue[1] != '\0') /* '0' */
1376 ;
1377 }
1378 return false;
1379}
1380
1381
1382/**
1383 * Processes a option.
1384 *
1385 * This will let the OS specific code have a go at it too.
1386 *
1387 * @param pDevExt The device extension.
1388 * @param pszName The option name, sans prefix.
1389 * @param pszValue The option value.
1390 */
1391void VGDrvCommonProcessOption(PVBOXGUESTDEVEXT pDevExt, const char *pszName, const char *pszValue)
1392{
1393 Log(("VGDrvCommonProcessOption: pszName='%s' pszValue='%s'\n", pszName, pszValue));
1394
1395 if ( RTStrICmpAscii(pszName, "r3_log_to_host") == 0
1396 || RTStrICmpAscii(pszName, "LoggingEnabled") == 0 /*legacy*/ )
1397 pDevExt->fLoggingEnabled = VBDrvCommonIsOptionValueTrue(pszValue);
1398 else if ( RTStrNICmpAscii(pszName, RT_STR_TUPLE("log")) == 0
1399 || RTStrNICmpAscii(pszName, RT_STR_TUPLE("dbg_log")) == 0)
1400 {
1401 bool const fDbgRel = *pszName == 'd' || *pszName == 'D';
1402 const char *pszSubName = &pszName[fDbgRel ? 4 + 3 : 3];
1403 if ( !*pszSubName
1404 || RTStrICmpAscii(pszSubName, "_flags") == 0
1405 || RTStrICmpAscii(pszSubName, "_dest") == 0)
1406 {
1407 PRTLOGGER pLogger = !fDbgRel ? RTLogRelGetDefaultInstance() : RTLogDefaultInstance();
1408 if (pLogger)
1409 {
1410 if (!*pszSubName)
1411 RTLogGroupSettings(pLogger, pszValue);
1412 else if (RTStrICmpAscii(pszSubName, "_flags"))
1413 RTLogFlags(pLogger, pszValue);
1414 else
1415 RTLogDestinations(pLogger, pszValue);
1416 }
1417 }
1418 else if (!VGDrvNativeProcessOption(pDevExt, pszName, pszValue))
1419 LogRel(("VBoxGuest: Ignoring unknown option '%s' (value '%s')\n", pszName, pszValue));
1420 }
1421 else if (!VGDrvNativeProcessOption(pDevExt, pszName, pszValue))
1422 LogRel(("VBoxGuest: Ignoring unknown option '%s' (value '%s')\n", pszName, pszValue));
1423}
1424
1425
1426/**
1427 * Read driver configuration from the host.
1428 *
1429 * This involves connecting to the guest properties service, which means that
1430 * interrupts needs to work and that the calling thread must be able to block.
1431 *
1432 * @param pDevExt The device extension.
1433 */
1434void VGDrvCommonProcessOptionsFromHost(PVBOXGUESTDEVEXT pDevExt)
1435{
1436 /*
1437 * Create a kernel session without our selves, then connect to the HGCM service.
1438 */
1439 PVBOXGUESTSESSION pSession;
1440 int rc = VGDrvCommonCreateKernelSession(pDevExt, &pSession);
1441 if (RT_SUCCESS(rc))
1442 {
1443 union
1444 {
1445 VBGLIOCHGCMCONNECT Connect;
1446 VBGLIOCHGCMDISCONNECT Disconnect;
1447 GuestPropMsgEnumProperties EnumMsg;
1448 } uBuf;
1449
1450 RT_ZERO(uBuf.Connect);
1451 VBGLREQHDR_INIT(&uBuf.Connect.Hdr, HGCM_CONNECT);
1452 uBuf.Connect.u.In.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1453 RTStrCopy(uBuf.Connect.u.In.Loc.u.host.achName, sizeof(uBuf.Connect.u.In.Loc.u.host.achName),
1454 "VBoxGuestPropSvc"); /** @todo Add a define to the header for the name. */
1455 rc = VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_CONNECT, pDevExt, pSession, &uBuf.Connect.Hdr, sizeof(uBuf.Connect));
1456 if (RT_SUCCESS(rc))
1457 {
1458 static const char g_szzPattern[] = "/VirtualBox/GuestAdd/VBoxGuest/*\0";
1459 uint32_t const idClient = uBuf.Connect.u.Out.idClient;
1460 char *pszzStrings = NULL;
1461 uint32_t cbStrings;
1462
1463 /*
1464 * Enumerate all the relevant properties. We try with a 1KB buffer, but
1465 * will double it until we get what we want or go beyond 16KB.
1466 */
1467 for (cbStrings = _1K; cbStrings <= _16K; cbStrings *= 2)
1468 {
1469 pszzStrings = (char *)RTMemAllocZ(cbStrings);
1470 if (pszzStrings)
1471 {
1472 VBGL_HGCM_HDR_INIT(&uBuf.EnumMsg.hdr, idClient, GUEST_PROP_FN_ENUM_PROPS, 3);
1473
1474 uBuf.EnumMsg.patterns.type = VMMDevHGCMParmType_LinAddr;
1475 uBuf.EnumMsg.patterns.u.Pointer.size = sizeof(g_szzPattern);
1476 uBuf.EnumMsg.patterns.u.Pointer.u.linearAddr = (uintptr_t)g_szzPattern;
1477
1478 uBuf.EnumMsg.strings.type = VMMDevHGCMParmType_LinAddr;
1479 uBuf.EnumMsg.strings.u.Pointer.size = cbStrings;
1480 uBuf.EnumMsg.strings.u.Pointer.u.linearAddr = (uintptr_t)pszzStrings;
1481
1482 uBuf.EnumMsg.size.type = VMMDevHGCMParmType_32bit;
1483 uBuf.EnumMsg.size.u.value32 = 0;
1484
1485 rc = VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_CALL(sizeof(uBuf.EnumMsg)), pDevExt, pSession,
1486 &uBuf.EnumMsg.hdr.Hdr, sizeof(uBuf.EnumMsg));
1487 if (RT_SUCCESS(rc))
1488 {
1489 if ( uBuf.EnumMsg.size.type == VMMDevHGCMParmType_32bit
1490 && uBuf.EnumMsg.size.u.value32 <= cbStrings
1491 && uBuf.EnumMsg.size.u.value32 > 0)
1492 cbStrings = uBuf.EnumMsg.size.u.value32;
1493 Log(("VGDrvCommonReadConfigurationFromHost: GUEST_PROP_FN_ENUM_PROPS -> %#x bytes (cbStrings=%#x)\n",
1494 uBuf.EnumMsg.size.u.value32, cbStrings));
1495 break;
1496 }
1497
1498 RTMemFree(pszzStrings);
1499 pszzStrings = NULL;
1500 }
1501 else
1502 {
1503 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to allocate %#x bytes\n", cbStrings));
1504 break;
1505 }
1506 }
1507
1508 /*
1509 * Disconnect and destroy the session.
1510 */
1511 VBGLREQHDR_INIT(&uBuf.Disconnect.Hdr, HGCM_DISCONNECT);
1512 uBuf.Disconnect.u.In.idClient = idClient;
1513 VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_DISCONNECT, pDevExt, pSession, &uBuf.Disconnect.Hdr, sizeof(uBuf.Disconnect));
1514
1515 VGDrvCommonCloseSession(pDevExt, pSession);
1516
1517 /*
1518 * Process the properties if we got any.
1519 *
1520 * The string buffer contains packed strings in groups of four - name, value,
1521 * timestamp (as a decimal string) and flags. It is terminated by four empty
1522 * strings. Layout:
1523 * Name\0Value\0Timestamp\0Flags\0
1524 */
1525 if (pszzStrings)
1526 {
1527 uint32_t off;
1528 for (off = 0; off < cbStrings; off++)
1529 {
1530 /*
1531 * Parse the four fields, checking that it's all plain ASCII w/o any control characters.
1532 */
1533 const char *apszFields[4] = { NULL, NULL, NULL, NULL };
1534 bool fValidFields = true;
1535 unsigned iField;
1536 for (iField = 0; iField < RT_ELEMENTS(apszFields); iField++)
1537 {
1538 apszFields[0] = &pszzStrings[off];
1539 while (off < cbStrings)
1540 {
1541 char ch = pszzStrings[off++];
1542 if ((unsigned)ch < 0x20U || (unsigned)ch > 0x7fU)
1543 {
1544 if (!ch)
1545 break;
1546 if (fValidFields)
1547 Log(("VGDrvCommonReadConfigurationFromHost: Invalid char %#x at %#x (field %u)\n",
1548 ch, off - 1, iField));
1549 fValidFields = false;
1550 }
1551 }
1552 }
1553 if ( off <= cbStrings
1554 && fValidFields
1555 && *apszFields[0] != '\0')
1556 {
1557 /*
1558 * Validate and convert the flags to integer, then process the option.
1559 */
1560 uint32_t fFlags = 0;
1561 rc = GuestPropValidateFlags(apszFields[3], &fFlags);
1562 if (RT_SUCCESS(rc))
1563 {
1564 if (fFlags & GUEST_PROP_F_RDONLYGUEST)
1565 {
1566 apszFields[0] += sizeof(g_szzPattern) - 2;
1567 VGDrvCommonProcessOption(pDevExt, apszFields[0], apszFields[1]);
1568 }
1569 else
1570 LogRel(("VBoxGuest: Ignoring '%s' as it does not have RDONLYGUEST set\n", apszFields[0]));
1571 }
1572 else
1573 LogRel(("VBoxGuest: Invalid flags '%s' for '%s': %Rrc\n", apszFields[2], apszFields[0], rc));
1574 }
1575 else if (off < cbStrings)
1576 {
1577 LogRel(("VBoxGuest: Malformed guest properties enum result!\n"));
1578 Log(("VBoxGuest: off=%#x cbStrings=%#x\n%.*Rhxd\n", off, cbStrings, cbStrings, pszzStrings));
1579 break;
1580 }
1581 else if (!fValidFields)
1582 LogRel(("VBoxGuest: Ignoring %.*Rhxs as it has invalid characters in one or more fields\n",
1583 (int)strlen(apszFields[0]), apszFields[0]));
1584 else
1585 break;
1586 }
1587
1588 RTMemFree(pszzStrings);
1589 }
1590 else
1591 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to enumerate '%s': %Rrc\n", g_szzPattern, rc));
1592
1593 }
1594 else
1595 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to connect: %Rrc\n", rc));
1596 }
1597 else
1598 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to connect: %Rrc\n", rc));
1599}
1600
1601
1602/**
1603 * Destroys the VBoxGuest device extension.
1604 *
1605 * The native code should call this before the driver is unloaded,
1606 * but don't call this on shutdown.
1607 *
1608 * @param pDevExt The device extension.
1609 */
1610void VGDrvCommonDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
1611{
1612 Log(("VGDrvCommonDeleteDevExt:\n"));
1613 Log(("VBoxGuest: The additions driver is terminating.\n"));
1614 VGDrvCommonDeleteDevExtResources(pDevExt);
1615 VGDrvCommonDeleteDevExtFundament(pDevExt);
1616 VGDrvCommonDestroyLoggers();
1617}
1618
1619
1620/**
1621 * Creates a VBoxGuest user session.
1622 *
1623 * The native code calls this when a ring-3 client opens the device.
1624 * Use VGDrvCommonCreateKernelSession when a ring-0 client connects.
1625 *
1626 * @returns VBox status code.
1627 * @param pDevExt The device extension.
1628 * @param fRequestor VMMDEV_REQUESTOR_XXX.
1629 * @param ppSession Where to store the session on success.
1630 */
1631int VGDrvCommonCreateUserSession(PVBOXGUESTDEVEXT pDevExt, uint32_t fRequestor, PVBOXGUESTSESSION *ppSession)
1632{
1633 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1634 if (RT_UNLIKELY(!pSession))
1635 {
1636 LogRel(("VGDrvCommonCreateUserSession: no memory!\n"));
1637 return VERR_NO_MEMORY;
1638 }
1639
1640 pSession->Process = RTProcSelf();
1641 pSession->R0Process = RTR0ProcHandleSelf();
1642 pSession->pDevExt = pDevExt;
1643 pSession->fRequestor = fRequestor;
1644 pSession->fUserSession = RT_BOOL(fRequestor & VMMDEV_REQUESTOR_USER_DEVICE);
1645 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1646 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1647 pDevExt->cSessions++;
1648 RTSpinlockRelease(pDevExt->SessionSpinlock);
1649
1650 *ppSession = pSession;
1651 LogFlow(("VGDrvCommonCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1652 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1653 return VINF_SUCCESS;
1654}
1655
1656
1657/**
1658 * Creates a VBoxGuest kernel session.
1659 *
1660 * The native code calls this when a ring-0 client connects to the device.
1661 * Use VGDrvCommonCreateUserSession when a ring-3 client opens the device.
1662 *
1663 * @returns VBox status code.
1664 * @param pDevExt The device extension.
1665 * @param ppSession Where to store the session on success.
1666 */
1667int VGDrvCommonCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1668{
1669 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1670 if (RT_UNLIKELY(!pSession))
1671 {
1672 LogRel(("VGDrvCommonCreateKernelSession: no memory!\n"));
1673 return VERR_NO_MEMORY;
1674 }
1675
1676 pSession->Process = NIL_RTPROCESS;
1677 pSession->R0Process = NIL_RTR0PROCESS;
1678 pSession->pDevExt = pDevExt;
1679 pSession->fRequestor = VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER
1680 | VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
1681 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1682 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1683 pDevExt->cSessions++;
1684 RTSpinlockRelease(pDevExt->SessionSpinlock);
1685
1686 *ppSession = pSession;
1687 LogFlow(("VGDrvCommonCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1688 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1689 return VINF_SUCCESS;
1690}
1691
1692
1693/**
1694 * Closes a VBoxGuest session.
1695 *
1696 * @param pDevExt The device extension.
1697 * @param pSession The session to close (and free).
1698 */
1699void VGDrvCommonCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1700{
1701#ifdef VBOX_WITH_HGCM
1702 unsigned i;
1703#endif
1704 LogFlow(("VGDrvCommonCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1705 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1706
1707 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1708 RTListNodeRemove(&pSession->ListNode);
1709 pDevExt->cSessions--;
1710 RTSpinlockRelease(pDevExt->SessionSpinlock);
1711 vgdrvAcquireSessionCapabilities(pDevExt, pSession, 0, UINT32_MAX, VBGL_IOC_AGC_FLAGS_DEFAULT, true /*fSessionTermination*/);
1712 vgdrvSetSessionCapabilities(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/,
1713 NULL /*pfSessionCaps*/, NULL /*pfGlobalCaps*/, true /*fSessionTermination*/);
1714 vgdrvSetSessionEventFilter(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1715 vgdrvSetSessionMouseStatus(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1716
1717 vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
1718
1719#ifdef VBOX_WITH_HGCM
1720 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1721 if (pSession->aHGCMClientIds[i])
1722 {
1723 uint32_t idClient = pSession->aHGCMClientIds[i];
1724 pSession->aHGCMClientIds[i] = 0;
1725 Log(("VGDrvCommonCloseSession: disconnecting client id %#RX32\n", idClient));
1726 VbglR0HGCMInternalDisconnect(idClient, VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV,
1727 vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1728 }
1729#endif
1730
1731 pSession->pDevExt = NULL;
1732 pSession->Process = NIL_RTPROCESS;
1733 pSession->R0Process = NIL_RTR0PROCESS;
1734 vgdrvCloseMemBalloon(pDevExt, pSession);
1735 RTMemFree(pSession);
1736}
1737
1738
1739/**
1740 * Allocates a wait-for-event entry.
1741 *
1742 * @returns The wait-for-event entry.
1743 * @param pDevExt The device extension.
1744 * @param pSession The session that's allocating this. Can be NULL.
1745 */
1746static PVBOXGUESTWAIT vgdrvWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1747{
1748 /*
1749 * Allocate it one way or the other.
1750 */
1751 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1752 if (pWait)
1753 {
1754 RTSpinlockAcquire(pDevExt->EventSpinlock);
1755
1756 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1757 if (pWait)
1758 RTListNodeRemove(&pWait->ListNode);
1759
1760 RTSpinlockRelease(pDevExt->EventSpinlock);
1761 }
1762 if (!pWait)
1763 {
1764 int rc;
1765
1766 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1767 if (!pWait)
1768 {
1769 LogRelMax(32, ("vgdrvWaitAlloc: out-of-memory!\n"));
1770 return NULL;
1771 }
1772
1773 rc = RTSemEventMultiCreate(&pWait->Event);
1774 if (RT_FAILURE(rc))
1775 {
1776 LogRelMax(32, ("vgdrvWaitAlloc: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1777 RTMemFree(pWait);
1778 return NULL;
1779 }
1780
1781 pWait->ListNode.pNext = NULL;
1782 pWait->ListNode.pPrev = NULL;
1783 }
1784
1785 /*
1786 * Zero members just as an precaution.
1787 */
1788 pWait->fReqEvents = 0;
1789 pWait->fResEvents = 0;
1790#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1791 pWait->fPendingWakeUp = false;
1792 pWait->fFreeMe = false;
1793#endif
1794 pWait->pSession = pSession;
1795#ifdef VBOX_WITH_HGCM
1796 pWait->pHGCMReq = NULL;
1797#endif
1798 RTSemEventMultiReset(pWait->Event);
1799 return pWait;
1800}
1801
1802
1803/**
1804 * Frees the wait-for-event entry.
1805 *
1806 * The caller must own the wait spinlock !
1807 * The entry must be in a list!
1808 *
1809 * @param pDevExt The device extension.
1810 * @param pWait The wait-for-event entry to free.
1811 */
1812static void vgdrvWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1813{
1814 pWait->fReqEvents = 0;
1815 pWait->fResEvents = 0;
1816#ifdef VBOX_WITH_HGCM
1817 pWait->pHGCMReq = NULL;
1818#endif
1819#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1820 Assert(!pWait->fFreeMe);
1821 if (pWait->fPendingWakeUp)
1822 pWait->fFreeMe = true;
1823 else
1824#endif
1825 {
1826 RTListNodeRemove(&pWait->ListNode);
1827 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1828 }
1829}
1830
1831
1832/**
1833 * Frees the wait-for-event entry.
1834 *
1835 * @param pDevExt The device extension.
1836 * @param pWait The wait-for-event entry to free.
1837 */
1838static void vgdrvWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1839{
1840 RTSpinlockAcquire(pDevExt->EventSpinlock);
1841 vgdrvWaitFreeLocked(pDevExt, pWait);
1842 RTSpinlockRelease(pDevExt->EventSpinlock);
1843}
1844
1845
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
/**
 * Processes the wake-up list.
 *
 * All entries in the wake-up list gets signalled and moved to the woken-up
 * list.
 * At least on Windows this function can be invoked concurrently from
 * different VCPUs. So, be thread-safe.
 *
 * @param   pDevExt     The device extension.
 */
void VGDrvCommonWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    /* Unlocked pre-check; worst case a racing consumer empties the list first. */
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        for (;;)
        {
            int rc;
            PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            /* Prevent other threads from accessing pWait when spinlock is released. */
            RTListNodeRemove(&pWait->ListNode);

            /* fPendingWakeUp makes vgdrvWaitFreeLocked defer recycling while we
               signal the semaphore with the spinlock dropped. */
            pWait->fPendingWakeUp = true;
            RTSpinlockRelease(pDevExt->EventSpinlock);

            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            RTSpinlockAcquire(pDevExt->EventSpinlock);
            Assert(pWait->ListNode.pNext == NULL && pWait->ListNode.pPrev == NULL);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            pWait->fPendingWakeUp = false;
            if (RT_LIKELY(!pWait->fFreeMe))
            { /* likely */ }
            else
            {
                /* The waiter tried to free the entry while we were signalling it;
                   complete the deferred free now that we hold the lock again. */
                pWait->fFreeMe = false;
                vgdrvWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);
    }
}
#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1893
1894
1895/**
1896 * Implements the fast (no input or output) type of IOCtls.
1897 *
1898 * This is currently just a placeholder stub inherited from the support driver code.
1899 *
1900 * @returns VBox status code.
1901 * @param iFunction The IOCtl function number.
1902 * @param pDevExt The device extension.
1903 * @param pSession The session.
1904 */
1905int VGDrvCommonIoCtlFast(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1906{
1907 LogFlow(("VGDrvCommonIoCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1908
1909 NOREF(iFunction);
1910 NOREF(pDevExt);
1911 NOREF(pSession);
1912 return VERR_NOT_SUPPORTED;
1913}
1914
1915
1916/**
1917 * Gets the driver I/O control interface version, maybe adjusting it for
1918 * backwards compatibility.
1919 *
1920 * The adjusting is currently not implemented as we only have one major I/O
1921 * control interface version out there to support. This is something we will
1922 * implement as needed.
1923 *
1924 * returns IPRT status code.
1925 * @param pDevExt The device extension.
1926 * @param pSession The session.
1927 * @param pReq The request info.
1928 */
1929static int vgdrvIoCtl_DriverVersionInfo(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCDRIVERVERSIONINFO pReq)
1930{
1931 int rc;
1932 LogFlow(("VBGL_IOCTL_DRIVER_VERSION_INFO: uReqVersion=%#x uMinVersion=%#x uReserved1=%#x uReserved2=%#x\n",
1933 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved1, pReq->u.In.uReserved2));
1934 RT_NOREF2(pDevExt, pSession);
1935
1936 /*
1937 * Input validation.
1938 */
1939 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1940 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1941 {
1942 /*
1943 * Match the version.
1944 * The current logic is very simple, match the major interface version.
1945 */
1946 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
1947 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
1948 rc = VINF_SUCCESS;
1949 else
1950 {
1951 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1952 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
1953 rc = VERR_VERSION_MISMATCH;
1954 }
1955 }
1956 else
1957 {
1958 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1959 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1960 rc = VERR_INVALID_PARAMETER;
1961 }
1962
1963 pReq->u.Out.uSessionVersion = RT_SUCCESS(rc) ? VBGL_IOC_VERSION : UINT32_MAX;
1964 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1965 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1966 pReq->u.Out.uReserved1 = 0;
1967 pReq->u.Out.uReserved2 = 0;
1968 return rc;
1969}
1970
1971
1972/**
1973 * Similar to vgdrvIoCtl_DriverVersionInfo, except its for IDC.
1974 *
1975 * returns IPRT status code.
1976 * @param pDevExt The device extension.
1977 * @param pSession The session.
1978 * @param pReq The request info.
1979 */
1980static int vgdrvIoCtl_IdcConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCCONNECT pReq)
1981{
1982 int rc;
1983 LogFlow(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x uReqVersion=%#x uMinVersion=%#x uReserved=%#x\n",
1984 pReq->u.In.u32MagicCookie, pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved));
1985 Assert(pSession != NULL);
1986 RT_NOREF(pDevExt);
1987
1988 /*
1989 * Input validation.
1990 */
1991 if (pReq->u.In.u32MagicCookie == VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE)
1992 {
1993 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1994 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1995 {
1996 /*
1997 * Match the version.
1998 * The current logic is very simple, match the major interface version.
1999 */
2000 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
2001 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
2002 {
2003 pReq->u.Out.pvSession = pSession;
2004 pReq->u.Out.uSessionVersion = VBGL_IOC_VERSION;
2005 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
2006 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2007 pReq->u.Out.uReserved1 = 0;
2008 pReq->u.Out.pvReserved2 = NULL;
2009 return VINF_SUCCESS;
2010
2011 }
2012 LogRel(("VBGL_IOCTL_IDC_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2013 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
2014 rc = VERR_VERSION_MISMATCH;
2015 }
2016 else
2017 {
2018 LogRel(("VBGL_IOCTL_IDC_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
2019 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2020 rc = VERR_INVALID_PARAMETER;
2021 }
2022
2023 pReq->u.Out.pvSession = NULL;
2024 pReq->u.Out.uSessionVersion = UINT32_MAX;
2025 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
2026 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2027 pReq->u.Out.uReserved1 = 0;
2028 pReq->u.Out.pvReserved2 = NULL;
2029 }
2030 else
2031 {
2032 LogRel(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2033 pReq->u.In.u32MagicCookie, VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE));
2034 rc = VERR_INVALID_PARAMETER;
2035 }
2036 return rc;
2037}
2038
2039
2040/**
2041 * Counterpart to vgdrvIoCtl_IdcConnect, destroys the session.
2042 *
2043 * returns IPRT status code.
2044 * @param pDevExt The device extension.
2045 * @param pSession The session.
2046 * @param pReq The request info.
2047 */
2048static int vgdrvIoCtl_IdcDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCDISCONNECT pReq)
2049{
2050 LogFlow(("VBGL_IOCTL_IDC_DISCONNECT: pvSession=%p vs pSession=%p\n", pReq->u.In.pvSession, pSession));
2051 RT_NOREF(pDevExt);
2052 Assert(pSession != NULL);
2053
2054 if (pReq->u.In.pvSession == pSession)
2055 {
2056 VGDrvCommonCloseSession(pDevExt, pSession);
2057 return VINF_SUCCESS;
2058 }
2059 LogRel(("VBGL_IOCTL_IDC_DISCONNECT: In.pvSession=%p is not equal to pSession=%p!\n", pReq->u.In.pvSession, pSession));
2060 return VERR_INVALID_PARAMETER;
2061}
2062
2063
2064/**
2065 * Return the VMM device I/O info.
2066 *
2067 * returns IPRT status code.
2068 * @param pDevExt The device extension.
2069 * @param pInfo The request info.
2070 * @note Ring-0 only, caller checked.
2071 */
2072static int vgdrvIoCtl_GetVMMDevIoInfo(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCGETVMMDEVIOINFO pInfo)
2073{
2074 LogFlow(("VBGL_IOCTL_GET_VMMDEV_IO_INFO\n"));
2075
2076 pInfo->u.Out.IoPort = pDevExt->IOPortBase;
2077 pInfo->u.Out.pvVmmDevMapping = pDevExt->pVMMDevMemory;
2078 pInfo->u.Out.auPadding[0] = 0;
2079#if HC_ARCH_BITS != 32
2080 pInfo->u.Out.auPadding[1] = 0;
2081 pInfo->u.Out.auPadding[2] = 0;
2082#endif
2083 return VINF_SUCCESS;
2084}
2085
2086
2087/**
2088 * Set the callback for the kernel mouse handler.
2089 *
2090 * returns IPRT status code.
2091 * @param pDevExt The device extension.
2092 * @param pNotify The new callback information.
2093 */
2094static int vgdrvIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCSETMOUSENOTIFYCALLBACK pNotify)
2095{
2096 LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK: pfnNotify=%p pvUser=%p\n", pNotify->u.In.pfnNotify, pNotify->u.In.pvUser));
2097
2098#ifdef VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT
2099 VGDrvNativeSetMouseNotifyCallback(pDevExt, pNotify);
2100#else
2101 RTSpinlockAcquire(pDevExt->EventSpinlock);
2102 pDevExt->pfnMouseNotifyCallback = pNotify->u.In.pfnNotify;
2103 pDevExt->pvMouseNotifyCallbackArg = pNotify->u.In.pvUser;
2104 RTSpinlockRelease(pDevExt->EventSpinlock);
2105#endif
2106 return VINF_SUCCESS;
2107}
2108
2109
2110/**
2111 * Worker vgdrvIoCtl_WaitEvent.
2112 *
2113 * The caller enters the spinlock, we leave it.
2114 *
2115 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
2116 */
2117DECLINLINE(int) vbdgCheckWaitEventCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2118 PVBGLIOCWAITFOREVENTS pInfo, int iEvent, const uint32_t fReqEvents)
2119{
2120 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
2121 if (fMatches & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
2122 fMatches &= vgdrvGetAllowedEventMaskForSession(pDevExt, pSession);
2123 if (fMatches || pSession->fPendingCancelWaitEvents)
2124 {
2125 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
2126 RTSpinlockRelease(pDevExt->EventSpinlock);
2127
2128 pInfo->u.Out.fEvents = fMatches;
2129 if (fReqEvents & ~((uint32_t)1 << iEvent))
2130 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
2131 else
2132 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
2133 pSession->fPendingCancelWaitEvents = false;
2134 return VINF_SUCCESS;
2135 }
2136
2137 RTSpinlockRelease(pDevExt->EventSpinlock);
2138 return VERR_TIMEOUT;
2139}
2140
2141
/**
 * Implements VBGL_IOCTL_WAIT_FOR_EVENTS: waits for any of the requested
 * VMMDev events to occur, with an optional timeout.
 *
 * @returns IPRT status code: VINF_SUCCESS with u.Out.fEvents set, or
 *          VERR_TIMEOUT, VERR_INTERRUPTED, VERR_NO_MEMORY,
 *          VERR_INVALID_PARAMETER, VERR_SEM_DESTROYED (driver unload).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The request info (input is consumed, output set).
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 */
static int vgdrvIoCtl_WaitForEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                    PVBGLIOCWAITFOREVENTS pInfo, bool fInterruptible)
{
    uint32_t const cMsTimeout = pInfo->u.In.cMsTimeOut;
    const uint32_t fReqEvents = pInfo->u.In.fEvents;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    pInfo->u.Out.fEvents = 0; /* Note! This overwrites pInfo->u.In.* fields! */

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        LogRel(("VBOXGUEST_IOCTL_WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     * (vbdgCheckWaitEventCondition releases the spinlock on both paths.)
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
        return rc;

    /* A zero timeout means poll-only: no events pending, so time out now. */
    if (!cMsTimeout)
    {
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = vgdrvWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
    {
        vgdrvWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    vgdrvWaitFreeLocked(pDevExt, pWait);
    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     * (UINT32_MAX in fResEvents marks a cancelled wait, see
     * vgdrvIoCtl_CancelAllWaitEvents.)
     */
    if (   fResEvents
        && fResEvents != UINT32_MAX)
    {
        pInfo->u.Out.fEvents = fResEvents;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
        else
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        rc = VERR_INTERRUPTED;
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    else
    {
        /* The wait succeeded but no events were recorded - should not happen. */
        if (RT_SUCCESS(rc))
        {
            LogRelMax(32, ("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
2251
2252
/** @todo the semantics of this IoCtl have been tightened, so that no calls to
 *        VBOXGUEST_IOCTL_WAITEVENT are allowed in a session after it has been
 *        called.  Change the code to make calls to VBOXGUEST_IOCTL_WAITEVENT made
 *        after that to return VERR_INTERRUPTED or something appropriate. */
/**
 * Cancels all pending VBOXGUEST_IOCTL_WAITEVENT waits for the given session.
 *
 * @returns VINF_SUCCESS.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waiters shall be interrupted.
 */
static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;
    /* Was as least one WAITEVENT in process for this session? If not we
     * set a flag that the next call should be interrupted immediately. This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool fCancelledOne = false;

    LogFlow(("VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            fCancelledOne = true;
            /* UINT32_MAX signals "cancelled" to vgdrvIoCtl_WaitForEvents. */
            pWait->fResEvents = UINT32_MAX;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
    RTSpinlockRelease(pDevExt->EventSpinlock);
    Assert(rc == 0);
    NOREF(rc);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* Perform the deferred signalling now that the spinlock is released. */
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
2301
2302
2303/**
2304 * Checks if the VMM request is allowed in the context of the given session.
2305 *
2306 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
2307 * @param pDevExt The device extension.
2308 * @param pSession The calling session.
2309 * @param enmType The request type.
2310 * @param pReqHdr The request.
2311 */
2312static int vgdrvCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
2313 VMMDevRequestHeader const *pReqHdr)
2314{
2315 /*
2316 * Categorize the request being made.
2317 */
2318 /** @todo This need quite some more work! */
2319 enum
2320 {
2321 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
2322 } enmRequired;
2323 RT_NOREF1(pDevExt);
2324
2325 switch (enmType)
2326 {
2327 /*
2328 * Deny access to anything we don't know or provide specialized I/O controls for.
2329 */
2330#ifdef VBOX_WITH_HGCM
2331 case VMMDevReq_HGCMConnect:
2332 case VMMDevReq_HGCMDisconnect:
2333# ifdef VBOX_WITH_64_BITS_GUESTS
2334 case VMMDevReq_HGCMCall64:
2335# endif
2336 case VMMDevReq_HGCMCall32:
2337 case VMMDevReq_HGCMCancel:
2338 case VMMDevReq_HGCMCancel2:
2339#endif /* VBOX_WITH_HGCM */
2340 case VMMDevReq_SetGuestCapabilities:
2341 default:
2342 enmRequired = kLevel_NoOne;
2343 break;
2344
2345 /*
2346 * There are a few things only this driver can do (and it doesn't use
2347 * the VMMRequst I/O control route anyway, but whatever).
2348 */
2349 case VMMDevReq_ReportGuestInfo:
2350 case VMMDevReq_ReportGuestInfo2:
2351 case VMMDevReq_GetHypervisorInfo:
2352 case VMMDevReq_SetHypervisorInfo:
2353 case VMMDevReq_RegisterPatchMemory:
2354 case VMMDevReq_DeregisterPatchMemory:
2355 case VMMDevReq_GetMemBalloonChangeRequest:
2356 case VMMDevReq_ChangeMemBalloon:
2357 enmRequired = kLevel_OnlyVBoxGuest;
2358 break;
2359
2360 /*
2361 * Trusted users apps only.
2362 */
2363 case VMMDevReq_QueryCredentials:
2364 case VMMDevReq_ReportCredentialsJudgement:
2365 case VMMDevReq_RegisterSharedModule:
2366 case VMMDevReq_UnregisterSharedModule:
2367 case VMMDevReq_WriteCoreDump:
2368 case VMMDevReq_GetCpuHotPlugRequest:
2369 case VMMDevReq_SetCpuHotPlugStatus:
2370 case VMMDevReq_CheckSharedModules:
2371 case VMMDevReq_GetPageSharingStatus:
2372 case VMMDevReq_DebugIsPageShared:
2373 case VMMDevReq_ReportGuestStats:
2374 case VMMDevReq_ReportGuestUserState:
2375 case VMMDevReq_GetStatisticsChangeRequest:
2376 enmRequired = kLevel_TrustedUsers;
2377 break;
2378
2379 /*
2380 * Anyone.
2381 */
2382 case VMMDevReq_GetMouseStatus:
2383 case VMMDevReq_SetMouseStatus:
2384 case VMMDevReq_SetPointerShape:
2385 case VMMDevReq_GetHostVersion:
2386 case VMMDevReq_Idle:
2387 case VMMDevReq_GetHostTime:
2388 case VMMDevReq_SetPowerStatus:
2389 case VMMDevReq_AcknowledgeEvents:
2390 case VMMDevReq_CtlGuestFilterMask:
2391 case VMMDevReq_ReportGuestStatus:
2392 case VMMDevReq_GetDisplayChangeRequest:
2393 case VMMDevReq_VideoModeSupported:
2394 case VMMDevReq_GetHeightReduction:
2395 case VMMDevReq_GetDisplayChangeRequest2:
2396 case VMMDevReq_VideoModeSupported2:
2397 case VMMDevReq_VideoAccelEnable:
2398 case VMMDevReq_VideoAccelFlush:
2399 case VMMDevReq_VideoSetVisibleRegion:
2400 case VMMDevReq_VideoUpdateMonitorPositions:
2401 case VMMDevReq_GetDisplayChangeRequestEx:
2402 case VMMDevReq_GetDisplayChangeRequestMulti:
2403 case VMMDevReq_GetSeamlessChangeRequest:
2404 case VMMDevReq_GetVRDPChangeRequest:
2405 case VMMDevReq_LogString:
2406 case VMMDevReq_GetSessionId:
2407 enmRequired = kLevel_AllUsers;
2408 break;
2409
2410 /*
2411 * Depends on the request parameters...
2412 */
2413 /** @todo this have to be changed into an I/O control and the facilities
2414 * tracked in the session so they can automatically be failed when the
2415 * session terminates without reporting the new status.
2416 *
2417 * The information presented by IGuest is not reliable without this! */
2418 case VMMDevReq_ReportGuestCapabilities:
2419 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
2420 {
2421 case VBoxGuestFacilityType_All:
2422 case VBoxGuestFacilityType_VBoxGuestDriver:
2423 enmRequired = kLevel_OnlyVBoxGuest;
2424 break;
2425 case VBoxGuestFacilityType_VBoxService:
2426 enmRequired = kLevel_TrustedUsers;
2427 break;
2428 case VBoxGuestFacilityType_VBoxTrayClient:
2429 case VBoxGuestFacilityType_Seamless:
2430 case VBoxGuestFacilityType_Graphics:
2431 default:
2432 enmRequired = kLevel_AllUsers;
2433 break;
2434 }
2435 break;
2436 }
2437
2438 /*
2439 * Check against the session.
2440 */
2441 switch (enmRequired)
2442 {
2443 default:
2444 case kLevel_NoOne:
2445 break;
2446 case kLevel_OnlyVBoxGuest:
2447 case kLevel_OnlyKernel:
2448 if (pSession->R0Process == NIL_RTR0PROCESS)
2449 return VINF_SUCCESS;
2450 break;
2451 case kLevel_TrustedUsers:
2452 if (pSession->fUserSession)
2453 break;
2454 RT_FALL_THRU();
2455 case kLevel_AllUsers:
2456 return VINF_SUCCESS;
2457 }
2458
2459 return VERR_PERMISSION_DENIED;
2460}
2461
/**
 * Handles VBOXGUEST_IOCTL_VMMREQUEST: forwards a raw VMMDev request from the
 * caller to the host after validation and permission checking.
 *
 * @returns IPRT status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pReqHdr     The request buffer (header followed by body; in/out).
 * @param   cbData      The size of the buffer pReqHdr points to.
 */
static int vgdrvIoCtl_VMMDevRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                    VMMDevRequestHeader *pReqHdr, size_t cbData)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = (uint32_t)vmmdevGetRequestSize(enmType);

    LogFlow(("VBOXGUEST_IOCTL_VMMREQUEST: type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
                cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
                cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGR0Verify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /* Reject request types the calling session is not permitted to make. */
    rc = vgdrvCheckIfVmmReqIsAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglR0GRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);
    Assert(pReqCopy->reserved1 == cbReq);
    pReqCopy->reserved1 = 0; /* VGDrvCommonIoCtl or caller sets cbOut, so clear it. */
    pReqCopy->fRequestor = pSession->fRequestor;

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglR0GRPerform(pReqCopy);
    if (   RT_SUCCESS(rc)
        && RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly host-modified) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        pReqHdr->reserved1 = cbReq; /* preserve cbOut */
    }
    else if (RT_FAILURE(rc))
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: VbglR0GRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* The transport worked but the host rejected the request. */
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglR0GRFree(pReqCopy);
    return rc;
}
2547
2548
2549#ifdef VBOX_WITH_HGCM
2550
2551AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
2552
/**
 * Worker for vgdrvHgcmAsyncWaitCallback*.
 *
 * Blocks until the HGCM request is marked done (VBOX_HGCM_REQ_DONE), the wait
 * is interrupted, or the timeout expires.
 *
 * @returns VINF_SUCCESS when the request completed; VERR_SEM_DESTROYED if the
 *          event semaphore was destroyed (driver unload); otherwise the wait
 *          status (VERR_TIMEOUT, VERR_INTERRUPTED, ...).
 * @param   pHdr            The HGCM request header to watch.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        The wait timeout in milliseconds.
 */
static int vgdrvHgcmAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                            bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockRelease(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);

        pWait = vgdrvWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        /* Out of wait entries: either bail (interruptible) or briefly sleep
           and retry the allocation. */
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        vgdrvWaitFreeLocked(pDevExt, pWait);
        RTSpinlockRelease(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockRelease(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* Semaphore destroyed means driver unload; do not touch pWait anymore. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (    !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("vgdrvHgcmAsyncWaitCallback: wait failed! %Rrc\n", rc));

    vgdrvWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
2621
2622
2623/**
2624 * This is a callback for dealing with async waits.
2625 *
2626 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2627 */
2628static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2629{
2630 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2631 LogFlow(("vgdrvHgcmAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
2632 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2633 false /* fInterruptible */, u32User /* cMillies */);
2634}
2635
2636
2637/**
2638 * This is a callback for dealing with async waits with a timeout.
2639 *
2640 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2641 */
2642static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2643{
2644 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2645 LogFlow(("vgdrvHgcmAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
2646 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2647 true /* fInterruptible */, u32User /* cMillies */);
2648}
2649
2650
/**
 * Handles VBGL_IOCTL_HGCM_CONNECT: connects to an HGCM service and records
 * the new client id in the session's client id table.
 *
 * @returns IPRT status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session (owns the resulting client id).
 * @param   pInfo       The connect request info (in/out).
 */
static int vgdrvIoCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCONNECT pInfo)
{
    int rc;
    HGCMCLIENTID idClient = 0;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: %.128s\n",
         pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->u.In.Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(&pInfo->u.In.Loc, pSession->fRequestor, &idClient,
                                   vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: idClient=%RX32 (rc=%Rrc)\n", idClient, rc));
    if (RT_SUCCESS(rc))
    {
        /*
         * Append the client id to the client id table.
         * If the table has somehow become filled up, we'll disconnect the session.
         */
        unsigned i;
        RTSpinlockAcquire(pDevExt->SessionSpinlock);
        for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
            if (!pSession->aHGCMClientIds[i])
            {
                pSession->aHGCMClientIds[i] = idClient;
                break;
            }
        RTSpinlockRelease(pDevExt->SessionSpinlock);
        if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
        {
            /* No free slot: undo the connect and fail the I/O control. */
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
            VbglR0HGCMInternalDisconnect(idClient, pSession->fRequestor, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);

            pInfo->u.Out.idClient = 0;
            return VERR_TOO_MANY_OPEN_FILES;
        }
    }
    /* On connect failure idClient is still 0 from the initializer above. */
    pInfo->u.Out.idClient = idClient;
    return rc;
}
2695
2696
/**
 * Handles VBGL_IOCTL_HGCM_DISCONNECT: disconnects an HGCM client owned by the
 * session and releases its slot in the client id table.
 *
 * @returns IPRT status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       The disconnect request info.
 */
static int vgdrvIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMDISCONNECT pInfo)
{
    /*
     * Validate the client id and invalidate its entry while we're in the call.
     * (UINT32_MAX marks the slot as busy so concurrent callers cannot reuse it.)
     */
    int rc;
    const uint32_t idClient = pInfo->u.In.idClient;
    unsigned i;
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == idClient)
        {
            pSession->aHGCMClientIds[i] = UINT32_MAX;
            break;
        }
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglR0HGCMInternalDisconnect call will invoke the callback if the
     * HGCM call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
    rc = VbglR0HGCMInternalDisconnect(idClient, pSession->fRequestor, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    LogFlow(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: rc=%Rrc\n", rc));

    /* Update the client id array according to the result: free the slot on
       success, restore the id on failure so it can be retried. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    if (pSession->aHGCMClientIds[i] == UINT32_MAX)
        pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) ? 0 : idClient;
    RTSpinlockRelease(pDevExt->SessionSpinlock);

    return rc;
}
2736
2737
/**
 * Common worker for the HGCM call I/O controls.
 *
 * @returns IPRT status code (the HGCM header status on transport success).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The call request info.
 * @param   cMillies        The call timeout in milliseconds.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   f32bit          Set if this is a 32-bit call structure (64-bit host).
 * @param   fUserData       Set if parameter data must be treated as user mode
 *                          memory even for kernel sessions.
 * @param   cbExtra         Size of any extra data preceding the call info.
 * @param   cbData          Total size of the buffer pInfo lives in.
 */
static int vgdrvIoCtl_HGCMCallInner(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
                                    uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
                                    size_t cbExtra, size_t cbData)
{
    const uint32_t u32ClientId = pInfo->u32ClientID;
    uint32_t fFlags;
    size_t cbActual;
    unsigned i;
    int rc;

    /*
     * Some more validations.
     */
    if (RT_LIKELY(pInfo->cParms <= VMMDEV_MAX_HGCM_PARMS)) /* (Just make sure it doesn't overflow the next check.) */
    { /* likely */}
    else
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
        return VERR_INVALID_PARAMETER;
    }

    /* The buffer must hold the header plus cParms parameter structures
       (32-bit layout differs in size on 64-bit hosts). */
    cbActual = cbExtra + sizeof(*pInfo);
#ifdef RT_ARCH_AMD64
    if (f32bit)
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
    else
#endif
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
    if (RT_LIKELY(cbData >= cbActual))
    { /* likely */}
    else
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
                cbData, cbData, cbActual, cbActual));
        return VERR_INVALID_PARAMETER;
    }
    pInfo->Hdr.cbOut = (uint32_t)cbActual;

    /*
     * Validate the client id: it must be one this session connected.
     */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == u32ClientId)
            break;
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (RT_LIKELY(i < RT_ELEMENTS(pSession->aHGCMClientIds)))
    { /* likely */}
    else
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglHGCMCall call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. This function can
     * deal with cancelled requests, so we let user more requests
     * be interruptible (should add a flag for this later I guess).
     */
    LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
    fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
    uint32_t cbInfo = (uint32_t)(cbData - cbExtra);
#ifdef RT_ARCH_AMD64
    if (f32bit)
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, pSession->fRequestor,
                                          vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, pSession->fRequestor,
                                          vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    else
#endif
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, pSession->fRequestor,
                                        vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, pSession->fRequestor,
                                        vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    if (RT_SUCCESS(rc))
    {
        /* Transport succeeded; the actual call status is in the header. */
        rc = pInfo->Hdr.rc;
        LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: result=%Rrc\n", rc));
    }
    else
    {
        if (   rc != VERR_INTERRUPTED
            && rc != VERR_TIMEOUT)
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
        else
            Log(("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
    }
    return rc;
}
2836
2837
2838static int vgdrvIoCtl_HGCMCallWrapper(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
2839 bool f32bit, bool fUserData, size_t cbData)
2840{
2841 return vgdrvIoCtl_HGCMCallInner(pDevExt, pSession, pInfo, pInfo->cMsTimeout,
2842 pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2843 f32bit, fUserData, 0 /*cbExtra*/, cbData);
2844}
2845
2846
/**
 * Handles a fast HGCM call from another driver.
 *
 * The driver has provided a fully assembled HGCM call request and all we need
 * to do is send it to the host and do the wait processing.
 *
 * @returns VBox status code of the request submission part.
 * @param   pDevExt     The device extension.
 * @param   pCallReq    The call request.
 */
static int vgdrvIoCtl_HGCMFastCall(PVBOXGUESTDEVEXT pDevExt, VBGLIOCIDCHGCMFASTCALL volatile *pCallReq)
{
    /* The VMMDevHGCMCall request immediately follows the fast-call header. */
    VMMDevHGCMCall volatile *pHgcmCall = (VMMDevHGCMCall volatile *)(pCallReq + 1);
    int rc;

    /*
     * Check out the physical address: its page offset must match the virtual
     * address of the embedded request.
     */
    Assert((pCallReq->GCPhysReq & PAGE_OFFSET_MASK) == ((uintptr_t)pHgcmCall & PAGE_OFFSET_MASK));

    /* Interruptible waiting would require cancellation support; not done yet. */
    AssertReturn(!pCallReq->fInterruptible, VERR_NOT_IMPLEMENTED);

    /*
     * Submit the request by writing its physical address to the VMMDev request port.
     */
    Log(("vgdrvIoCtl_HGCMFastCall -> host\n"));
    ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pCallReq->GCPhysReq);

    /* Make the compiler aware that the host has changed memory. */
    ASMCompilerBarrier();

    rc = pHgcmCall->header.header.rc;
    Log(("vgdrvIoCtl_HGCMFastCall -> %Rrc (header rc=%Rrc)\n", rc, pHgcmCall->header.result));

    /*
     * The host is likely to engage in asynchronous execution of HGCM, unless it fails.
     */
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        /* Wait (uninterruptibly, see assertion above) for the host to finish. */
        rc = vgdrvHgcmAsyncWaitCallbackWorker(&pHgcmCall->header, pDevExt, false /* fInterruptible */, RT_INDEFINITE_WAIT);
        if (pHgcmCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            Assert(!(pHgcmCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * Timeout and interrupt scenarios are messy and requires
             * cancelation, so implement later.
             */
            AssertReleaseMsgFailed(("rc=%Rrc\n", rc));
        }
    }
    else
        Assert((pHgcmCall->header.fu32Flags & VBOX_HGCM_REQ_DONE) || RT_FAILURE_NP(rc));

    Log(("vgdrvIoCtl_HGCMFastCall: rc=%Rrc result=%Rrc fu32Flags=%#x\n", rc, pHgcmCall->header.result, pHgcmCall->header.fu32Flags));
    return rc;

}
2908
2909#endif /* VBOX_WITH_HGCM */
2910
/**
 * Handle VBGL_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly.  If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   pInfo       The output buffer.
 */
static int vgdrvIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHECKBALLOON pInfo)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    LogFlow(("VBGL_IOCTL_CHECK_BALLOON:\n"));
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        /*
         * This is a response to that event. Setting this bit means that
         * we request the value from the host and change the guest memory
         * balloon according to this value.
         */
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            pReq->header.fRequestor = pSession->fRequestor;
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglR0GRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* Cache the max chunk count reported by the host (set once). */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->u.Out.cBalloonChunks = pReq->cBalloonChunks;
                pInfo->u.Out.fHandleInR3 = false;
                pInfo->u.Out.afPadding[0] = false;
                pInfo->u.Out.afPadding[1] = false;
                pInfo->u.Out.afPadding[2] = false;

                /* Try adjust the balloon with kernel memory; may request R3 help. */
                rc = vgdrvSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->u.Out.fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;
            }
            else
                LogRel(("VBGL_IOCTL_CHECK_BALLOON: VbglR0GRPerform failed. rc=%Rrc\n", rc));
            VbglR0GRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED;

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    LogFlow(("VBGL_IOCTL_CHECK_BALLOON returns %Rrc\n", rc));
    return rc;
}
2984
2985
2986/**
2987 * Handle a request for changing the memory balloon.
2988 *
2989 * @returns VBox status code.
2990 *
2991 * @param pDevExt The device extention.
2992 * @param pSession The session.
2993 * @param pInfo The change request structure (input).
2994 */
2995static int vgdrvIoCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEBALLOON pInfo)
2996{
2997 int rc;
2998 LogFlow(("VBGL_IOCTL_CHANGE_BALLOON: fInflate=%RTbool u64ChunkAddr=%p\n", pInfo->u.In.fInflate, pInfo->u.In.pvChunk));
2999 if ( pInfo->u.In.abPadding[0]
3000 || pInfo->u.In.abPadding[1]
3001 || pInfo->u.In.abPadding[2]
3002 || pInfo->u.In.abPadding[3]
3003 || pInfo->u.In.abPadding[4]
3004 || pInfo->u.In.abPadding[5]
3005 || pInfo->u.In.abPadding[6]
3006#if ARCH_BITS == 32
3007 || pInfo->u.In.abPadding[7]
3008 || pInfo->u.In.abPadding[8]
3009 || pInfo->u.In.abPadding[9]
3010#endif
3011 )
3012 {
3013 Log(("VBGL_IOCTL_CHANGE_BALLOON: Padding isn't all zero: %.*Rhxs\n", sizeof(pInfo->u.In.abPadding), pInfo->u.In.abPadding));
3014 return VERR_INVALID_PARAMETER;
3015 }
3016
3017 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
3018 AssertRCReturn(rc, rc);
3019
3020 if (!pDevExt->MemBalloon.fUseKernelAPI)
3021 {
3022 /*
3023 * The first user trying to query/change the balloon becomes the
3024 * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
3025 */
3026 if ( pDevExt->MemBalloon.pOwner != pSession
3027 && pDevExt->MemBalloon.pOwner == NULL)
3028 pDevExt->MemBalloon.pOwner = pSession;
3029
3030 if (pDevExt->MemBalloon.pOwner == pSession)
3031 rc = vgdrvSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u.In.pvChunk, pInfo->u.In.fInflate != false);
3032 else
3033 rc = VERR_PERMISSION_DENIED;
3034 }
3035 else
3036 rc = VERR_PERMISSION_DENIED;
3037
3038 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
3039 return rc;
3040}
3041
3042
3043/**
3044 * Handle a request for writing a core dump of the guest on the host.
3045 *
3046 * @returns VBox status code.
3047 *
3048 * @param pDevExt The device extension.
3049 * @param pSession The session.
3050 * @param pInfo The output buffer.
3051 */
3052static int vgdrvIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCWRITECOREDUMP pInfo)
3053{
3054 VMMDevReqWriteCoreDump *pReq = NULL;
3055 int rc;
3056 LogFlow(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP\n"));
3057 RT_NOREF1(pDevExt);
3058
3059 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
3060 if (RT_SUCCESS(rc))
3061 {
3062 pReq->header.fRequestor = pSession->fRequestor;
3063 pReq->fFlags = pInfo->u.In.fFlags;
3064 rc = VbglR0GRPerform(&pReq->header);
3065 if (RT_FAILURE(rc))
3066 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: VbglR0GRPerform failed, rc=%Rrc!\n", rc));
3067
3068 VbglR0GRFree(&pReq->header);
3069 }
3070 else
3071 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
3072 sizeof(*pReq), sizeof(*pReq), rc));
3073 return rc;
3074}
3075
3076
3077/**
3078 * Guest backdoor logging.
3079 *
3080 * @returns VBox status code.
3081 *
3082 * @param pDevExt The device extension.
3083 * @param pch The log message (need not be NULL terminated).
3084 * @param cbData Size of the buffer.
3085 * @param fUserSession Copy of VBOXGUESTSESSION::fUserSession for the
3086 * call. True normal user, false root user.
3087 */
3088static int vgdrvIoCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, bool fUserSession)
3089{
3090 if (pDevExt->fLoggingEnabled)
3091 RTLogBackdoorPrintf("%.*s", cbData, pch);
3092 else if (!fUserSession)
3093 LogRel(("%.*s", cbData, pch));
3094 else
3095 Log(("%.*s", cbData, pch));
3096 return VINF_SUCCESS;
3097}
3098
3099
3100/** @name Guest Capabilities, Mouse Status and Event Filter
3101 * @{
3102 */
3103
3104/**
3105 * Clears a bit usage tracker (init time).
3106 *
3107 * @param pTracker The tracker to clear.
3108 */
3109static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
3110{
3111 uint32_t iBit;
3112 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
3113
3114 for (iBit = 0; iBit < 32; iBit++)
3115 pTracker->acPerBitUsage[iBit] = 0;
3116 pTracker->fMask = 0;
3117}
3118
3119
3120#ifdef VBOX_STRICT
3121/**
3122 * Checks that pTracker->fMask is correct and that the usage values are within
3123 * the valid range.
3124 *
3125 * @param pTracker The tracker.
3126 * @param cMax Max valid usage value.
3127 * @param pszWhat Identifies the tracker in assertions.
3128 */
3129static void vgdrvBitUsageTrackerCheckMask(PCVBOXGUESTBITUSAGETRACER pTracker, uint32_t cMax, const char *pszWhat)
3130{
3131 uint32_t fMask = 0;
3132 uint32_t iBit;
3133 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
3134
3135 for (iBit = 0; iBit < 32; iBit++)
3136 if (pTracker->acPerBitUsage[iBit])
3137 {
3138 fMask |= RT_BIT_32(iBit);
3139 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
3140 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
3141 }
3142
3143 AssertMsg(fMask == pTracker->fMask, ("%s: %#x vs %#x\n", pszWhat, fMask, pTracker->fMask));
3144}
3145#endif
3146
3147
3148/**
3149 * Applies a change to the bit usage tracker.
3150 *
3151 *
3152 * @returns true if the mask changed, false if not.
3153 * @param pTracker The bit usage tracker.
3154 * @param fChanged The bits to change.
3155 * @param fPrevious The previous value of the bits.
3156 * @param cMax The max valid usage value for assertions.
3157 * @param pszWhat Identifies the tracker in assertions.
3158 */
3159static bool vgdrvBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker, uint32_t fChanged, uint32_t fPrevious,
3160 uint32_t cMax, const char *pszWhat)
3161{
3162 bool fGlobalChange = false;
3163 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
3164
3165 while (fChanged)
3166 {
3167 uint32_t const iBit = ASMBitFirstSetU32(fChanged) - 1;
3168 uint32_t const fBitMask = RT_BIT_32(iBit);
3169 Assert(iBit < 32); Assert(fBitMask & fChanged);
3170
3171 if (fBitMask & fPrevious)
3172 {
3173 pTracker->acPerBitUsage[iBit] -= 1;
3174 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
3175 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
3176 if (pTracker->acPerBitUsage[iBit] == 0)
3177 {
3178 fGlobalChange = true;
3179 pTracker->fMask &= ~fBitMask;
3180 }
3181 }
3182 else
3183 {
3184 pTracker->acPerBitUsage[iBit] += 1;
3185 AssertMsg(pTracker->acPerBitUsage[iBit] > 0 && pTracker->acPerBitUsage[iBit] <= cMax,
3186 ("pTracker->acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
3187 if (pTracker->acPerBitUsage[iBit] == 1)
3188 {
3189 fGlobalChange = true;
3190 pTracker->fMask |= fBitMask;
3191 }
3192 }
3193
3194 fChanged &= ~fBitMask;
3195 }
3196
3197#ifdef VBOX_STRICT
3198 vgdrvBitUsageTrackerCheckMask(pTracker, cMax, pszWhat);
3199#endif
3200 NOREF(pszWhat); NOREF(cMax);
3201 return fGlobalChange;
3202}
3203
3204
3205/**
3206 * Init and termination worker for resetting the (host) event filter on the host
3207 *
3208 * @returns VBox status code.
3209 * @param pDevExt The device extension.
3210 * @param fFixedEvents Fixed events (init time).
3211 */
3212static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents)
3213{
3214 VMMDevCtlGuestFilterMask *pReq;
3215 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
3216 if (RT_SUCCESS(rc))
3217 {
3218 pReq->u32NotMask = UINT32_MAX & ~fFixedEvents;
3219 pReq->u32OrMask = fFixedEvents;
3220 rc = VbglR0GRPerform(&pReq->header);
3221 if (RT_FAILURE(rc))
3222 LogRelFunc(("failed with rc=%Rrc\n", rc));
3223 VbglR0GRFree(&pReq->header);
3224 }
3225 RT_NOREF1(pDevExt);
3226 return rc;
3227}
3228
3229
/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBGL_IOCTL_CHANGE_FILTER_MASK as well as to do
 * session cleanup.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   fOrMask             The events to add.
 * @param   fNotMask            The events to remove.
 * @param   fSessionTermination Set if we're called by the session cleanup code.
 *                              This tweaks the error handling so we perform
 *                              proper session cleanup even if the host
 *                              misbehaves.
 *
 * @remarks Takes the session spinlock.
 */
static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                      uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
{
    VMMDevCtlGuestFilterMask *pReq;
    uint32_t fChanged;
    uint32_t fPrevious;
    int rc;

    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     */
    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
    if (RT_SUCCESS(rc))
    { /* nothing */ }
    else if (!fSessionTermination)
    {
        LogRel(("vgdrvSetSessionFilterMask: VbglR0GRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

    /*
     * Apply the changes to the session mask.
     */
    fPrevious = pSession->fEventFilter;
    pSession->fEventFilter |= fOrMask;
    pSession->fEventFilter &= ~fNotMask;

    /*
     * If anything actually changed, update the global usage counters.
     */
    fChanged = fPrevious ^ pSession->fEventFilter;
    LogFlow(("vgdrvSetSessionEventFilter: Session->fEventFilter: %#x -> %#x (changed %#x)\n",
             fPrevious, pSession->fEventFilter, fChanged));
    if (fChanged)
    {
        bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, fPrevious,
                                                        pDevExt->cSessions, "EventFilterTracker");

        /*
         * If there are global changes, update the event filter on the host.
         * Note! fEventFilterHost == UINT32_MAX means the host state is unknown
         *       (e.g. after a failed update) and must be rewritten.
         */
        if (fGlobalChange || pDevExt->fEventFilterHost == UINT32_MAX)
        {
            Assert(pReq || fSessionTermination);
            if (pReq)
            {
                pReq->u32OrMask = pDevExt->fFixedEvents | pDevExt->EventFilterTracker.fMask;
                if (pReq->u32OrMask == pDevExt->fEventFilterHost)
                    rc = VINF_SUCCESS;
                else
                {
                    pDevExt->fEventFilterHost = pReq->u32OrMask;
                    pReq->u32NotMask = ~pReq->u32OrMask;
                    rc = VbglR0GRPerform(&pReq->header);
                    if (RT_FAILURE(rc))
                    {
                        /*
                         * Failed, roll back (unless it's session termination time).
                         */
                        pDevExt->fEventFilterHost = UINT32_MAX;
                        if (!fSessionTermination)
                        {
                            vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, pSession->fEventFilter,
                                                       pDevExt->cSessions, "EventFilterTracker");
                            pSession->fEventFilter = fPrevious;
                        }
                    }
                }
            }
            else
                rc = VINF_SUCCESS;
        }
    }

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglR0GRFree(&pReq->header);
    return rc;
}
3332
3333
3334/**
3335 * Handle VBGL_IOCTL_CHANGE_FILTER_MASK.
3336 *
3337 * @returns VBox status code.
3338 *
3339 * @param pDevExt The device extension.
3340 * @param pSession The session.
3341 * @param pInfo The request.
3342 */
3343static int vgdrvIoCtl_ChangeFilterMask(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEFILTERMASK pInfo)
3344{
3345 LogFlow(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3346
3347 if ((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
3348 {
3349 Log(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x: Invalid masks!\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3350 return VERR_INVALID_PARAMETER;
3351 }
3352
3353 return vgdrvSetSessionEventFilter(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask, false /*fSessionTermination*/);
3354}
3355
3356
3357/**
3358 * Init and termination worker for set mouse feature status to zero on the host.
3359 *
3360 * @returns VBox status code.
3361 * @param pDevExt The device extension.
3362 */
3363static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt)
3364{
3365 VMMDevReqMouseStatus *pReq;
3366 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
3367 if (RT_SUCCESS(rc))
3368 {
3369 pReq->mouseFeatures = 0;
3370 pReq->pointerXPos = 0;
3371 pReq->pointerYPos = 0;
3372 rc = VbglR0GRPerform(&pReq->header);
3373 if (RT_FAILURE(rc))
3374 LogRelFunc(("failed with rc=%Rrc\n", rc));
3375 VbglR0GRFree(&pReq->header);
3376 }
3377 RT_NOREF1(pDevExt);
3378 return rc;
3379}
3380
3381
/**
 * Changes the mouse status mask for the given session.
 *
 * This is called in response to VBOXGUEST_IOCTL_SET_MOUSE_STATUS as well as to
 * do session cleanup.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   fOrMask             The status flags to add.
 * @param   fNotMask            The status flags to remove.
 * @param   fSessionTermination Set if we're called by the session cleanup code.
 *                              This tweaks the error handling so we perform
 *                              proper session cleanup even if the host
 *                              misbehaves.
 *
 * @remarks Takes the session spinlock.
 */
static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                      uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
{
    VMMDevReqMouseStatus *pReq;
    uint32_t fChanged;
    uint32_t fPrevious;
    int rc;

    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     */
    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
    if (RT_SUCCESS(rc))
    {
        if (!fSessionTermination)
            pReq->header.fRequestor = pSession->fRequestor;
    }
    else if (!fSessionTermination)
    {
        LogRel(("vgdrvSetSessionMouseStatus: VbglR0GRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

    /*
     * Apply the changes to the session mask.
     */
    fPrevious = pSession->fMouseStatus;
    pSession->fMouseStatus |= fOrMask;
    pSession->fMouseStatus &= ~fNotMask;

    /*
     * If anything actually changed, update the global usage counters.
     */
    fChanged = fPrevious ^ pSession->fMouseStatus;
    if (fChanged)
    {
        bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, fPrevious,
                                                        pDevExt->cSessions, "MouseStatusTracker");

        /*
         * If there are global changes, update the event filter on the host.
         * Note! fMouseStatusHost == UINT32_MAX means the host state is unknown
         *       (e.g. after a failed update) and must be rewritten.
         */
        if (fGlobalChange || pDevExt->fMouseStatusHost == UINT32_MAX)
        {
            Assert(pReq || fSessionTermination);
            if (pReq)
            {
                pReq->mouseFeatures = pDevExt->MouseStatusTracker.fMask;
                if (pReq->mouseFeatures == pDevExt->fMouseStatusHost)
                    rc = VINF_SUCCESS;
                else
                {
                    pDevExt->fMouseStatusHost = pReq->mouseFeatures;
                    pReq->pointerXPos = 0;
                    pReq->pointerYPos = 0;
                    rc = VbglR0GRPerform(&pReq->header);
                    if (RT_FAILURE(rc))
                    {
                        /*
                         * Failed, roll back (unless it's session termination time).
                         */
                        pDevExt->fMouseStatusHost = UINT32_MAX;
                        if (!fSessionTermination)
                        {
                            vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, pSession->fMouseStatus,
                                                       pDevExt->cSessions, "MouseStatusTracker");
                            pSession->fMouseStatus = fPrevious;
                        }
                    }
                }
            }
            else
                rc = VINF_SUCCESS;
        }
    }

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglR0GRFree(&pReq->header);
    return rc;
}
3486
3487
3488/**
3489 * Sets the mouse status features for this session and updates them globally.
3490 *
3491 * @returns VBox status code.
3492 *
3493 * @param pDevExt The device extention.
3494 * @param pSession The session.
3495 * @param fFeatures New bitmap of enabled features.
3496 */
3497static int vgdrvIoCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
3498{
3499 LogFlow(("VBGL_IOCTL_SET_MOUSE_STATUS: features=%#x\n", fFeatures));
3500
3501 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
3502 return VERR_INVALID_PARAMETER;
3503
3504 return vgdrvSetSessionMouseStatus(pDevExt, pSession, fFeatures, ~fFeatures, false /*fSessionTermination*/);
3505}
3506
3507
3508/**
3509 * Return the mask of VMM device events that this session is allowed to see (wrt
3510 * to "acquire" mode guest capabilities).
3511 *
3512 * The events associated with guest capabilities in "acquire" mode will be
3513 * restricted to sessions which has acquired the respective capabilities.
3514 * If someone else tries to wait for acquired events, they won't be woken up
3515 * when the event becomes pending. Should some other thread in the session
3516 * acquire the capability while the corresponding event is pending, the waiting
3517 * thread will woken up.
3518 *
3519 * @returns Mask of events valid for the given session.
3520 * @param pDevExt The device extension.
3521 * @param pSession The session.
3522 *
3523 * @remarks Needs only be called when dispatching events in the
3524 * VBOXGUEST_ACQUIRE_STYLE_EVENTS mask.
3525 */
3526static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
3527{
3528 uint32_t fAcquireModeGuestCaps;
3529 uint32_t fAcquiredGuestCaps;
3530 uint32_t fAllowedEvents;
3531
3532 /*
3533 * Note! Reads pSession->fAcquiredGuestCaps and pDevExt->fAcquireModeGuestCaps
3534 * WITHOUT holding VBOXGUESTDEVEXT::SessionSpinlock.
3535 */
3536 fAcquireModeGuestCaps = ASMAtomicUoReadU32(&pDevExt->fAcquireModeGuestCaps);
3537 if (fAcquireModeGuestCaps == 0)
3538 return VMMDEV_EVENT_VALID_EVENT_MASK;
3539 fAcquiredGuestCaps = ASMAtomicUoReadU32(&pSession->fAcquiredGuestCaps);
3540
3541 /*
3542 * Calculate which events to allow according to the cap config and caps
3543 * acquired by the session.
3544 */
3545 fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK;
3546 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
3547 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
3548 fAllowedEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
3549
3550 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3551 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
3552 fAllowedEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3553
3554 return fAllowedEvents;
3555}
3556
3557
3558/**
3559 * Init and termination worker for set guest capabilities to zero on the host.
3560 *
3561 * @returns VBox status code.
3562 * @param pDevExt The device extension.
3563 */
3564static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt)
3565{
3566 VMMDevReqGuestCapabilities2 *pReq;
3567 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3568 if (RT_SUCCESS(rc))
3569 {
3570 pReq->u32NotMask = UINT32_MAX;
3571 pReq->u32OrMask = 0;
3572 rc = VbglR0GRPerform(&pReq->header);
3573
3574 if (RT_FAILURE(rc))
3575 LogRelFunc(("failed with rc=%Rrc\n", rc));
3576 VbglR0GRFree(&pReq->header);
3577 }
3578 RT_NOREF1(pDevExt);
3579 return rc;
3580}
3581
3582
3583/**
3584 * Sets the guest capabilities to the host while holding the lock.
3585 *
3586 * This will ASSUME that we're the ones in charge of the mask, so
3587 * we'll simply clear all bits we don't set.
3588 *
3589 * @returns VBox status code.
3590 * @param pDevExt The device extension.
3591 * @param pReq The request.
3592 */
3593static int vgdrvUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT pDevExt, VMMDevReqGuestCapabilities2 *pReq)
3594{
3595 int rc;
3596
3597 pReq->u32OrMask = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
3598 if (pReq->u32OrMask == pDevExt->fGuestCapsHost)
3599 rc = VINF_SUCCESS;
3600 else
3601 {
3602 pDevExt->fGuestCapsHost = pReq->u32OrMask;
3603 pReq->u32NotMask = ~pReq->u32OrMask;
3604 rc = VbglR0GRPerform(&pReq->header);
3605 if (RT_FAILURE(rc))
3606 pDevExt->fGuestCapsHost = UINT32_MAX;
3607 }
3608
3609 return rc;
3610}
3611
3612
3613/**
3614 * Switch a set of capabilities into "acquire" mode and (maybe) acquire them for
3615 * the given session.
3616 *
3617 * This is called in response to VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE as well as
3618 * to do session cleanup.
3619 *
3620 * @returns VBox status code.
3621 * @param pDevExt The device extension.
3622 * @param pSession The session.
3623 * @param fOrMask The capabilities to add .
3624 * @param fNotMask The capabilities to remove. Ignored in
3625 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE.
3626 * @param fFlags Confusing operation modifier.
3627 * VBOXGUESTCAPSACQUIRE_FLAGS_NONE means to both
3628 * configure and acquire/release the capabilities.
3629 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
3630 * means only configure capabilities in the
3631 * @a fOrMask capabilities for "acquire" mode.
3632 * @param fSessionTermination Set if we're called by the session cleanup code.
3633 * This tweaks the error handling so we perform
3634 * proper session cleanup even if the host
3635 * misbehaves.
3636 *
3637 * @remarks Takes both the session and event spinlocks.
3638 */
3639static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3640 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags,
3641 bool fSessionTermination)
3642{
3643 uint32_t fCurrentOwnedCaps;
3644 uint32_t fSessionRemovedCaps;
3645 uint32_t fSessionAddedCaps;
3646 uint32_t fOtherConflictingCaps;
3647 VMMDevReqGuestCapabilities2 *pReq = NULL;
3648 int rc;
3649
3650
3651 /*
3652 * Validate and adjust input.
3653 */
3654 if (fOrMask & ~( VMMDEV_GUEST_SUPPORTS_SEAMLESS
3655 | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING
3656 | VMMDEV_GUEST_SUPPORTS_GRAPHICS ) )
3657 {
3658 LogRel(("vgdrvAcquireSessionCapabilities: invalid fOrMask=%#x (pSession=%p fNotMask=%#x fFlags=%#x)\n",
3659 fOrMask, pSession, fNotMask, fFlags));
3660 return VERR_INVALID_PARAMETER;
3661 }
3662
3663 if ((fFlags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK) != 0)
3664 {
3665 LogRel(("vgdrvAcquireSessionCapabilities: invalid fFlags=%#x (pSession=%p fOrMask=%#x fNotMask=%#x)\n",
3666 fFlags, pSession, fOrMask, fNotMask));
3667 return VERR_INVALID_PARAMETER;
3668 }
3669 Assert(!fOrMask || !fSessionTermination);
3670
3671 /* The fNotMask no need to have all values valid, invalid ones will simply be ignored. */
3672 fNotMask &= ~fOrMask;
3673
3674 /*
3675 * Preallocate a update request if we're about to do more than just configure
3676 * the capability mode.
3677 */
3678 if (!(fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE))
3679 {
3680 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3681 if (RT_SUCCESS(rc))
3682 {
3683 if (!fSessionTermination)
3684 pReq->header.fRequestor = pSession->fRequestor;
3685 }
3686 else if (!fSessionTermination)
3687 {
3688 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: VbglR0GRAlloc failure: %Rrc\n",
3689 pSession, fOrMask, fNotMask, fFlags, rc));
3690 return rc;
3691 }
3692 else
3693 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3694 }
3695
3696 /*
3697 * Try switch the capabilities in the OR mask into "acquire" mode.
3698 *
3699 * Note! We currently ignore anyone which may already have "set" the capabilities
3700 * in fOrMask. Perhaps not the best way to handle it, but it's simple...
3701 */
3702 RTSpinlockAcquire(pDevExt->EventSpinlock);
3703
3704 if (!(pDevExt->fSetModeGuestCaps & fOrMask))
3705 pDevExt->fAcquireModeGuestCaps |= fOrMask;
3706 else
3707 {
3708 RTSpinlockRelease(pDevExt->EventSpinlock);
3709
3710 if (pReq)
3711 VbglR0GRFree(&pReq->header);
3712 AssertMsgFailed(("Trying to change caps mode: %#x\n", fOrMask));
3713 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: calling caps acquire for set caps\n",
3714 pSession, fOrMask, fNotMask, fFlags));
3715 return VERR_INVALID_STATE;
3716 }
3717
3718 /*
3719 * If we only wanted to switch the capabilities into "acquire" mode, we're done now.
3720 */
3721 if (fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
3722 {
3723 RTSpinlockRelease(pDevExt->EventSpinlock);
3724
3725 Assert(!pReq);
3726 Log(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: configured acquire caps: 0x%x\n",
3727 pSession, fOrMask, fNotMask, fFlags));
3728 return VINF_SUCCESS;
3729 }
3730 Assert(pReq || fSessionTermination);
3731
3732 /*
3733 * Caller wants to acquire/release the capabilities too.
3734 *
3735 * Note! The mode change of the capabilities above won't be reverted on
3736 * failure, this is intentional.
3737 */
3738 fCurrentOwnedCaps = pSession->fAcquiredGuestCaps;
3739 fSessionRemovedCaps = fCurrentOwnedCaps & fNotMask;
3740 fSessionAddedCaps = fOrMask & ~fCurrentOwnedCaps;
3741 fOtherConflictingCaps = pDevExt->fAcquiredGuestCaps & ~fCurrentOwnedCaps;
3742 fOtherConflictingCaps &= fSessionAddedCaps;
3743
3744 if (!fOtherConflictingCaps)
3745 {
3746 if (fSessionAddedCaps)
3747 {
3748 pSession->fAcquiredGuestCaps |= fSessionAddedCaps;
3749 pDevExt->fAcquiredGuestCaps |= fSessionAddedCaps;
3750 }
3751
3752 if (fSessionRemovedCaps)
3753 {
3754 pSession->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3755 pDevExt->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3756 }
3757
3758 /*
3759 * If something changes (which is very likely), tell the host.
3760 */
3761 if (fSessionAddedCaps || fSessionRemovedCaps || pDevExt->fGuestCapsHost == UINT32_MAX)
3762 {
3763 Assert(pReq || fSessionTermination);
3764 if (pReq)
3765 {
3766 rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3767 if (RT_FAILURE(rc) && !fSessionTermination)
3768 {
3769 /* Failed, roll back. */
3770 if (fSessionAddedCaps)
3771 {
3772 pSession->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3773 pDevExt->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3774 }
3775 if (fSessionRemovedCaps)
3776 {
3777 pSession->fAcquiredGuestCaps |= fSessionRemovedCaps;
3778 pDevExt->fAcquiredGuestCaps |= fSessionRemovedCaps;
3779 }
3780
3781 RTSpinlockRelease(pDevExt->EventSpinlock);
3782 LogRel(("vgdrvAcquireSessionCapabilities: vgdrvUpdateCapabilitiesOnHostWithReqAndLock failed: rc=%Rrc\n", rc));
3783 VbglR0GRFree(&pReq->header);
3784 return rc;
3785 }
3786 }
3787 }
3788 }
3789 else
3790 {
3791 RTSpinlockRelease(pDevExt->EventSpinlock);
3792
3793 Log(("vgdrvAcquireSessionCapabilities: Caps %#x were busy\n", fOtherConflictingCaps));
3794 VbglR0GRFree(&pReq->header);
3795 return VERR_RESOURCE_BUSY;
3796 }
3797
3798 RTSpinlockRelease(pDevExt->EventSpinlock);
3799 if (pReq)
3800 VbglR0GRFree(&pReq->header);
3801
3802 /*
3803 * If we added a capability, check if that means some other thread in our
3804 * session should be unblocked because there are events pending.
3805 *
3806 * HACK ALERT! When the seamless support capability is added we generate a
3807 * seamless change event so that the ring-3 client can sync with
3808 * the seamless state. Although this introduces a spurious
3809 * wakeups of the ring-3 client, it solves the problem of client
3810 * state inconsistency in multiuser environment (on Windows).
3811 */
3812 if (fSessionAddedCaps)
3813 {
3814 uint32_t fGenFakeEvents = 0;
3815 if (fSessionAddedCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3816 fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3817
3818 RTSpinlockAcquire(pDevExt->EventSpinlock);
3819 if (fGenFakeEvents || pDevExt->f32PendingEvents)
3820 vgdrvDispatchEventsLocked(pDevExt, fGenFakeEvents);
3821 RTSpinlockRelease(pDevExt->EventSpinlock);
3822
3823#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3824 VGDrvCommonWaitDoWakeUps(pDevExt);
3825#endif
3826 }
3827
3828 return VINF_SUCCESS;
3829}
3830
3831
3832/**
3833 * Handle VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES.
3834 *
3835 * @returns VBox status code.
3836 *
3837 * @param pDevExt The device extension.
3838 * @param pSession The session.
3839 * @param pAcquire The request.
3840 */
3841static int vgdrvIoCtl_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCACQUIREGUESTCAPS pAcquire)
3842{
3843 int rc;
3844 LogFlow(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES: or=%#x not=%#x flags=%#x\n",
3845 pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask, pAcquire->u.In.fFlags));
3846
3847 rc = vgdrvAcquireSessionCapabilities(pDevExt, pSession, pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask,
3848 pAcquire->u.In.fFlags, false /*fSessionTermination*/);
3849 if (RT_FAILURE(rc))
3850 LogRel(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES failed rc=%Rrc\n", rc));
3851 return rc;
3852}
3853
3854
3855/**
3856 * Sets the guest capabilities for a session.
3857 *
3858 * @returns VBox status code.
3859 * @param pDevExt The device extension.
3860 * @param pSession The session.
3861 * @param fOrMask The capabilities to add.
3862 * @param fNotMask The capabilities to remove.
3863 * @param pfSessionCaps Where to return the guest capabilities reported
3864 * for this session. Optional.
3865 * @param pfGlobalCaps Where to return the guest capabilities reported
3866 * for all the sessions. Optional.
3867 *
3868 * @param fSessionTermination Set if we're called by the session cleanup code.
3869 * This tweaks the error handling so we perform
3870 * proper session cleanup even if the host
3871 * misbehaves.
3872 *
3873 * @remarks Takes the session spinlock.
3874 */
3875static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3876 uint32_t fOrMask, uint32_t fNotMask, uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps,
3877 bool fSessionTermination)
3878{
3879 /*
3880 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
3881 */
3882 VMMDevReqGuestCapabilities2 *pReq;
3883 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3884 if (RT_SUCCESS(rc))
3885 {
3886 if (!fSessionTermination)
3887 pReq->header.fRequestor = pSession->fRequestor;
3888 }
3889 else if (!fSessionTermination)
3890 {
3891 if (pfSessionCaps)
3892 *pfSessionCaps = UINT32_MAX;
3893 if (pfGlobalCaps)
3894 *pfGlobalCaps = UINT32_MAX;
3895 LogRel(("vgdrvSetSessionCapabilities: VbglR0GRAlloc failure: %Rrc\n", rc));
3896 return rc;
3897 }
3898 else
3899 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3900
3901
3902 RTSpinlockAcquire(pDevExt->SessionSpinlock);
3903
3904#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
3905 /*
3906 * Capabilities in "acquire" mode cannot be set via this API.
3907 * (Acquire mode is only used on windows at the time of writing.)
3908 */
3909 if (!(fOrMask & pDevExt->fAcquireModeGuestCaps))
3910#endif
3911 {
3912 /*
3913 * Apply the changes to the session mask.
3914 */
3915 uint32_t fChanged;
3916 uint32_t fPrevious = pSession->fCapabilities;
3917 pSession->fCapabilities |= fOrMask;
3918 pSession->fCapabilities &= ~fNotMask;
3919
3920 /*
3921 * If anything actually changed, update the global usage counters.
3922 */
3923 fChanged = fPrevious ^ pSession->fCapabilities;
3924 if (fChanged)
3925 {
3926 bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, fPrevious,
3927 pDevExt->cSessions, "SetGuestCapsTracker");
3928
3929 /*
3930 * If there are global changes, update the capabilities on the host.
3931 */
3932 if (fGlobalChange || pDevExt->fGuestCapsHost == UINT32_MAX)
3933 {
3934 Assert(pReq || fSessionTermination);
3935 if (pReq)
3936 {
3937 rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3938
3939 /* On failure, roll back (unless it's session termination time). */
3940 if (RT_FAILURE(rc) && !fSessionTermination)
3941 {
3942 vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, pSession->fCapabilities,
3943 pDevExt->cSessions, "SetGuestCapsTracker");
3944 pSession->fCapabilities = fPrevious;
3945 }
3946 }
3947 }
3948 }
3949 }
3950#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
3951 else
3952 rc = VERR_RESOURCE_BUSY;
3953#endif
3954
3955 if (pfSessionCaps)
3956 *pfSessionCaps = pSession->fCapabilities;
3957 if (pfGlobalCaps)
3958 *pfGlobalCaps = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
3959
3960 RTSpinlockRelease(pDevExt->SessionSpinlock);
3961 if (pReq)
3962 VbglR0GRFree(&pReq->header);
3963 return rc;
3964}
3965
3966
3967/**
3968 * Handle VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES.
3969 *
3970 * @returns VBox status code.
3971 *
3972 * @param pDevExt The device extension.
3973 * @param pSession The session.
3974 * @param pInfo The request.
3975 */
3976static int vgdrvIoCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCSETGUESTCAPS pInfo)
3977{
3978 int rc;
3979 LogFlow(("VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3980
3981 if (!((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_GUEST_CAPABILITIES_MASK))
3982 rc = vgdrvSetSessionCapabilities(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask,
3983 &pInfo->u.Out.fSessionCaps, &pInfo->u.Out.fGlobalCaps, false /*fSessionTermination*/);
3984 else
3985 rc = VERR_INVALID_PARAMETER;
3986
3987 return rc;
3988}
3989
3990/** @} */
3991
3992
3993/**
3994 * Common IOCtl for user to kernel and kernel to kernel communication.
3995 *
3996 * This function only does the basic validation and then invokes
3997 * worker functions that takes care of each specific function.
3998 *
3999 * @returns VBox status code.
4000 *
4001 * @param iFunction The requested function.
4002 * @param pDevExt The device extension.
4003 * @param pSession The client session.
4004 * @param pReqHdr Pointer to the request. This always starts with
4005 * a request common header.
4006 * @param cbReq The max size of the request buffer.
4007 */
4008int VGDrvCommonIoCtl(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLREQHDR pReqHdr, size_t cbReq)
4009{
4010 uintptr_t const iFunctionStripped = VBGL_IOCTL_CODE_STRIPPED(iFunction);
4011 int rc;
4012
4013 LogFlow(("VGDrvCommonIoCtl: iFunction=%#x pDevExt=%p pSession=%p pReqHdr=%p cbReq=%zu\n",
4014 iFunction, pDevExt, pSession, pReqHdr, cbReq));
4015
4016 /*
4017 * Define some helper macros to simplify validation.
4018 */
4019#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
4020 do { \
4021 if (RT_LIKELY( pReqHdr->cbIn == (cbInExpect) \
4022 && ( pReqHdr->cbOut == (cbOutExpect) \
4023 || ((cbInExpect) == (cbOutExpect) && pReqHdr->cbOut == 0) ) )) \
4024 { /* likely */ } \
4025 else \
4026 { \
4027 Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
4028 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
4029 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
4030 } \
4031 } while (0)
4032
4033#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
4034
4035#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
4036 do { \
4037 if (RT_LIKELY(pReqHdr->cbIn == (cbInExpect))) \
4038 { /* likely */ } \
4039 else \
4040 { \
4041 Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
4042 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
4043 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
4044 } \
4045 } while (0)
4046
4047#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
4048 do { \
4049 if (RT_LIKELY( pReqHdr->cbOut == (cbOutExpect) \
4050 || (pReqHdr->cbOut == 0 && pReqHdr->cbIn == (cbOutExpect)))) \
4051 { /* likely */ } \
4052 else \
4053 { \
4054 Log(( #Name ": Invalid input/output sizes. cbOut=%ld (%ld) expected %ld.\n", \
4055 (long)pReqHdr->cbOut, (long)pReqHdr->cbIn, (long)(cbOutExpect))); \
4056 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
4057 } \
4058 } while (0)
4059
4060#define REQ_CHECK_EXPR(Name, expr) \
4061 do { \
4062 if (RT_LIKELY(!!(expr))) \
4063 { /* likely */ } \
4064 else \
4065 { \
4066 Log(( #Name ": %s\n", #expr)); \
4067 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
4068 } \
4069 } while (0)
4070
4071#define REQ_CHECK_EXPR_FMT(expr, fmt) \
4072 do { \
4073 if (RT_LIKELY(!!(expr))) \
4074 { /* likely */ } \
4075 else \
4076 { \
4077 Log( fmt ); \
4078 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
4079 } \
4080 } while (0)
4081
4082#define REQ_CHECK_RING0(mnemonic) \
4083 do { \
4084 if (pSession->R0Process != NIL_RTR0PROCESS) \
4085 { \
4086 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
4087 pSession->Process, (uintptr_t)pSession->R0Process)); \
4088 return pReqHdr->rc = VERR_PERMISSION_DENIED; \
4089 } \
4090 } while (0)
4091
4092
4093 /*
4094 * Validate the request.
4095 */
4096 if (RT_LIKELY(cbReq >= sizeof(*pReqHdr)))
4097 { /* likely */ }
4098 else
4099 {
4100 Log(("VGDrvCommonIoCtl: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
4101 return VERR_INVALID_PARAMETER;
4102 }
4103
4104 if (pReqHdr->cbOut == 0)
4105 pReqHdr->cbOut = pReqHdr->cbIn;
4106
4107 if (RT_LIKELY( pReqHdr->uVersion == VBGLREQHDR_VERSION
4108 && pReqHdr->cbIn >= sizeof(*pReqHdr)
4109 && pReqHdr->cbIn <= cbReq
4110 && pReqHdr->cbOut >= sizeof(*pReqHdr)
4111 && pReqHdr->cbOut <= cbReq))
4112 { /* likely */ }
4113 else
4114 {
4115 Log(("VGDrvCommonIoCtl: Bad ioctl request header; cbIn=%#lx cbOut=%#lx version=%#lx\n",
4116 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->uVersion));
4117 return VERR_INVALID_PARAMETER;
4118 }
4119
4120 if (RT_LIKELY(RT_VALID_PTR(pSession)))
4121 { /* likely */ }
4122 else
4123 {
4124 Log(("VGDrvCommonIoCtl: Invalid pSession value %p (ioctl=%#x)\n", pSession, iFunction));
4125 return VERR_INVALID_PARAMETER;
4126 }
4127
4128
4129 /*
4130 * Deal with variably sized requests first.
4131 */
4132 rc = VINF_SUCCESS;
4133 if ( iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST(0))
4134 || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST_BIG) )
4135 {
4136 REQ_CHECK_EXPR(VBGL_IOCTL_VMMDEV_REQUEST, pReqHdr->uType != VBGLREQHDR_TYPE_DEFAULT);
4137 REQ_CHECK_EXPR_FMT(pReqHdr->cbIn == pReqHdr->cbOut,
4138 ("VBGL_IOCTL_VMMDEV_REQUEST: cbIn=%ld != cbOut=%ld\n", (long)pReqHdr->cbIn, (long)pReqHdr->cbOut));
4139 pReqHdr->rc = vgdrvIoCtl_VMMDevRequest(pDevExt, pSession, (VMMDevRequestHeader *)pReqHdr, cbReq);
4140 }
4141 else if (RT_LIKELY(pReqHdr->uType == VBGLREQHDR_TYPE_DEFAULT))
4142 {
4143 if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_LOG(0)))
4144 {
4145 REQ_CHECK_SIZE_OUT(VBGL_IOCTL_LOG, VBGL_IOCTL_LOG_SIZE_OUT);
4146 pReqHdr->rc = vgdrvIoCtl_Log(pDevExt, &((PVBGLIOCLOG)pReqHdr)->u.In.szMsg[0], pReqHdr->cbIn - sizeof(VBGLREQHDR),
4147 pSession->fUserSession);
4148 }
4149#ifdef VBOX_WITH_HGCM
4150 else if (iFunction == VBGL_IOCTL_IDC_HGCM_FAST_CALL) /* (is variable size, but we don't bother encoding it) */
4151 {
4152 REQ_CHECK_RING0("VBGL_IOCTL_IDC_HGCM_FAST_CALL");
4153 REQ_CHECK_EXPR(VBGL_IOCTL_IDC_HGCM_FAST_CALL, cbReq >= sizeof(VBGLIOCIDCHGCMFASTCALL) + sizeof(VMMDevHGCMCall));
4154 pReqHdr->rc = vgdrvIoCtl_HGCMFastCall(pDevExt, (VBGLIOCIDCHGCMFASTCALL volatile *)pReqHdr);
4155 }
4156 else if ( iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL(0))
4157# if ARCH_BITS == 64
4158 || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_32(0))
4159# endif
4160 )
4161 {
4162 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
4163 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
4164 pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
4165 iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_32(0)),
4166 false /*fUserData*/, cbReq);
4167 }
4168 else if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA(0)))
4169 {
4170 REQ_CHECK_RING0("VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA");
4171 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
4172 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
4173 pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
4174 ARCH_BITS == 32, true /*fUserData*/, cbReq);
4175 }
4176#endif /* VBOX_WITH_HGCM */
4177 else
4178 {
4179 switch (iFunction)
4180 {
4181 /*
4182 * Ring-0 only:
4183 */
4184 case VBGL_IOCTL_IDC_CONNECT:
4185 REQ_CHECK_RING0("VBGL_IOCL_IDC_CONNECT");
4186 REQ_CHECK_SIZES(VBGL_IOCTL_IDC_CONNECT);
4187 pReqHdr->rc = vgdrvIoCtl_IdcConnect(pDevExt, pSession, (PVBGLIOCIDCCONNECT)pReqHdr);
4188 break;
4189
4190 case VBGL_IOCTL_IDC_DISCONNECT:
4191 REQ_CHECK_RING0("VBGL_IOCTL_IDC_DISCONNECT");
4192 REQ_CHECK_SIZES(VBGL_IOCTL_IDC_DISCONNECT);
4193 pReqHdr->rc = vgdrvIoCtl_IdcDisconnect(pDevExt, pSession, (PVBGLIOCIDCDISCONNECT)pReqHdr);
4194 break;
4195
4196 case VBGL_IOCTL_GET_VMMDEV_IO_INFO:
4197 REQ_CHECK_RING0("GET_VMMDEV_IO_INFO");
4198 REQ_CHECK_SIZES(VBGL_IOCTL_GET_VMMDEV_IO_INFO);
4199 pReqHdr->rc = vgdrvIoCtl_GetVMMDevIoInfo(pDevExt, (PVBGLIOCGETVMMDEVIOINFO)pReqHdr);
4200 break;
4201
4202 case VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
4203 REQ_CHECK_RING0("SET_MOUSE_NOTIFY_CALLBACK");
4204 REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK);
4205 pReqHdr->rc = vgdrvIoCtl_SetMouseNotifyCallback(pDevExt, (PVBGLIOCSETMOUSENOTIFYCALLBACK)pReqHdr);
4206 break;
4207
4208 /*
4209 * Ring-3 only:
4210 */
4211 case VBGL_IOCTL_DRIVER_VERSION_INFO:
4212 REQ_CHECK_SIZES(VBGL_IOCTL_DRIVER_VERSION_INFO);
4213 pReqHdr->rc = vgdrvIoCtl_DriverVersionInfo(pDevExt, pSession, (PVBGLIOCDRIVERVERSIONINFO)pReqHdr);
4214 break;
4215
4216 /*
4217 * Both ring-3 and ring-0:
4218 */
4219 case VBGL_IOCTL_WAIT_FOR_EVENTS:
4220 REQ_CHECK_SIZES(VBGL_IOCTL_WAIT_FOR_EVENTS);
4221 pReqHdr->rc = vgdrvIoCtl_WaitForEvents(pDevExt, pSession, (VBGLIOCWAITFOREVENTS *)pReqHdr,
4222 pSession->R0Process != NIL_RTR0PROCESS);
4223 break;
4224
4225 case VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
4226 REQ_CHECK_SIZES(VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS);
4227 pReqHdr->rc = vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
4228 break;
4229
4230 case VBGL_IOCTL_CHANGE_FILTER_MASK:
4231 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_FILTER_MASK);
4232 pReqHdr->rc = vgdrvIoCtl_ChangeFilterMask(pDevExt, pSession, (PVBGLIOCCHANGEFILTERMASK)pReqHdr);
4233 break;
4234
4235#ifdef VBOX_WITH_HGCM
4236 case VBGL_IOCTL_HGCM_CONNECT:
4237 REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_CONNECT);
4238 pReqHdr->rc = vgdrvIoCtl_HGCMConnect(pDevExt, pSession, (PVBGLIOCHGCMCONNECT)pReqHdr);
4239 break;
4240
4241 case VBGL_IOCTL_HGCM_DISCONNECT:
4242 REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_DISCONNECT);
4243 pReqHdr->rc = vgdrvIoCtl_HGCMDisconnect(pDevExt, pSession, (PVBGLIOCHGCMDISCONNECT)pReqHdr);
4244 break;
4245#endif
4246
4247 case VBGL_IOCTL_CHECK_BALLOON:
4248 REQ_CHECK_SIZES(VBGL_IOCTL_CHECK_BALLOON);
4249 pReqHdr->rc = vgdrvIoCtl_CheckMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHECKBALLOON)pReqHdr);
4250 break;
4251
4252 case VBGL_IOCTL_CHANGE_BALLOON:
4253 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_BALLOON);
4254 pReqHdr->rc = vgdrvIoCtl_ChangeMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHANGEBALLOON)pReqHdr);
4255 break;
4256
4257 case VBGL_IOCTL_WRITE_CORE_DUMP:
4258 REQ_CHECK_SIZES(VBGL_IOCTL_WRITE_CORE_DUMP);
4259 pReqHdr->rc = vgdrvIoCtl_WriteCoreDump(pDevExt, pSession, (PVBGLIOCWRITECOREDUMP)pReqHdr);
4260 break;
4261
4262 case VBGL_IOCTL_SET_MOUSE_STATUS:
4263 REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_STATUS);
4264 pReqHdr->rc = vgdrvIoCtl_SetMouseStatus(pDevExt, pSession, ((PVBGLIOCSETMOUSESTATUS)pReqHdr)->u.In.fStatus);
4265 break;
4266
4267 case VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
4268 REQ_CHECK_SIZES(VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES);
4269 pReqHdr->rc = vgdrvIoCtl_GuestCapsAcquire(pDevExt, pSession, (PVBGLIOCACQUIREGUESTCAPS)pReqHdr);
4270 break;
4271
4272 case VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES:
4273 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES);
4274 pReqHdr->rc = vgdrvIoCtl_SetCapabilities(pDevExt, pSession, (PVBGLIOCSETGUESTCAPS)pReqHdr);
4275 break;
4276
4277#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
4278 case VBGL_IOCTL_DPC_LATENCY_CHECKER:
4279 REQ_CHECK_SIZES(VBGL_IOCTL_DPC_LATENCY_CHECKER);
4280 pReqHdr->rc = VGDrvNtIOCtl_DpcLatencyChecker();
4281 break;
4282#endif
4283
4284 default:
4285 {
4286 LogRel(("VGDrvCommonIoCtl: Unknown request iFunction=%#x (stripped %#x) cbReq=%#x\n",
4287 iFunction, iFunctionStripped, cbReq));
4288 pReqHdr->rc = rc = VERR_NOT_SUPPORTED;
4289 break;
4290 }
4291 }
4292 }
4293 }
4294 else
4295 {
4296 Log(("VGDrvCommonIoCtl: uType=%#x, expected default (ioctl=%#x)\n", pReqHdr->uType, iFunction));
4297 return VERR_INVALID_PARAMETER;
4298 }
4299
4300 LogFlow(("VGDrvCommonIoCtl: returns %Rrc (req: rc=%Rrc cbOut=%#x)\n", rc, pReqHdr->rc, pReqHdr->cbOut));
4301 return rc;
4302}
4303
4304
4305/**
4306 * Used by VGDrvCommonISR as well as the acquire guest capability code.
4307 *
4308 * @returns VINF_SUCCESS on success. On failure, ORed together
4309 * RTSemEventMultiSignal errors (completes processing despite errors).
4310 * @param pDevExt The VBoxGuest device extension.
4311 * @param fEvents The events to dispatch.
4312 */
4313static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents)
4314{
4315 PVBOXGUESTWAIT pWait;
4316 PVBOXGUESTWAIT pSafe;
4317 int rc = VINF_SUCCESS;
4318
4319 fEvents |= pDevExt->f32PendingEvents;
4320
4321 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
4322 {
4323 uint32_t fHandledEvents = pWait->fReqEvents & fEvents;
4324 if ( fHandledEvents != 0
4325 && !pWait->fResEvents)
4326 {
4327 /* Does this one wait on any of the events we're dispatching? We do a quick
4328 check first, then deal with VBOXGUEST_ACQUIRE_STYLE_EVENTS as applicable. */
4329 if (fHandledEvents & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
4330 fHandledEvents &= vgdrvGetAllowedEventMaskForSession(pDevExt, pWait->pSession);
4331 if (fHandledEvents)
4332 {
4333 pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
4334 fEvents &= ~pWait->fResEvents;
4335 RTListNodeRemove(&pWait->ListNode);
4336#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
4337 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
4338#else
4339 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
4340 rc |= RTSemEventMultiSignal(pWait->Event);
4341#endif
4342 if (!fEvents)
4343 break;
4344 }
4345 }
4346 }
4347
4348 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
4349 return rc;
4350}
4351
4352
4353/**
4354 * Simply checks whether the IRQ is ours or not, does not do any interrupt
4355 * procesing.
4356 *
4357 * @returns true if it was our interrupt, false if it wasn't.
4358 * @param pDevExt The VBoxGuest device extension.
4359 */
4360bool VGDrvCommonIsOurIRQ(PVBOXGUESTDEVEXT pDevExt)
4361{
4362 VMMDevMemory volatile *pVMMDevMemory;
4363 bool fOurIrq;
4364
4365 RTSpinlockAcquire(pDevExt->EventSpinlock);
4366 pVMMDevMemory = pDevExt->pVMMDevMemory;
4367 fOurIrq = pVMMDevMemory ? pVMMDevMemory->V.V1_04.fHaveEvents : false;
4368 RTSpinlockRelease(pDevExt->EventSpinlock);
4369
4370 return fOurIrq;
4371}
4372
4373
4374/**
4375 * Common interrupt service routine.
4376 *
4377 * This deals with events and with waking up thread waiting for those events.
4378 *
4379 * @returns true if it was our interrupt, false if it wasn't.
4380 * @param pDevExt The VBoxGuest device extension.
4381 */
4382bool VGDrvCommonISR(PVBOXGUESTDEVEXT pDevExt)
4383{
4384 VMMDevEvents volatile *pReq;
4385 bool fMousePositionChanged = false;
4386 int rc = 0;
4387 VMMDevMemory volatile *pVMMDevMemory;
4388 bool fOurIrq;
4389
4390 /*
4391 * Make sure we've initialized the device extension.
4392 */
4393 if (RT_LIKELY(pDevExt->fHostFeatures & VMMDEV_HVF_FAST_IRQ_ACK))
4394 pReq = NULL;
4395 else if (RT_LIKELY((pReq = pDevExt->pIrqAckEvents) != NULL))
4396 { /* likely */ }
4397 else
4398 return false;
4399
4400 /*
4401 * Enter the spinlock and check if it's our IRQ or not.
4402 */
4403 RTSpinlockAcquire(pDevExt->EventSpinlock);
4404 pVMMDevMemory = pDevExt->pVMMDevMemory;
4405 fOurIrq = pVMMDevMemory ? pVMMDevMemory->V.V1_04.fHaveEvents : false;
4406 if (fOurIrq)
4407 {
4408 /*
4409 * Acknowledge events.
4410 * We don't use VbglR0GRPerform here as it may take another spinlocks.
4411 */
4412 uint32_t fEvents;
4413 if (!pReq)
4414 {
4415 fEvents = ASMInU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST_FAST);
4416 ASMCompilerBarrier(); /* paranoia */
4417 rc = fEvents != UINT32_MAX ? VINF_SUCCESS : VERR_INTERNAL_ERROR;
4418 }
4419 else
4420 {
4421 pReq->header.rc = VERR_INTERNAL_ERROR;
4422 pReq->events = 0;
4423 ASMCompilerBarrier();
4424 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
4425 ASMCompilerBarrier(); /* paranoia */
4426 fEvents = pReq->events;
4427 rc = pReq->header.rc;
4428 }
4429 if (RT_SUCCESS(rc))
4430 {
4431 Log3(("VGDrvCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
4432
4433 /*
4434 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
4435 */
4436 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
4437 {
4438 fMousePositionChanged = true;
4439 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
4440#if !defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT)
4441 if (pDevExt->pfnMouseNotifyCallback)
4442 pDevExt->pfnMouseNotifyCallback(pDevExt->pvMouseNotifyCallbackArg);
4443#endif
4444 }
4445
4446#ifdef VBOX_WITH_HGCM
4447 /*
4448 * The HGCM event/list is kind of different in that we evaluate all entries.
4449 */
4450 if (fEvents & VMMDEV_EVENT_HGCM)
4451 {
4452 PVBOXGUESTWAIT pWait;
4453 PVBOXGUESTWAIT pSafe;
4454 RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
4455 {
4456 if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
4457 {
4458 pWait->fResEvents = VMMDEV_EVENT_HGCM;
4459 RTListNodeRemove(&pWait->ListNode);
4460# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
4461 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
4462# else
4463 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
4464 rc |= RTSemEventMultiSignal(pWait->Event);
4465# endif
4466 }
4467 }
4468 fEvents &= ~VMMDEV_EVENT_HGCM;
4469 }
4470#endif
4471
4472 /*
4473 * Normal FIFO waiter evaluation.
4474 */
4475 rc |= vgdrvDispatchEventsLocked(pDevExt, fEvents);
4476 }
4477 else /* something is serious wrong... */
4478 Log(("VGDrvCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n", rc, fEvents));
4479 }
4480 else
4481 Log3(("VGDrvCommonISR: not ours\n"));
4482
4483 RTSpinlockRelease(pDevExt->EventSpinlock);
4484
4485 /*
4486 * Execute the mouse notification callback here if it cannot be executed while
4487 * holding the interrupt safe spinlock, see @bugref{8639}.
4488 */
4489#if defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT) && !defined(RT_OS_WINDOWS) /* (Windows does this in the Dpc callback) */
4490 if ( fMousePositionChanged
4491 && pDevExt->pfnMouseNotifyCallback)
4492 pDevExt->pfnMouseNotifyCallback(pDevExt->pvMouseNotifyCallbackArg);
4493#endif
4494
4495#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
4496 /*
4497 * Do wake-ups.
4498 * Note. On Windows this isn't possible at this IRQL, so a DPC will take
4499 * care of it. Same on darwin, doing it in the work loop callback.
4500 */
4501 VGDrvCommonWaitDoWakeUps(pDevExt);
4502#endif
4503
4504 /*
4505 * Work the poll and async notification queues on OSes that implements that.
4506 * (Do this outside the spinlock to prevent some recursive spinlocking.)
4507 */
4508 if (fMousePositionChanged)
4509 {
4510 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
4511 VGDrvNativeISRMousePollEvent(pDevExt);
4512 }
4513
4514 AssertMsg(rc == 0, ("rc=%#x (%d)\n", rc, rc));
4515 return fOurIrq;
4516}
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
Contact · Privacy policy · Terms of Use