VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 35263

Last change on this file since 35263 was 34406, checked in by vboxsync, 14 years ago

iprt/list.h: RTListNodeGetFirst/Last -> RTListGetFirst/Last; added RTListGetNext, RTListGetPrev, RTListNodeInsertAfter and RTListNodeInsertBefore.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 85.4 KB
Line 
1/* $Id: VBoxGuest.cpp 34406 2010-11-26 16:45:34Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include "VBoxGuest2.h"
25#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
26#include <VBox/log.h>
27#include <iprt/mem.h>
28#include <iprt/time.h>
29#include <iprt/memobj.h>
30#include <iprt/asm.h>
31#include <iprt/asm-amd64-x86.h>
32#include <iprt/string.h>
33#include <iprt/process.h>
34#include <iprt/assert.h>
35#include <iprt/param.h>
36#ifdef VBOX_WITH_HGCM
37# include <iprt/thread.h>
38#endif
39#include "version-generated.h"
40#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
41# include "revision-generated.h"
42#endif
43#ifdef RT_OS_WINDOWS
44# ifndef CTL_CODE
45# include <Windows.h>
46# endif
47#endif
48#if defined(RT_OS_SOLARIS)
49# include <iprt/rand.h>
50#endif
51
52
53/*******************************************************************************
54* Internal Functions *
55*******************************************************************************/
56#ifdef VBOX_WITH_HGCM
57static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
58#endif
59
60
61/*******************************************************************************
62* Global Variables *
63*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request up to and including the physical
 *  page array for one full balloon chunk (VMMDEV_MEMORY_BALLOON_CHUNK_PAGES
 *  entries); used as the request size for all balloon VMMDev calls. */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);

#if defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    NULL
};
#endif /* RT_OS_SOLARIS */
78
79
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so a small number of candidate placements are tried.
 *
 * @returns VBox status code (ignored).
 * @param   pDevExt     The device extension.
 */
static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Query the required space.
     */
    VMMDevReqHypervisorInfo *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
    if (RT_FAILURE(rc))
        return rc;
    pReq->hypervisorStart = 0;
    pReq->hypervisorSize  = 0;
    rc = VbglGRPerform(&pReq->header);
    if (RT_FAILURE(rc)) /* this shouldn't happen! */
    {
        VbglGRFree(&pReq->header);
        return rc;
    }

    /*
     * The VMM will report back if there is nothing it wants to map, like for
     * instance in VT-x and AMD-V mode.
     */
    if (pReq->hypervisorSize == 0)
        Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
    else
    {
        /*
         * We have to try several times since the host can be picky
         * about certain addresses.
         */
        RTR0MEMOBJ  hFictive     = NIL_RTR0MEMOBJ; /* fictive phys backing, shared by all attempts */
        uint32_t    cbHypervisor = pReq->hypervisorSize;
        RTR0MEMOBJ  ahTries[5];                    /* rejected candidates, freed after the loop */
        uint32_t    iTry;
        bool        fBitched = false;              /* set when a failure was already logged */
        Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
        for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
        {
            /*
             * Reserve space, or if that isn't supported, create a object for
             * some fictive physical memory and map that in to kernel space.
             *
             * To make the code a bit uglier, most systems cannot help with
             * 4MB alignment, so we have to deal with that in addition to
             * having two ways of getting the memory.
             */
            uint32_t    uAlignment = _4M;
            RTR0MEMOBJ  hObj;
            rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
            if (rc == VERR_NOT_SUPPORTED)
            {
                /* No 4MB alignment support: over-reserve by 4MB and align manually below. */
                uAlignment = PAGE_SIZE;
                rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
            }
            /*
             * If both RTR0MemObjReserveKernel calls above failed because either not supported or
             * not implemented at all at the current platform, try to map the memory object into the
             * virtual kernel space.
             */
            if (rc == VERR_NOT_SUPPORTED)
            {
                if (hFictive == NIL_RTR0MEMOBJ)
                {
                    /* Create the fictive physical backing object once; reused on later tries. */
                    rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
                    if (RT_FAILURE(rc))
                        break;
                    hFictive = hObj;
                }
                uAlignment = _4M;
                rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (rc == VERR_NOT_SUPPORTED)
                {
                    uAlignment = PAGE_SIZE;
                    rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                }
            }
            if (RT_FAILURE(rc))
            {
                LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
                        rc, cbHypervisor, uAlignment, iTry));
                fBitched = true;
                break;
            }

            /*
             * Try set it.
             */
            pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
            pReq->header.rc          = VERR_INTERNAL_ERROR;
            pReq->hypervisorSize     = cbHypervisor;
            pReq->hypervisorStart    = (uintptr_t)RTR0MemObjAddress(hObj);
            if (    uAlignment == PAGE_SIZE
                &&  pReq->hypervisorStart & (_4M - 1))
                /* Page-aligned reservation was over-sized above; round up to the 4MB boundary inside it. */
                pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
            AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));

            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
                Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
                     RTR0MemObjAddress(pDevExt->hGuestMappings),
                     RTR0MemObjSize(pDevExt->hGuestMappings),
                     uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
                break;
            }
            /* Host rejected this address; remember the candidate for cleanup and retry. */
            ahTries[iTry] = hObj;
        }

        /*
         * Cleanup failed attempts.
         */
        while (iTry-- > 0)
            RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
        if (    RT_FAILURE(rc)
            &&  hFictive != NIL_RTR0PTR)
            RTR0MemObjFree(hFictive, false /* fFreeMappings */);
        if (RT_FAILURE(rc) && !fBitched)
            LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
    }
    VbglGRFree(&pReq->header);

    /*
     * We ignore failed attempts for now.
     */
    return VINF_SUCCESS;
}
218
219
220/**
221 * Undo what vboxGuestInitFixateGuestMappings did.
222 *
223 * @param pDevExt The device extension.
224 */
225static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
226{
227 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
228 {
229 /*
230 * Tell the host that we're going to free the memory we reserved for
231 * it, the free it up. (Leak the memory if anything goes wrong here.)
232 */
233 VMMDevReqHypervisorInfo *pReq;
234 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
235 if (RT_SUCCESS(rc))
236 {
237 pReq->hypervisorStart = 0;
238 pReq->hypervisorSize = 0;
239 rc = VbglGRPerform(&pReq->header);
240 VbglGRFree(&pReq->header);
241 }
242 if (RT_SUCCESS(rc))
243 {
244 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
245 AssertRC(rc);
246 }
247 else
248 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
249
250 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
251 }
252}
253
254
255/**
256 * Sets the interrupt filter mask during initialization and termination.
257 *
258 * This will ASSUME that we're the ones in carge over the mask, so
259 * we'll simply clear all bits we don't set.
260 *
261 * @returns VBox status code (ignored).
262 * @param pDevExt The device extension.
263 * @param fMask The new mask.
264 */
265static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
266{
267 VMMDevCtlGuestFilterMask *pReq;
268 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
269 if (RT_SUCCESS(rc))
270 {
271 pReq->u32OrMask = fMask;
272 pReq->u32NotMask = ~fMask;
273 rc = VbglGRPerform(&pReq->header);
274 if (RT_FAILURE(rc))
275 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
276 VbglGRFree(&pReq->header);
277 }
278 return rc;
279}
280
281
282/**
283 * Inflate the balloon by one chunk represented by an R0 memory object.
284 *
285 * The caller owns the balloon mutex.
286 *
287 * @returns IPRT status code.
288 * @param pMemObj Pointer to the R0 memory object.
289 * @param pReq The pre-allocated request for performing the VMMDev call.
290 */
291static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
292{
293 uint32_t iPage;
294 int rc;
295
296 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
297 {
298 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
299 pReq->aPhysPage[iPage] = phys;
300 }
301
302 pReq->fInflate = true;
303 pReq->header.size = cbChangeMemBalloonReq;
304 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
305
306 rc = VbglGRPerform(&pReq->header);
307 if (RT_FAILURE(rc))
308 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
309 return rc;
310}
311
312
313/**
314 * Deflate the balloon by one chunk - info the host and free the memory object.
315 *
316 * The caller owns the balloon mutex.
317 *
318 * @returns IPRT status code.
319 * @param pMemObj Pointer to the R0 memory object.
320 * The memory object will be freed afterwards.
321 * @param pReq The pre-allocated request for performing the VMMDev call.
322 */
323static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
324{
325 uint32_t iPage;
326 int rc;
327
328 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
329 {
330 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
331 pReq->aPhysPage[iPage] = phys;
332 }
333
334 pReq->fInflate = false;
335 pReq->header.size = cbChangeMemBalloonReq;
336 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
337
338 rc = VbglGRPerform(&pReq->header);
339 if (RT_FAILURE(rc))
340 {
341 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
342 return rc;
343 }
344
345 rc = RTR0MemObjFree(*pMemObj, true);
346 if (RT_FAILURE(rc))
347 {
348 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
349 return rc;
350 }
351
352 *pMemObj = NIL_RTR0MEMOBJ;
353 return VINF_SUCCESS;
354}
355
356
357/**
358 * Inflate/deflate the memory balloon and notify the host.
359 *
360 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
361 * the mutex.
362 *
363 * @returns VBox status code.
364 * @param pDevExt The device extension.
365 * @param pSession The session.
366 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
367 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
368 * (VINF_SUCCESS if set).
369 */
370static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
371{
372 int rc = VINF_SUCCESS;
373
374 if (pDevExt->MemBalloon.fUseKernelAPI)
375 {
376 VMMDevChangeMemBalloon *pReq;
377 uint32_t i;
378
379 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
380 {
381 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
382 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
383 return VERR_INVALID_PARAMETER;
384 }
385
386 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
387 return VINF_SUCCESS; /* nothing to do */
388
389 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
390 && !pDevExt->MemBalloon.paMemObj)
391 {
392 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
393 if (!pDevExt->MemBalloon.paMemObj)
394 {
395 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
396 return VERR_NO_MEMORY;
397 }
398 }
399
400 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
401 if (RT_FAILURE(rc))
402 return rc;
403
404 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
405 {
406 /* inflate */
407 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
408 {
409 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
410 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
411 if (RT_FAILURE(rc))
412 {
413 if (rc == VERR_NOT_SUPPORTED)
414 {
415 /* not supported -- fall back to the R3-allocated memory. */
416 rc = VINF_SUCCESS;
417 pDevExt->MemBalloon.fUseKernelAPI = false;
418 Assert(pDevExt->MemBalloon.cChunks == 0);
419 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
420 }
421 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
422 * cannot allocate more memory => don't try further, just stop here */
423 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
424 break;
425 }
426
427 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
428 if (RT_FAILURE(rc))
429 {
430 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
431 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
432 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
433 break;
434 }
435 pDevExt->MemBalloon.cChunks++;
436 }
437 }
438 else
439 {
440 /* deflate */
441 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
442 {
443 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
444 if (RT_FAILURE(rc))
445 {
446 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
447 break;
448 }
449 pDevExt->MemBalloon.cChunks--;
450 }
451 }
452
453 VbglGRFree(&pReq->header);
454 }
455
456 /*
457 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
458 * the balloon changes via the other API.
459 */
460 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
461
462 return rc;
463}
464
465
466/**
467 * Helper to reinit the VBoxVMM communication after hibernation.
468 *
469 * @returns VBox status code.
470 * @param pDevExt The device extension.
471 * @param enmOSType The OS type.
472 */
473int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
474{
475 int rc = VBoxGuestReportGuestInfo(enmOSType);
476 if (RT_SUCCESS(rc))
477 {
478 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
479 if (RT_FAILURE(rc))
480 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
481 }
482 else
483 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
484 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
485 return rc;
486}
487
488
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   u64ChunkAddr    The address of the chunk to add to / remove from the
 *                          balloon (ring-3 address of a 1MB chunk).
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        if (    pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            ||  pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        if (!pDevExt->MemBalloon.paMemObj)
        {
            /* First inflate: allocate the chunk tracking array and mark every slot free. */
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * While at it, pick up the first free slot for an inflate.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (    fInflate
            &&  !pMemObj
            &&  pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the ring-3 chunk into memory before handing its pages to the host. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
605
606
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session (or driver unload) may tear the balloon down. */
    if (    pDevExt->MemBalloon.pOwner == pSession
        ||  pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                uint32_t i;
                /* Deflate chunk by chunk, newest first. */
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
654
655
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents: HGCM events are always handled by this driver.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        /* Only the event spinlock can exist here; the session one failed (or never ran). */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the IRQ acknowledgement request used by the interrupt handler. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);

                        rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                        if (RT_FAILURE(rc))
                            LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Failure path: undo the locks created above. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
821
822
823/**
824 * Deletes all the items in a wait chain.
825 * @param pList The head of the chain.
826 */
827static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
828{
829 while (!RTListIsEmpty(pList))
830 {
831 int rc2;
832 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
833 RTListNodeRemove(&pWait->ListNode);
834
835 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
836 pWait->Event = NIL_RTSEMEVENTMULTI;
837 pWait->pSession = NULL;
838 RTMemFree(pWait);
839 }
840}
841
842
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
889
890
891/**
892 * Creates a VBoxGuest user session.
893 *
894 * The native code calls this when a ring-3 client opens the device.
895 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
896 *
897 * @returns VBox status code.
898 * @param pDevExt The device extension.
899 * @param ppSession Where to store the session on success.
900 */
901int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
902{
903 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
904 if (RT_UNLIKELY(!pSession))
905 {
906 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
907 return VERR_NO_MEMORY;
908 }
909
910 pSession->Process = RTProcSelf();
911 pSession->R0Process = RTR0ProcHandleSelf();
912 pSession->pDevExt = pDevExt;
913
914 *ppSession = pSession;
915 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
916 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
917 return VINF_SUCCESS;
918}
919
920
921/**
922 * Creates a VBoxGuest kernel session.
923 *
924 * The native code calls this when a ring-0 client connects to the device.
925 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
926 *
927 * @returns VBox status code.
928 * @param pDevExt The device extension.
929 * @param ppSession Where to store the session on success.
930 */
931int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
932{
933 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
934 if (RT_UNLIKELY(!pSession))
935 {
936 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
937 return VERR_NO_MEMORY;
938 }
939
940 pSession->Process = NIL_RTPROCESS;
941 pSession->R0Process = NIL_RTR0PROCESS;
942 pSession->pDevExt = pDevExt;
943
944 *ppSession = pSession;
945 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
946 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
947 return VINF_SUCCESS;
948}
949
950
951
/**
 * Closes a VBoxGuest session.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i);
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_WITH_HGCM
    /* Disconnect any HGCM clients this session still has open so the host
       does not keep stale client ids around. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    /* Poison the session, release any balloon it owns, then free it. */
    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    vboxGuestCloseMemBalloon(pDevExt, pSession);
    RTMemFree(pSession);
}
983
984
985/**
986 * Allocates a wait-for-event entry.
987 *
988 * @returns The wait-for-event entry.
989 * @param pDevExt The device extension.
990 * @param pSession The session that's allocating this. Can be NULL.
991 */
992static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
993{
994 /*
995 * Allocate it one way or the other.
996 */
997 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
998 if (pWait)
999 {
1000 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1001 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1002
1003 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1004 if (pWait)
1005 RTListNodeRemove(&pWait->ListNode);
1006
1007 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1008 }
1009 if (!pWait)
1010 {
1011 static unsigned s_cErrors = 0;
1012 int rc;
1013
1014 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1015 if (!pWait)
1016 {
1017 if (s_cErrors++ < 32)
1018 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1019 return NULL;
1020 }
1021
1022 rc = RTSemEventMultiCreate(&pWait->Event);
1023 if (RT_FAILURE(rc))
1024 {
1025 if (s_cErrors++ < 32)
1026 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1027 RTMemFree(pWait);
1028 return NULL;
1029 }
1030
1031 pWait->ListNode.pNext = NULL;
1032 pWait->ListNode.pPrev = NULL;
1033 }
1034
1035 /*
1036 * Zero members just as an precaution.
1037 */
1038 pWait->fReqEvents = 0;
1039 pWait->fResEvents = 0;
1040#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1041 pWait->fPendingWakeUp = false;
1042 pWait->fFreeMe = false;
1043#endif
1044 pWait->pSession = pSession;
1045#ifdef VBOX_WITH_HGCM
1046 pWait->pHGCMReq = NULL;
1047#endif
1048 RTSemEventMultiReset(pWait->Event);
1049 return pWait;
1050}
1051
1052
1053/**
1054 * Frees the wait-for-event entry.
1055 *
1056 * The caller must own the wait spinlock !
1057 * The entry must be in a list!
1058 *
1059 * @param pDevExt The device extension.
1060 * @param pWait The wait-for-event entry to free.
1061 */
1062static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1063{
1064 pWait->fReqEvents = 0;
1065 pWait->fResEvents = 0;
1066#ifdef VBOX_WITH_HGCM
1067 pWait->pHGCMReq = NULL;
1068#endif
1069#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1070 Assert(!pWait->fFreeMe);
1071 if (pWait->fPendingWakeUp)
1072 pWait->fFreeMe = true;
1073 else
1074#endif
1075 {
1076 RTListNodeRemove(&pWait->ListNode);
1077 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1078 }
1079}
1080
1081
1082/**
1083 * Frees the wait-for-event entry.
1084 *
1085 * @param pDevExt The device extension.
1086 * @param pWait The wait-for-event entry to free.
1087 */
1088static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1089{
1090 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1091 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1092 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1093 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1094}
1095
1096
1097#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1098/**
1099 * Processes the wake-up list.
1100 *
1101 * All entries in the wake-up list gets signalled and moved to the woken-up
1102 * list.
1103 *
1104 * @param pDevExt The device extension.
1105 */
1106void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1107{
1108 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1109 {
1110 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1111 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1112 for (;;)
1113 {
1114 int rc;
1115 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1116 if (!pWait)
1117 break;
1118 pWait->fPendingWakeUp = true;
1119 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1120
1121 rc = RTSemEventMultiSignal(pWait->Event);
1122 AssertRC(rc);
1123
1124 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1125 pWait->fPendingWakeUp = false;
1126 if (!pWait->fFreeMe)
1127 {
1128 RTListNodeRemove(&pWait->ListNode);
1129 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1130 }
1131 else
1132 {
1133 pWait->fFreeMe = false;
1134 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1135 }
1136 }
1137 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1138 }
1139}
1140#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1141
1142
1143/**
1144 * Modifies the guest capabilities.
1145 *
1146 * Should be called during driver init and termination.
1147 *
1148 * @returns VBox status code.
1149 * @param fOr The Or mask (what to enable).
1150 * @param fNot The Not mask (what to disable).
1151 */
1152int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1153{
1154 VMMDevReqGuestCapabilities2 *pReq;
1155 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1156 if (RT_FAILURE(rc))
1157 {
1158 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1159 sizeof(*pReq), sizeof(*pReq), rc));
1160 return rc;
1161 }
1162
1163 pReq->u32OrMask = fOr;
1164 pReq->u32NotMask = fNot;
1165
1166 rc = VbglGRPerform(&pReq->header);
1167 if (RT_FAILURE(rc))
1168 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1169
1170 VbglGRFree(&pReq->header);
1171 return rc;
1172}
1173
1174
1175/**
1176 * Implements the fast (no input or output) type of IOCtls.
1177 *
1178 * This is currently just a placeholder stub inherited from the support driver code.
1179 *
1180 * @returns VBox status code.
1181 * @param iFunction The IOCtl function number.
1182 * @param pDevExt The device extension.
1183 * @param pSession The session.
1184 */
1185int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1186{
1187 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1188
1189 NOREF(iFunction);
1190 NOREF(pDevExt);
1191 NOREF(pSession);
1192 return VERR_NOT_SUPPORTED;
1193}
1194
1195
1196/**
1197 * Return the VMM device port.
1198 *
1199 * returns IPRT status code.
1200 * @param pDevExt The device extension.
1201 * @param pInfo The request info.
1202 * @param pcbDataReturned (out) contains the number of bytes to return.
1203 */
1204static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1205{
1206 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1207 pInfo->portAddress = pDevExt->IOPortBase;
1208 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1209 if (pcbDataReturned)
1210 *pcbDataReturned = sizeof(*pInfo);
1211 return VINF_SUCCESS;
1212}
1213
1214
1215/**
1216 * Worker VBoxGuestCommonIOCtl_WaitEvent.
1217 *
1218 * The caller enters the spinlock, we leave it.
1219 *
1220 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1221 */
1222DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
1223 int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
1224{
1225 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1226 if (fMatches)
1227 {
1228 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1229 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);
1230
1231 pInfo->u32EventFlagsOut = fMatches;
1232 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1233 if (fReqEvents & ~((uint32_t)1 << iEvent))
1234 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1235 else
1236 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1237 return VINF_SUCCESS;
1238 }
1239 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);
1240 return VERR_TIMEOUT;
1241}
1242
1243
/**
 * Handles VBOXGUEST_IOCTL_WAITEVENT: wait for one or more guest events.
 *
 * Checks the pending event mask first, and if nothing is pending allocates a
 * wait entry, links it into the wait list and sleeps on its semaphore until
 * the ISR (or a cancel request) signals it or the timeout expires.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The wait request (in/out).
 * @param   pcbDataReturned     Where to store the returned size. Optional.
 * @param   fInterruptible      Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    RTSPINLOCKTMP           Tmp = RTSPINLOCKTMP_INITIALIZER;
    const uint32_t          fReqEvents = pInfo->u32EventMaskIn;
    uint32_t                fResEvents;
    int                     iEvent;
    PVBOXGUESTWAIT          pWait;
    int                     rc;

    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;  /* -1 when the mask is zero */
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    /* WaitEventCheckCondition releases the spinlock in all cases. */
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Zero timeout means poll-only: report timeout without sleeping. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload.  This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    fResEvents = pWait->fResEvents;   /* snapshot before freeing the entry */
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     */
    /* fResEvents == UINT32_MAX is the cancel marker set by
       VBoxGuestCommonIOCtl_CancelAllWaitEvents. */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    }
    else
    {
        /* A successful wait with no events is an internal inconsistency. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1367
1368
/**
 * Handles VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS.
 *
 * Wakes up all waiters belonging to the given session, marking each wait
 * entry with the UINT32_MAX cancel value so the waiter reports
 * VERR_INTERRUPTED.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waits should be cancelled.
 */
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSPINLOCKTMP           Tmp   = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT          pWait;
    PVBOXGUESTWAIT          pSafe;
    int                     rc = 0;

    Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            /* UINT32_MAX = cancelled; recognized by the waiter. */
            pWait->fResEvents = UINT32_MAX;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            /* Signalling is deferred to VBoxGuestWaitDoWakeUps below. */
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    Assert(rc == 0);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1405
1406
/**
 * Handles VBOXGUEST_IOCTL_VMMREQUEST: pass a generic VMMDev request through
 * to the host.
 *
 * Validates the request header, copies the request into the physical heap,
 * performs it, and copies the result back into the caller's buffer.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pReqHdr             The request (in/out).
 * @param   cbData              Size of the caller's buffer.
 * @param   pcbDataReturned     Where to store the returned size. Optional.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int                     rc;
    VMMDevRequestHeader     *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);

    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly host-modified) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the host rejected the request. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1483
1484
1485static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1486{
1487 VMMDevCtlGuestFilterMask *pReq;
1488 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1489 if (RT_FAILURE(rc))
1490 {
1491 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1492 sizeof(*pReq), sizeof(*pReq), rc));
1493 return rc;
1494 }
1495
1496 pReq->u32OrMask = pInfo->u32OrMask;
1497 pReq->u32NotMask = pInfo->u32NotMask;
1498 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1499 rc = VbglGRPerform(&pReq->header);
1500 if (RT_FAILURE(rc))
1501 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1502
1503 VbglGRFree(&pReq->header);
1504 return rc;
1505}
1506
1507#ifdef VBOX_WITH_HGCM
1508
1509AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1510
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback*.
 *
 * Waits for the VBOX_HGCM_REQ_DONE flag to appear on the request header,
 * using a wait entry on the HGCM wait list that the ISR signals.
 *
 * @returns VBox status code.
 * @param   pHdr            The HGCM request header to wait on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        Wait timeout in milliseconds.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        /* Out of memory; back off briefly and retry rather than failing. */
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* Driver is unloading; the wait entry is gone, bail out immediately. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (    !fInterruptible
             ||  rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
1580
1581
1582/**
1583 * This is a callback for dealing with async waits.
1584 *
1585 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1586 */
1587static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1588{
1589 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1590 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1591 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1592 pDevExt,
1593 false /* fInterruptible */,
1594 u32User /* cMillies */);
1595}
1596
1597
1598/**
1599 * This is a callback for dealing with async waits with a timeout.
1600 *
1601 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1602 */
1603static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1604 void *pvUser, uint32_t u32User)
1605{
1606 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1607 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1608 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1609 pDevExt,
1610 true /* fInterruptible */,
1611 u32User /* cMillies */ );
1612
1613}
1614
1615
/**
 * Handles VBOXGUEST_IOCTL_HGCM_CONNECT.
 *
 * Connects to an HGCM service and records the client id in the session's
 * client id table so it can be disconnected when the session closes.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The connect request (in/out).
 * @param   pcbDataReturned     Where to store the returned size. Optional.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
{
    int rc;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: undo the connect and fail. */
                static unsigned s_cErrors = 0;
                VBoxGuestHGCMDisconnectInfo Info;

                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1670
1671
1672static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1673 size_t *pcbDataReturned)
1674{
1675 /*
1676 * Validate the client id and invalidate its entry while we're in the call.
1677 */
1678 int rc;
1679 const uint32_t u32ClientId = pInfo->u32ClientID;
1680 unsigned i;
1681 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1682 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1683 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1684 if (pSession->aHGCMClientIds[i] == u32ClientId)
1685 {
1686 pSession->aHGCMClientIds[i] = UINT32_MAX;
1687 break;
1688 }
1689 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1690 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1691 {
1692 static unsigned s_cErrors = 0;
1693 if (s_cErrors++ > 32)
1694 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1695 return VERR_INVALID_HANDLE;
1696 }
1697
1698 /*
1699 * The VbglHGCMConnect call will invoke the callback if the HGCM
1700 * call is performed in an ASYNC fashion. The function is not able
1701 * to deal with cancelled requests.
1702 */
1703 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1704 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1705 if (RT_SUCCESS(rc))
1706 {
1707 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1708 if (pcbDataReturned)
1709 *pcbDataReturned = sizeof(*pInfo);
1710 }
1711
1712 /* Update the client id array according to the result. */
1713 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1714 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1715 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1716 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1717
1718 return rc;
1719}
1720
1721
1722static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1723 PVBOXGUESTSESSION pSession,
1724 VBoxGuestHGCMCallInfo *pInfo,
1725 uint32_t cMillies, bool fInterruptible, bool f32bit,
1726 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1727{
1728 const uint32_t u32ClientId = pInfo->u32ClientID;
1729 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1730 uint32_t fFlags;
1731 size_t cbActual;
1732 unsigned i;
1733 int rc;
1734
1735 /*
1736 * Some more validations.
1737 */
1738 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1739 {
1740 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1741 return VERR_INVALID_PARAMETER;
1742 }
1743
1744 cbActual = cbExtra + sizeof(*pInfo);
1745#ifdef RT_ARCH_AMD64
1746 if (f32bit)
1747 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1748 else
1749#endif
1750 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1751 if (cbData < cbActual)
1752 {
1753 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1754 cbData, cbActual));
1755 return VERR_INVALID_PARAMETER;
1756 }
1757
1758 /*
1759 * Validate the client id.
1760 */
1761 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1762 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1763 if (pSession->aHGCMClientIds[i] == u32ClientId)
1764 break;
1765 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1766 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1767 {
1768 static unsigned s_cErrors = 0;
1769 if (s_cErrors++ > 32)
1770 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1771 return VERR_INVALID_HANDLE;
1772 }
1773
1774 /*
1775 * The VbglHGCMCall call will invoke the callback if the HGCM
1776 * call is performed in an ASYNC fashion. This function can
1777 * deal with cancelled requests, so we let user more requests
1778 * be interruptible (should add a flag for this later I guess).
1779 */
1780 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1781 fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1782#ifdef RT_ARCH_AMD64
1783 if (f32bit)
1784 {
1785 if (fInterruptible)
1786 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1787 else
1788 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1789 }
1790 else
1791#endif
1792 {
1793 if (fInterruptible)
1794 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1795 else
1796 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1797 }
1798 if (RT_SUCCESS(rc))
1799 {
1800 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1801 if (pcbDataReturned)
1802 *pcbDataReturned = cbActual;
1803 }
1804 else
1805 {
1806 if ( rc != VERR_INTERRUPTED
1807 && rc != VERR_TIMEOUT)
1808 {
1809 static unsigned s_cErrors = 0;
1810 if (s_cErrors++ < 32)
1811 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1812 }
1813 else
1814 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1815 }
1816 return rc;
1817}
1818
1819
1820/**
1821 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1822 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1823 *
1824 * @param pDevExt The device extension.
1825 * @param pu32ClientId The client id.
1826 * @param pcbDataReturned Where to store the amount of returned data. Can
1827 * be NULL.
1828 */
1829static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1830{
1831 int rc;
1832 VBoxGuestHGCMConnectInfo CnInfo;
1833
1834 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1835
1836 /*
1837 * If there is an old client, try disconnect it first.
1838 */
1839 if (pDevExt->u32ClipboardClientId != 0)
1840 {
1841 VBoxGuestHGCMDisconnectInfo DiInfo;
1842 DiInfo.result = VERR_WRONG_ORDER;
1843 DiInfo.u32ClientID = pDevExt->u32ClipboardClientId;
1844 rc = VbglR0HGCMInternalDisconnect(&DiInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1845 if (RT_SUCCESS(rc))
1846 {
1847 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1848 return rc;
1849 }
1850 if (RT_FAILURE((int32_t)DiInfo.result))
1851 {
1852 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. DiInfo.result=%Rrc\n", DiInfo.result));
1853 return DiInfo.result;
1854 }
1855 pDevExt->u32ClipboardClientId = 0;
1856 }
1857
1858 /*
1859 * Try connect.
1860 */
1861 CnInfo.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1862 strcpy(CnInfo.Loc.u.host.achName, "VBoxSharedClipboard");
1863 CnInfo.u32ClientID = 0;
1864 CnInfo.result = VERR_WRONG_ORDER;
1865
1866 rc = VbglR0HGCMInternalConnect(&CnInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1867 if (RT_FAILURE(rc))
1868 {
1869 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1870 return rc;
1871 }
1872 if (RT_FAILURE(CnInfo.result))
1873 {
1874 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1875 return rc;
1876 }
1877
1878 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", CnInfo.u32ClientID));
1879
1880 pDevExt->u32ClipboardClientId = CnInfo.u32ClientID;
1881 *pu32ClientId = CnInfo.u32ClientID;
1882 if (pcbDataReturned)
1883 *pcbDataReturned = sizeof(uint32_t);
1884
1885 return VINF_SUCCESS;
1886}
1887
1888#endif /* VBOX_WITH_HGCM */
1889
1890/**
1891 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
1892 *
1893 * Ask the host for the size of the balloon and try to set it accordingly. If
1894 * this approach fails because it's not supported, return with fHandleInR3 set
1895 * and let the user land supply memory we can lock via the other ioctl.
1896 *
1897 * @returns VBox status code.
1898 *
1899 * @param pDevExt The device extension.
1900 * @param pSession The session.
1901 * @param pInfo The output buffer.
1902 * @param pcbDataReturned Where to store the amount of returned data. Can
1903 * be NULL.
1904 */
1905static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1906 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
1907{
1908 VMMDevGetMemBalloonChangeRequest *pReq;
1909 int rc;
1910
1911 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
1912 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1913 AssertRCReturn(rc, rc);
1914
1915 /*
1916 * The first user trying to query/change the balloon becomes the
1917 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1918 */
1919 if ( pDevExt->MemBalloon.pOwner != pSession
1920 && pDevExt->MemBalloon.pOwner == NULL)
1921 pDevExt->MemBalloon.pOwner = pSession;
1922
1923 if (pDevExt->MemBalloon.pOwner == pSession)
1924 {
1925 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
1926 if (RT_SUCCESS(rc))
1927 {
1928 /*
1929 * This is a response to that event. Setting this bit means that
1930 * we request the value from the host and change the guest memory
1931 * balloon according to this value.
1932 */
1933 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1934 rc = VbglGRPerform(&pReq->header);
1935 if (RT_SUCCESS(rc))
1936 {
1937 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
1938 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
1939
1940 pInfo->cBalloonChunks = pReq->cBalloonChunks;
1941 pInfo->fHandleInR3 = false;
1942
1943 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
1944 /* Ignore various out of memory failures. */
1945 if ( rc == VERR_NO_MEMORY
1946 || rc == VERR_NO_PHYS_MEMORY
1947 || rc == VERR_NO_CONT_MEMORY)
1948 rc = VINF_SUCCESS;
1949
1950 if (pcbDataReturned)
1951 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
1952 }
1953 else
1954 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
1955 VbglGRFree(&pReq->header);
1956 }
1957 }
1958 else
1959 rc = VERR_PERMISSION_DENIED;
1960
1961 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1962 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
1963 return rc;
1964}
1965
1966
1967/**
1968 * Handle a request for changing the memory balloon.
1969 *
1970 * @returns VBox status code.
1971 *
1972 * @param pDevExt The device extention.
1973 * @param pSession The session.
1974 * @param pInfo The change request structure (input).
1975 * @param pcbDataReturned Where to store the amount of returned data. Can
1976 * be NULL.
1977 */
1978static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1979 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1980{
1981 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1982 AssertRCReturn(rc, rc);
1983
1984 if (!pDevExt->MemBalloon.fUseKernelAPI)
1985 {
1986 /*
1987 * The first user trying to query/change the balloon becomes the
1988 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1989 */
1990 if ( pDevExt->MemBalloon.pOwner != pSession
1991 && pDevExt->MemBalloon.pOwner == NULL)
1992 pDevExt->MemBalloon.pOwner = pSession;
1993
1994 if (pDevExt->MemBalloon.pOwner == pSession)
1995 {
1996 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
1997 if (pcbDataReturned)
1998 *pcbDataReturned = 0;
1999 }
2000 else
2001 rc = VERR_PERMISSION_DENIED;
2002 }
2003 else
2004 rc = VERR_PERMISSION_DENIED;
2005
2006 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2007 return rc;
2008}
2009
2010
2011/**
2012 * Handle a request for writing a core dump of the guest on the host.
2013 *
2014 * @returns VBox status code.
2015 *
2016 * @param pDevExt The device extension.
2017 * @param pInfo The output buffer.
2018 */
2019static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2020{
2021 VMMDevReqWriteCoreDump *pReq = NULL;
2022 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2023 if (RT_FAILURE(rc))
2024 {
2025 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2026 sizeof(*pReq), sizeof(*pReq), rc));
2027 return rc;
2028 }
2029
2030 pReq->fFlags = pInfo->fFlags;
2031 rc = VbglGRPerform(&pReq->header);
2032 if (RT_FAILURE(rc))
2033 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2034
2035 VbglGRFree(&pReq->header);
2036 return rc;
2037}
2038
2039
2040#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2041/**
2042 * Enables the VRDP session and saves its session ID.
2043 *
2044 * @returns VBox status code.
2045 *
2046 * @param pDevExt The device extention.
2047 * @param pSession The session.
2048 */
2049static int VBoxGuestCommonIOCtl_EnableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2050{
2051 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2052 return VERR_NOT_IMPLEMENTED;
2053}
2054
2055
2056/**
2057 * Disables the VRDP session.
2058 *
2059 * @returns VBox status code.
2060 *
2061 * @param pDevExt The device extention.
2062 * @param pSession The session.
2063 */
2064static int VBoxGuestCommonIOCtl_DisableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2065{
2066 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2067 return VERR_NOT_IMPLEMENTED;
2068}
2069#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2070
2071
2072/**
2073 * Guest backdoor logging.
2074 *
2075 * @returns VBox status code.
2076 *
2077 * @param pch The log message (need not be NULL terminated).
2078 * @param cbData Size of the buffer.
2079 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2080 */
2081static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
2082{
2083 NOREF(pch);
2084 NOREF(cbData);
2085 Log(("%.*s", cbData, pch));
2086 if (pcbDataReturned)
2087 *pcbDataReturned = 0;
2088 return VINF_SUCCESS;
2089}
2090
2091
2092/**
2093 * Common IOCtl for user to kernel and kernel to kernel communication.
2094 *
2095 * This function only does the basic validation and then invokes
2096 * worker functions that takes care of each specific function.
2097 *
2098 * @returns VBox status code.
2099 *
2100 * @param iFunction The requested function.
2101 * @param pDevExt The device extension.
2102 * @param pSession The client session.
2103 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2104 * @param cbData The max size of the data buffer.
2105 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2106 */
2107int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2108 void *pvData, size_t cbData, size_t *pcbDataReturned)
2109{
2110 int rc;
2111 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2112 iFunction, pDevExt, pSession, pvData, cbData));
2113
2114 /*
2115 * Make sure the returned data size is set to zero.
2116 */
2117 if (pcbDataReturned)
2118 *pcbDataReturned = 0;
2119
2120 /*
2121 * Define some helper macros to simplify validation.
2122 */
2123#define CHECKRET_RING0(mnemonic) \
2124 do { \
2125 if (pSession->R0Process != NIL_RTR0PROCESS) \
2126 { \
2127 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2128 pSession->Process, (uintptr_t)pSession->R0Process)); \
2129 return VERR_PERMISSION_DENIED; \
2130 } \
2131 } while (0)
2132#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2133 do { \
2134 if (cbData < (cbMin)) \
2135 { \
2136 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2137 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2138 return VERR_BUFFER_OVERFLOW; \
2139 } \
2140 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2141 { \
2142 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
2143 return VERR_INVALID_POINTER; \
2144 } \
2145 } while (0)
2146
2147
2148 /*
2149 * Deal with variably sized requests first.
2150 */
2151 rc = VINF_SUCCESS;
2152 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2153 {
2154 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2155 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2156 }
2157#ifdef VBOX_WITH_HGCM
2158 /*
2159 * These ones are a bit tricky.
2160 */
2161 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2162 {
2163 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2164 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2165 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2166 fInterruptible, false /*f32bit*/,
2167 0, cbData, pcbDataReturned);
2168 }
2169 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2170 {
2171 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2172 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2173 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2174 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2175 false /*f32bit*/,
2176 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2177 }
2178# ifdef RT_ARCH_AMD64
2179 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2180 {
2181 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2182 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2183 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2184 fInterruptible, true /*f32bit*/,
2185 0, cbData, pcbDataReturned);
2186 }
2187 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2188 {
2189 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2190 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2191 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2192 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2193 true /*f32bit*/,
2194 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2195 }
2196# endif
2197#endif /* VBOX_WITH_HGCM */
2198 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2199 {
2200 CHECKRET_MIN_SIZE("LOG", 1);
2201 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2202 }
2203 else
2204 {
2205 switch (iFunction)
2206 {
2207 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2208 CHECKRET_RING0("GETVMMDEVPORT");
2209 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2210 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2211 break;
2212
2213 case VBOXGUEST_IOCTL_WAITEVENT:
2214 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2215 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2216 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2217 break;
2218
2219 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2220 if (cbData != 0)
2221 rc = VERR_INVALID_PARAMETER;
2222 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2223 break;
2224
2225 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2226 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2227 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2228 break;
2229
2230#ifdef VBOX_WITH_HGCM
2231 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2232# ifdef RT_ARCH_AMD64
2233 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2234# endif
2235 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2236 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2237 break;
2238
2239 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2240# ifdef RT_ARCH_AMD64
2241 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2242# endif
2243 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2244 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2245 break;
2246
2247 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
2248 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
2249 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
2250 break;
2251#endif /* VBOX_WITH_HGCM */
2252
2253 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2254 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2255 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2256 break;
2257
2258 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2259 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2260 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2261 break;
2262
2263 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2264 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2265 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2266 break;
2267
2268#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2269 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2270 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2271 break;
2272
2273 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2274 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2275 break;
2276#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2277
2278 default:
2279 {
2280 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2281 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2282 rc = VERR_NOT_SUPPORTED;
2283 break;
2284 }
2285 }
2286 }
2287
2288 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2289 return rc;
2290}
2291
2292
2293
2294/**
2295 * Common interrupt service routine.
2296 *
2297 * This deals with events and with waking up thread waiting for those events.
2298 *
2299 * @returns true if it was our interrupt, false if it wasn't.
2300 * @param pDevExt The VBoxGuest device extension.
2301 */
2302bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
2303{
2304 bool fMousePositionChanged = false;
2305 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2306 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
2307 int rc = 0;
2308 bool fOurIrq;
2309
2310 /*
2311 * Make sure we've initialized the device extension.
2312 */
2313 if (RT_UNLIKELY(!pReq))
2314 return false;
2315
2316 /*
2317 * Enter the spinlock and check if it's our IRQ or not.
2318 */
2319 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
2320 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
2321 if (fOurIrq)
2322 {
2323 /*
2324 * Acknowlegde events.
2325 * We don't use VbglGRPerform here as it may take another spinlocks.
2326 */
2327 pReq->header.rc = VERR_INTERNAL_ERROR;
2328 pReq->events = 0;
2329 ASMCompilerBarrier();
2330 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
2331 ASMCompilerBarrier(); /* paranoia */
2332 if (RT_SUCCESS(pReq->header.rc))
2333 {
2334 uint32_t fEvents = pReq->events;
2335 PVBOXGUESTWAIT pWait;
2336 PVBOXGUESTWAIT pSafe;
2337
2338 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
2339
2340 /*
2341 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
2342 */
2343 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
2344 {
2345 fMousePositionChanged = true;
2346 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
2347 }
2348
2349#ifdef VBOX_WITH_HGCM
2350 /*
2351 * The HGCM event/list is kind of different in that we evaluate all entries.
2352 */
2353 if (fEvents & VMMDEV_EVENT_HGCM)
2354 {
2355 RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2356 {
2357 if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
2358 {
2359 pWait->fResEvents = VMMDEV_EVENT_HGCM;
2360 RTListNodeRemove(&pWait->ListNode);
2361# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2362 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2363# else
2364 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2365 rc |= RTSemEventMultiSignal(pWait->Event);
2366# endif
2367 }
2368 }
2369 fEvents &= ~VMMDEV_EVENT_HGCM;
2370 }
2371#endif
2372
2373 /*
2374 * Normal FIFO waiter evaluation.
2375 */
2376 fEvents |= pDevExt->f32PendingEvents;
2377 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2378 {
2379 if ( (pWait->fReqEvents & fEvents)
2380 && !pWait->fResEvents)
2381 {
2382 pWait->fResEvents = pWait->fReqEvents & fEvents;
2383 fEvents &= ~pWait->fResEvents;
2384 RTListNodeRemove(&pWait->ListNode);
2385#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2386 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2387#else
2388 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2389 rc |= RTSemEventMultiSignal(pWait->Event);
2390#endif
2391 if (!fEvents)
2392 break;
2393 }
2394 }
2395 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
2396 }
2397 else /* something is serious wrong... */
2398 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
2399 pReq->header.rc, pReq->events));
2400 }
2401 else
2402 LogFlow(("VBoxGuestCommonISR: not ours\n"));
2403
2404 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
2405
2406#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
2407 /*
2408 * Do wake-ups.
2409 * Note. On Windows this isn't possible at this IRQL, so a DPC will take
2410 * care of it.
2411 */
2412 VBoxGuestWaitDoWakeUps(pDevExt);
2413#endif
2414
2415 /*
2416 * Work the poll and async notification queues on OSes that implements that.
2417 * (Do this outside the spinlock to prevent some recursive spinlocking.)
2418 */
2419 if (fMousePositionChanged)
2420 {
2421 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
2422 VBoxGuestNativeISRMousePollEvent(pDevExt);
2423 }
2424
2425 Assert(rc == 0);
2426 return fOurIrq;
2427}
2428
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use