VirtualBox

source: vbox/trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp

Last change on this file was 100185, checked in by vboxsync, 11 months ago

Devices/VMMDev: Add an MMIO interface in addition to the existing PIO interface for Guest Additions running inside an ARM-based guest. Also remove the dependency on the architecture page size and introduce a fixed 4KiB VMM page size, since ARM supports different page sizes (4KiB, 16KiB, 64KiB) and the page size can differ between host and guest. bugref:10456

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 114.1 KB
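
The change above introduces a fixed 4KiB "VMM page size" that this file uses instead of the architecture page size; the listing below relies on the VMMDEV_PAGE_SIZE, VMMDEV_PAGE_SHIFT and VMMDEV_PAGE_OFFSET_MASK constants. Those are defined in the VMMDev headers, not in this file; as orientation only, the definitions this code assumes look roughly like the following sketch (values are an assumption derived from the 4KiB page size, not copied from the headers):

/* Assumed definitions (illustration only, not part of VMMDevHGCM.cpp): */
#define VMMDEV_PAGE_SIZE          0x1000                    /* fixed 4KiB VMM page size */
#define VMMDEV_PAGE_SHIFT         12                        /* log2 of VMMDEV_PAGE_SIZE */
#define VMMDEV_PAGE_OFFSET_MASK   (VMMDEV_PAGE_SIZE - 1)    /* offset within a VMM page */
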
1/* $Id: VMMDevHGCM.cpp 100185 2023-06-16 06:54:50Z vboxsync $ */
2/** @file
3 * VMMDev - HGCM - Host-Guest Communication Manager Device.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_VMM
33#include <iprt/alloc.h>
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/param.h>
37#include <iprt/string.h>
38
39#include <VBox/AssertGuest.h>
40#include <VBox/err.h>
41#include <VBox/hgcmsvc.h>
42#include <VBox/log.h>
43
44#include "VMMDevHGCM.h"
45
46#ifdef DEBUG
47# define VBOX_STRICT_GUEST
48#endif
49
50#ifdef VBOX_WITH_DTRACE
51# include "dtrace/VBoxDD.h"
52#else
53# define VBOXDD_HGCMCALL_ENTER(a,b,c,d) do { } while (0)
54# define VBOXDD_HGCMCALL_COMPLETED_REQ(a,b) do { } while (0)
55# define VBOXDD_HGCMCALL_COMPLETED_EMT(a,b) do { } while (0)
56# define VBOXDD_HGCMCALL_COMPLETED_DONE(a,b,c,d) do { } while (0)
57#endif
58
59
60/*********************************************************************************************************************************
61* Structures and Typedefs *
62*********************************************************************************************************************************/
63typedef enum VBOXHGCMCMDTYPE
64{
65 VBOXHGCMCMDTYPE_LOADSTATE = 0,
66 VBOXHGCMCMDTYPE_CONNECT,
67 VBOXHGCMCMDTYPE_DISCONNECT,
68 VBOXHGCMCMDTYPE_CALL,
69 VBOXHGCMCMDTYPE_SizeHack = 0x7fffffff
70} VBOXHGCMCMDTYPE;
71
72/**
73 * Information about a 32 or 64 bit parameter.
74 */
75typedef struct VBOXHGCMPARMVAL
76{
77 /** Actual value. Both 32 and 64 bit is saved here. */
78 uint64_t u64Value;
79
80 /** Offset from the start of the request where the value is stored. */
81 uint32_t offValue;
82
83 /** Size of the value: 4 for 32 bit and 8 for 64 bit. */
84 uint32_t cbValue;
85
86} VBOXHGCMPARMVAL;
87
88/**
89 * Information about a pointer parameter.
90 */
91typedef struct VBOXHGCMPARMPTR
92{
93 /** Size of the buffer described by the pointer parameter. */
94 uint32_t cbData;
95
96/** @todo save 8 bytes here by putting offFirstPage, cPages, and fu32Direction
 97 * into a bitfield like in VBOXHGCMPARMPAGES. */
98 /** Offset in the first physical page of the region. */
99 uint32_t offFirstPage;
100
101 /** How many pages. */
102 uint32_t cPages;
103
104 /** How the buffer should be copied (VBOX_HGCM_F_PARM_*). */
105 uint32_t fu32Direction;
106
107 /** Pointer to array of the GC physical addresses for these pages.
108 * It is assumed that the physical address of the locked resident guest page
109 * does not change. */
110 RTGCPHYS *paPages;
111
112 /** For single page requests. */
113 RTGCPHYS GCPhysSinglePage;
114
115} VBOXHGCMPARMPTR;
116
117
118/**
119 * Pages w/o bounce buffering.
120 */
121typedef struct VBOXHGCMPARMPAGES
122{
123 /** The buffer size. */
124 uint32_t cbData;
125 /** Start of buffer offset into the first page. */
126 uint32_t offFirstPage : 12;
127 /** VBOX_HGCM_F_PARM_XXX flags. */
128 uint32_t fFlags : 3;
129 /** Set if we've locked all the pages. */
130 uint32_t fLocked : 1;
131 /** Number of pages. */
132 uint32_t cPages : 16;
133 /** Array of page locks followed by an array of page pointers; the first page
134 * pointer is adjusted by offFirstPage. */
135 PPGMPAGEMAPLOCK paPgLocks;
136} VBOXHGCMPARMPAGES;
137
138/**
139 * Information about a guest HGCM parameter.
140 */
141typedef struct VBOXHGCMGUESTPARM
142{
143 /** The parameter type. */
144 HGCMFunctionParameterType enmType;
145
146 union
147 {
148 VBOXHGCMPARMVAL val;
149 VBOXHGCMPARMPTR ptr;
150 VBOXHGCMPARMPAGES Pages;
151 } u;
152
153} VBOXHGCMGUESTPARM;
154
155typedef struct VBOXHGCMCMD
156{
157 /** Active commands, list is protected by critsectHGCMCmdList. */
158 RTLISTNODE node;
159
160 /** The type of the command (VBOXHGCMCMDTYPE). */
161 uint8_t enmCmdType;
162
163 /** Whether the command was cancelled by the guest. */
164 bool fCancelled;
165
166 /** Set if allocated from the memory cache, clear if heap. */
167 bool fMemCache;
168
169 /** Whether the command was restored from saved state. */
170 bool fRestored : 1;
171 /** Whether this command has a no-bounce page list and needs to be restored
172 * from guest memory the old fashioned way. */
173 bool fRestoreFromGuestMem : 1;
174
175 /** Copy of VMMDevRequestHeader::fRequestor.
176 * @note Only valid if VBOXGSTINFO2_F_REQUESTOR_INFO is set in
177 * VMMDevState.guestInfo2.fFeatures. */
178 uint32_t fRequestor;
179
180 /** GC physical address of the guest request. */
181 RTGCPHYS GCPhys;
182
183 /** Request packet size. */
184 uint32_t cbRequest;
185
186 /** The type of the guest request. */
187 VMMDevRequestType enmRequestType;
188
189 /** Pointer to the locked request, NULL if not locked. */
190 void *pvReqLocked;
191 /** The PGM lock for GCPhys if pvReqLocked is not NULL. */
192 PGMPAGEMAPLOCK ReqMapLock;
193
194 /** The accounting index (into VMMDEVR3::aHgcmAcc). */
195 uint8_t idxHeapAcc;
196 uint8_t abPadding[3];
197 /** The heap cost of this command. */
198 uint32_t cbHeapCost;
199
200 /** The STAM_GET_TS() value when the request arrived. */
201 uint64_t tsArrival;
202 /** The STAM_GET_TS() value when the hgcmR3Completed() is called. */
203 uint64_t tsComplete;
204
205 union
206 {
207 struct
208 {
209 uint32_t u32ClientID;
210 HGCMServiceLocation *pLoc; /**< Allocated after this structure. */
211 } connect;
212
213 struct
214 {
215 uint32_t u32ClientID;
216 } disconnect;
217
218 struct
219 {
220 /* Number of elements in paGuestParms and paHostParms arrays. */
221 uint32_t cParms;
222
223 uint32_t u32ClientID;
224
225 uint32_t u32Function;
226
227 /** Pointer to information about guest parameters in case of a Call request.
228 * Follows this structure in the same memory block.
229 */
230 VBOXHGCMGUESTPARM *paGuestParms;
231
232 /** Pointer to converted host parameters in case of a Call request.
233 * Follows this structure in the same memory block.
234 */
235 VBOXHGCMSVCPARM *paHostParms;
236
237 /* VBOXHGCMGUESTPARM[] */
238 /* VBOXHGCMSVCPARM[] */
239 } call;
240 } u;
241} VBOXHGCMCMD;
242
243
244/**
245 * Version for the memory cache.
246 */
247typedef struct VBOXHGCMCMDCACHED
248{
249 VBOXHGCMCMD Core; /**< 120 */
250 VBOXHGCMGUESTPARM aGuestParms[6]; /**< 40 * 6 = 240 */
251 VBOXHGCMSVCPARM aHostParms[6]; /**< 24 * 6 = 144 */
252} VBOXHGCMCMDCACHED; /**< 120+240+144 = 504 */
253AssertCompile(sizeof(VBOXHGCMCMD) <= 120);
254AssertCompile(sizeof(VBOXHGCMGUESTPARM) <= 40);
255AssertCompile(sizeof(VBOXHGCMSVCPARM) <= 24);
256AssertCompile(sizeof(VBOXHGCMCMDCACHED) <= 512);
257AssertCompile(sizeof(VBOXHGCMCMDCACHED) > sizeof(VBOXHGCMCMD) + sizeof(HGCMServiceLocation));
258
259
260/*********************************************************************************************************************************
261* Internal Functions *
262*********************************************************************************************************************************/
263DECLINLINE(void *) vmmdevR3HgcmCallMemAllocZ(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested);
264
265
266
267DECLINLINE(int) vmmdevR3HgcmCmdListLock(PVMMDEVCC pThisCC)
268{
269 int rc = RTCritSectEnter(&pThisCC->critsectHGCMCmdList);
270 AssertRC(rc);
271 return rc;
272}
273
274DECLINLINE(void) vmmdevR3HgcmCmdListUnlock(PVMMDEVCC pThisCC)
275{
276 int rc = RTCritSectLeave(&pThisCC->critsectHGCMCmdList);
277 AssertRC(rc);
278}
279
280/** Allocate and initialize VBOXHGCMCMD structure for HGCM request.
281 *
282 * @returns Pointer to the command on success, NULL otherwise.
283 * @param pThisCC The VMMDev ring-3 instance data.
284 * @param enmCmdType Type of the command.
285 * @param GCPhys The guest physical address of the HGCM request.
286 * @param cbRequest The size of the HGCM request.
287 * @param cParms Number of HGCM parameters for VBOXHGCMCMDTYPE_CALL command.
288 * @param fRequestor The VMMDevRequestHeader::fRequestor value.
289 */
290static PVBOXHGCMCMD vmmdevR3HgcmCmdAlloc(PVMMDEVCC pThisCC, VBOXHGCMCMDTYPE enmCmdType, RTGCPHYS GCPhys,
291 uint32_t cbRequest, uint32_t cParms, uint32_t fRequestor)
292{
293 /*
294 * Pick the heap accounting category.
295 *
296 * The initial idea was to just use what VMMDEV_REQUESTOR_USR_MASK yields directly,
297 * but that leaves many categories unused (DRV, RESERVED1, GUEST). Better to have
298 * fewer categories with more heap available in each.
299 */
300 uintptr_t idxHeapAcc;
301 if (fRequestor != VMMDEV_REQUESTOR_LEGACY)
302 switch (fRequestor & VMMDEV_REQUESTOR_USR_MASK)
303 {
304 case VMMDEV_REQUESTOR_USR_NOT_GIVEN:
305 case VMMDEV_REQUESTOR_USR_DRV:
306 case VMMDEV_REQUESTOR_USR_DRV_OTHER:
307 idxHeapAcc = VMMDEV_HGCM_CATEGORY_KERNEL;
308 break;
309 case VMMDEV_REQUESTOR_USR_ROOT:
310 case VMMDEV_REQUESTOR_USR_SYSTEM:
311 idxHeapAcc = VMMDEV_HGCM_CATEGORY_ROOT;
312 break;
313 default:
314 AssertFailed(); RT_FALL_THRU();
315 case VMMDEV_REQUESTOR_USR_RESERVED1:
316 case VMMDEV_REQUESTOR_USR_USER:
317 case VMMDEV_REQUESTOR_USR_GUEST:
318 idxHeapAcc = VMMDEV_HGCM_CATEGORY_USER;
319 break;
320 }
321 else
322 idxHeapAcc = VMMDEV_HGCM_CATEGORY_KERNEL;
323
324#if 1
325 /*
326 * Try to use the cache.
327 */
328 VBOXHGCMCMDCACHED *pCmdCached;
329 AssertCompile(sizeof(*pCmdCached) >= sizeof(VBOXHGCMCMD) + sizeof(HGCMServiceLocation));
330 if (cParms <= RT_ELEMENTS(pCmdCached->aGuestParms))
331 {
332 if (sizeof(*pCmdCached) <= pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget)
333 {
334 int rc = RTMemCacheAllocEx(pThisCC->hHgcmCmdCache, (void **)&pCmdCached);
335 if (RT_SUCCESS(rc))
336 {
337 RT_ZERO(*pCmdCached);
338 pCmdCached->Core.fMemCache = true;
339 pCmdCached->Core.GCPhys = GCPhys;
340 pCmdCached->Core.cbRequest = cbRequest;
341 pCmdCached->Core.enmCmdType = enmCmdType;
342 pCmdCached->Core.fRequestor = fRequestor;
343 pCmdCached->Core.idxHeapAcc = (uint8_t)idxHeapAcc;
344 pCmdCached->Core.cbHeapCost = sizeof(*pCmdCached);
345 Log5Func(("aHgcmAcc[%zu] %#RX64 -= %#zx (%p)\n",
346 idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, sizeof(*pCmdCached), &pCmdCached->Core));
347 pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget -= sizeof(*pCmdCached);
348
349 if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
350 {
351 pCmdCached->Core.u.call.cParms = cParms;
352 pCmdCached->Core.u.call.paGuestParms = pCmdCached->aGuestParms;
353 pCmdCached->Core.u.call.paHostParms = pCmdCached->aHostParms;
354 }
355 else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
356 pCmdCached->Core.u.connect.pLoc = (HGCMServiceLocation *)(&pCmdCached->Core + 1);
357
358 Assert(!pCmdCached->Core.pvReqLocked);
359
360 Log3Func(("returns %p (enmCmdType=%d GCPhys=%RGp)\n", &pCmdCached->Core, enmCmdType, GCPhys));
361 return &pCmdCached->Core;
362 }
363 }
364 else
365 LogFunc(("Heap budget overrun: sizeof(*pCmdCached)=%#zx aHgcmAcc[%zu].cbHeapBudget=%#RX64 - enmCmdType=%d\n",
366 sizeof(*pCmdCached), idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, enmCmdType));
367 STAM_REL_COUNTER_INC(&pThisCC->aHgcmAcc[idxHeapAcc].StatBudgetOverruns);
368 return NULL;
369 }
370 STAM_REL_COUNTER_INC(&pThisCC->StatHgcmLargeCmdAllocs);
371
372#else
373 RT_NOREF(pThisCC);
374#endif
375
376 /* Size of required memory buffer. */
377 const uint32_t cbCmd = sizeof(VBOXHGCMCMD) + cParms * (sizeof(VBOXHGCMGUESTPARM) + sizeof(VBOXHGCMSVCPARM))
378 + (enmCmdType == VBOXHGCMCMDTYPE_CONNECT ? sizeof(HGCMServiceLocation) : 0);
379 if (cbCmd <= pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget)
380 {
381 PVBOXHGCMCMD pCmd = (PVBOXHGCMCMD)RTMemAllocZ(cbCmd);
382 if (pCmd)
383 {
384 pCmd->enmCmdType = enmCmdType;
385 pCmd->GCPhys = GCPhys;
386 pCmd->cbRequest = cbRequest;
387 pCmd->fRequestor = fRequestor;
388 pCmd->idxHeapAcc = (uint8_t)idxHeapAcc;
389 pCmd->cbHeapCost = cbCmd;
390 Log5Func(("aHgcmAcc[%zu] %#RX64 -= %#x (%p)\n", idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, cbCmd, pCmd));
391 pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget -= cbCmd;
392
393 if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
394 {
395 pCmd->u.call.cParms = cParms;
396 if (cParms)
397 {
398 pCmd->u.call.paGuestParms = (VBOXHGCMGUESTPARM *)((uint8_t *)pCmd
399 + sizeof(struct VBOXHGCMCMD));
400 pCmd->u.call.paHostParms = (VBOXHGCMSVCPARM *)((uint8_t *)pCmd->u.call.paGuestParms
401 + cParms * sizeof(VBOXHGCMGUESTPARM));
402 }
403 }
404 else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
405 pCmd->u.connect.pLoc = (HGCMServiceLocation *)(pCmd + 1);
406 }
407 Log3Func(("returns %p (enmCmdType=%d GCPhys=%RGp cbCmd=%#x)\n", pCmd, enmCmdType, GCPhys, cbCmd));
408 return pCmd;
409 }
410 STAM_REL_COUNTER_INC(&pThisCC->aHgcmAcc[idxHeapAcc].StatBudgetOverruns);
411 LogFunc(("Heap budget overrun: cbCmd=%#x aHgcmAcc[%zu].cbHeapBudget=%#RX64 - enmCmdType=%d\n",
412 cbCmd, idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, enmCmdType));
413 return NULL;
414}
415
416/** Deallocate VBOXHGCMCMD memory.
417 *
418 * @param pDevIns The device instance.
419 * @param pThis The VMMDev shared instance data.
420 * @param pThisCC The VMMDev ring-3 instance data.
421 * @param pCmd Command to deallocate.
422 */
423static void vmmdevR3HgcmCmdFree(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd)
424{
425 if (pCmd)
426 {
427 Assert( pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL
428 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT
429 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT
430 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_LOADSTATE);
431 if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
432 {
433 uint32_t i;
434 for (i = 0; i < pCmd->u.call.cParms; ++i)
435 {
436 VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
437 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
438
439 if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
440 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
441 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
442 || pGuestParm->enmType == VMMDevHGCMParmType_PageList
443 || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
444 {
445 Assert(pHostParm->type == VBOX_HGCM_SVC_PARM_PTR);
446 if (pGuestParm->u.ptr.paPages != &pGuestParm->u.ptr.GCPhysSinglePage)
447 RTMemFree(pGuestParm->u.ptr.paPages);
448 RTMemFreeZ(pHostParm->u.pointer.addr, pGuestParm->u.ptr.cbData);
449 }
450 else if (pGuestParm->enmType == VMMDevHGCMParmType_Embedded)
451 {
452 Assert(pHostParm->type == VBOX_HGCM_SVC_PARM_PTR);
453 RTMemFreeZ(pHostParm->u.pointer.addr, pGuestParm->u.ptr.cbData);
454 }
455 else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
456 {
457 Assert(pHostParm->type == VBOX_HGCM_SVC_PARM_PAGES);
458 if (pGuestParm->u.Pages.paPgLocks)
459 {
460 if (pGuestParm->u.Pages.fLocked)
461 PDMDevHlpPhysBulkReleasePageMappingLocks(pDevIns, pGuestParm->u.Pages.cPages,
462 pGuestParm->u.Pages.paPgLocks);
463 RTMemFree(pGuestParm->u.Pages.paPgLocks);
464 pGuestParm->u.Pages.paPgLocks = NULL;
465 }
466 }
467 else
468 Assert(pHostParm->type != VBOX_HGCM_SVC_PARM_PTR && pHostParm->type != VBOX_HGCM_SVC_PARM_PAGES);
469 }
470 }
471
472 if (pCmd->pvReqLocked)
473 {
474 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &pCmd->ReqMapLock);
475 pCmd->pvReqLocked = NULL;
476 }
477
478 pCmd->enmCmdType = UINT8_MAX; /* poison */
479
480 /* Update heap budget. Need the critsect to do this safely. */
481 Assert(pCmd->cbHeapCost != 0);
482 uintptr_t idx = pCmd->idxHeapAcc;
483 AssertStmt(idx < RT_ELEMENTS(pThisCC->aHgcmAcc), idx %= RT_ELEMENTS(pThisCC->aHgcmAcc));
484
485 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
486 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->CritSect, rcLock);
487
488 Log5Func(("aHgcmAcc[%zu] %#RX64 += %#x (%p)\n", idx, pThisCC->aHgcmAcc[idx].cbHeapBudget, pCmd->cbHeapCost, pCmd));
489 pThisCC->aHgcmAcc[idx].cbHeapBudget += pCmd->cbHeapCost;
490 AssertMsg(pThisCC->aHgcmAcc[idx].cbHeapBudget <= pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig,
491 ("idx=%d (%d) fRequestor=%#x pCmd=%p: %#RX64 vs %#RX64 -> %#RX64\n", idx, pCmd->idxHeapAcc, pCmd->fRequestor, pCmd,
492 pThisCC->aHgcmAcc[idx].cbHeapBudget, pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig,
493 pThisCC->aHgcmAcc[idx].cbHeapBudget - pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig));
494 pCmd->cbHeapCost = 0;
495
496#if 1
497 if (pCmd->fMemCache)
498 {
499 RTMemCacheFree(pThisCC->hHgcmCmdCache, pCmd);
500 PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect); /* releasing it afterwards just to be on the safe side. */
501 }
502 else
503#endif
504 {
505 PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
506 RTMemFree(pCmd);
507 }
508 }
509}
510
511/** Add VBOXHGCMCMD to the list of pending commands.
512 *
513 * @returns VBox status code.
514 * @param pDevIns The device instance.
515 * @param pThis The VMMDev shared instance data.
516 * @param pThisCC The VMMDev ring-3 instance data.
517 * @param pCmd Command to add.
518 */
519static int vmmdevR3HgcmAddCommand(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd)
520{
521 int rc = vmmdevR3HgcmCmdListLock(pThisCC);
522 AssertRCReturn(rc, rc);
523
524 LogFlowFunc(("%p type %d\n", pCmd, pCmd->enmCmdType));
525
526 RTListPrepend(&pThisCC->listHGCMCmd, &pCmd->node);
527
528 /* stats */
529 uintptr_t idx = pCmd->idxHeapAcc;
530 AssertStmt(idx < RT_ELEMENTS(pThisCC->aHgcmAcc), idx %= RT_ELEMENTS(pThisCC->aHgcmAcc));
531 STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->aHgcmAcc[idx].StateMsgHeapUsage, pCmd->cbHeapCost);
532
533 /* Automatically enable HGCM events if there are HGCM commands. */
534 if ( pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT
535 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT
536 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
537 {
538 LogFunc(("u32HGCMEnabled = %d\n", pThisCC->u32HGCMEnabled));
539 if (ASMAtomicCmpXchgU32(&pThisCC->u32HGCMEnabled, 1, 0))
540 VMMDevCtlSetGuestFilterMask(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM, 0);
541 }
542
543 vmmdevR3HgcmCmdListUnlock(pThisCC);
544 return rc;
545}
546
547/** Remove VBOXHGCMCMD from the list of pending commands.
548 *
549 * @returns VBox status code.
550 * @param pThisCC The VMMDev ring-3 instance data.
551 * @param pCmd Command to remove.
552 */
553static int vmmdevR3HgcmRemoveCommand(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd)
554{
555 int rc = vmmdevR3HgcmCmdListLock(pThisCC);
556 AssertRCReturn(rc, rc);
557
558 LogFlowFunc(("%p\n", pCmd));
559
560 RTListNodeRemove(&pCmd->node);
561
562 vmmdevR3HgcmCmdListUnlock(pThisCC);
563 return rc;
564}
565
566/**
567 * Find a HGCM command by its physical address.
568 *
569 * The caller is responsible for taking the command list lock before calling
570 * this function.
571 *
572 * @returns Pointer to the command on success, NULL otherwise.
573 * @param pThisCC The VMMDev ring-3 instance data.
574 * @param GCPhys The physical address of the command we're looking for.
575 */
576DECLINLINE(PVBOXHGCMCMD) vmmdevR3HgcmFindCommandLocked(PVMMDEVCC pThisCC, RTGCPHYS GCPhys)
577{
578 PVBOXHGCMCMD pCmd;
579 RTListForEach(&pThisCC->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
580 {
581 if (pCmd->GCPhys == GCPhys)
582 return pCmd;
583 }
584 return NULL;
585}
586
587/** Copy VMMDevHGCMConnect request data from the guest to VBOXHGCMCMD command.
588 *
589 * @param pHGCMConnect The source guest request (cached in host memory).
590 * @param pCmd Destination command.
591 */
592static void vmmdevR3HgcmConnectFetch(const VMMDevHGCMConnect *pHGCMConnect, PVBOXHGCMCMD pCmd)
593{
594 pCmd->enmRequestType = pHGCMConnect->header.header.requestType;
595 pCmd->u.connect.u32ClientID = pHGCMConnect->u32ClientID;
596 *pCmd->u.connect.pLoc = pHGCMConnect->loc;
597}
598
599/** Handle VMMDevHGCMConnect request.
600 *
601 * @param pDevIns The device instance.
602 * @param pThis The VMMDev shared instance data.
603 * @param pThisCC The VMMDev ring-3 instance data.
604 * @param pHGCMConnect The guest request (cached in host memory).
605 * @param GCPhys The physical address of the request.
606 */
607int vmmdevR3HgcmConnect(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
608 const VMMDevHGCMConnect *pHGCMConnect, RTGCPHYS GCPhys)
609{
610 int rc;
611 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_CONNECT, GCPhys, pHGCMConnect->header.header.size, 0,
612 pHGCMConnect->header.header.fRequestor);
613 if (pCmd)
614 {
615 vmmdevR3HgcmConnectFetch(pHGCMConnect, pCmd);
616
617 /* Only allow the guest to use existing services! */
618 ASSERT_GUEST(pHGCMConnect->loc.type == VMMDevHGCMLoc_LocalHost_Existing);
619 pCmd->u.connect.pLoc->type = VMMDevHGCMLoc_LocalHost_Existing;
620
621 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
622 rc = pThisCC->pHGCMDrv->pfnConnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.connect.pLoc, &pCmd->u.connect.u32ClientID);
623 if (RT_FAILURE(rc))
624 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
625 }
626 else
627 rc = VERR_NO_MEMORY;
628
629 return rc;
630}
631
632/** Copy VMMDevHGCMDisconnect request data from the guest to VBOXHGCMCMD command.
633 *
634 * @param pHGCMDisconnect The source guest request (cached in host memory).
635 * @param pCmd Destination command.
636 */
637static void vmmdevR3HgcmDisconnectFetch(const VMMDevHGCMDisconnect *pHGCMDisconnect, PVBOXHGCMCMD pCmd)
638{
639 pCmd->enmRequestType = pHGCMDisconnect->header.header.requestType;
640 pCmd->u.disconnect.u32ClientID = pHGCMDisconnect->u32ClientID;
641}
642
643/** Handle VMMDevHGCMDisconnect request.
644 *
645 * @param pDevIns The device instance.
646 * @param pThis The VMMDev shared instance data.
647 * @param pThisCC The VMMDev ring-3 instance data.
648 * @param pHGCMDisconnect The guest request (cached in host memory).
649 * @param GCPhys The physical address of the request.
650 */
651int vmmdevR3HgcmDisconnect(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
652 const VMMDevHGCMDisconnect *pHGCMDisconnect, RTGCPHYS GCPhys)
653{
654 int rc;
655 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_DISCONNECT, GCPhys, pHGCMDisconnect->header.header.size, 0,
656 pHGCMDisconnect->header.header.fRequestor);
657 if (pCmd)
658 {
659 vmmdevR3HgcmDisconnectFetch(pHGCMDisconnect, pCmd);
660
661 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
662 rc = pThisCC->pHGCMDrv->pfnDisconnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.disconnect.u32ClientID);
663 if (RT_FAILURE(rc))
664 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
665 }
666 else
667 rc = VERR_NO_MEMORY;
668
669 return rc;
670}
671
672/** Translate LinAddr parameter type to the direction of data transfer.
673 *
674 * @returns VBOX_HGCM_F_PARM_DIRECTION_* flags.
675 * @param enmType Type of the LinAddr parameter.
676 */
677static uint32_t vmmdevR3HgcmParmTypeToDirection(HGCMFunctionParameterType enmType)
678{
679 if (enmType == VMMDevHGCMParmType_LinAddr_In) return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
680 if (enmType == VMMDevHGCMParmType_LinAddr_Out) return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
681 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
682}
683
684/** Check whether the list of pages in an HGCM pointer parameter corresponds to a contiguous buffer.
685 *
686 * @returns true if pages are contiguous, false otherwise.
687 * @param pPtr Information about a pointer HGCM parameter.
688 */
689DECLINLINE(bool) vmmdevR3HgcmGuestBufferIsContiguous(const VBOXHGCMPARMPTR *pPtr)
690{
691 if (pPtr->cPages == 1)
692 return true;
693 RTGCPHYS64 Phys = pPtr->paPages[0] + VMMDEV_PAGE_SIZE;
694 if (Phys != pPtr->paPages[1])
695 return false;
696 if (pPtr->cPages > 2)
697 {
698 uint32_t iPage = 2;
699 do
700 {
701 Phys += VMMDEV_PAGE_SIZE;
702 if (Phys != pPtr->paPages[iPage])
703 return false;
704 ++iPage;
705 } while (iPage < pPtr->cPages);
706 }
707 return true;
708}
709
710/** Copy data from guest memory to the host buffer.
711 *
712 * @returns VBox status code.
713 * @param pDevIns The device instance for PDMDevHlp.
714 * @param pvDst The destination host buffer.
715 * @param cbDst Size of the destination host buffer.
716 * @param pPtr Description of the source HGCM pointer parameter.
717 */
718static int vmmdevR3HgcmGuestBufferRead(PPDMDEVINSR3 pDevIns, void *pvDst, uint32_t cbDst, const VBOXHGCMPARMPTR *pPtr)
719{
720 /*
721 * Try to detect contiguous buffers.
722 */
723 /** @todo We need a flag for indicating this. */
724 if (vmmdevR3HgcmGuestBufferIsContiguous(pPtr))
725 return PDMDevHlpPhysRead(pDevIns, pPtr->paPages[0] | pPtr->offFirstPage, pvDst, cbDst);
726
727 /*
728 * Page by page fallback.
729 */
730 uint8_t *pu8Dst = (uint8_t *)pvDst;
731 uint32_t offPage = pPtr->offFirstPage;
732 uint32_t cbRemaining = cbDst;
733
734 for (uint32_t iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
735 {
736 uint32_t cbToRead = VMMDEV_PAGE_SIZE - offPage;
737 if (cbToRead > cbRemaining)
738 cbToRead = cbRemaining;
739
740 /* Skip invalid pages. */
741 const RTGCPHYS GCPhys = pPtr->paPages[iPage];
742 if (GCPhys != NIL_RTGCPHYS)
743 {
744 int rc = PDMDevHlpPhysRead(pDevIns, GCPhys + offPage, pu8Dst, cbToRead);
745 AssertMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp offPage=%#x cbToRead=%#x\n", rc, GCPhys, offPage, cbToRead), rc);
746 }
747
748 offPage = 0; /* The next page is read from offset 0. */
749 cbRemaining -= cbToRead;
750 pu8Dst += cbToRead;
751 }
752
753 return VINF_SUCCESS;
754}
755
756/** Copy data from the host buffer to guest memory.
757 *
758 * @returns VBox status code.
759 * @param pDevIns The device instance for PDMDevHlp.
760 * @param pPtr Description of the destination HGCM pointer parameter.
761 * @param pvSrc The source host buffer.
762 * @param cbSrc Size of the source host buffer.
763 */
764static int vmmdevR3HgcmGuestBufferWrite(PPDMDEVINSR3 pDevIns, const VBOXHGCMPARMPTR *pPtr, const void *pvSrc, uint32_t cbSrc)
765{
766 int rc = VINF_SUCCESS;
767
768 uint8_t *pu8Src = (uint8_t *)pvSrc;
769 uint32_t offPage = pPtr->offFirstPage;
770 uint32_t cbRemaining = RT_MIN(cbSrc, pPtr->cbData);
771
772 uint32_t iPage;
773 for (iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
774 {
775 uint32_t cbToWrite = VMMDEV_PAGE_SIZE - offPage;
776 if (cbToWrite > cbRemaining)
777 cbToWrite = cbRemaining;
778
779 /* Skip invalid pages. */
780 const RTGCPHYS GCPhys = pPtr->paPages[iPage];
781 if (GCPhys != NIL_RTGCPHYS)
782 {
783 rc = PDMDevHlpPhysWrite(pDevIns, GCPhys + offPage, pu8Src, cbToWrite);
784 AssertRCBreak(rc);
785 }
786
787 offPage = 0; /* The next page is written at offset 0. */
788 cbRemaining -= cbToWrite;
789 pu8Src += cbToWrite;
790 }
791
792 return rc;
793}
794
795/** Initializes pCmd->paHostParms from already initialized pCmd->paGuestParms.
796 * Allocates memory for pointer parameters and copies data from the guest.
797 *
798 * @returns VBox status code that the guest should see.
799 * @param pDevIns The device instance.
800 * @param pThisCC The VMMDev ring-3 instance data.
801 * @param pCmd Command structure whose host parameters need initialization.
802 * @param pbReq The request buffer.
803 */
804static int vmmdevR3HgcmInitHostParameters(PPDMDEVINS pDevIns, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, uint8_t const *pbReq)
805{
806 AssertReturn(pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_INTERNAL_ERROR);
807
808 for (uint32_t i = 0; i < pCmd->u.call.cParms; ++i)
809 {
810 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
811 VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
812
813 switch (pGuestParm->enmType)
814 {
815 case VMMDevHGCMParmType_32bit:
816 {
817 pHostParm->type = VBOX_HGCM_SVC_PARM_32BIT;
818 pHostParm->u.uint32 = (uint32_t)pGuestParm->u.val.u64Value;
819
820 break;
821 }
822
823 case VMMDevHGCMParmType_64bit:
824 {
825 pHostParm->type = VBOX_HGCM_SVC_PARM_64BIT;
826 pHostParm->u.uint64 = pGuestParm->u.val.u64Value;
827
828 break;
829 }
830
831 case VMMDevHGCMParmType_PageList:
832 case VMMDevHGCMParmType_LinAddr_In:
833 case VMMDevHGCMParmType_LinAddr_Out:
834 case VMMDevHGCMParmType_LinAddr:
835 case VMMDevHGCMParmType_Embedded:
836 case VMMDevHGCMParmType_ContiguousPageList:
837 {
838 const uint32_t cbData = pGuestParm->u.ptr.cbData;
839
840 pHostParm->type = VBOX_HGCM_SVC_PARM_PTR;
841 pHostParm->u.pointer.size = cbData;
842
843 if (cbData)
844 {
845 /* Zero the memory; the buffer content is potentially copied to the guest. */
846 void *pv = vmmdevR3HgcmCallMemAllocZ(pThisCC, pCmd, cbData);
847 AssertReturn(pv, VERR_NO_MEMORY);
848 pHostParm->u.pointer.addr = pv;
849
850 if (pGuestParm->u.ptr.fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_TO_HOST)
851 {
852 if (pGuestParm->enmType != VMMDevHGCMParmType_Embedded)
853 {
854 if (pGuestParm->enmType != VMMDevHGCMParmType_ContiguousPageList)
855 {
856 int rc = vmmdevR3HgcmGuestBufferRead(pDevIns, pv, cbData, &pGuestParm->u.ptr);
857 ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
858 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
859 }
860 else
861 {
862 int rc = PDMDevHlpPhysRead(pDevIns,
863 pGuestParm->u.ptr.paPages[0] | pGuestParm->u.ptr.offFirstPage,
864 pv, cbData);
865 ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
866 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
867 }
868 }
869 else
870 {
871 memcpy(pv, &pbReq[pGuestParm->u.ptr.offFirstPage], cbData);
872 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
873 }
874 }
875 }
876 else
877 {
878 pHostParm->u.pointer.addr = NULL;
879 }
880
881 break;
882 }
883
884 case VMMDevHGCMParmType_NoBouncePageList:
885 {
886 pHostParm->type = VBOX_HGCM_SVC_PARM_PAGES;
887 pHostParm->u.Pages.cb = pGuestParm->u.Pages.cbData;
888 pHostParm->u.Pages.cPages = pGuestParm->u.Pages.cPages;
889 pHostParm->u.Pages.papvPages = (void **)&pGuestParm->u.Pages.paPgLocks[pGuestParm->u.Pages.cPages];
890
891 break;
892 }
893
894 default:
895 ASSERT_GUEST_FAILED_RETURN(VERR_INVALID_PARAMETER);
896 }
897 }
898
899 return VINF_SUCCESS;
900}
901
902
903/** Allocate and initialize VBOXHGCMCMD structure for a HGCMCall request.
904 *
905 * @returns VBox status code that the guest should see.
906 * @param pThisCC The VMMDev ring-3 instance data.
907 * @param pHGCMCall The HGCMCall request (cached in host memory).
908 * @param cbHGCMCall Size of the request.
909 * @param GCPhys Guest physical address of the request.
910 * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
911 * @param ppCmd Where to store pointer to allocated command.
912 * @param pcbHGCMParmStruct Where to store size of used HGCM parameter structure.
913 */
914static int vmmdevR3HgcmCallAlloc(PVMMDEVCC pThisCC, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall, RTGCPHYS GCPhys,
915 VMMDevRequestType enmRequestType, PVBOXHGCMCMD *ppCmd, uint32_t *pcbHGCMParmStruct)
916{
917#ifdef VBOX_WITH_64_BITS_GUESTS
918 const uint32_t cbHGCMParmStruct = enmRequestType == VMMDevReq_HGCMCall64 ? sizeof(HGCMFunctionParameter64)
919 : sizeof(HGCMFunctionParameter32);
920#else
921 const uint32_t cbHGCMParmStruct = sizeof(HGCMFunctionParameter);
922#endif
923
924 const uint32_t cParms = pHGCMCall->cParms;
925
926 /* Check that there is enough space for the parameters and enforce a sane upper limit. */
927 ASSERT_GUEST_STMT_RETURN( cParms <= (cbHGCMCall - sizeof(VMMDevHGCMCall)) / cbHGCMParmStruct
928 && cParms <= VMMDEV_MAX_HGCM_PARMS,
929 LogRelMax(50, ("VMMDev: request packet with invalid number of HGCM parameters: %d vs %d. Refusing operation.\n",
930 (cbHGCMCall - sizeof(VMMDevHGCMCall)) / cbHGCMParmStruct, cParms)),
931 VERR_INVALID_PARAMETER);
932 RT_UNTRUSTED_VALIDATED_FENCE();
933
934 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_CALL, GCPhys, cbHGCMCall, cParms,
935 pHGCMCall->header.header.fRequestor);
936 if (pCmd == NULL)
937 return VERR_NO_MEMORY;
938
939 /* Request type has been validated in vmmdevReqDispatcher. */
940 pCmd->enmRequestType = enmRequestType;
941 pCmd->u.call.u32ClientID = pHGCMCall->u32ClientID;
942 pCmd->u.call.u32Function = pHGCMCall->u32Function;
943
944 *ppCmd = pCmd;
945 *pcbHGCMParmStruct = cbHGCMParmStruct;
946 return VINF_SUCCESS;
947}
948
949/**
950 * Heap budget wrapper around RTMemAlloc and RTMemAllocZ.
951 */
952static void *vmmdevR3HgcmCallMemAllocEx(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested, bool fZero)
953{
954 uintptr_t idx = pCmd->idxHeapAcc;
955 AssertStmt(idx < RT_ELEMENTS(pThisCC->aHgcmAcc), idx %= RT_ELEMENTS(pThisCC->aHgcmAcc));
956
957 /* Check against max heap costs for this request. */
958 Assert(pCmd->cbHeapCost <= VMMDEV_MAX_HGCM_DATA_SIZE);
959 if (cbRequested <= VMMDEV_MAX_HGCM_DATA_SIZE - pCmd->cbHeapCost)
960 {
961 /* Check heap budget (we're under lock). */
962 if (cbRequested <= pThisCC->aHgcmAcc[idx].cbHeapBudget)
963 {
964 /* Do the actual allocation. */
965 void *pv = fZero ? RTMemAllocZ(cbRequested) : RTMemAlloc(cbRequested);
966 if (pv)
967 {
968 /* Update the request cost and heap budget. */
969 Log5Func(("aHgcmAcc[%zu] %#RX64 += %#x (%p)\n", idx, pThisCC->aHgcmAcc[idx].cbHeapBudget, cbRequested, pCmd));
970 pThisCC->aHgcmAcc[idx].cbHeapBudget -= cbRequested;
971 pCmd->cbHeapCost += (uint32_t)cbRequested;
972 return pv;
973 }
974 LogFunc(("Heap alloc failed: cbRequested=%#zx - enmCmdType=%d\n", cbRequested, pCmd->enmCmdType));
975 }
976 else
977 LogFunc(("Heap budget overrun: cbRequested=%#zx cbHeapCost=%#x aHgcmAcc[%u].cbHeapBudget=%#RX64 - enmCmdType=%d\n",
978 cbRequested, pCmd->cbHeapCost, pCmd->idxHeapAcc, pThisCC->aHgcmAcc[idx].cbHeapBudget, pCmd->enmCmdType));
979 }
980 else
981 LogFunc(("Request too big: cbRequested=%#zx cbHeapCost=%#x - enmCmdType=%d\n",
982 cbRequested, pCmd->cbHeapCost, pCmd->enmCmdType));
983 STAM_REL_COUNTER_INC(&pThisCC->aHgcmAcc[idx].StatBudgetOverruns);
984 return NULL;
985}
986
987/**
988 * Heap budget wrapper around RTMemAlloc.
989 */
990DECLINLINE(void *) vmmdevR3HgcmCallMemAlloc(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested)
991{
992 return vmmdevR3HgcmCallMemAllocEx(pThisCC, pCmd, cbRequested, false /*fZero*/);
993}
994
995/**
996 * Heap budget wrapper around RTMemAllocZ.
997 */
998DECLINLINE(void *) vmmdevR3HgcmCallMemAllocZ(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested)
999{
1000 return vmmdevR3HgcmCallMemAllocEx(pThisCC, pCmd, cbRequested, true /*fZero*/);
1001}
1002
1003/** Copy VMMDevHGCMCall request data from the guest to VBOXHGCMCMD command.
1004 *
1005 * @returns VBox status code that the guest should see.
1006 * @param pDevIns The device instance.
1007 * @param pThisCC The VMMDev ring-3 instance data.
1008 * @param pCmd The destination command.
1009 * @param pHGCMCall The HGCMCall request (cached in host memory).
1010 * @param cbHGCMCall Size of the request.
1011 * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
1012 * @param cbHGCMParmStruct Size of used HGCM parameter structure.
1013 */
1014static int vmmdevR3HgcmCallFetchGuestParms(PPDMDEVINS pDevIns, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd,
1015 const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall,
1016 VMMDevRequestType enmRequestType, uint32_t cbHGCMParmStruct)
1017{
1018 /*
1019 * Go over all guest parameters and initialize relevant VBOXHGCMCMD fields.
1020 * VBOXHGCMCMD must contain all information about the request, as the
1021 * request will not be read from guest memory again.
1022 */
1023#ifdef VBOX_WITH_64_BITS_GUESTS
1024 const bool f64Bits = (enmRequestType == VMMDevReq_HGCMCall64);
1025#endif
1026
1027 const uint32_t cParms = pCmd->u.call.cParms;
1028
1029 /* Offsets in the request buffer to HGCM parameters and additional data. */
1030 const uint32_t offHGCMParms = sizeof(VMMDevHGCMCall);
1031 const uint32_t offExtra = offHGCMParms + cParms * cbHGCMParmStruct;
1032
1033 /* Pointer to the next HGCM parameter of the request. */
1034 const uint8_t *pu8HGCMParm = (uint8_t *)pHGCMCall + offHGCMParms;
1035
1036 for (uint32_t i = 0; i < cParms; ++i, pu8HGCMParm += cbHGCMParmStruct)
1037 {
1038 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
1039
1040#ifdef VBOX_WITH_64_BITS_GUESTS
1041 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, type, HGCMFunctionParameter32, type);
1042 pGuestParm->enmType = ((HGCMFunctionParameter64 *)pu8HGCMParm)->type;
1043#else
1044 pGuestParm->enmType = ((HGCMFunctionParameter *)pu8HGCMParm)->type;
1045#endif
1046
1047 switch (pGuestParm->enmType)
1048 {
1049 case VMMDevHGCMParmType_32bit:
1050 {
1051#ifdef VBOX_WITH_64_BITS_GUESTS
1052 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.value32, HGCMFunctionParameter32, u.value32);
1053 uint32_t *pu32 = &((HGCMFunctionParameter64 *)pu8HGCMParm)->u.value32;
1054#else
1055 uint32_t *pu32 = &((HGCMFunctionParameter *)pu8HGCMParm)->u.value32;
1056#endif
1057 LogFunc(("uint32 guest parameter %RI32\n", *pu32));
1058
1059 pGuestParm->u.val.u64Value = *pu32;
1060 pGuestParm->u.val.offValue = (uint32_t)((uintptr_t)pu32 - (uintptr_t)pHGCMCall);
1061 pGuestParm->u.val.cbValue = sizeof(uint32_t);
1062
1063 break;
1064 }
1065
1066 case VMMDevHGCMParmType_64bit:
1067 {
1068#ifdef VBOX_WITH_64_BITS_GUESTS
1069 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.value64, HGCMFunctionParameter32, u.value64);
1070 uint64_t *pu64 = (uint64_t *)(uintptr_t)&((HGCMFunctionParameter64 *)pu8HGCMParm)->u.value64; /* MSC detects misalignment, hence the casts. */
1071#else
1072 uint64_t *pu64 = &((HGCMFunctionParameter *)pu8HGCMParm)->u.value64;
1073#endif
1074 LogFunc(("uint64 guest parameter %RI64\n", *pu64));
1075
1076 pGuestParm->u.val.u64Value = *pu64;
1077 pGuestParm->u.val.offValue = (uint32_t)((uintptr_t)pu64 - (uintptr_t)pHGCMCall);
1078 pGuestParm->u.val.cbValue = sizeof(uint64_t);
1079
1080 break;
1081 }
1082
1083 case VMMDevHGCMParmType_LinAddr_In: /* In (read) */
1084 case VMMDevHGCMParmType_LinAddr_Out: /* Out (write) */
1085 case VMMDevHGCMParmType_LinAddr: /* In & Out */
1086 {
1087#ifdef VBOX_WITH_64_BITS_GUESTS
1088 uint32_t cbData = f64Bits ? ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Pointer.size
1089 : ((HGCMFunctionParameter32 *)pu8HGCMParm)->u.Pointer.size;
1090 RTGCPTR GCPtr = f64Bits ? ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Pointer.u.linearAddr
1091 : ((HGCMFunctionParameter32 *)pu8HGCMParm)->u.Pointer.u.linearAddr;
1092#else
1093 uint32_t cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Pointer.size;
1094 RTGCPTR GCPtr = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Pointer.u.linearAddr;
1095#endif
1096 LogFunc(("LinAddr guest parameter %RGv, cb %u\n", GCPtr, cbData));
1097
1098 ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
1099
1100 const uint32_t offFirstPage = cbData > 0 ? GCPtr & VMMDEV_PAGE_OFFSET_MASK : 0;
1101 const uint32_t cPages = cbData > 0 ? (offFirstPage + cbData + VMMDEV_PAGE_SIZE - 1) / VMMDEV_PAGE_SIZE : 0;
1102
1103 pGuestParm->u.ptr.cbData = cbData;
1104 pGuestParm->u.ptr.offFirstPage = offFirstPage;
1105 pGuestParm->u.ptr.cPages = cPages;
1106 pGuestParm->u.ptr.fu32Direction = vmmdevR3HgcmParmTypeToDirection(pGuestParm->enmType);
1107
1108 if (cbData > 0)
1109 {
1110 if (cPages == 1)
1111 pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
1112 else
1113 {
1114 /* (Max 262144 bytes with current limits.) */
1115 pGuestParm->u.ptr.paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd,
1116 cPages * sizeof(RTGCPHYS));
1117 AssertReturn(pGuestParm->u.ptr.paPages, VERR_NO_MEMORY);
1118 }
1119
1120 /* Convert the guest linear page pointers to physical addresses. */
1121 GCPtr &= ~(RTGCPTR)VMMDEV_PAGE_OFFSET_MASK;
1122 for (uint32_t iPage = 0; iPage < cPages; ++iPage)
1123 {
1124 /* The guest might specify an invalid GCPtr; just skip such addresses.
1125 * Also, if the guest parameters are fetched while restoring an old saved state,
1126 * GCPtr may have become invalid and no longer have a corresponding GCPhys.
1127 * The command restoration routine will take care of this.
1128 */
1129 RTGCPHYS GCPhys;
1130 int rc2 = PDMDevHlpPhysGCPtr2GCPhys(pDevIns, GCPtr, &GCPhys);
1131 if (RT_FAILURE(rc2))
1132 GCPhys = NIL_RTGCPHYS;
1133 LogFunc(("Page %d: %RGv -> %RGp. %Rrc\n", iPage, GCPtr, GCPhys, rc2));
1134
1135 pGuestParm->u.ptr.paPages[iPage] = GCPhys;
1136 GCPtr += VMMDEV_PAGE_SIZE;
1137 }
1138 }
1139
1140 break;
1141 }
1142
1143 case VMMDevHGCMParmType_PageList:
1144 case VMMDevHGCMParmType_ContiguousPageList:
1145 case VMMDevHGCMParmType_NoBouncePageList:
1146 {
1147#ifdef VBOX_WITH_64_BITS_GUESTS
1148 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
1149 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.offset, HGCMFunctionParameter32, u.PageList.offset);
1150 uint32_t cbData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.PageList.size;
1151 uint32_t offPageListInfo = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.PageList.offset;
1152#else
1153 uint32_t cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.PageList.size;
1154 uint32_t offPageListInfo = ((HGCMFunctionParameter *)pu8HGCMParm)->u.PageList.offset;
1155#endif
1156 LogFunc(("PageList guest parameter cb %u, offset %u\n", cbData, offPageListInfo));
1157
1158 ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
1159
1160/** @todo respect zero byte page lists... */
1161 /* Check that the page list info is within the request. */
1162 ASSERT_GUEST_RETURN( offPageListInfo >= offExtra
1163 && cbHGCMCall >= sizeof(HGCMPageListInfo)
1164 && offPageListInfo <= cbHGCMCall - sizeof(HGCMPageListInfo),
1165 VERR_INVALID_PARAMETER);
1166 RT_UNTRUSTED_VALIDATED_FENCE();
1167
1168 /* The HGCMPageListInfo structure is within the request. */
1169 const HGCMPageListInfo *pPageListInfo = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offPageListInfo);
1170
1171 /* Enough space for page pointers? */
1172 const uint32_t cMaxPages = 1 + (cbHGCMCall - offPageListInfo - sizeof(HGCMPageListInfo)) / sizeof(RTGCPHYS);
1173 ASSERT_GUEST_RETURN( pPageListInfo->cPages > 0
1174 && pPageListInfo->cPages <= cMaxPages,
1175 VERR_INVALID_PARAMETER);
1176
1177 /* Flags. */
1178 ASSERT_GUEST_MSG_RETURN(VBOX_HGCM_F_PARM_ARE_VALID(pPageListInfo->flags),
1179 ("%#x\n", pPageListInfo->flags), VERR_INVALID_FLAGS);
1180 /* First page offset. */
1181 ASSERT_GUEST_MSG_RETURN(pPageListInfo->offFirstPage < VMMDEV_PAGE_SIZE,
1182 ("%#x\n", pPageListInfo->offFirstPage), VERR_INVALID_PARAMETER);
1183
1184 /* Contiguous page lists only ever have a single page, and a
1185 no-bounce page list requires cPages to match the size exactly.
1186 A plain page list currently does not impose any restrictions on cPages. */
1187 ASSERT_GUEST_MSG_RETURN( pPageListInfo->cPages
1188 == (pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList ? 1
1189 : RT_ALIGN_32(pPageListInfo->offFirstPage + cbData, VMMDEV_PAGE_SIZE)
1190 >> VMMDEV_PAGE_SHIFT)
1191 || pGuestParm->enmType == VMMDevHGCMParmType_PageList,
1192 ("offFirstPage=%#x cbData=%#x cPages=%#x enmType=%d\n",
1193 pPageListInfo->offFirstPage, cbData, pPageListInfo->cPages, pGuestParm->enmType),
1194 VERR_INVALID_PARAMETER);
1195
1196 RT_UNTRUSTED_VALIDATED_FENCE();
1197
1198 /*
1199 * Deal with no-bounce buffers first, as
1200 * VMMDevHGCMParmType_PageList is the fallback.
1201 */
1202 if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
1203 {
1204 /* Validate page offsets */
1205 ASSERT_GUEST_MSG_RETURN( !(pPageListInfo->aPages[0] & VMMDEV_PAGE_OFFSET_MASK)
1206 || (pPageListInfo->aPages[0] & VMMDEV_PAGE_OFFSET_MASK) == pPageListInfo->offFirstPage,
1207 ("%#RX64 offFirstPage=%#x\n", pPageListInfo->aPages[0], pPageListInfo->offFirstPage),
1208 VERR_INVALID_POINTER);
1209 uint32_t const cPages = pPageListInfo->cPages;
1210 for (uint32_t iPage = 1; iPage < cPages; iPage++)
1211 ASSERT_GUEST_MSG_RETURN(!(pPageListInfo->aPages[iPage] & VMMDEV_PAGE_OFFSET_MASK),
1212 ("[%#zx]=%#RX64\n", iPage, pPageListInfo->aPages[iPage]), VERR_INVALID_POINTER);
1213 RT_UNTRUSTED_VALIDATED_FENCE();
1214
1215 pGuestParm->u.Pages.cbData = cbData;
1216 pGuestParm->u.Pages.offFirstPage = pPageListInfo->offFirstPage;
1217 pGuestParm->u.Pages.fFlags = pPageListInfo->flags;
1218 pGuestParm->u.Pages.cPages = (uint16_t)cPages;
1219 pGuestParm->u.Pages.fLocked = false;
1220 pGuestParm->u.Pages.paPgLocks = (PPGMPAGEMAPLOCK)vmmdevR3HgcmCallMemAllocZ(pThisCC, pCmd,
1221 ( sizeof(PGMPAGEMAPLOCK)
1222 + sizeof(void *)) * cPages);
1223 AssertReturn(pGuestParm->u.Pages.paPgLocks, VERR_NO_MEMORY);
1224
1225 /* Lock and map the guest pages into host memory. */
1226 int rc = VINF_SUCCESS;
1227 void **papvPages = (void **)&pGuestParm->u.Pages.paPgLocks[cPages];
1228 if (pPageListInfo->flags & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST)
1229 rc = PDMDevHlpPhysBulkGCPhys2CCPtr(pDevIns, cPages, pPageListInfo->aPages, 0 /*fFlags*/,
1230 papvPages, pGuestParm->u.Pages.paPgLocks);
1231 else
1232 rc = PDMDevHlpPhysBulkGCPhys2CCPtrReadOnly(pDevIns, cPages, pPageListInfo->aPages, 0 /*fFlags*/,
1233 (void const **)papvPages, pGuestParm->u.Pages.paPgLocks);
1234 if (RT_SUCCESS(rc))
1235 {
1236 papvPages[0] = (void *)((uintptr_t)papvPages[0] | pPageListInfo->offFirstPage);
1237 pGuestParm->u.Pages.fLocked = true;
1238 break;
1239 }
1240
1241 /* Locking failed, bail out. In case of MMIO we fall back on regular page list handling. */
1242 RTMemFree(pGuestParm->u.Pages.paPgLocks);
1243 pGuestParm->u.Pages.paPgLocks = NULL;
1244 STAM_REL_COUNTER_INC(&pThisCC->StatHgcmFailedPageListLocking);
1245 ASSERT_GUEST_MSG_RETURN(rc == VERR_PGM_PHYS_PAGE_RESERVED, ("cPages=%u %Rrc\n", cPages, rc), rc);
1246 pGuestParm->enmType = VMMDevHGCMParmType_PageList;
1247 }
1248
1249 /*
1250 * Regular page list or contiguous page list.
1251 */
1252 pGuestParm->u.ptr.cbData = cbData;
1253 pGuestParm->u.ptr.offFirstPage = pPageListInfo->offFirstPage;
1254 pGuestParm->u.ptr.cPages = pPageListInfo->cPages;
1255 pGuestParm->u.ptr.fu32Direction = pPageListInfo->flags;
1256 if (pPageListInfo->cPages == 1)
1257 {
1258 pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
1259 pGuestParm->u.ptr.GCPhysSinglePage = pPageListInfo->aPages[0];
1260 }
1261 else
1262 {
1263 pGuestParm->u.ptr.paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd,
1264 pPageListInfo->cPages * sizeof(RTGCPHYS));
1265 AssertReturn(pGuestParm->u.ptr.paPages, VERR_NO_MEMORY);
1266
1267 for (uint32_t iPage = 0; iPage < pGuestParm->u.ptr.cPages; ++iPage)
1268 pGuestParm->u.ptr.paPages[iPage] = pPageListInfo->aPages[iPage];
1269 }
1270 break;
1271 }
1272
1273 case VMMDevHGCMParmType_Embedded:
1274 {
1275#ifdef VBOX_WITH_64_BITS_GUESTS
1276 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.Embedded.cbData, HGCMFunctionParameter32, u.Embedded.cbData);
1277 uint32_t const cbData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.cbData;
1278 uint32_t const offData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.offData;
1279 uint32_t const fFlags = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.fFlags;
1280#else
1281 uint32_t const cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.cbData;
1282 uint32_t const offData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.offData;
1283 uint32_t const fFlags = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.fFlags;
1284#endif
1285 LogFunc(("Embedded guest parameter cb %u, offset %u, flags %#x\n", cbData, offData, fFlags));
1286
1287 ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
1288
1289 /* Check flags and buffer range. */
1290 ASSERT_GUEST_MSG_RETURN(VBOX_HGCM_F_PARM_ARE_VALID(fFlags), ("%#x\n", fFlags), VERR_INVALID_FLAGS);
1291 ASSERT_GUEST_MSG_RETURN( offData >= offExtra
1292 && offData <= cbHGCMCall
1293 && cbData <= cbHGCMCall - offData,
1294 ("offData=%#x cbData=%#x cbHGCMCall=%#x offExtra=%#x\n", offData, cbData, cbHGCMCall, offExtra),
1295 VERR_INVALID_PARAMETER);
1296 RT_UNTRUSTED_VALIDATED_FENCE();
1297
1298 /* We use part of the ptr member. */
1299 pGuestParm->u.ptr.fu32Direction = fFlags;
1300 pGuestParm->u.ptr.cbData = cbData;
1301 pGuestParm->u.ptr.offFirstPage = offData;
1302 pGuestParm->u.ptr.GCPhysSinglePage = pCmd->GCPhys + offData;
1303 pGuestParm->u.ptr.cPages = 1;
1304 pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
1305 break;
1306 }
1307
1308 default:
1309 ASSERT_GUEST_FAILED_RETURN(VERR_INVALID_PARAMETER);
1310 }
1311 }
1312
1313 return VINF_SUCCESS;
1314}
1315
1316/**
1317 * Handles VMMDevHGCMCall request.
1318 *
1319 * @returns VBox status code that the guest should see.
1320 * @param pDevIns The device instance.
1321 * @param pThis The VMMDev shared instance data.
1322 * @param pThisCC The VMMDev ring-3 instance data.
1323 * @param pHGCMCall The request to handle (cached in host memory).
1324 * @param cbHGCMCall Size of the entire request (including HGCM parameters).
1325 * @param GCPhys The guest physical address of the request.
1326 * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
1327 * @param tsArrival The STAM_GET_TS() value when the request arrived.
1328 * @param ppLock Pointer to the lock info pointer (latter can be
1329 * NULL). Set to NULL if HGCM takes lock ownership.
1330 */
1331int vmmdevR3HgcmCall(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall,
1332 RTGCPHYS GCPhys, VMMDevRequestType enmRequestType, uint64_t tsArrival, PVMMDEVREQLOCK *ppLock)
1333{
1334 LogFunc(("client id = %d, function = %d, cParms = %d, enmRequestType = %d, fRequestor = %#x\n", pHGCMCall->u32ClientID,
1335 pHGCMCall->u32Function, pHGCMCall->cParms, enmRequestType, pHGCMCall->header.header.fRequestor));
1336
1337 /*
1338 * Validation.
1339 */
1340 ASSERT_GUEST_RETURN(cbHGCMCall >= sizeof(VMMDevHGCMCall), VERR_INVALID_PARAMETER);
1341#ifdef VBOX_WITH_64_BITS_GUESTS
1342 ASSERT_GUEST_RETURN( enmRequestType == VMMDevReq_HGCMCall32
1343 || enmRequestType == VMMDevReq_HGCMCall64, VERR_INVALID_PARAMETER);
1344#else
1345 ASSERT_GUEST_RETURN(enmRequestType == VMMDevReq_HGCMCall32, VERR_INVALID_PARAMETER);
1346#endif
1347 RT_UNTRUSTED_VALIDATED_FENCE();
1348
1349 /*
1350 * Create a command structure.
1351 */
1352 PVBOXHGCMCMD pCmd;
1353 uint32_t cbHGCMParmStruct;
1354 int rc = vmmdevR3HgcmCallAlloc(pThisCC, pHGCMCall, cbHGCMCall, GCPhys, enmRequestType, &pCmd, &cbHGCMParmStruct);
1355 if (RT_SUCCESS(rc))
1356 {
1357 pCmd->tsArrival = tsArrival;
1358 PVMMDEVREQLOCK pLock = *ppLock;
1359 if (pLock)
1360 {
1361 pCmd->ReqMapLock = pLock->Lock;
1362 pCmd->pvReqLocked = pLock->pvReq;
1363 *ppLock = NULL;
1364 }
1365
1366 rc = vmmdevR3HgcmCallFetchGuestParms(pDevIns, pThisCC, pCmd, pHGCMCall, cbHGCMCall, enmRequestType, cbHGCMParmStruct);
1367 if (RT_SUCCESS(rc))
1368 {
1369 /* Copy guest data to host parameters, so HGCM services can use the data. */
1370 rc = vmmdevR3HgcmInitHostParameters(pDevIns, pThisCC, pCmd, (uint8_t const *)pHGCMCall);
1371 if (RT_SUCCESS(rc))
1372 {
1373 /*
1374 * Pass the function call to the HGCM connector for actual processing.
1375 */
1376 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
1377
1378#if 0 /* DONT ENABLE - for performance hacking. */
1379 if ( pCmd->u.call.u32Function == 9
1380 && pCmd->u.call.cParms == 5)
1381 {
1382 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
1383
1384 if (pCmd->pvReqLocked)
1385 {
1386 VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
1387 pHeader->header.rc = VINF_SUCCESS;
1388 pHeader->result = VINF_SUCCESS;
1389 pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
1390 }
1391 else
1392 {
1393 VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)pHGCMCall;
1394 pHeader->header.rc = VINF_SUCCESS;
1395 pHeader->result = VINF_SUCCESS;
1396 pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
1397 PDMDevHlpPhysWrite(pDevIns, GCPhys, pHeader, sizeof(*pHeader));
1398 }
1399 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
1400 return VINF_HGCM_ASYNC_EXECUTE; /* ignored, but avoids assertions. */
1401 }
1402#endif
1403
1404 rc = pThisCC->pHGCMDrv->pfnCall(pThisCC->pHGCMDrv, pCmd,
1405 pCmd->u.call.u32ClientID, pCmd->u.call.u32Function,
1406 pCmd->u.call.cParms, pCmd->u.call.paHostParms, tsArrival);
1407
1408 if (rc == VINF_HGCM_ASYNC_EXECUTE)
1409 {
1410 /*
1411 * Done. Just update statistics and return.
1412 */
1413#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
1414 uint64_t tsNow;
1415 STAM_GET_TS(tsNow);
1416 STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->StatHgcmCmdArrival, tsNow - tsArrival);
1417#endif
1418 return rc;
1419 }
1420
1421 /*
1422 * Failed, bail out.
1423 */
1424 LogFunc(("pfnCall rc = %Rrc\n", rc));
1425 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
1426 }
1427 }
1428 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
1429 }
1430 return rc;
1431}
1432
1433/**
1434 * VMMDevReq_HGCMCancel worker.
1435 *
1436 * @returns VBox status code that the guest should see.
1437 * @param pThisCC The VMMDev ring-3 instance data.
1438 * @param pHGCMCancel The request to handle (cached in host memory).
1439 * @param GCPhys The address of the request.
1440 *
1441 * @thread EMT
1442 */
1443int vmmdevR3HgcmCancel(PVMMDEVCC pThisCC, const VMMDevHGCMCancel *pHGCMCancel, RTGCPHYS GCPhys)
1444{
1445 NOREF(pHGCMCancel);
1446 int rc = vmmdevR3HgcmCancel2(pThisCC, GCPhys);
1447 return rc == VERR_NOT_FOUND ? VERR_INVALID_PARAMETER : rc;
1448}
1449
1450/**
1451 * VMMDevReq_HGCMCancel2 worker.
1452 *
1453 * @retval VINF_SUCCESS on success.
1454 * @retval VERR_NOT_FOUND if the request was not found.
1455 * @retval VERR_INVALID_PARAMETER if the request address is invalid.
1456 *
1457 * @param pThisCC The VMMDev ring-3 instance data.
1458 * @param GCPhys The address of the request that should be cancelled.
1459 *
1460 * @thread EMT
1461 */
1462int vmmdevR3HgcmCancel2(PVMMDEVCC pThisCC, RTGCPHYS GCPhys)
1463{
1464 if ( GCPhys == 0
1465 || GCPhys == NIL_RTGCPHYS
1466 || GCPhys == NIL_RTGCPHYS32)
1467 {
1468 Log(("vmmdevR3HgcmCancel2: GCPhys=%#x\n", GCPhys));
1469 return VERR_INVALID_PARAMETER;
1470 }
1471
1472 /*
1473 * Locate the command and cancel it while under the protection of
1474 * the lock. hgcmCompletedWorker makes assumptions about this.
1475 */
1476 int rc = vmmdevR3HgcmCmdListLock(pThisCC);
1477 AssertRCReturn(rc, rc);
1478
1479 PVBOXHGCMCMD pCmd = vmmdevR3HgcmFindCommandLocked(pThisCC, GCPhys);
1480 if (pCmd)
1481 {
1482 pCmd->fCancelled = true;
1483
1484 Log(("vmmdevR3HgcmCancel2: Cancelled pCmd=%p / GCPhys=%#x\n", pCmd, GCPhys));
1485 if (pThisCC->pHGCMDrv)
1486 pThisCC->pHGCMDrv->pfnCancelled(pThisCC->pHGCMDrv, pCmd,
1487 pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL ? pCmd->u.call.u32ClientID
1488 : pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT ? pCmd->u.connect.u32ClientID
1489 : pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT ? pCmd->u.disconnect.u32ClientID
1490 : 0);
1491 }
1492 else
1493 rc = VERR_NOT_FOUND;
1494
1495 vmmdevR3HgcmCmdListUnlock(pThisCC);
1496 return rc;
1497}
1498
1499/** Write HGCM call parameters and buffers back to the guest request and memory.
1500 *
1501 * @returns VBox status code that the guest should see.
1502 * @param pDevIns The device instance.
1503 * @param pCmd Completed call command.
1504 * @param pHGCMCall The guest request which needs updating (cached in host memory).
1505 * @param pbReq The request copy or locked memory for handling
1506 * embedded buffers.
1507 */
1508static int vmmdevR3HgcmCompleteCallRequest(PPDMDEVINS pDevIns, PVBOXHGCMCMD pCmd, VMMDevHGCMCall *pHGCMCall, uint8_t *pbReq)
1509{
1510 AssertReturn(pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_INTERNAL_ERROR);
1511
1512 /*
1513 * Go over parameter descriptions saved in pCmd.
1514 */
1515#ifdef VBOX_WITH_64_BITS_GUESTS
1516 HGCMFunctionParameter64 *pReqParm = (HGCMFunctionParameter64 *)(pbReq + sizeof(VMMDevHGCMCall));
1517 size_t const cbHGCMParmStruct = pCmd->enmRequestType == VMMDevReq_HGCMCall64
1518 ? sizeof(HGCMFunctionParameter64) : sizeof(HGCMFunctionParameter32);
1519#else
1520 HGCMFunctionParameter *pReqParm = (HGCMFunctionParameter *)(pbReq + sizeof(VMMDevHGCMCall));
1521 size_t const cbHGCMParmStruct = sizeof(HGCMFunctionParameter);
1522#endif
1523 for (uint32_t i = 0;
1524 i < pCmd->u.call.cParms;
1525#ifdef VBOX_WITH_64_BITS_GUESTS
1526 ++i, pReqParm = (HGCMFunctionParameter64 *)((uint8_t *)pReqParm + cbHGCMParmStruct)
1527#else
1528 ++i, pReqParm = (HGCMFunctionParameter *)((uint8_t *)pReqParm + cbHGCMParmStruct)
1529#endif
1530 )
1531 {
1532 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
1533 VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
1534
1535 const HGCMFunctionParameterType enmType = pGuestParm->enmType;
1536 switch (enmType)
1537 {
1538 case VMMDevHGCMParmType_32bit:
1539 case VMMDevHGCMParmType_64bit:
1540 {
1541 const VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
1542 const void *pvSrc = enmType == VMMDevHGCMParmType_32bit ? (void *)&pHostParm->u.uint32
1543 : (void *)&pHostParm->u.uint64;
1544/** @todo optimize memcpy away here. */
1545 memcpy((uint8_t *)pHGCMCall + pVal->offValue, pvSrc, pVal->cbValue);
1546 break;
1547 }
1548
1549 case VMMDevHGCMParmType_LinAddr_In:
1550 case VMMDevHGCMParmType_LinAddr_Out:
1551 case VMMDevHGCMParmType_LinAddr:
1552 case VMMDevHGCMParmType_PageList:
1553 {
1554/** @todo Update the return buffer size? */
1555 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1556 if ( pPtr->cbData > 0
1557 && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
1558 {
1559 const void *pvSrc = pHostParm->u.pointer.addr;
1560 uint32_t cbSrc = pHostParm->u.pointer.size;
1561 int rc = vmmdevR3HgcmGuestBufferWrite(pDevIns, pPtr, pvSrc, cbSrc);
1562 if (RT_FAILURE(rc))
1563 break;
1564 }
1565 break;
1566 }
1567
1568 case VMMDevHGCMParmType_Embedded:
1569 {
1570 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1571
1572 /* Update size. */
1573#ifdef VBOX_WITH_64_BITS_GUESTS
1574 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.Embedded.cbData, HGCMFunctionParameter32, u.Embedded.cbData);
1575#endif
1576 pReqParm->u.Embedded.cbData = pHostParm->u.pointer.size;
1577
1578 /* Copy out data. */
1579 if ( pPtr->cbData > 0
1580 && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
1581 {
1582 const void *pvSrc = pHostParm->u.pointer.addr;
1583 uint32_t cbSrc = pHostParm->u.pointer.size;
1584 uint32_t cbToCopy = RT_MIN(cbSrc, pPtr->cbData);
1585 memcpy(pbReq + pPtr->offFirstPage, pvSrc, cbToCopy);
1586 }
1587 break;
1588 }
1589
1590 case VMMDevHGCMParmType_ContiguousPageList:
1591 {
1592 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1593
1594 /* Update size. */
1595#ifdef VBOX_WITH_64_BITS_GUESTS
1596 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
1597#endif
1598 pReqParm->u.PageList.size = pHostParm->u.pointer.size;
1599
1600 /* Copy out data. */
1601 if ( pPtr->cbData > 0
1602 && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
1603 {
1604 const void *pvSrc = pHostParm->u.pointer.addr;
1605 uint32_t cbSrc = pHostParm->u.pointer.size;
1606 uint32_t cbToCopy = RT_MIN(cbSrc, pPtr->cbData);
1607 int rc = PDMDevHlpPhysWrite(pDevIns, pGuestParm->u.ptr.paPages[0] | pGuestParm->u.ptr.offFirstPage,
1608 pvSrc, cbToCopy);
1609 if (RT_FAILURE(rc))
1610 break;
1611 }
1612 break;
1613 }
1614
1615 case VMMDevHGCMParmType_NoBouncePageList:
1616 {
1617 /* Update size. */
1618#ifdef VBOX_WITH_64_BITS_GUESTS
1619 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
1620#endif
1621 pReqParm->u.PageList.size = pHostParm->u.Pages.cb;
1622
1623 /* unlock early. */
1624 if (pGuestParm->u.Pages.fLocked)
1625 {
1626 PDMDevHlpPhysBulkReleasePageMappingLocks(pDevIns, pGuestParm->u.Pages.cPages,
1627 pGuestParm->u.Pages.paPgLocks);
1628 pGuestParm->u.Pages.fLocked = false;
1629 }
1630 break;
1631 }
1632
1633 default:
1634 break;
1635 }
1636 }
1637
1638 return VINF_SUCCESS;
1639}
1640
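/*
 * Editorial addition: for buffer-like parameters the completion code above copies host data
 * back only when the direction includes "from host" and clamps the copy to the guest buffer
 * size (RT_MIN(cbSrc, pPtr->cbData)). A minimal sketch of that clamped copy-out follows;
 * exampleCopyOut() is hypothetical, and returning the number of bytes copied is an assumption
 * made for illustration only.
 */
#if 0 /* Illustrative sketch only; not part of the original source and not compiled. */
static uint32_t exampleCopyOut(void *pvGuestBuf, uint32_t cbGuestBuf,
                               const void *pvHost, uint32_t cbHost, uint32_t fDirection)
{
    if (   cbGuestBuf == 0
        || !(fDirection & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
        return 0;

    uint32_t const cbToCopy = RT_MIN(cbHost, cbGuestBuf); /* never overflow the guest buffer */
    memcpy(pvGuestBuf, pvHost, cbToCopy);
    return cbToCopy;
}
#endif
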
1641/** Update HGCM request in the guest memory and mark it as completed.
1642 *
1643 * @returns VINF_SUCCESS or VERR_CANCELLED.
1644 * @param pInterface Pointer to this PDM interface.
1645 * @param result HGCM completion status code (VBox status code).
1646 * @param pCmd Completed command, which contains updated host parameters.
1647 *
1648 * @thread EMT
1649 */
1650static int hgcmCompletedWorker(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmd)
1651{
1652 PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
1653 PPDMDEVINS pDevIns = pThisCC->pDevIns;
1654 PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
1655#ifdef VBOX_WITH_DTRACE
1656 uint32_t idFunction = 0;
1657 uint32_t idClient = 0;
1658#endif
1659
1660 if (result == VINF_HGCM_SAVE_STATE)
1661 {
1662 /* If the completion routine was called while the HGCM service saves its state,
1663         * then there is currently nothing to be done here. The pCmd stays in the list and
1664         * will be saved later when the VMMDev state is saved, then re-submitted on load.
1665         *
1666         * It is assumed that VMMDev saves its state after the HGCM services (the VMMDev
1667         * driver is attached by the constructor before it registers its SSM state) and,
1668         * therefore, vmmdevR3HgcmSaveState does not remove the VBOXHGCMCMD structures
1669         * from the list while HGCM is still using them.
1670 */
1671 LogFlowFunc(("VINF_HGCM_SAVE_STATE for command %p\n", pCmd));
1672 return VINF_SUCCESS;
1673 }
1674
1675 VBOXDD_HGCMCALL_COMPLETED_EMT(pCmd, result);
1676
1677 int rc = VINF_SUCCESS;
1678
1679 /*
1680 * The cancellation protocol requires us to remove the command here
1681 * and then check the flag. Cancelled commands must not be written
1682 * back to guest memory.
1683 */
1684 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
1685
1686 if (RT_LIKELY(!pCmd->fCancelled))
1687 {
1688 if (!pCmd->pvReqLocked)
1689 {
1690 /*
1691 * Request is not locked:
1692 */
1693 VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
1694 if (pHeader)
1695 {
1696 /*
1697 * Read the request from the guest memory for updating.
1698                 * The request data is not used for anything but checking the request type.
1699 */
1700 PDMDevHlpPhysRead(pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
1701 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1702
1703 /* Verify the request type. This is the only field which is used from the guest memory. */
1704 const VMMDevRequestType enmRequestType = pHeader->header.requestType;
1705 if ( enmRequestType == pCmd->enmRequestType
1706 || enmRequestType == VMMDevReq_HGCMCancel)
1707 {
1708 RT_UNTRUSTED_VALIDATED_FENCE();
1709
1710 /*
1711 * Update parameters and data buffers.
1712 */
1713 switch (enmRequestType)
1714 {
1715#ifdef VBOX_WITH_64_BITS_GUESTS
1716 case VMMDevReq_HGCMCall64:
1717#endif
1718 case VMMDevReq_HGCMCall32:
1719 {
1720 VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
1721 rc = vmmdevR3HgcmCompleteCallRequest(pDevIns, pCmd, pHGCMCall, (uint8_t *)pHeader);
1722#ifdef VBOX_WITH_DTRACE
1723 idFunction = pCmd->u.call.u32Function;
1724 idClient = pCmd->u.call.u32ClientID;
1725#endif
1726 break;
1727 }
1728
1729 case VMMDevReq_HGCMConnect:
1730 {
1731 /* save the client id in the guest request packet */
1732 VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
1733 pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
1734 break;
1735 }
1736
1737 default:
1738 /* make compiler happy */
1739 break;
1740 }
1741 }
1742 else
1743 {
1744 /* Guest has changed the command type. */
1745 LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
1746 pCmd->enmCmdType, pHeader->header.requestType));
1747
1748 ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
1749 }
1750
1751 /* Setup return code for the guest. */
1752 if (RT_SUCCESS(rc))
1753 pHeader->result = result;
1754 else
1755 pHeader->result = rc;
1756
1757 /* First write back the request. */
1758 PDMDevHlpPhysWrite(pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
1759
1760 /* Mark request as processed. */
1761 pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
1762
1763 /* Second write the flags to mark the request as processed. */
1764 PDMDevHlpPhysWrite(pDevIns, pCmd->GCPhys + RT_UOFFSETOF(VMMDevHGCMRequestHeader, fu32Flags),
1765 &pHeader->fu32Flags, sizeof(pHeader->fu32Flags));
1766
1767                /* Now that the command has been removed from the internal list, notify the guest. */
1768 VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM);
1769
1770 RTMemFreeZ(pHeader, pCmd->cbRequest);
1771 }
1772 else
1773 {
1774 LogRelMax(10, ("VMMDev: Failed to allocate %u bytes for HGCM request completion!!!\n", pCmd->cbRequest));
1775 }
1776 }
1777 /*
1778 * Request was locked:
1779 */
1780 else
1781 {
1782 VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
1783
1784 /* Verify the request type. This is the only field which is used from the guest memory. */
1785 const VMMDevRequestType enmRequestType = pHeader->header.requestType;
1786 if ( enmRequestType == pCmd->enmRequestType
1787 || enmRequestType == VMMDevReq_HGCMCancel)
1788 {
1789 RT_UNTRUSTED_VALIDATED_FENCE();
1790
1791 /*
1792 * Update parameters and data buffers.
1793 */
1794 switch (enmRequestType)
1795 {
1796#ifdef VBOX_WITH_64_BITS_GUESTS
1797 case VMMDevReq_HGCMCall64:
1798#endif
1799 case VMMDevReq_HGCMCall32:
1800 {
1801 VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
1802 rc = vmmdevR3HgcmCompleteCallRequest(pDevIns, pCmd, pHGCMCall, (uint8_t *)pHeader);
1803#ifdef VBOX_WITH_DTRACE
1804 idFunction = pCmd->u.call.u32Function;
1805 idClient = pCmd->u.call.u32ClientID;
1806#endif
1807 break;
1808 }
1809
1810 case VMMDevReq_HGCMConnect:
1811 {
1812 /* save the client id in the guest request packet */
1813 VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
1814 pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
1815 break;
1816 }
1817
1818 default:
1819 /* make compiler happy */
1820 break;
1821 }
1822 }
1823 else
1824 {
1825 /* Guest has changed the command type. */
1826 LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
1827 pCmd->enmCmdType, pHeader->header.requestType));
1828
1829 ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
1830 }
1831
1832 /* Setup return code for the guest. */
1833 if (RT_SUCCESS(rc))
1834 pHeader->result = result;
1835 else
1836 pHeader->result = rc;
1837
1838 /* Mark request as processed. */
1839 ASMAtomicOrU32(&pHeader->fu32Flags, VBOX_HGCM_REQ_DONE);
1840
1841            /* Now that the command has been removed from the internal list, notify the guest. */
1842 VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM);
1843 }
1844
1845 /* Set the status to success for now, though we might consider passing
1846 along the vmmdevR3HgcmCompleteCallRequest errors... */
1847 rc = VINF_SUCCESS;
1848 }
1849 else
1850 {
1851 LogFlowFunc(("Cancelled command %p\n", pCmd));
1852 rc = VERR_CANCELLED;
1853 }
1854
1855#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
1856 /* Save for final stats. */
1857 uint64_t const tsArrival = pCmd->tsArrival;
1858 uint64_t const tsComplete = pCmd->tsComplete;
1859#endif
1860
1861 /* Deallocate the command memory. Enter the critsect for proper */
1862 VBOXDD_HGCMCALL_COMPLETED_DONE(pCmd, idFunction, idClient, result);
1863 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
1864
1865#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
1866 /* Update stats. */
1867 uint64_t tsNow;
1868 STAM_GET_TS(tsNow);
1869 STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->StatHgcmCmdCompletion, tsNow - tsComplete);
1870 if (tsArrival != 0)
1871 STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->StatHgcmCmdTotal, tsNow - tsArrival);
1872#endif
1873
1874 return rc;
1875}
1876
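/*
 * Editorial addition: for a non-locked request the worker above completes in two separate
 * guest-physical writes: first the whole request body (results and output data), then only
 * the fu32Flags word with VBOX_HGCM_REQ_DONE set, so the guest cannot observe DONE before
 * the payload is in place (the locked path achieves the same with a single atomic OR).
 * Minimal sketch of that ordering; EXAMPLEHDR and examplePhysWrite() are hypothetical
 * stand-ins for the real request header and PDMDevHlpPhysWrite.
 */
#if 0 /* Illustrative sketch only; not part of the original source and not compiled. */
typedef struct EXAMPLEHDR
{
    uint32_t cbSize;
    uint32_t fu32Flags;     /* the guest polls this word for EXAMPLE_REQ_DONE */
    int32_t  rcResult;
} EXAMPLEHDR;
#define EXAMPLE_REQ_DONE  UINT32_C(0x00000001)

static void examplePhysWrite(RTGCPHYS GCPhys, const void *pv, size_t cb); /* hypothetical helper */

static void exampleCompleteRequest(RTGCPHYS GCPhysReq, EXAMPLEHDR *pHdr, size_t cbReq, int32_t rcResult)
{
    pHdr->rcResult = rcResult;

    /* Step 1: write back the whole request without the DONE flag. */
    examplePhysWrite(GCPhysReq, pHdr, cbReq);

    /* Step 2: set DONE and write only the flags word, so the flag becomes visible
       to the guest strictly after the payload. */
    pHdr->fu32Flags |= EXAMPLE_REQ_DONE;
    examplePhysWrite(GCPhysReq + RT_UOFFSETOF(EXAMPLEHDR, fu32Flags),
                     &pHdr->fu32Flags, sizeof(pHdr->fu32Flags));
}
#endif
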
1877/**
1878 * HGCM callback for request completion. Forwards to hgcmCompletedWorker.
1879 *
1880 * @returns VINF_SUCCESS or VERR_CANCELLED.
1881 * @param pInterface Pointer to this PDM interface.
1882 * @param result HGCM completion status code (VBox status code).
1883 * @param pCmd Completed command, which contains updated host parameters.
1884 */
1885DECLCALLBACK(int) hgcmR3Completed(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmd)
1886{
1887#if 0 /* This seems to be significantly slower. Half of MsgTotal time seems to be spent here. */
1888 PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
1889 STAM_GET_TS(pCmd->tsComplete);
1890
1891 VBOXDD_HGCMCALL_COMPLETED_REQ(pCmd, result);
1892
1893/** @todo no longer necessary to forward to EMT, but it might be more
1894 * efficient...? */
1895 /* Not safe to execute asynchronously; forward to EMT */
1896 int rc = VMR3ReqCallVoidNoWait(PDMDevHlpGetVM(pDevIns), VMCPUID_ANY,
1897 (PFNRT)hgcmCompletedWorker, 3, pInterface, result, pCmd);
1898 AssertRC(rc);
1899 return VINF_SUCCESS; /* cannot tell if canceled or not... */
1900#else
1901 STAM_GET_TS(pCmd->tsComplete);
1902 VBOXDD_HGCMCALL_COMPLETED_REQ(pCmd, result);
1903 return hgcmCompletedWorker(pInterface, result, pCmd);
1904#endif
1905}
1906
1907/**
1908 * @interface_method_impl{PDMIHGCMPORT,pfnIsCmdRestored}
1909 */
1910DECLCALLBACK(bool) hgcmR3IsCmdRestored(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
1911{
1912 RT_NOREF(pInterface);
1913 return pCmd && pCmd->fRestored;
1914}
1915
1916/**
1917 * @interface_method_impl{PDMIHGCMPORT,pfnIsCmdCancelled}
1918 */
1919DECLCALLBACK(bool) hgcmR3IsCmdCancelled(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
1920{
1921 RT_NOREF(pInterface);
1922 return pCmd && pCmd->fCancelled;
1923}
1924
1925/**
1926 * @interface_method_impl{PDMIHGCMPORT,pfnGetRequestor}
1927 */
1928DECLCALLBACK(uint32_t) hgcmR3GetRequestor(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
1929{
1930 PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
1931 PVMMDEV pThis = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVMMDEV);
1932 AssertPtrReturn(pCmd, VMMDEV_REQUESTOR_LOWEST);
1933 if (pThis->guestInfo2.fFeatures & VBOXGSTINFO2_F_REQUESTOR_INFO)
1934 return pCmd->fRequestor;
1935 return VMMDEV_REQUESTOR_LEGACY;
1936}
1937
1938/**
1939 * @interface_method_impl{PDMIHGCMPORT,pfnGetVMMDevSessionId}
1940 */
1941DECLCALLBACK(uint64_t) hgcmR3GetVMMDevSessionId(PPDMIHGCMPORT pInterface)
1942{
1943 PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
1944 PVMMDEV pThis = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVMMDEV);
1945 return pThis->idSession;
1946}
1947
1948/** Save information about pending HGCM requests from pThisCC->listHGCMCmd.
1949 *
1950 * @returns VBox status code that the guest should see.
1951 * @param pThisCC The VMMDev ring-3 instance data.
1952 * @param pSSM SSM handle for SSM functions.
1953 *
1954 * @thread EMT
1955 */
1956int vmmdevR3HgcmSaveState(PVMMDEVCC pThisCC, PSSMHANDLE pSSM)
1957{
1958 PCPDMDEVHLPR3 pHlp = pThisCC->pDevIns->pHlpR3;
1959
1960 LogFlowFunc(("\n"));
1961
1962 /* Compute how many commands are pending. */
1963 uint32_t cCmds = 0;
1964 PVBOXHGCMCMD pCmd;
1965 RTListForEach(&pThisCC->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
1966 {
1967 LogFlowFunc(("pCmd %p\n", pCmd));
1968 ++cCmds;
1969 }
1970 LogFlowFunc(("cCmds = %d\n", cCmds));
1971
1972 /* Save number of commands. */
1973 int rc = pHlp->pfnSSMPutU32(pSSM, cCmds);
1974 AssertRCReturn(rc, rc);
1975
1976 if (cCmds > 0)
1977 {
1978 RTListForEach(&pThisCC->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
1979 {
1980 LogFlowFunc(("Saving %RGp, size %d\n", pCmd->GCPhys, pCmd->cbRequest));
1981
1982 /** @todo Don't save cancelled requests! It serves no purpose. See restore and
1983 * @bugref{4032#c4} for details. */
1984 pHlp->pfnSSMPutU32 (pSSM, (uint32_t)pCmd->enmCmdType);
1985 pHlp->pfnSSMPutBool (pSSM, pCmd->fCancelled);
1986 pHlp->pfnSSMPutGCPhys (pSSM, pCmd->GCPhys);
1987 pHlp->pfnSSMPutU32 (pSSM, pCmd->cbRequest);
1988 pHlp->pfnSSMPutU32 (pSSM, (uint32_t)pCmd->enmRequestType);
1989 const uint32_t cParms = pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL ? pCmd->u.call.cParms : 0;
1990 rc = pHlp->pfnSSMPutU32(pSSM, cParms);
1991 AssertRCReturn(rc, rc);
1992
1993 if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
1994 {
1995 pHlp->pfnSSMPutU32 (pSSM, pCmd->u.call.u32ClientID);
1996 rc = pHlp->pfnSSMPutU32(pSSM, pCmd->u.call.u32Function);
1997 AssertRCReturn(rc, rc);
1998
1999 /* Guest parameters. */
2000 uint32_t i;
2001 for (i = 0; i < pCmd->u.call.cParms; ++i)
2002 {
2003 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
2004
2005 rc = pHlp->pfnSSMPutU32(pSSM, (uint32_t)pGuestParm->enmType);
2006 AssertRCReturn(rc, rc);
2007
2008 if ( pGuestParm->enmType == VMMDevHGCMParmType_32bit
2009 || pGuestParm->enmType == VMMDevHGCMParmType_64bit)
2010 {
2011 const VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
2012 pHlp->pfnSSMPutU64 (pSSM, pVal->u64Value);
2013 pHlp->pfnSSMPutU32 (pSSM, pVal->offValue);
2014 rc = pHlp->pfnSSMPutU32(pSSM, pVal->cbValue);
2015 }
2016 else if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
2017 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
2018 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
2019 || pGuestParm->enmType == VMMDevHGCMParmType_PageList
2020 || pGuestParm->enmType == VMMDevHGCMParmType_Embedded
2021 || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
2022 {
2023 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
2024 pHlp->pfnSSMPutU32 (pSSM, pPtr->cbData);
2025 pHlp->pfnSSMPutU32 (pSSM, pPtr->offFirstPage);
2026 pHlp->pfnSSMPutU32 (pSSM, pPtr->cPages);
2027 rc = pHlp->pfnSSMPutU32(pSSM, pPtr->fu32Direction);
2028
2029 uint32_t iPage;
2030 for (iPage = 0; RT_SUCCESS(rc) && iPage < pPtr->cPages; ++iPage)
2031 rc = pHlp->pfnSSMPutGCPhys(pSSM, pPtr->paPages[iPage]);
2032 }
2033 else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
2034 {
2035 /* We don't have the page addresses here, so it will need to be
2036 restored from guest memory. This isn't an issue as it is only
2037                           used with services which won't survive a save/restore anyway. */
2038 }
2039 else
2040 {
2041 AssertFailedStmt(rc = VERR_INTERNAL_ERROR);
2042 }
2043 AssertRCReturn(rc, rc);
2044 }
2045 }
2046 else if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
2047 {
2048 pHlp->pfnSSMPutU32(pSSM, pCmd->u.connect.u32ClientID);
2049 pHlp->pfnSSMPutMem(pSSM, pCmd->u.connect.pLoc, sizeof(*pCmd->u.connect.pLoc));
2050 }
2051 else if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT)
2052 {
2053 pHlp->pfnSSMPutU32(pSSM, pCmd->u.disconnect.u32ClientID);
2054 }
2055 else
2056 {
2057 AssertFailedReturn(VERR_INTERNAL_ERROR);
2058 }
2059
2060            /* A reserved field that will allow extending the saved data for a command. */
2061 rc = pHlp->pfnSSMPutU32(pSSM, 0);
2062 AssertRCReturn(rc, rc);
2063 }
2064 }
2065
2066    /* A reserved field that will allow extending the saved data for VMMDevHGCM. */
2067 rc = pHlp->pfnSSMPutU32(pSSM, 0);
2068 AssertRCReturn(rc, rc);
2069
2070 return rc;
2071}
2072
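/*
 * Editorial note on the saved state layout: as can be read off vmmdevR3HgcmSaveState above,
 * each pending command is written as the following record (current format):
 *   u32    enmCmdType
 *   bool   fCancelled
 *   GCPhys GCPhys
 *   u32    cbRequest
 *   u32    enmRequestType
 *   u32    cParms                       (0 unless the command is a call)
 *   - VBOXHGCMCMDTYPE_CALL:       u32 u32ClientID, u32 u32Function, then per parameter
 *                                 u32 enmType followed by either (u64 value, u32 offValue,
 *                                 u32 cbValue) for value types, or (u32 cbData, u32 offFirstPage,
 *                                 u32 cPages, u32 fu32Direction, cPages x GCPhys) for pointer
 *                                 types, and nothing extra for NoBouncePageList
 *   - VBOXHGCMCMDTYPE_CONNECT:    u32 u32ClientID, the service location structure (pLoc)
 *   - VBOXHGCMCMDTYPE_DISCONNECT: u32 u32ClientID
 *   u32    reserved (always written as 0)
 */
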
2073/** Load information about pending HGCM requests.
2074 *
2075 * Allocate VBOXHGCMCMD commands and add them to pThisCC->listHGCMCmd
2076 * temporarily. vmmdevR3HgcmLoadStateDone will process the temporary list. This
2077 * includes loading the correct fRequestor fields.
2078 *
2079 * @returns VBox status code that the guest should see.
2080 * @param pDevIns The device instance.
2081 * @param pThis The VMMDev shared instance data.
2082 * @param pThisCC The VMMDev ring-3 instance data.
2083 * @param pSSM SSM handle for SSM functions.
2084 * @param uVersion Saved state version.
2085 *
2086 * @thread EMT
2087 */
2088int vmmdevR3HgcmLoadState(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, PSSMHANDLE pSSM, uint32_t uVersion)
2089{
2090 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
2091
2092 LogFlowFunc(("\n"));
2093
2094 pThisCC->uSavedStateVersion = uVersion; /* For vmmdevR3HgcmLoadStateDone */
2095
2096 /* Read how many commands were pending. */
2097 uint32_t cCmds = 0;
2098 int rc = pHlp->pfnSSMGetU32(pSSM, &cCmds);
2099 AssertRCReturn(rc, rc);
2100
2101 LogFlowFunc(("cCmds = %d\n", cCmds));
2102
2103 if (uVersion >= VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS)
2104 {
2105 /* Saved information about all HGCM parameters. */
2106 uint32_t u32;
2107
2108 uint32_t iCmd;
2109 for (iCmd = 0; iCmd < cCmds; ++iCmd)
2110 {
2111 /* Command fields. */
2112 VBOXHGCMCMDTYPE enmCmdType;
2113 bool fCancelled;
2114 RTGCPHYS GCPhys;
2115 uint32_t cbRequest;
2116 VMMDevRequestType enmRequestType;
2117 uint32_t cParms;
2118
2119 pHlp->pfnSSMGetU32 (pSSM, &u32);
2120 enmCmdType = (VBOXHGCMCMDTYPE)u32;
2121 pHlp->pfnSSMGetBool (pSSM, &fCancelled);
2122 pHlp->pfnSSMGetGCPhys (pSSM, &GCPhys);
2123 pHlp->pfnSSMGetU32 (pSSM, &cbRequest);
2124 pHlp->pfnSSMGetU32 (pSSM, &u32);
2125 enmRequestType = (VMMDevRequestType)u32;
2126 rc = pHlp->pfnSSMGetU32(pSSM, &cParms);
2127 AssertRCReturn(rc, rc);
2128
2129 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, enmCmdType, GCPhys, cbRequest, cParms, 0 /*fRequestor*/);
2130 AssertReturn(pCmd, VERR_NO_MEMORY);
2131
2132 pCmd->fCancelled = fCancelled;
2133 pCmd->GCPhys = GCPhys;
2134 pCmd->cbRequest = cbRequest;
2135 pCmd->enmRequestType = enmRequestType;
2136
2137 if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
2138 {
2139 pHlp->pfnSSMGetU32 (pSSM, &pCmd->u.call.u32ClientID);
2140 rc = pHlp->pfnSSMGetU32(pSSM, &pCmd->u.call.u32Function);
2141 AssertRCReturn(rc, rc);
2142
2143 /* Guest parameters. */
2144 uint32_t i;
2145 for (i = 0; i < cParms; ++i)
2146 {
2147 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
2148
2149 rc = pHlp->pfnSSMGetU32(pSSM, &u32);
2150 AssertRCReturn(rc, rc);
2151 pGuestParm->enmType = (HGCMFunctionParameterType)u32;
2152
2153 if ( pGuestParm->enmType == VMMDevHGCMParmType_32bit
2154 || pGuestParm->enmType == VMMDevHGCMParmType_64bit)
2155 {
2156 VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
2157 pHlp->pfnSSMGetU64 (pSSM, &pVal->u64Value);
2158 pHlp->pfnSSMGetU32 (pSSM, &pVal->offValue);
2159 rc = pHlp->pfnSSMGetU32(pSSM, &pVal->cbValue);
2160 }
2161 else if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
2162 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
2163 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
2164 || pGuestParm->enmType == VMMDevHGCMParmType_PageList
2165 || pGuestParm->enmType == VMMDevHGCMParmType_Embedded
2166 || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
2167 {
2168 VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
2169 pHlp->pfnSSMGetU32 (pSSM, &pPtr->cbData);
2170 pHlp->pfnSSMGetU32 (pSSM, &pPtr->offFirstPage);
2171 pHlp->pfnSSMGetU32 (pSSM, &pPtr->cPages);
2172 rc = pHlp->pfnSSMGetU32(pSSM, &pPtr->fu32Direction);
2173 if (RT_SUCCESS(rc))
2174 {
2175 if (pPtr->cPages == 1)
2176 pPtr->paPages = &pPtr->GCPhysSinglePage;
2177 else
2178 {
2179 AssertReturn( pGuestParm->enmType != VMMDevHGCMParmType_Embedded
2180 && pGuestParm->enmType != VMMDevHGCMParmType_ContiguousPageList, VERR_INTERNAL_ERROR_3);
2181 pPtr->paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd,
2182 pPtr->cPages * sizeof(RTGCPHYS));
2183 AssertStmt(pPtr->paPages, rc = VERR_NO_MEMORY);
2184 }
2185
2186 if (RT_SUCCESS(rc))
2187 {
2188 uint32_t iPage;
2189 for (iPage = 0; iPage < pPtr->cPages; ++iPage)
2190 rc = pHlp->pfnSSMGetGCPhys(pSSM, &pPtr->paPages[iPage]);
2191 }
2192 }
2193 }
2194 else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
2195 {
2196                        /* This request type can only be restored from guest memory for now. */
2197 pCmd->fRestoreFromGuestMem = true;
2198 }
2199 else
2200 {
2201 AssertFailedStmt(rc = VERR_INTERNAL_ERROR);
2202 }
2203 AssertRCReturn(rc, rc);
2204 }
2205 }
2206 else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
2207 {
2208 pHlp->pfnSSMGetU32(pSSM, &pCmd->u.connect.u32ClientID);
2209 rc = pHlp->pfnSSMGetMem(pSSM, pCmd->u.connect.pLoc, sizeof(*pCmd->u.connect.pLoc));
2210 AssertRCReturn(rc, rc);
2211 }
2212 else if (enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT)
2213 {
2214 rc = pHlp->pfnSSMGetU32(pSSM, &pCmd->u.disconnect.u32ClientID);
2215 AssertRCReturn(rc, rc);
2216 }
2217 else
2218 {
2219 AssertFailedReturn(VERR_INTERNAL_ERROR);
2220 }
2221
2222            /* A reserved field that will allow extending the saved data for a command. */
2223 rc = pHlp->pfnSSMGetU32(pSSM, &u32);
2224 AssertRCReturn(rc, rc);
2225
2226 /*
2227 * Do not restore cancelled calls. Why do we save them to start with?
2228 *
2229 * The guest memory no longer contains a valid request! So, it is not
2230 * possible to restore it. The memory is often reused for a new request
2231 * by now and we will end up trying to complete that more than once if
2232 * we restore a cancelled call. In some cases VERR_HGCM_INVALID_CLIENT_ID
2233 * is returned, though it might just be silent memory corruption.
2234 */
2235 /* See current version above. */
2236 if (!fCancelled)
2237 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2238 else
2239 {
2240 Log(("vmmdevR3HgcmLoadState: Skipping cancelled request: enmCmdType=%d GCPhys=%#RX32 LB %#x\n",
2241 enmCmdType, GCPhys, cbRequest));
2242 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2243 }
2244 }
2245
2246        /* A reserved field that will allow extending the saved data for VMMDevHGCM. */
2247 rc = pHlp->pfnSSMGetU32(pSSM, &u32);
2248 AssertRCReturn(rc, rc);
2249 }
2250 else if (uVersion >= 9)
2251 {
2252 /* Version 9+: Load information about commands. Pre-rewrite. */
2253 uint32_t u32;
2254
2255 uint32_t iCmd;
2256 for (iCmd = 0; iCmd < cCmds; ++iCmd)
2257 {
2258 VBOXHGCMCMDTYPE enmCmdType;
2259 bool fCancelled;
2260 RTGCPHYS GCPhys;
2261 uint32_t cbRequest;
2262 uint32_t cLinAddrs;
2263
2264 pHlp->pfnSSMGetGCPhys (pSSM, &GCPhys);
2265 rc = pHlp->pfnSSMGetU32(pSSM, &cbRequest);
2266 AssertRCReturn(rc, rc);
2267
2268 LogFlowFunc(("Restoring %RGp size %x bytes\n", GCPhys, cbRequest));
2269
2270            /* For uVersion <= 12, this was the size of the entire command.
2271 * Now the command is reconstructed in vmmdevR3HgcmLoadStateDone.
2272 */
2273 if (uVersion <= 12)
2274 pHlp->pfnSSMSkip(pSSM, sizeof (uint32_t));
2275
2276 pHlp->pfnSSMGetU32 (pSSM, &u32);
2277 enmCmdType = (VBOXHGCMCMDTYPE)u32;
2278 pHlp->pfnSSMGetBool (pSSM, &fCancelled);
2279 /* How many linear pointers. Always 0 if not a call command. */
2280 rc = pHlp->pfnSSMGetU32(pSSM, &cLinAddrs);
2281 AssertRCReturn(rc, rc);
2282
2283 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, enmCmdType, GCPhys, cbRequest, cLinAddrs, 0 /*fRequestor*/);
2284 AssertReturn(pCmd, VERR_NO_MEMORY);
2285
2286 pCmd->fCancelled = fCancelled;
2287 pCmd->GCPhys = GCPhys;
2288 pCmd->cbRequest = cbRequest;
2289
2290 if (cLinAddrs > 0)
2291 {
2292 /* Skip number of pages for all LinAddrs in this command. */
2293 pHlp->pfnSSMSkip(pSSM, sizeof(uint32_t));
2294
2295 uint32_t i;
2296 for (i = 0; i < cLinAddrs; ++i)
2297 {
2298 VBOXHGCMPARMPTR * const pPtr = &pCmd->u.call.paGuestParms[i].u.ptr;
2299
2300 /* Index of the parameter. Use cbData field to store the index. */
2301 pHlp->pfnSSMGetU32 (pSSM, &pPtr->cbData);
2302 pHlp->pfnSSMGetU32 (pSSM, &pPtr->offFirstPage);
2303 rc = pHlp->pfnSSMGetU32(pSSM, &pPtr->cPages);
2304 AssertRCReturn(rc, rc);
2305
2306 pPtr->paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd, pPtr->cPages * sizeof(RTGCPHYS));
2307 AssertReturn(pPtr->paPages, VERR_NO_MEMORY);
2308
2309 uint32_t iPage;
2310 for (iPage = 0; iPage < pPtr->cPages; ++iPage)
2311 rc = pHlp->pfnSSMGetGCPhys(pSSM, &pPtr->paPages[iPage]);
2312 }
2313 }
2314
2315            /* A reserved field that will allow extending the saved data for a command. */
2316 rc = pHlp->pfnSSMGetU32(pSSM, &u32);
2317 AssertRCReturn(rc, rc);
2318
2319 /* See current version above. */
2320 if (!fCancelled)
2321 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2322 else
2323 {
2324 Log(("vmmdevR3HgcmLoadState: Skipping cancelled request: enmCmdType=%d GCPhys=%#RX32 LB %#x\n",
2325 enmCmdType, GCPhys, cbRequest));
2326 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2327 }
2328 }
2329
2330        /* A reserved field that will allow extending the saved data for VMMDevHGCM. */
2331 rc = pHlp->pfnSSMGetU32(pSSM, &u32);
2332 AssertRCReturn(rc, rc);
2333 }
2334 else
2335 {
2336        /* Ancient. Only the guest physical address and request size are saved. */
2337 uint32_t iCmd;
2338 for (iCmd = 0; iCmd < cCmds; ++iCmd)
2339 {
2340 RTGCPHYS GCPhys;
2341 uint32_t cbRequest;
2342
2343 pHlp->pfnSSMGetGCPhys(pSSM, &GCPhys);
2344 rc = pHlp->pfnSSMGetU32(pSSM, &cbRequest);
2345 AssertRCReturn(rc, rc);
2346
2347 LogFlowFunc(("Restoring %RGp size %x bytes\n", GCPhys, cbRequest));
2348
2349 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_LOADSTATE, GCPhys, cbRequest, 0, 0 /*fRequestor*/);
2350 AssertReturn(pCmd, VERR_NO_MEMORY);
2351
2352 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2353 }
2354 }
2355
2356 return rc;
2357}
2358
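/*
 * Editorial addition: when restoring pointer parameters above, a one-page page list is kept
 * inline (paPages simply points at GCPhysSinglePage) and only multi-page lists get a separate
 * allocation. A minimal sketch of that pattern follows; the EXAMPLE* names are hypothetical and
 * plain RTMemAlloc stands in for the per-command allocator (vmmdevR3HgcmCallMemAlloc) used above.
 */
#if 0 /* Illustrative sketch only; not part of the original source and not compiled. */
typedef struct EXAMPLEPTRPARM
{
    uint32_t  cPages;
    RTGCPHYS *paPages;            /* points at GCPhysSinglePage or at a heap array */
    RTGCPHYS  GCPhysSinglePage;   /* inline storage for the common single-page case */
} EXAMPLEPTRPARM;

static int examplePreparePageList(EXAMPLEPTRPARM *pPtr)
{
    if (pPtr->cPages == 1)
        pPtr->paPages = &pPtr->GCPhysSinglePage;    /* no allocation needed */
    else
    {
        pPtr->paPages = (RTGCPHYS *)RTMemAlloc(pPtr->cPages * sizeof(RTGCPHYS));
        if (!pPtr->paPages)
            return VERR_NO_MEMORY;
    }
    return VINF_SUCCESS;
}
#endif
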
2359/** Restore HGCM connect command loaded from old saved state.
2360 *
2361 * @returns VBox status code that the guest should see.
2362 * @param pThisCC The VMMDev ring-3 instance data.
2363 * @param uSavedStateVersion The saved state version the command has been loaded from.
2364 * @param pLoadedCmd Command loaded from saved state; it is incomplete and needs restoration.
2365 * @param pReq The guest request (cached in host memory).
2366 * @param cbReq Size of the guest request.
2367 * @param enmRequestType Type of the HGCM request.
2368 * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
2369 */
2370static int vmmdevR3HgcmRestoreConnect(PVMMDEVCC pThisCC, uint32_t uSavedStateVersion, const VBOXHGCMCMD *pLoadedCmd,
2371 VMMDevHGCMConnect *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
2372 VBOXHGCMCMD **ppRestoredCmd)
2373{
2374 /* Verify the request. */
2375 ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
2376 if (uSavedStateVersion >= 9)
2377 ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT, VERR_MISMATCH);
2378
2379 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_CONNECT, pLoadedCmd->GCPhys, cbReq, 0,
2380 pReq->header.header.fRequestor);
2381 AssertReturn(pCmd, VERR_NO_MEMORY);
2382
2383 Assert(pLoadedCmd->fCancelled == false);
2384 pCmd->fCancelled = false;
2385 pCmd->fRestored = true;
2386 pCmd->enmRequestType = enmRequestType;
2387
2388 vmmdevR3HgcmConnectFetch(pReq, pCmd);
2389
2390 *ppRestoredCmd = pCmd;
2391 return VINF_SUCCESS;
2392}
2393
2394/** Restore HGCM disconnect command loaded from old saved state.
2395 *
2396 * @returns VBox status code that the guest should see.
2397 * @param pThisCC The VMMDev ring-3 instance data.
2398 * @param uSavedStateVersion The saved state version the command has been loaded from.
2399 * @param pLoadedCmd Command loaded from saved state; it is incomplete and needs restoration.
2400 * @param pReq The guest request (cached in host memory).
2401 * @param cbReq Size of the guest request.
2402 * @param enmRequestType Type of the HGCM request.
2403 * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
2404 */
2405static int vmmdevR3HgcmRestoreDisconnect(PVMMDEVCC pThisCC, uint32_t uSavedStateVersion, const VBOXHGCMCMD *pLoadedCmd,
2406 VMMDevHGCMDisconnect *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
2407 VBOXHGCMCMD **ppRestoredCmd)
2408{
2409 /* Verify the request. */
2410 ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
2411 if (uSavedStateVersion >= 9)
2412 ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT, VERR_MISMATCH);
2413
2414 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_DISCONNECT, pLoadedCmd->GCPhys, cbReq, 0,
2415 pReq->header.header.fRequestor);
2416 AssertReturn(pCmd, VERR_NO_MEMORY);
2417
2418 Assert(pLoadedCmd->fCancelled == false);
2419 pCmd->fCancelled = false;
2420 pCmd->fRestored = true;
2421 pCmd->enmRequestType = enmRequestType;
2422
2423 vmmdevR3HgcmDisconnectFetch(pReq, pCmd);
2424
2425 *ppRestoredCmd = pCmd;
2426 return VINF_SUCCESS;
2427}
2428
2429/** Restore HGCM call command loaded from old saved state.
2430 *
2431 * @returns VBox status code that the guest should see.
2432 * @param pDevIns The device instance.
2433 * @param pThis The VMMDev shared instance data.
2434 * @param pThisCC The VMMDev ring-3 instance data.
2435 * @param uSavedStateVersion The saved state version the command has been loaded from.
2436 * @param pLoadedCmd Command loaded from saved state; it is incomplete and needs restoration.
2437 * @param pReq The guest request (cached in host memory).
2438 * @param cbReq Size of the guest request.
2439 * @param enmRequestType Type of the HGCM request.
2440 * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
2441 */
2442static int vmmdevR3HgcmRestoreCall(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t uSavedStateVersion,
2443 const VBOXHGCMCMD *pLoadedCmd, VMMDevHGCMCall *pReq, uint32_t cbReq,
2444 VMMDevRequestType enmRequestType, VBOXHGCMCMD **ppRestoredCmd)
2445{
2446 /* Verify the request. */
2447 ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
2448 if (uSavedStateVersion >= 9)
2449 {
2450 ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_MISMATCH);
2451 Assert(pLoadedCmd->fCancelled == false);
2452 }
2453
2454 PVBOXHGCMCMD pCmd;
2455 uint32_t cbHGCMParmStruct;
2456 int rc = vmmdevR3HgcmCallAlloc(pThisCC, pReq, cbReq, pLoadedCmd->GCPhys, enmRequestType, &pCmd, &cbHGCMParmStruct);
2457 if (RT_FAILURE(rc))
2458 return rc;
2459
2460    /* pLoadedCmd is fake: it does not contain actual call parameters, only page lists for LinAddr. */
2461 pCmd->fCancelled = false;
2462 pCmd->fRestored = true;
2463 pCmd->enmRequestType = enmRequestType;
2464
2465 rc = vmmdevR3HgcmCallFetchGuestParms(pDevIns, pThisCC, pCmd, pReq, cbReq, enmRequestType, cbHGCMParmStruct);
2466 if (RT_SUCCESS(rc))
2467 {
2468 /* Update LinAddr parameters from pLoadedCmd.
2469 * pLoadedCmd->u.call.cParms is actually the number of LinAddrs, see vmmdevR3HgcmLoadState.
2470 */
2471 uint32_t iLinAddr;
2472 for (iLinAddr = 0; iLinAddr < pLoadedCmd->u.call.cParms; ++iLinAddr)
2473 {
2474 VBOXHGCMGUESTPARM * const pLoadedParm = &pLoadedCmd->u.call.paGuestParms[iLinAddr];
2475            /* pLoadedParm->cbData is actually the index of the LinAddr parameter, see vmmdevR3HgcmLoadState. */
2476 const uint32_t iParm = pLoadedParm->u.ptr.cbData;
2477 ASSERT_GUEST_STMT_BREAK(iParm < pCmd->u.call.cParms, rc = VERR_MISMATCH);
2478
2479 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[iParm];
2480 ASSERT_GUEST_STMT_BREAK( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
2481 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
2482 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr,
2483 rc = VERR_MISMATCH);
2484 ASSERT_GUEST_STMT_BREAK( pLoadedParm->u.ptr.offFirstPage == pGuestParm->u.ptr.offFirstPage
2485 && pLoadedParm->u.ptr.cPages == pGuestParm->u.ptr.cPages,
2486 rc = VERR_MISMATCH);
2487 memcpy(pGuestParm->u.ptr.paPages, pLoadedParm->u.ptr.paPages, pGuestParm->u.ptr.cPages * sizeof(RTGCPHYS));
2488 }
2489 }
2490
2491 if (RT_SUCCESS(rc))
2492 *ppRestoredCmd = pCmd;
2493 else
2494 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2495
2496 return rc;
2497}
2498
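/*
 * Editorial addition: for pre-rewrite saved states the loaded command only carries the LinAddr
 * page lists, keyed by the parameter index that vmmdevR3HgcmLoadState stashed in the cbData
 * field; the function above re-reads the request from guest memory and then patches the saved
 * page lists onto the matching parameters. A minimal sketch of that index-keyed patch-up
 * follows, using hypothetical simplified records.
 */
#if 0 /* Illustrative sketch only; not part of the original source and not compiled. */
typedef struct EXAMPLESAVEDLINADDR
{
    uint32_t  iParm;      /* which call parameter this page list belongs to */
    uint32_t  cPages;
    RTGCPHYS *paPages;
} EXAMPLESAVEDLINADDR;

typedef struct EXAMPLEPARM
{
    uint32_t  cPages;
    RTGCPHYS *paPages;
} EXAMPLEPARM;

static int examplePatchPageLists(EXAMPLEPARM *paParms, uint32_t cParms,
                                 const EXAMPLESAVEDLINADDR *paSaved, uint32_t cSaved)
{
    for (uint32_t i = 0; i < cSaved; i++)
    {
        const EXAMPLESAVEDLINADDR *pSaved = &paSaved[i];
        if (   pSaved->iParm >= cParms
            || pSaved->cPages != paParms[pSaved->iParm].cPages)
            return VERR_MISMATCH;   /* saved state and guest memory no longer agree */
        memcpy(paParms[pSaved->iParm].paPages, pSaved->paPages, pSaved->cPages * sizeof(RTGCPHYS));
    }
    return VINF_SUCCESS;
}
#endif
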
2499/** Allocate and initialize a HGCM command using the given request (pReqHdr)
2500 * and command loaded from saved state (pLoadedCmd).
2501 *
2502 * @returns VBox status code that the guest should see.
2503 * @param pDevIns The device instance.
2504 * @param pThis The VMMDev shared instance data.
2505 * @param pThisCC The VMMDev ring-3 instance data.
2506 * @param uSavedStateVersion Saved state version.
2507 * @param pLoadedCmd HGCM command which needs restoration.
2508 * @param pReqHdr The request (cached in host memory).
2509 * @param cbReq Size of the entire request (including HGCM parameters).
2510 * @param ppRestoredCmd Where to store pointer to restored command.
2511 */
2512static int vmmdevR3HgcmRestoreCommand(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t uSavedStateVersion,
2513 const VBOXHGCMCMD *pLoadedCmd, const VMMDevHGCMRequestHeader *pReqHdr, uint32_t cbReq,
2514 VBOXHGCMCMD **ppRestoredCmd)
2515{
2516 int rc;
2517
2518 /* Verify the request. */
2519 ASSERT_GUEST_RETURN(cbReq >= sizeof(VMMDevHGCMRequestHeader), VERR_MISMATCH);
2520 ASSERT_GUEST_RETURN(cbReq == pReqHdr->header.size, VERR_MISMATCH);
2521
2522 const VMMDevRequestType enmRequestType = pReqHdr->header.requestType;
2523 switch (enmRequestType)
2524 {
2525 case VMMDevReq_HGCMConnect:
2526 {
2527 VMMDevHGCMConnect *pReq = (VMMDevHGCMConnect *)pReqHdr;
2528 rc = vmmdevR3HgcmRestoreConnect(pThisCC, uSavedStateVersion, pLoadedCmd, pReq, cbReq, enmRequestType, ppRestoredCmd);
2529 break;
2530 }
2531
2532 case VMMDevReq_HGCMDisconnect:
2533 {
2534 VMMDevHGCMDisconnect *pReq = (VMMDevHGCMDisconnect *)pReqHdr;
2535 rc = vmmdevR3HgcmRestoreDisconnect(pThisCC, uSavedStateVersion, pLoadedCmd, pReq, cbReq, enmRequestType, ppRestoredCmd);
2536 break;
2537 }
2538
2539#ifdef VBOX_WITH_64_BITS_GUESTS
2540 case VMMDevReq_HGCMCall64:
2541#endif
2542 case VMMDevReq_HGCMCall32:
2543 {
2544 VMMDevHGCMCall *pReq = (VMMDevHGCMCall *)pReqHdr;
2545 rc = vmmdevR3HgcmRestoreCall(pDevIns, pThis, pThisCC, uSavedStateVersion, pLoadedCmd,
2546 pReq, cbReq, enmRequestType, ppRestoredCmd);
2547 break;
2548 }
2549
2550 default:
2551 ASSERT_GUEST_FAILED_RETURN(VERR_MISMATCH);
2552 }
2553
2554 return rc;
2555}
2556
2557/** Resubmit pending HGCM commands which were loaded from saved state.
2558 *
2559 * @returns VBox status code.
2560 * @param pDevIns The device instance.
2561 * @param pThis The VMMDev shared instance data.
2562 * @param pThisCC The VMMDev ring-3 instance data.
2563 *
2564 * @thread EMT
2565 */
2566int vmmdevR3HgcmLoadStateDone(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC)
2567{
2568 /*
2569 * Resubmit pending HGCM commands to services.
2570 *
2571     * pThisCC->listHGCMCmd contains commands loaded by vmmdevR3HgcmLoadState.
2572 *
2573 * Legacy saved states (pre VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS)
2574 * do not have enough information about the command parameters,
2575 * therefore it is necessary to reload at least some data from the
2576 * guest memory to construct commands.
2577 *
2578 * There are two types of legacy saved states which contain:
2579 * 1) the guest physical address and size of request;
2580 * 2) additionally page lists for LinAddr parameters.
2581 *
2582 * Legacy commands have enmCmdType = VBOXHGCMCMDTYPE_LOADSTATE?
2583 */
2584
2585    int rcFunc = VINF_SUCCESS; /* If set to a failure status, the function fails and the VM will not start. */
2586
2587 /* Get local copy of the list of loaded commands. */
2588 RTLISTANCHOR listLoadedCommands;
2589 RTListMove(&listLoadedCommands, &pThisCC->listHGCMCmd);
2590
2591 /* Resubmit commands. */
2592 PVBOXHGCMCMD pCmd, pNext;
2593 RTListForEachSafe(&listLoadedCommands, pCmd, pNext, VBOXHGCMCMD, node)
2594 {
2595        int rcCmd = VINF_SUCCESS; /* If set to a failure status, the HGCM command is failed for the guest. */
2596
2597 RTListNodeRemove(&pCmd->node);
2598
2599 /*
2600 * Re-read the request from the guest memory.
2601 * It will be used to:
2602 * * reconstruct commands if legacy saved state has been restored;
2603 * * report an error to the guest if resubmit failed.
2604 */
2605 VMMDevHGCMRequestHeader *pReqHdr = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
2606 AssertBreakStmt(pReqHdr, vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd); rcFunc = VERR_NO_MEMORY);
2607
2608 PDMDevHlpPhysRead(pDevIns, pCmd->GCPhys, pReqHdr, pCmd->cbRequest);
2609 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2610
2611 if (pThisCC->pHGCMDrv)
2612 {
2613 /*
2614 * Reconstruct legacy commands.
2615 */
2616 if (RT_LIKELY( pThisCC->uSavedStateVersion >= VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS
2617 && !pCmd->fRestoreFromGuestMem))
2618 { /* likely */ }
2619 else
2620 {
2621 PVBOXHGCMCMD pRestoredCmd = NULL;
2622 rcCmd = vmmdevR3HgcmRestoreCommand(pDevIns, pThis, pThisCC, pThisCC->uSavedStateVersion, pCmd,
2623 pReqHdr, pCmd->cbRequest, &pRestoredCmd);
2624 if (RT_SUCCESS(rcCmd))
2625 {
2626 Assert(pCmd != pRestoredCmd); /* vmmdevR3HgcmRestoreCommand must allocate restored command. */
2627 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2628 pCmd = pRestoredCmd;
2629 }
2630 }
2631
2632 /* Resubmit commands. */
2633 if (RT_SUCCESS(rcCmd))
2634 {
2635 switch (pCmd->enmCmdType)
2636 {
2637 case VBOXHGCMCMDTYPE_CONNECT:
2638 {
2639 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2640 rcCmd = pThisCC->pHGCMDrv->pfnConnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.connect.pLoc,
2641 &pCmd->u.connect.u32ClientID);
2642 if (RT_FAILURE(rcCmd))
2643 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
2644 break;
2645 }
2646
2647 case VBOXHGCMCMDTYPE_DISCONNECT:
2648 {
2649 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2650 rcCmd = pThisCC->pHGCMDrv->pfnDisconnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.disconnect.u32ClientID);
2651 if (RT_FAILURE(rcCmd))
2652 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
2653 break;
2654 }
2655
2656 case VBOXHGCMCMDTYPE_CALL:
2657 {
2658 rcCmd = vmmdevR3HgcmInitHostParameters(pDevIns, pThisCC, pCmd, (uint8_t const *)pReqHdr);
2659 if (RT_SUCCESS(rcCmd))
2660 {
2661 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2662
2663 /* Pass the function call to HGCM connector for actual processing */
2664 uint64_t tsNow;
2665 STAM_GET_TS(tsNow);
2666 rcCmd = pThisCC->pHGCMDrv->pfnCall(pThisCC->pHGCMDrv, pCmd,
2667 pCmd->u.call.u32ClientID, pCmd->u.call.u32Function,
2668 pCmd->u.call.cParms, pCmd->u.call.paHostParms, tsNow);
2669 if (RT_FAILURE(rcCmd))
2670 {
2671 LogFunc(("pfnCall rc = %Rrc\n", rcCmd));
2672 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
2673 }
2674 }
2675 break;
2676 }
2677
2678 default:
2679 AssertFailedStmt(rcCmd = VERR_INTERNAL_ERROR);
2680 }
2681 }
2682 }
2683 else
2684 AssertFailedStmt(rcCmd = VERR_INTERNAL_ERROR);
2685
2686 if (RT_SUCCESS(rcCmd))
2687 { /* likely */ }
2688 else
2689 {
2690 /* Return the error to the guest. Guest may try to repeat the call. */
2691 pReqHdr->result = rcCmd;
2692 pReqHdr->header.rc = rcCmd;
2693 pReqHdr->fu32Flags |= VBOX_HGCM_REQ_DONE;
2694
2695 /* Write back only the header. */
2696 PDMDevHlpPhysWrite(pDevIns, pCmd->GCPhys, pReqHdr, sizeof(*pReqHdr));
2697
2698 VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM);
2699
2700 /* Deallocate the command memory. */
2701 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2702 }
2703
2704 RTMemFree(pReqHdr);
2705 }
2706
2707 if (RT_FAILURE(rcFunc))
2708 {
2709 RTListForEachSafe(&listLoadedCommands, pCmd, pNext, VBOXHGCMCMD, node)
2710 {
2711 RTListNodeRemove(&pCmd->node);
2712 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2713 }
2714 }
2715
2716 return rcFunc;
2717}
2718
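/*
 * Editorial addition: the function above detaches the whole pending list with RTListMove before
 * resubmitting, so each command can be re-queued or failed back to the guest independently.
 * A minimal sketch of that drain-and-resubmit pattern follows; exampleResubmit() and
 * exampleFailBackToGuest() are hypothetical helpers.
 */
#if 0 /* Illustrative sketch only; not part of the original source and not compiled. */
typedef struct EXAMPLELISTCMD
{
    RTLISTNODE node;
    /* command payload would go here */
} EXAMPLELISTCMD;

static int  exampleResubmit(EXAMPLELISTCMD *pCmd);         /* hypothetical */
static void exampleFailBackToGuest(EXAMPLELISTCMD *pCmd);  /* hypothetical: set the error in the guest request and free */

static void exampleDrainAndResubmit(PRTLISTANCHOR pPending)
{
    /* Detach everything first; commands resubmitted successfully are re-added elsewhere. */
    RTLISTANCHOR listLocal;
    RTListMove(&listLocal, pPending);

    EXAMPLELISTCMD *pCmd, *pNext;
    RTListForEachSafe(&listLocal, pCmd, pNext, EXAMPLELISTCMD, node)
    {
        RTListNodeRemove(&pCmd->node);
        if (RT_FAILURE(exampleResubmit(pCmd)))
            exampleFailBackToGuest(pCmd);
    }
}
#endif
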
2719
2720/**
2721 * Counterpart to vmmdevR3HgcmInit().
2722 *
2723 * @param pDevIns The device instance.
2724 * @param pThis The VMMDev shared instance data.
2725 * @param pThisCC The VMMDev ring-3 instance data.
2726 */
2727void vmmdevR3HgcmDestroy(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC)
2728{
2729 LogFlowFunc(("\n"));
2730
2731 if (RTCritSectIsInitialized(&pThisCC->critsectHGCMCmdList))
2732 {
2733 PVBOXHGCMCMD pCmd, pNext;
2734 RTListForEachSafe(&pThisCC->listHGCMCmd, pCmd, pNext, VBOXHGCMCMD, node)
2735 {
2736 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
2737 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2738 }
2739
2740 RTCritSectDelete(&pThisCC->critsectHGCMCmdList);
2741 }
2742
2743 AssertCompile(NIL_RTMEMCACHE == (RTMEMCACHE)0);
2744 if (pThisCC->hHgcmCmdCache != NIL_RTMEMCACHE)
2745 {
2746 RTMemCacheDestroy(pThisCC->hHgcmCmdCache);
2747 pThisCC->hHgcmCmdCache = NIL_RTMEMCACHE;
2748 }
2749}
2750
2751
2752/**
2753 * Initializes the HGCM specific state.
2754 *
2755 * Keeps VBOXHGCMCMDCACHED and friends local.
2756 *
2757 * @returns VBox status code.
2758 * @param pThisCC The VMMDev ring-3 instance data.
2759 */
2760int vmmdevR3HgcmInit(PVMMDEVCC pThisCC)
2761{
2762 LogFlowFunc(("\n"));
2763
2764 RTListInit(&pThisCC->listHGCMCmd);
2765
2766 int rc = RTCritSectInit(&pThisCC->critsectHGCMCmdList);
2767 AssertLogRelRCReturn(rc, rc);
2768
2769 rc = RTMemCacheCreate(&pThisCC->hHgcmCmdCache, sizeof(VBOXHGCMCMDCACHED), 64, _1M, NULL, NULL, NULL, 0);
2770 AssertLogRelRCReturn(rc, rc);
2771
2772 pThisCC->u32HGCMEnabled = 0;
2773
2774 return VINF_SUCCESS;
2775}
2776
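/*
 * Editorial addition: the RTMemCache created above hands out fixed-size objects
 * (sizeof(VBOXHGCMCMDCACHED)) cheaply on the hot path. A minimal usage sketch follows;
 * it assumes the standard IPRT RTMemCacheAlloc/RTMemCacheFree pair and does not reflect
 * how the device code itself is structured.
 */
#if 0 /* Illustrative sketch only; not part of the original source and not compiled. */
static int exampleUseCmdCache(void)
{
    RTMEMCACHE hCache;
    int rc = RTMemCacheCreate(&hCache, 256 /* object size */, 64 /* alignment */,
                              _1M /* max objects */, NULL, NULL, NULL, 0);
    if (RT_FAILURE(rc))
        return rc;

    void *pvObj = RTMemCacheAlloc(hCache);      /* fast fixed-size allocation */
    if (pvObj)
        RTMemCacheFree(hCache, pvObj);          /* return the object to the cache */

    RTMemCacheDestroy(hCache);
    return VINF_SUCCESS;
}
#endif
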