VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 62484

Last change on this file since 62484 was 59501, checked in by vboxsync, 9 years ago

Additions/common/VBoxGuest: change the regular guest heart beat logging to Log3.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 134.0 KB
Line 
1/* $Id: VBoxGuest.cpp 59501 2016-01-28 08:28:40Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/** @page pg_vbdrv VBoxGuest
28 *
29 * VBoxGuest is the device driver for VMMDev.
30 *
31 * The device driver is shipped as part of the guest additions. It has roots in
32 * the host VMM support driver (usually known as VBoxDrv), so fixes in platform
33 * specific code may apply to both drivers.
34 *
35 * The common code lives in VBoxGuest.cpp and is compiled both as C++ and C.
36 * The VBoxGuest.cpp source file shall not contain platform specific code,
37 * though it must occasionally do a few \#ifdef RT_OS_XXX tests to cater for
38 * platform differences. Though, in those cases, it is common that more than
39 * one platform needs special handling.
40 *
41 * On most platforms the device driver should create two device nodes, one for
42 * full (unrestricted) access to the feature set, and one which only provides a
43 * restricted set of functions. These are generally referred to as 'vboxguest'
44 * and 'vboxuser' respectively. Currently, this two device approach is only
45 * implemented on Linux!
46 *
47 */
48
49
50/*********************************************************************************************************************************
51* Header Files *
52*********************************************************************************************************************************/
53#define LOG_GROUP LOG_GROUP_DEFAULT
54#include "VBoxGuestInternal.h"
55#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
56#include <VBox/log.h>
57#include <iprt/mem.h>
58#include <iprt/time.h>
59#include <iprt/memobj.h>
60#include <iprt/asm.h>
61#include <iprt/asm-amd64-x86.h>
62#include <iprt/string.h>
63#include <iprt/process.h>
64#include <iprt/assert.h>
65#include <iprt/param.h>
66#include <iprt/timer.h>
67#ifdef VBOX_WITH_HGCM
68# include <iprt/thread.h>
69#endif
70#include "version-generated.h"
71#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
72# include "revision-generated.h"
73#endif
74#ifdef RT_OS_WINDOWS
75# ifndef CTL_CODE
76# include <Windows.h>
77# endif
78#endif
79#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
80# include <iprt/rand.h>
81#endif
82
83
84/*********************************************************************************************************************************
85* Defined Constants And Macros *
86*********************************************************************************************************************************/
87#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
88
89
90/*********************************************************************************************************************************
91* Internal Functions *
92*********************************************************************************************************************************/
93#ifdef VBOX_WITH_HGCM
94static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
95#endif
96static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
97static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
98static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
99static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents);
100static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt);
101static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt);
102static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
103 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
104static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
105 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
106static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
107 uint32_t fOrMask, uint32_t fNoMask, bool fSessionTermination);
108static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask,
109 uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags, bool fSessionTermination);
110static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents);
111
112
113/*********************************************************************************************************************************
114* Global Variables *
115*********************************************************************************************************************************/
116static const uint32_t g_cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
117
118#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
119/**
120 * Drag in the rest of IPRT since we share it with the
121 * rest of the kernel modules on Solaris.
122 */
123PFNRT g_apfnVBoxGuestIPRTDeps[] =
124{
125 /* VirtioNet */
126 (PFNRT)RTRandBytes,
127 /* RTSemMutex* */
128 (PFNRT)RTSemMutexCreate,
129 (PFNRT)RTSemMutexDestroy,
130 (PFNRT)RTSemMutexRequest,
131 (PFNRT)RTSemMutexRequestNoResume,
132 (PFNRT)RTSemMutexRequestDebug,
133 (PFNRT)RTSemMutexRequestNoResumeDebug,
134 (PFNRT)RTSemMutexRelease,
135 (PFNRT)RTSemMutexIsOwned,
136 NULL
137};
138#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
139
140
141/**
142 * Reserves memory in which the VMM can relocate any guest mappings
143 * that are floating around.
144 *
145 * This operation is a little bit tricky since the VMM might not accept
146 * just any address because of address clashes between the three contexts
147 * it operates in, so use a small stack to perform this operation.
148 *
149 * @returns VBox status code (ignored).
150 * @param pDevExt The device extension.
151 */
152static int vgdrvInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
153{
154 /*
155 * Query the required space.
156 */
157 VMMDevReqHypervisorInfo *pReq;
158 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
159 if (RT_FAILURE(rc))
160 return rc;
161 pReq->hypervisorStart = 0;
162 pReq->hypervisorSize = 0;
163 rc = VbglGRPerform(&pReq->header);
164 if (RT_FAILURE(rc)) /* this shouldn't happen! */
165 {
166 VbglGRFree(&pReq->header);
167 return rc;
168 }
169
170 /*
171 * The VMM will report back if there is nothing it wants to map, like for
172 * instance in VT-x and AMD-V mode.
173 */
174 if (pReq->hypervisorSize == 0)
175 Log(("vgdrvInitFixateGuestMappings: nothing to do\n"));
176 else
177 {
178 /*
179 * We have to try several times since the host can be picky
180 * about certain addresses.
181 */
182 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
183 uint32_t cbHypervisor = pReq->hypervisorSize;
184 RTR0MEMOBJ ahTries[5];
185 uint32_t iTry;
186 bool fBitched = false;
187 Log(("vgdrvInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
188 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
189 {
190 /*
191 * Reserve space, or if that isn't supported, create a object for
192 * some fictive physical memory and map that in to kernel space.
193 *
194 * To make the code a bit uglier, most systems cannot help with
195 * 4MB alignment, so we have to deal with that in addition to
196 * having two ways of getting the memory.
197 */
198 uint32_t uAlignment = _4M;
199 RTR0MEMOBJ hObj;
200 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
201 if (rc == VERR_NOT_SUPPORTED)
202 {
203 uAlignment = PAGE_SIZE;
204 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
205 }
206 /*
207 * If both RTR0MemObjReserveKernel calls above failed because either not supported or
208 * not implemented at all at the current platform, try to map the memory object into the
209 * virtual kernel space.
210 */
211 if (rc == VERR_NOT_SUPPORTED)
212 {
213 if (hFictive == NIL_RTR0MEMOBJ)
214 {
215 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
216 if (RT_FAILURE(rc))
217 break;
218 hFictive = hObj;
219 }
220 uAlignment = _4M;
221 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
222 if (rc == VERR_NOT_SUPPORTED)
223 {
224 uAlignment = PAGE_SIZE;
225 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
226 }
227 }
228 if (RT_FAILURE(rc))
229 {
230 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
231 rc, cbHypervisor, uAlignment, iTry));
232 fBitched = true;
233 break;
234 }
235
236 /*
237 * Try set it.
238 */
239 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
240 pReq->header.rc = VERR_INTERNAL_ERROR;
241 pReq->hypervisorSize = cbHypervisor;
242 pReq->hypervisorStart = (RTGCPTR32)(uintptr_t)RTR0MemObjAddress(hObj);
243 if ( uAlignment == PAGE_SIZE
244 && pReq->hypervisorStart & (_4M - 1))
245 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
246 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
247
248 rc = VbglGRPerform(&pReq->header);
249 if (RT_SUCCESS(rc))
250 {
251 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
252 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
253 RTR0MemObjAddress(pDevExt->hGuestMappings),
254 RTR0MemObjSize(pDevExt->hGuestMappings),
255 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
256 break;
257 }
258 ahTries[iTry] = hObj;
259 }
260
261 /*
262 * Cleanup failed attempts.
263 */
264 while (iTry-- > 0)
265 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
266 if ( RT_FAILURE(rc)
267 && hFictive != NIL_RTR0PTR)
268 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
269 if (RT_FAILURE(rc) && !fBitched)
270 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
271 }
272 VbglGRFree(&pReq->header);
273
274 /*
275 * We ignore failed attempts for now.
276 */
277 return VINF_SUCCESS;
278}
279
280
281/**
282 * Undo what vgdrvInitFixateGuestMappings did.
283 *
284 * @param pDevExt The device extension.
285 */
286static void vgdrvTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
287{
288 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
289 {
290 /*
291 * Tell the host that we're going to free the memory we reserved for
292 * it, the free it up. (Leak the memory if anything goes wrong here.)
293 */
294 VMMDevReqHypervisorInfo *pReq;
295 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
296 if (RT_SUCCESS(rc))
297 {
298 pReq->hypervisorStart = 0;
299 pReq->hypervisorSize = 0;
300 rc = VbglGRPerform(&pReq->header);
301 VbglGRFree(&pReq->header);
302 }
303 if (RT_SUCCESS(rc))
304 {
305 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
306 AssertRC(rc);
307 }
308 else
309 LogRel(("vgdrvTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
310
311 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
312 }
313}
314
315
316
317/**
318 * Report the guest information to the host.
319 *
320 * @returns IPRT status code.
321 * @param enmOSType The OS type to report.
322 */
323static int vgdrvReportGuestInfo(VBOXOSTYPE enmOSType)
324{
325 /*
326 * Allocate and fill in the two guest info reports.
327 */
328 VMMDevReportGuestInfo2 *pReqInfo2 = NULL;
329 VMMDevReportGuestInfo *pReqInfo1 = NULL;
330 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReqInfo2, sizeof (VMMDevReportGuestInfo2), VMMDevReq_ReportGuestInfo2);
331 Log(("vgdrvReportGuestInfo: VbglGRAlloc VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
332 if (RT_SUCCESS(rc))
333 {
334 pReqInfo2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
335 pReqInfo2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
336 pReqInfo2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
337 pReqInfo2->guestInfo.additionsRevision = VBOX_SVN_REV;
338 pReqInfo2->guestInfo.additionsFeatures = 0; /* (no features defined yet) */
339 RTStrCopy(pReqInfo2->guestInfo.szName, sizeof(pReqInfo2->guestInfo.szName), VBOX_VERSION_STRING);
340
341 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReqInfo1, sizeof (VMMDevReportGuestInfo), VMMDevReq_ReportGuestInfo);
342 Log(("vgdrvReportGuestInfo: VbglGRAlloc VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
343 if (RT_SUCCESS(rc))
344 {
345 pReqInfo1->guestInfo.interfaceVersion = VMMDEV_VERSION;
346 pReqInfo1->guestInfo.osType = enmOSType;
347
348 /*
349 * There are two protocols here:
350 * 1. Info2 + Info1. Supported by >=3.2.51.
351 * 2. Info1 and optionally Info2. The old protocol.
352 *
353 * We try protocol 1 first. It will fail with VERR_NOT_SUPPORTED
354 * if not supported by the VMMDev (message ordering requirement).
355 */
356 rc = VbglGRPerform(&pReqInfo2->header);
357 Log(("vgdrvReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
358 if (RT_SUCCESS(rc))
359 {
360 rc = VbglGRPerform(&pReqInfo1->header);
361 Log(("vgdrvReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
362 }
363 else if ( rc == VERR_NOT_SUPPORTED
364 || rc == VERR_NOT_IMPLEMENTED)
365 {
366 rc = VbglGRPerform(&pReqInfo1->header);
367 Log(("vgdrvReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
368 if (RT_SUCCESS(rc))
369 {
370 rc = VbglGRPerform(&pReqInfo2->header);
371 Log(("vgdrvReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
372 if (rc == VERR_NOT_IMPLEMENTED)
373 rc = VINF_SUCCESS;
374 }
375 }
376 VbglGRFree(&pReqInfo1->header);
377 }
378 VbglGRFree(&pReqInfo2->header);
379 }
380
381 return rc;
382}
383
384
385/**
386 * Report the guest driver status to the host.
387 *
388 * @returns IPRT status code.
389 * @param fActive Flag whether the driver is now active or not.
390 */
391static int vgdrvReportDriverStatus(bool fActive)
392{
393 /*
394 * Report guest status of the VBox driver to the host.
395 */
396 VMMDevReportGuestStatus *pReq2 = NULL;
397 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestStatus);
398 Log(("vgdrvReportDriverStatus: VbglGRAlloc VMMDevReportGuestStatus completed with rc=%Rrc\n", rc));
399 if (RT_SUCCESS(rc))
400 {
401 pReq2->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
402 pReq2->guestStatus.status = fActive ?
403 VBoxGuestFacilityStatus_Active
404 : VBoxGuestFacilityStatus_Inactive;
405 pReq2->guestStatus.flags = 0;
406 rc = VbglGRPerform(&pReq2->header);
407 Log(("vgdrvReportDriverStatus: VbglGRPerform VMMDevReportGuestStatus completed with fActive=%d, rc=%Rrc\n",
408 fActive ? 1 : 0, rc));
409 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
410 rc = VINF_SUCCESS;
411 VbglGRFree(&pReq2->header);
412 }
413
414 return rc;
415}
416
417
418/** @name Memory Ballooning
419 * @{
420 */
421
422/**
423 * Inflate the balloon by one chunk represented by an R0 memory object.
424 *
425 * The caller owns the balloon mutex.
426 *
427 * @returns IPRT status code.
428 * @param pMemObj Pointer to the R0 memory object.
429 * @param pReq The pre-allocated request for performing the VMMDev call.
430 */
431static int vgdrvBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
432{
433 uint32_t iPage;
434 int rc;
435
436 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
437 {
438 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
439 pReq->aPhysPage[iPage] = phys;
440 }
441
442 pReq->fInflate = true;
443 pReq->header.size = g_cbChangeMemBalloonReq;
444 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
445
446 rc = VbglGRPerform(&pReq->header);
447 if (RT_FAILURE(rc))
448 LogRel(("vgdrvBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
449 return rc;
450}
451
452
453/**
454 * Deflate the balloon by one chunk - info the host and free the memory object.
455 *
456 * The caller owns the balloon mutex.
457 *
458 * @returns IPRT status code.
459 * @param pMemObj Pointer to the R0 memory object.
460 * The memory object will be freed afterwards.
461 * @param pReq The pre-allocated request for performing the VMMDev call.
462 */
463static int vgdrvBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
464{
465 uint32_t iPage;
466 int rc;
467
468 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
469 {
470 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
471 pReq->aPhysPage[iPage] = phys;
472 }
473
474 pReq->fInflate = false;
475 pReq->header.size = g_cbChangeMemBalloonReq;
476 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
477
478 rc = VbglGRPerform(&pReq->header);
479 if (RT_FAILURE(rc))
480 {
481 LogRel(("vgdrvBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
482 return rc;
483 }
484
485 rc = RTR0MemObjFree(*pMemObj, true);
486 if (RT_FAILURE(rc))
487 {
488 LogRel(("vgdrvBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
489 return rc;
490 }
491
492 *pMemObj = NIL_RTR0MEMOBJ;
493 return VINF_SUCCESS;
494}
495
496
/**
 * Inflate/deflate the memory balloon and notify the host.
 *
 * This is a worker used by vgdrvIoCtl_CheckMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   cBalloonChunks  The new size of the balloon in chunks of 1MB.
 * @param   pfHandleInR3    Where to return the handle-in-ring3 indicator
 *                          (VINF_SUCCESS if set).
 */
static int vgdrvSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
{
    int rc = VINF_SUCCESS;

    /* Only do the work in-kernel while the kernel allocation API is usable;
       otherwise fall through and tell ring-3 to handle it (see bottom). */
    if (pDevExt->MemBalloon.fUseKernelAPI)
    {
        VMMDevChangeMemBalloon *pReq;
        uint32_t i;

        if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
        {
            LogRel(("vgdrvSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
                    cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this "nothing to do" short-circuit compares against
           cMaxChunks rather than the current size (cChunks); a request equal
           to the maximum therefore never inflates/deflates.  Looks like it
           should be cChunks - confirm intent before changing. */
        if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
            return VINF_SUCCESS;   /* nothing to do */

        /* Lazily allocate the (zero-initialized) memory-object tracking array. */
        if (   cBalloonChunks > pDevExt->MemBalloon.cChunks
            && !pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("vgdrvSetBalloonSizeKernel: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
        }

        /* One request buffer is reused for every chunk transition below. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
        if (RT_FAILURE(rc))
            return rc;

        if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
        {
            /* inflate */
            for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
            {
                rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
                                           VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
                if (RT_FAILURE(rc))
                {
                    if (rc == VERR_NOT_SUPPORTED)
                    {
                        /* not supported -- fall back to the R3-allocated memory. */
                        rc = VINF_SUCCESS;
                        pDevExt->MemBalloon.fUseKernelAPI = false;
                        Assert(pDevExt->MemBalloon.cChunks == 0);
                        Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
                    }
                    /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
                     *      cannot allocate more memory => don't try further, just stop here */
                    /* else: XXX what else can fail?  VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
                    break;
                }

                rc = vgdrvBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                if (RT_FAILURE(rc))
                {
                    Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                    RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    break;
                }
                pDevExt->MemBalloon.cChunks++;
            }
        }
        else
        {
            /* deflate - walk backwards from the current top of the balloon. */
            for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
            {
                rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                if (RT_FAILURE(rc))
                {
                    Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
                    break;
                }
                pDevExt->MemBalloon.cChunks--;
            }
        }

        VbglGRFree(&pReq->header);
    }

    /*
     * Set the handle-in-ring3 indicator.  When set Ring-3 will have to work
     * the balloon changes via the other API.
     */
    *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;

    return rc;
}
602
603
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for vgdrvIoCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session (currently unused by this worker).
 * @param   u64ChunkAddr    The address of the chunk to add to / remove from the
 *                          balloon.  A ring-3 address of memory provided by the
 *                          caller (locked down on inflate).
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vgdrvSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Reject inflation past the maximum, or before the max was queried. */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the tracking array; entries are explicitly NILed below. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        /* Cannot deflate an empty balloon. */
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * On inflate this also records the first free slot; on deflate it locates the
     * entry matching the caller's ring-3 address.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the user memory down before handing its pages to the host. */
        rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vgdrvBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Inflate failed: unlock/free and clear the slot again. */
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vgdrvBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
719
720
721/**
722 * Cleanup the memory balloon of a session.
723 *
724 * Will request the balloon mutex, so it must be valid and the caller must not
725 * own it already.
726 *
727 * @param pDevExt The device extension.
728 * @param pSession The session. Can be NULL at unload.
729 */
730static void vgdrvCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
731{
732 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
733 if ( pDevExt->MemBalloon.pOwner == pSession
734 || pSession == NULL /*unload*/)
735 {
736 if (pDevExt->MemBalloon.paMemObj)
737 {
738 VMMDevChangeMemBalloon *pReq;
739 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
740 if (RT_SUCCESS(rc))
741 {
742 uint32_t i;
743 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
744 {
745 rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
746 if (RT_FAILURE(rc))
747 {
748 LogRel(("vgdrvCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
749 rc, pDevExt->MemBalloon.cChunks));
750 break;
751 }
752 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
753 pDevExt->MemBalloon.cChunks--;
754 }
755 VbglGRFree(&pReq->header);
756 }
757 else
758 LogRel(("vgdrvCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
759 rc, pDevExt->MemBalloon.cChunks));
760 RTMemFree(pDevExt->MemBalloon.paMemObj);
761 pDevExt->MemBalloon.paMemObj = NULL;
762 }
763
764 pDevExt->MemBalloon.pOwner = NULL;
765 }
766 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
767}
768
769/** @} */
770
771
772
773/** @name Heartbeat
774 * @{
775 */
776
777/**
778 * Sends heartbeat to host.
779 *
780 * @returns VBox status code.
781 */
782static int vgdrvHeartbeatSend(PVBOXGUESTDEVEXT pDevExt)
783{
784 int rc;
785 if (pDevExt->pReqGuestHeartbeat)
786 {
787 rc = VbglGRPerform(pDevExt->pReqGuestHeartbeat);
788 Log3(("vgdrvHeartbeatSend: VbglGRPerform vgdrvHeartbeatSend completed with rc=%Rrc\n", rc));
789 }
790 else
791 rc = VERR_INVALID_STATE;
792 return rc;
793}
794
795
796/**
797 * Callback for heartbeat timer.
798 */
799static DECLCALLBACK(void) vgdrvHeartbeatTimerHandler(PRTTIMER hTimer, void *pvUser, uint64_t iTick)
800{
801 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
802 int rc;
803 AssertReturnVoid(pDevExt);
804
805 rc = vgdrvHeartbeatSend(pDevExt);
806 if (RT_FAILURE(rc))
807 Log(("HB Timer: vgdrvHeartbeatSend failed: rc=%Rrc\n", rc));
808
809 NOREF(hTimer); NOREF(iTick);
810}
811
812
813/**
814 * Configure the host to check guest's heartbeat
815 * and get heartbeat interval from the host.
816 *
817 * @returns VBox status code.
818 * @param pDevExt The device extension.
819 * @param fEnabled Set true to enable guest heartbeat checks on host.
820 */
821static int vgdrvHeartbeatHostConfigure(PVBOXGUESTDEVEXT pDevExt, bool fEnabled)
822{
823 VMMDevReqHeartbeat *pReq;
824 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_HeartbeatConfigure);
825 Log(("vgdrvHeartbeatHostConfigure: VbglGRAlloc vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
826 if (RT_SUCCESS(rc))
827 {
828 pReq->fEnabled = fEnabled;
829 pReq->cNsInterval = 0;
830 rc = VbglGRPerform(&pReq->header);
831 Log(("vgdrvHeartbeatHostConfigure: VbglGRPerform vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
832 pDevExt->cNsHeartbeatInterval = pReq->cNsInterval;
833 VbglGRFree(&pReq->header);
834 }
835 return rc;
836}
837
838
/**
 * Initializes the heartbeat timer.
 *
 * This feature may be disabled by the host.
 *
 * @returns VBox status (ignored).
 * @param   pDevExt     The device extension.
 */
static int vgdrvHeartbeatInit(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Make sure that heartbeat checking is disabled.
     */
    /* (Disable first so we start from a known state, then enable - the enable
       call also fetches the host's chosen interval into cNsHeartbeatInterval.) */
    int rc = vgdrvHeartbeatHostConfigure(pDevExt, false);
    if (RT_SUCCESS(rc))
    {
        rc = vgdrvHeartbeatHostConfigure(pDevExt, true);
        if (RT_SUCCESS(rc))
        {
            /*
             * Preallocate the request to use it from the timer callback because:
             *    1) on Windows VbglGRAlloc must be called at IRQL <= APC_LEVEL
             *       and the timer callback runs at DISPATCH_LEVEL;
             *    2) avoid repeated allocations.
             */
            rc = VbglGRAlloc(&pDevExt->pReqGuestHeartbeat, sizeof(*pDevExt->pReqGuestHeartbeat), VMMDevReq_GuestHeartbeat);
            if (RT_SUCCESS(rc))
            {
                LogRel(("vgdrvHeartbeatInit: Setting up heartbeat to trigger every %RU64 milliseconds\n",
                        pDevExt->cNsHeartbeatInterval / RT_NS_1MS));
                rc = RTTimerCreateEx(&pDevExt->pHeartbeatTimer, pDevExt->cNsHeartbeatInterval, 0 /*fFlags*/,
                                     (PFNRTTIMER)vgdrvHeartbeatTimerHandler, pDevExt);
                if (RT_SUCCESS(rc))
                {
                    rc = RTTimerStart(pDevExt->pHeartbeatTimer, 0);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;

                    LogRel(("vgdrvHeartbeatInit: Heartbeat timer failed to start, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("vgdrvHeartbeatInit: Failed to create heartbeat timer: %Rrc\n", rc));

                /* Timer setup failed: release the preallocated request again. */
                VbglGRFree(pDevExt->pReqGuestHeartbeat);
                pDevExt->pReqGuestHeartbeat = NULL;
            }
            else
                LogRel(("vgdrvHeartbeatInit: VbglGRAlloc(VMMDevReq_GuestHeartbeat): %Rrc\n", rc));

            /* Any failure above: tell the host to stop expecting heartbeats. */
            LogRel(("vgdrvHeartbeatInit: Failed to set up the timer, guest heartbeat is disabled\n"));
            vgdrvHeartbeatHostConfigure(pDevExt, false);
        }
        else
            LogRel(("vgdrvHeartbeatInit: Failed to configure host for heartbeat checking: rc=%Rrc\n", rc));
    }
    return rc;
}
896
897/** @} */
898
899
900/**
901 * Helper to reinit the VMMDev communication after hibernation.
902 *
903 * @returns VBox status code.
904 * @param pDevExt The device extension.
905 * @param enmOSType The OS type.
906 *
907 * @todo Call this on all platforms, not just windows.
908 */
909int VGDrvCommonReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
910{
911 int rc = vgdrvReportGuestInfo(enmOSType);
912 if (RT_SUCCESS(rc))
913 {
914 rc = vgdrvReportDriverStatus(true /* Driver is active */);
915 if (RT_FAILURE(rc))
916 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
917 }
918 else
919 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
920 LogFlow(("VGDrvCommonReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
921 return rc;
922}
923
924
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present).  Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VGDrvCommonInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                          void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;    /* rc2 is only used for cleanup calls whose status we assert but don't return. */

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    /*
     * Create the release log.
     */
    static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
    PRTLOGGER pRelLogger;
    rc = RTLogCreate(&pRelLogger, 0 /*fFlags*/, "all", "VBOXGUEST_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
                     RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
    if (RT_SUCCESS(rc))
        RTLogRelSetDefaultInstance(pRelLogger);
    /** @todo Add native hook for getting logger config parameters and setting
     *        them.  On linux we should use the module parameter stuff... */
#endif

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;  /* HGCM notifications must never be masked by clients. */
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
    RTListInit(&pDevExt->SessionList);
    pDevExt->cSessions = 0;
    pDevExt->fLoggingEnabled = false;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;
    pDevExt->MouseNotifyCallback.pfnNotify = NULL;
    pDevExt->MouseNotifyCallback.pvUser = NULL;
    pDevExt->pReqGuestHeartbeat = NULL;

    pDevExt->fFixedEvents = fFixedEvents;
    vgdrvBitUsageTrackerClear(&pDevExt->EventFilterTracker);
    pDevExt->fEventFilterHost = UINT32_MAX; /* forces a report */

    vgdrvBitUsageTrackerClear(&pDevExt->MouseStatusTracker);
    pDevExt->fMouseStatusHost = UINT32_MAX; /* forces a report */

    pDevExt->fAcquireModeGuestCaps = 0;
    pDevExt->fSetModeGuestCaps = 0;
    pDevExt->fAcquiredGuestCaps = 0;
    vgdrvBitUsageTrackerClear(&pDevExt->SetGuestCapsTracker);
    pDevExt->fGuestCapsHost = UINT32_MAX; /* forces a report */

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (   pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            && pVMMDev->u32Size >= 32
            && pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VGDrvCommonInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VGDrvCommonInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
    if (RT_FAILURE(rc))
    {
        /* Either create may have failed; only the event spinlock can exist here. */
        LogRel(("VGDrvCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VGDrvCommonInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInitPrimary(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            /* Cache the physical address; the ISR hands it to the device for event acknowledgement. */
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vgdrvReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Set the fixed event and make sure the host doesn't have any lingering
                 * guest capabilities or mouse status bits set.
                 */
                rc = vgdrvResetEventFilterOnHost(pDevExt, pDevExt->fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    rc = vgdrvResetCapabilitiesOnHost(pDevExt);
                    if (RT_SUCCESS(rc))
                    {
                        rc = vgdrvResetMouseStatusOnHost(pDevExt);
                        if (RT_SUCCESS(rc))
                        {
                            /*
                             * Initialize stuff which may fail without requiring the driver init to fail.
                             */
                            vgdrvInitFixateGuestMappings(pDevExt);
                            vgdrvHeartbeatInit(pDevExt);

                            /*
                             * Done!
                             */
                            rc = vgdrvReportDriverStatus(true /* Driver is active */);
                            if (RT_FAILURE(rc))
                                LogRel(("VGDrvCommonInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                            LogFlowFunc(("VGDrvCommonInitDevExt: returns success\n"));
                            return VINF_SUCCESS;
                        }
                        LogRel(("VGDrvCommonInitDevExt: failed to clear mouse status: rc=%Rrc\n", rc));
                    }
                    else
                        LogRel(("VGDrvCommonInitDevExt: failed to clear guest capabilities: rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VGDrvCommonInitDevExt: failed to set fixed event filter: rc=%Rrc\n", rc));
            }
            else
                LogRel(("VGDrvCommonInitDevExt: VBoxReportGuestInfo failed: rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VGDrvCommonInitDevExt: VBoxGRAlloc failed: rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VGDrvCommonInitDevExt: VbglInit failed: rc=%Rrc\n", rc));

    /* Failure: undo the locks/mutex created above, in reverse order of creation. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif
    return rc; /* (failed) */
}
1138
1139
1140/**
1141 * Deletes all the items in a wait chain.
1142 * @param pList The head of the chain.
1143 */
1144static void vgdrvDeleteWaitList(PRTLISTNODE pList)
1145{
1146 while (!RTListIsEmpty(pList))
1147 {
1148 int rc2;
1149 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1150 RTListNodeRemove(&pWait->ListNode);
1151
1152 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1153 pWait->Event = NIL_RTSEMEVENTMULTI;
1154 pWait->pSession = NULL;
1155 RTMemFree(pWait);
1156 }
1157}
1158
1159
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VGDrvCommonDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VGDrvCommonDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Stop and destroy HB timer and
     * disable host heartbeat checking.
     */
    if (pDevExt->pHeartbeatTimer)
    {
        RTTimerDestroy(pDevExt->pHeartbeatTimer);
        vgdrvHeartbeatHostConfigure(pDevExt, false);
    }

    /* Free the heartbeat request buffer (allocated by the heartbeat init code). */
    VbglGRFree(pDevExt->pReqGuestHeartbeat);
    pDevExt->pReqGuestHeartbeat = NULL;

    /*
     * Clean up the bits that involves the host first.
     */
    vgdrvTermUnfixGuestMappings(pDevExt);
    if (!RTListIsEmpty(&pDevExt->SessionList))
    {
        /* Defensive: all sessions should have been closed by now. */
        LogRelFunc(("session list not empty!\n"));
        RTListInit(&pDevExt->SessionList);
    }
    /* Update the host flags (mouse status etc) not to reflect this session. */
    pDevExt->fFixedEvents = 0;
    vgdrvResetEventFilterOnHost(pDevExt, 0 /*fFixedEvents*/);
    vgdrvResetCapabilitiesOnHost(pDevExt);
    vgdrvResetMouseStatusOnHost(pDevExt);

    vgdrvCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    /* Destroy every wait list, including the free list holding recycled entries. */
    vgdrvDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    vgdrvDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    vgdrvDeleteWaitList(&pDevExt->WakeUpList);
#endif
    vgdrvDeleteWaitList(&pDevExt->WokenUpList);
    vgdrvDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    /* Tear down the loggers last so the cleanup above can still log. */
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif

}
1234
1235
1236/**
1237 * Creates a VBoxGuest user session.
1238 *
1239 * The native code calls this when a ring-3 client opens the device.
1240 * Use VGDrvCommonCreateKernelSession when a ring-0 client connects.
1241 *
1242 * @returns VBox status code.
1243 * @param pDevExt The device extension.
1244 * @param ppSession Where to store the session on success.
1245 */
1246int VGDrvCommonCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1247{
1248 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1249 if (RT_UNLIKELY(!pSession))
1250 {
1251 LogRel(("VGDrvCommonCreateUserSession: no memory!\n"));
1252 return VERR_NO_MEMORY;
1253 }
1254
1255 pSession->Process = RTProcSelf();
1256 pSession->R0Process = RTR0ProcHandleSelf();
1257 pSession->pDevExt = pDevExt;
1258 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1259 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1260 pDevExt->cSessions++;
1261 RTSpinlockRelease(pDevExt->SessionSpinlock);
1262
1263 *ppSession = pSession;
1264 LogFlow(("VGDrvCommonCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1265 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1266 return VINF_SUCCESS;
1267}
1268
1269
1270/**
1271 * Creates a VBoxGuest kernel session.
1272 *
1273 * The native code calls this when a ring-0 client connects to the device.
1274 * Use VGDrvCommonCreateUserSession when a ring-3 client opens the device.
1275 *
1276 * @returns VBox status code.
1277 * @param pDevExt The device extension.
1278 * @param ppSession Where to store the session on success.
1279 */
1280int VGDrvCommonCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1281{
1282 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1283 if (RT_UNLIKELY(!pSession))
1284 {
1285 LogRel(("VGDrvCommonCreateKernelSession: no memory!\n"));
1286 return VERR_NO_MEMORY;
1287 }
1288
1289 pSession->Process = NIL_RTPROCESS;
1290 pSession->R0Process = NIL_RTR0PROCESS;
1291 pSession->pDevExt = pDevExt;
1292 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1293 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1294 pDevExt->cSessions++;
1295 RTSpinlockRelease(pDevExt->SessionSpinlock);
1296
1297 *ppSession = pSession;
1298 LogFlow(("VGDrvCommonCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1299 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1300 return VINF_SUCCESS;
1301}
1302
1303
/**
 * Closes a VBoxGuest session.
 *
 * Unlinks the session, drops the host state it contributed (capabilities,
 * event filter, mouse status), cancels outstanding waits and disconnects its
 * HGCM clients before freeing the session structure.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VGDrvCommonCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
#ifdef VBOX_WITH_HGCM
    unsigned i;
#endif
    LogFlow(("VGDrvCommonCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
             pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

    /* Unlink the session from the device extension. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    RTListNodeRemove(&pSession->ListNode);
    pDevExt->cSessions--;
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    /* Clear all per-session host state (fSessionTermination = true). */
    vgdrvAcquireSessionCapabilities(pDevExt, pSession, 0, UINT32_MAX, VBOXGUESTCAPSACQUIRE_FLAGS_NONE,
                                    true /*fSessionTermination*/);
    vgdrvSetSessionCapabilities(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
    vgdrvSetSessionEventFilter(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
    vgdrvSetSessionMouseStatus(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);

    /* Wake up any thread still blocked in a wait for this session. */
    vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);

#ifdef VBOX_WITH_HGCM
    /* Disconnect every HGCM client this session had connected. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VGDrvCommonCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    /* Scrub the structure before freeing it (use-after-free detection aid). */
    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    vgdrvCloseMemBalloon(pDevExt, pSession);
    RTMemFree(pSession);
}
1349
1350
/**
 * Allocates a wait-for-event entry.
 *
 * Recycles an entry from the free list when possible, otherwise allocates a
 * new one together with its event semaphore.
 *
 * @returns The wait-for-event entry, NULL on allocation failure.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT vgdrvWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    /* Unlocked peek at the free list first; only take the spinlock when it
       looks non-empty, then re-fetch and unlink properly under the lock. */
    PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
    if (pWait)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);

        pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
        if (pWait)
            RTListNodeRemove(&pWait->ListNode);

        RTSpinlockRelease(pDevExt->EventSpinlock);
    }
    if (!pWait)
    {
        /* Nothing to recycle (or someone else grabbed it): allocate fresh. */
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            LogRelMax(32, ("vgdrvWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            LogRelMax(32, ("vgdrvWaitAlloc: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }

        pWait->ListNode.pNext = NULL;
        pWait->ListNode.pPrev = NULL;
    }

    /*
     * Zero members just as a precaution.
     */
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    pWait->fPendingWakeUp = false;
    pWait->fFreeMe = false;
#endif
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    /* Recycled entries may still have their semaphore signalled - reset it. */
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1413
1414
/**
 * Frees the wait-for-event entry.
 *
 * The entry is recycled onto the free list rather than released; the entry's
 * memory is only freed by vgdrvDeleteWaitList at driver teardown.
 *
 * The caller must own the wait spinlock !
 * The entry must be in a list!
 *
 * @param   pDevExt     The device extension.
 * @param   pWait       The wait-for-event entry to free.
 */
static void vgdrvWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    Assert(!pWait->fFreeMe);
    /* If VGDrvCommonWaitDoWakeUps is currently signalling this entry (it drops
       the spinlock around the signal call), defer the recycling to it via
       the fFreeMe flag. */
    if (pWait->fPendingWakeUp)
        pWait->fFreeMe = true;
    else
#endif
    {
        /* Move the entry from whatever list it is on to the free list. */
        RTListNodeRemove(&pWait->ListNode);
        RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
    }
}
1442
1443
1444/**
1445 * Frees the wait-for-event entry.
1446 *
1447 * @param pDevExt The device extension.
1448 * @param pWait The wait-for-event entry to free.
1449 */
1450static void vgdrvWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1451{
1452 RTSpinlockAcquire(pDevExt->EventSpinlock);
1453 vgdrvWaitFreeLocked(pDevExt, pWait);
1454 RTSpinlockRelease(pDevExt->EventSpinlock);
1455}
1456
1457
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
/**
 * Processes the wake-up list.
 *
 * All entries in the wake-up list get signalled and moved to the woken-up
 * list.
 *
 * @param   pDevExt     The device extension.
 */
void VGDrvCommonWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    /* Unlocked emptiness pre-check; the real work is done under the spinlock. */
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        for (;;)
        {
            int rc;
            PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            /* Mark the entry in-flight so vgdrvWaitFreeLocked defers freeing
               (fFreeMe) while we signal it with the spinlock dropped. */
            pWait->fPendingWakeUp = true;
            RTSpinlockRelease(pDevExt->EventSpinlock);

            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            RTSpinlockAcquire(pDevExt->EventSpinlock);
            pWait->fPendingWakeUp = false;
            if (!pWait->fFreeMe)
            {
                RTListNodeRemove(&pWait->ListNode);
                RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            }
            else
            {
                /* The waiter tried to free the entry while we were signalling
                   it; complete the deferred free now. */
                pWait->fFreeMe = false;
                vgdrvWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);
    }
}
#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1501
1502
1503/**
1504 * Implements the fast (no input or output) type of IOCtls.
1505 *
1506 * This is currently just a placeholder stub inherited from the support driver code.
1507 *
1508 * @returns VBox status code.
1509 * @param iFunction The IOCtl function number.
1510 * @param pDevExt The device extension.
1511 * @param pSession The session.
1512 */
1513int VGDrvCommonIoCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1514{
1515 LogFlow(("VGDrvCommonIoCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1516
1517 NOREF(iFunction);
1518 NOREF(pDevExt);
1519 NOREF(pSession);
1520 return VERR_NOT_SUPPORTED;
1521}
1522
1523
1524/**
1525 * Return the VMM device port.
1526 *
1527 * returns IPRT status code.
1528 * @param pDevExt The device extension.
1529 * @param pInfo The request info.
1530 * @param pcbDataReturned (out) contains the number of bytes to return.
1531 */
1532static int vgdrvIoCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1533{
1534 LogFlow(("VBOXGUEST_IOCTL_GETVMMDEVPORT\n"));
1535
1536 pInfo->portAddress = pDevExt->IOPortBase;
1537 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1538 if (pcbDataReturned)
1539 *pcbDataReturned = sizeof(*pInfo);
1540 return VINF_SUCCESS;
1541}
1542
1543
1544#ifndef RT_OS_WINDOWS
1545/**
1546 * Set the callback for the kernel mouse handler.
1547 *
1548 * returns IPRT status code.
1549 * @param pDevExt The device extension.
1550 * @param pNotify The new callback information.
1551 */
1552int vgdrvIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
1553{
1554 LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK: pfnNotify=%p pvUser=%p\n", pNotify->pfnNotify, pNotify->pvUser));
1555
1556 RTSpinlockAcquire(pDevExt->EventSpinlock);
1557 pDevExt->MouseNotifyCallback = *pNotify;
1558 RTSpinlockRelease(pDevExt->EventSpinlock);
1559 return VINF_SUCCESS;
1560}
1561#endif
1562
1563
/**
 * Worker vgdrvIoCtl_WaitEvent.
 *
 * The caller enters the spinlock, we leave it.
 *
 * @returns VINF_SUCCESS if the wait condition was met (events consumed and
 *          written to @a pInfo); VERR_TIMEOUT if not.  The spinlock is
 *          released on both paths.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       The wait request (output fields are filled on success).
 * @param   iEvent      Bit index of the first requested event (logging only).
 * @param   fReqEvents  The requested event mask.
 */
DECLINLINE(int) vbdgCheckWaitEventCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            VBoxGuestWaitEventInfo *pInfo, int iEvent, const uint32_t fReqEvents)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    /* Acquire-style events are only delivered to sessions allowed to see them. */
    if (fMatches & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
        fMatches &= vgdrvGetAllowedEventMaskForSession(pDevExt, pSession);
    if (fMatches || pSession->fPendingCancelWaitEvents)
    {
        /* Consume the matched events and hand them back to the caller. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockRelease(pDevExt->EventSpinlock);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        /* A pending cancel only interrupts one wait attempt. */
        pSession->fPendingCancelWaitEvents = false;
        return VINF_SUCCESS;
    }

    RTSpinlockRelease(pDevExt->EventSpinlock);
    return VERR_TIMEOUT;
}
1595
1596
/**
 * Worker for the VBOXGUEST_IOCTL_WAITEVENT I/O control.
 *
 * Waits until at least one of the events in pInfo->u32EventMaskIn is pending
 * or the timeout (pInfo->u32TimeoutIn, milliseconds, UINT32_MAX = forever)
 * expires.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The wait request; output fields are filled in.
 * @param   pcbDataReturned     Where to store the number of bytes returned.
 *                              Optional.
 * @param   fInterruptible      Whether the wait may be interrupted by signals.
 */
static int vgdrvIoCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;     /* -1 when no bit is set. */
    if (RT_UNLIKELY(iEvent < 0))
    {
        LogRel(("VBOXGUEST_IOCTL_WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);  /* releases the spinlock */
    if (rc == VINF_SUCCESS)
        return rc;

    if (!pInfo->u32TimeoutIn)
    {
        /* Zero timeout means poll only - don't set up a wait entry. */
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = vgdrvWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);  /* releases the spinlock */
    if (rc == VINF_SUCCESS)
    {
        vgdrvWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    vgdrvWaitFreeLocked(pDevExt, pWait);
    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     */
    if (   fResEvents
        && fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        /* UINT32_MAX is the cancellation marker set by vgdrvIoCtl_CancelAllWaitEvents. */
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    }
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* A successful wait with no result events should not happen. */
            LogRelMax(32, ("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1717
1718
/**
 * Worker for the VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS I/O control.
 *
 * Cancels all waits belonging to the given session, waking the waiters with
 * the UINT32_MAX cancellation marker.
 *
 * @returns VINF_SUCCESS.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waits should be cancelled.
 */
static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;
    /* Was at least one WAITEVENT in process for this session? If not we
     * set a flag that the next call should be interrupted immediately. This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool fCancelledOne = false;

    LogFlow(("VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            fCancelledOne = true;
            pWait->fResEvents = UINT32_MAX;     /* cancellation marker, see vgdrvIoCtl_WaitEvent */
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
    RTSpinlockRelease(pDevExt->EventSpinlock);
    Assert(rc == 0);
    NOREF(rc);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* Perform the actual signalling outside the spinlock. */
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1763
1764
1765/**
1766 * Checks if the VMM request is allowed in the context of the given session.
1767 *
1768 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1769 * @param pDevExt The device extension.
1770 * @param pSession The calling session.
1771 * @param enmType The request type.
1772 * @param pReqHdr The request.
1773 */
1774static int vgdrvCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1775 VMMDevRequestHeader const *pReqHdr)
1776{
1777 /*
1778 * Categorize the request being made.
1779 */
1780 /** @todo This need quite some more work! */
1781 enum
1782 {
1783 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1784 } enmRequired;
1785 switch (enmType)
1786 {
1787 /*
1788 * Deny access to anything we don't know or provide specialized I/O controls for.
1789 */
1790#ifdef VBOX_WITH_HGCM
1791 case VMMDevReq_HGCMConnect:
1792 case VMMDevReq_HGCMDisconnect:
1793# ifdef VBOX_WITH_64_BITS_GUESTS
1794 case VMMDevReq_HGCMCall32:
1795 case VMMDevReq_HGCMCall64:
1796# else
1797 case VMMDevReq_HGCMCall:
1798# endif /* VBOX_WITH_64_BITS_GUESTS */
1799 case VMMDevReq_HGCMCancel:
1800 case VMMDevReq_HGCMCancel2:
1801#endif /* VBOX_WITH_HGCM */
1802 case VMMDevReq_SetGuestCapabilities:
1803 default:
1804 enmRequired = kLevel_NoOne;
1805 break;
1806
1807 /*
1808 * There are a few things only this driver can do (and it doesn't use
1809 * the VMMRequst I/O control route anyway, but whatever).
1810 */
1811 case VMMDevReq_ReportGuestInfo:
1812 case VMMDevReq_ReportGuestInfo2:
1813 case VMMDevReq_GetHypervisorInfo:
1814 case VMMDevReq_SetHypervisorInfo:
1815 case VMMDevReq_RegisterPatchMemory:
1816 case VMMDevReq_DeregisterPatchMemory:
1817 case VMMDevReq_GetMemBalloonChangeRequest:
1818 enmRequired = kLevel_OnlyVBoxGuest;
1819 break;
1820
1821 /*
1822 * Trusted users apps only.
1823 */
1824 case VMMDevReq_QueryCredentials:
1825 case VMMDevReq_ReportCredentialsJudgement:
1826 case VMMDevReq_RegisterSharedModule:
1827 case VMMDevReq_UnregisterSharedModule:
1828 case VMMDevReq_WriteCoreDump:
1829 case VMMDevReq_GetCpuHotPlugRequest:
1830 case VMMDevReq_SetCpuHotPlugStatus:
1831 case VMMDevReq_CheckSharedModules:
1832 case VMMDevReq_GetPageSharingStatus:
1833 case VMMDevReq_DebugIsPageShared:
1834 case VMMDevReq_ReportGuestStats:
1835 case VMMDevReq_ReportGuestUserState:
1836 case VMMDevReq_GetStatisticsChangeRequest:
1837 case VMMDevReq_ChangeMemBalloon:
1838 enmRequired = kLevel_TrustedUsers;
1839 break;
1840
1841 /*
1842 * Anyone.
1843 */
1844 case VMMDevReq_GetMouseStatus:
1845 case VMMDevReq_SetMouseStatus:
1846 case VMMDevReq_SetPointerShape:
1847 case VMMDevReq_GetHostVersion:
1848 case VMMDevReq_Idle:
1849 case VMMDevReq_GetHostTime:
1850 case VMMDevReq_SetPowerStatus:
1851 case VMMDevReq_AcknowledgeEvents:
1852 case VMMDevReq_CtlGuestFilterMask:
1853 case VMMDevReq_ReportGuestStatus:
1854 case VMMDevReq_GetDisplayChangeRequest:
1855 case VMMDevReq_VideoModeSupported:
1856 case VMMDevReq_GetHeightReduction:
1857 case VMMDevReq_GetDisplayChangeRequest2:
1858 case VMMDevReq_VideoModeSupported2:
1859 case VMMDevReq_VideoAccelEnable:
1860 case VMMDevReq_VideoAccelFlush:
1861 case VMMDevReq_VideoSetVisibleRegion:
1862 case VMMDevReq_GetDisplayChangeRequestEx:
1863 case VMMDevReq_GetSeamlessChangeRequest:
1864 case VMMDevReq_GetVRDPChangeRequest:
1865 case VMMDevReq_LogString:
1866 case VMMDevReq_GetSessionId:
1867 enmRequired = kLevel_AllUsers;
1868 break;
1869
1870 /*
1871 * Depends on the request parameters...
1872 */
1873 /** @todo this have to be changed into an I/O control and the facilities
1874 * tracked in the session so they can automatically be failed when the
1875 * session terminates without reporting the new status.
1876 *
1877 * The information presented by IGuest is not reliable without this! */
1878 case VMMDevReq_ReportGuestCapabilities:
1879 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1880 {
1881 case VBoxGuestFacilityType_All:
1882 case VBoxGuestFacilityType_VBoxGuestDriver:
1883 enmRequired = kLevel_OnlyVBoxGuest;
1884 break;
1885 case VBoxGuestFacilityType_VBoxService:
1886 enmRequired = kLevel_TrustedUsers;
1887 break;
1888 case VBoxGuestFacilityType_VBoxTrayClient:
1889 case VBoxGuestFacilityType_Seamless:
1890 case VBoxGuestFacilityType_Graphics:
1891 default:
1892 enmRequired = kLevel_AllUsers;
1893 break;
1894 }
1895 break;
1896 }
1897
1898 /*
1899 * Check against the session.
1900 */
1901 switch (enmRequired)
1902 {
1903 default:
1904 case kLevel_NoOne:
1905 break;
1906 case kLevel_OnlyVBoxGuest:
1907 case kLevel_OnlyKernel:
1908 if (pSession->R0Process == NIL_RTR0PROCESS)
1909 return VINF_SUCCESS;
1910 break;
1911 case kLevel_TrustedUsers:
1912 case kLevel_AllUsers:
1913 return VINF_SUCCESS;
1914 }
1915
1916 return VERR_PERMISSION_DENIED;
1917}
1918
/**
 * Handles VBOXGUEST_IOCTL_VMMREQUEST.
 *
 * Validates a raw VMMDev request supplied by the caller, checks that the
 * session is allowed to issue it, and performs it against the host via a
 * copy allocated on the physical memory heap.
 *
 * @returns VBox status code; on host-side execution failure the VMMDev
 *          status (pReqCopy->rc) is propagated instead.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pReqHdr         The request to perform (input/output buffer).
 * @param   cbData          Size of the buffer @a pReqHdr points to.
 * @param   pcbDataReturned Where to store the amount of returned data.
 *                          Optional (can be NULL).
 */
static int vgdrvIoCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                 VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType = pReqHdr->requestType;
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = (uint32_t)vmmdevGetRequestSize(enmType);

    LogFlow(("VBOXGUEST_IOCTL_VMMREQUEST: type %d\n", pReqHdr->requestType));

    /* The declared size must cover at least the fixed part for this request type... */
    if (cbReq < cbMinSize)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
                cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    /* ...and must not claim more than the caller actually supplied. */
    if (cbReq > cbData)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
                cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    /* Deeper structural validation is delegated to the guest library. */
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /* Enforce the per-request-type trust level for this session. */
    rc = vgdrvCheckIfVmmReqIsAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (   RT_SUCCESS(rc)
        && RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly updated) request back to the caller's buffer. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the host rejected the request; hand that status back. */
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
2002
2003
2004#ifdef VBOX_WITH_HGCM
2005
2006AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
2007
/**
 * Worker common to vgdrvHgcmAsyncWaitCallback and
 * vgdrvHgcmAsyncWaitCallbackInterruptible.
 *
 * Blocks the calling thread until the HGCM request header has the
 * VBOX_HGCM_REQ_DONE flag set (presumably set by the interrupt/event
 * dispatch code that signals the wait entry -- confirm against the ISR),
 * or until timeout/interruption.
 *
 * @returns VINF_SUCCESS when the request is done, VERR_INTERRUPTED if an
 *          interruptible wait was interrupted, VERR_TIMEOUT on timeout, or
 *          VERR_SEM_DESTROYED if the event semaphore was torn down.
 * @param   pHdr            The HGCM request header to wait on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   cMillies        Timeout in milliseconds (RT_INDEFINITE_WAIT for none).
 */
static int vgdrvHgcmAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                            bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockRelease(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);

        pWait = vgdrvWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        /* Out of wait entries: back off briefly and retry rather than failing. */
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        /* Request completed between the first check and linking; undo and leave. */
        vgdrvWaitFreeLocked(pDevExt, pWait);
        RTSpinlockRelease(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockRelease(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc; /* The wait entry is gone with the semaphore; nothing left to free. */

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (   !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("vgdrvHgcmAsyncWaitCallback: wait failed! %Rrc\n", rc));

    vgdrvWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
2076
2077
2078/**
2079 * This is a callback for dealing with async waits.
2080 *
2081 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2082 */
2083static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2084{
2085 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2086 LogFlow(("vgdrvHgcmAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
2087 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2088 false /* fInterruptible */, u32User /* cMillies */);
2089}
2090
2091
2092/**
2093 * This is a callback for dealing with async waits with a timeout.
2094 *
2095 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2096 */
2097static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2098{
2099 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2100 LogFlow(("vgdrvHgcmAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
2101 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2102 true /* fInterruptible */, u32User /* cMillies */);
2103}
2104
2105
/**
 * Handles VBOXGUEST_IOCTL_HGCM_CONNECT.
 *
 * Connects to an HGCM service on behalf of the session and records the new
 * client ID in the session's client ID table so it can be cleaned up when
 * the session closes.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The connect request (input/output).
 * @param   pcbDataReturned Where to store the amount of returned data.
 *                          Optional (can be NULL).
 */
static int vgdrvIoCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                  VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
{
    int rc;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(pInfo, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSpinlockAcquire(pDevExt->SessionSpinlock);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockRelease(pDevExt->SessionSpinlock);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: undo the host-side connect before failing. */
                VBoxGuestHGCMDisconnectInfo Info;
                LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        else
            rc = pInfo->result;
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
2157
2158
/**
 * Handles VBOXGUEST_IOCTL_HGCM_DISCONNECT.
 *
 * Disconnects an HGCM client belonging to the session and updates the
 * session's client ID table accordingly.
 *
 * @returns VBox status code.  VERR_INVALID_HANDLE if the client ID isn't in
 *          the session's table.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The disconnect request (input/output).
 * @param   pcbDataReturned Where to store the amount of returned data.
 *                          Optional (can be NULL).
 */
static int vgdrvIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                     VBoxGuestHGCMDisconnectInfo *pInfo, size_t *pcbDataReturned)
{
    /*
     * Validate the client id and invalidate its entry while we're in the call.
     * (UINT32_MAX marks the slot as disconnect-in-progress.)
     */
    int rc;
    const uint32_t u32ClientId = pInfo->u32ClientID;
    unsigned i;
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == u32ClientId)
        {
            pSession->aHGCMClientIds[i] = UINT32_MAX;
            break;
        }
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglR0HGCMInternalDisconnect call will invoke the callback if the
     * HGCM call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
    rc = VbglR0HGCMInternalDisconnect(pInfo, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        LogFlow(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }

    /* Update the client id array according to the result: free the slot on
       success, restore the client id on failure. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    if (pSession->aHGCMClientIds[i] == UINT32_MAX)
        pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
    RTSpinlockRelease(pDevExt->SessionSpinlock);

    return rc;
}
2204
2205
/**
 * Handles VBOXGUEST_IOCTL_HGCM_CALL.
 *
 * Validates the call buffer and client ID, then performs the HGCM call via
 * the guest library (32-bit parameter layout on 64-bit hosts when @a f32bit).
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The call request (input/output).
 * @param   cMillies        Call timeout in milliseconds.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   f32bit          Set if the caller uses the 32-bit parameter layout.
 * @param   fUserData       Set to force user-mode buffer treatment even for
 *                          kernel sessions.
 * @param   cbExtra         Size of any extra data preceding @a pInfo in the
 *                          buffer (OS specific).
 * @param   cbData          Total size of the buffer.
 * @param   pcbDataReturned Where to store the amount of returned data.
 *                          Optional (can be NULL).
 */
static int vgdrvIoCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMCallInfo *pInfo,
                               uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
                               size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
{
    const uint32_t u32ClientId = pInfo->u32ClientID;
    uint32_t fFlags;
    size_t cbActual;
    unsigned i;
    int rc;

    /*
     * Some more validations.
     */
    if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
        return VERR_INVALID_PARAMETER;
    }

    /* Compute the required buffer size from the parameter count and layout. */
    cbActual = cbExtra + sizeof(*pInfo);
#ifdef RT_ARCH_AMD64
    if (f32bit)
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
    else
#endif
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
    if (cbData < cbActual)
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
                cbData, cbData, cbActual, cbActual));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Validate the client id.
     */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == u32ClientId)
            break;
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglHGCMCall call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. This function can
     * deal with cancelled requests, so we let user more requests
     * be interruptible (should add a flag for this later I guess).
     */
    LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
    fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
    uint32_t cbInfo = (uint32_t)(cbData - cbExtra);
#ifdef RT_ARCH_AMD64
    if (f32bit)
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    else
#endif
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    if (RT_SUCCESS(rc))
    {
        LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: result=%Rrc\n", pInfo->result));
        if (pcbDataReturned)
            *pcbDataReturned = cbActual;
    }
    else
    {
        /* Interruption and timeout are expected outcomes; log those quietly. */
        if (   rc != VERR_INTERRUPTED
            && rc != VERR_TIMEOUT)
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
        else
            Log(("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
    }
    return rc;
}
2294
2295#endif /* VBOX_WITH_HGCM */
2296
2297/**
2298 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
2299 *
2300 * Ask the host for the size of the balloon and try to set it accordingly. If
2301 * this approach fails because it's not supported, return with fHandleInR3 set
2302 * and let the user land supply memory we can lock via the other ioctl.
2303 *
2304 * @returns VBox status code.
2305 *
2306 * @param pDevExt The device extension.
2307 * @param pSession The session.
2308 * @param pInfo The output buffer.
2309 * @param pcbDataReturned Where to store the amount of returned data. Can
2310 * be NULL.
2311 */
2312static int vgdrvIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2313 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
2314{
2315 VMMDevGetMemBalloonChangeRequest *pReq;
2316 int rc;
2317
2318 LogFlow(("VBOXGUEST_IOCTL_CHECK_BALLOON:\n"));
2319 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2320 AssertRCReturn(rc, rc);
2321
2322 /*
2323 * The first user trying to query/change the balloon becomes the
2324 * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
2325 */
2326 if ( pDevExt->MemBalloon.pOwner != pSession
2327 && pDevExt->MemBalloon.pOwner == NULL)
2328 pDevExt->MemBalloon.pOwner = pSession;
2329
2330 if (pDevExt->MemBalloon.pOwner == pSession)
2331 {
2332 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2333 if (RT_SUCCESS(rc))
2334 {
2335 /*
2336 * This is a response to that event. Setting this bit means that
2337 * we request the value from the host and change the guest memory
2338 * balloon according to this value.
2339 */
2340 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2341 rc = VbglGRPerform(&pReq->header);
2342 if (RT_SUCCESS(rc))
2343 {
2344 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2345 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2346
2347 pInfo->cBalloonChunks = pReq->cBalloonChunks;
2348 pInfo->fHandleInR3 = false;
2349
2350 rc = vgdrvSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
2351 /* Ignore various out of memory failures. */
2352 if ( rc == VERR_NO_MEMORY
2353 || rc == VERR_NO_PHYS_MEMORY
2354 || rc == VERR_NO_CONT_MEMORY)
2355 rc = VINF_SUCCESS;
2356
2357 if (pcbDataReturned)
2358 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
2359 }
2360 else
2361 LogRel(("VBOXGUEST_IOCTL_CHECK_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2362 VbglGRFree(&pReq->header);
2363 }
2364 }
2365 else
2366 rc = VERR_PERMISSION_DENIED;
2367
2368 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2369 LogFlow(("VBOXGUEST_IOCTL_CHECK_BALLOON returns %Rrc\n", rc));
2370 return rc;
2371}
2372
2373
2374/**
2375 * Handle a request for changing the memory balloon.
2376 *
2377 * @returns VBox status code.
2378 *
2379 * @param pDevExt The device extention.
2380 * @param pSession The session.
2381 * @param pInfo The change request structure (input).
2382 * @param pcbDataReturned Where to store the amount of returned data. Can
2383 * be NULL.
2384 */
2385static int vgdrvIoCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2386 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2387{
2388 int rc;
2389 LogFlow(("VBOXGUEST_IOCTL_CHANGE_BALLOON: fInflate=%RTbool u64ChunkAddr=%#RX64\n", pInfo->fInflate, pInfo->u64ChunkAddr));
2390
2391 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2392 AssertRCReturn(rc, rc);
2393
2394 if (!pDevExt->MemBalloon.fUseKernelAPI)
2395 {
2396 /*
2397 * The first user trying to query/change the balloon becomes the
2398 * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
2399 */
2400 if ( pDevExt->MemBalloon.pOwner != pSession
2401 && pDevExt->MemBalloon.pOwner == NULL)
2402 pDevExt->MemBalloon.pOwner = pSession;
2403
2404 if (pDevExt->MemBalloon.pOwner == pSession)
2405 {
2406 rc = vgdrvSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2407 if (pcbDataReturned)
2408 *pcbDataReturned = 0;
2409 }
2410 else
2411 rc = VERR_PERMISSION_DENIED;
2412 }
2413 else
2414 rc = VERR_PERMISSION_DENIED;
2415
2416 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2417 return rc;
2418}
2419
2420
2421/**
2422 * Handle a request for writing a core dump of the guest on the host.
2423 *
2424 * @returns VBox status code.
2425 *
2426 * @param pDevExt The device extension.
2427 * @param pInfo The output buffer.
2428 */
2429static int vgdrvIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2430{
2431 VMMDevReqWriteCoreDump *pReq = NULL;
2432 int rc;
2433 LogFlow(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP\n"));
2434
2435 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2436 if (RT_SUCCESS(rc))
2437 {
2438 pReq->fFlags = pInfo->fFlags;
2439 rc = VbglGRPerform(&pReq->header);
2440 if (RT_FAILURE(rc))
2441 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2442
2443 VbglGRFree(&pReq->header);
2444 }
2445 else
2446 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2447 sizeof(*pReq), sizeof(*pReq), rc));
2448 return rc;
2449}
2450
2451
2452/**
2453 * Guest backdoor logging.
2454 *
2455 * @returns VBox status code.
2456 *
2457 * @param pDevExt The device extension.
2458 * @param pch The log message (need not be NULL terminated).
2459 * @param cbData Size of the buffer.
2460 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2461 * @param fUserSession Copy of VBOXGUESTSESSION::fUserSession for the
2462 * call. True normal user, false root user.
2463 */
2464static int vgdrvIoCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned, bool fUserSession)
2465{
2466 if (pDevExt->fLoggingEnabled)
2467 RTLogBackdoorPrintf("%.*s", cbData, pch);
2468 else if (!fUserSession)
2469 LogRel(("%.*s", cbData, pch));
2470 else
2471 Log(("%.*s", cbData, pch));
2472 if (pcbDataReturned)
2473 *pcbDataReturned = 0;
2474 return VINF_SUCCESS;
2475}
2476
2477
2478/** @name Guest Capabilities, Mouse Status and Event Filter
2479 * @{
2480 */
2481
2482/**
2483 * Clears a bit usage tracker (init time).
2484 *
2485 * @param pTracker The tracker to clear.
2486 */
2487static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
2488{
2489 uint32_t iBit;
2490 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2491
2492 for (iBit = 0; iBit < 32; iBit++)
2493 pTracker->acPerBitUsage[iBit] = 0;
2494 pTracker->fMask = 0;
2495}
2496
2497
#ifdef VBOX_STRICT
/**
 * Strict-build sanity check: verifies that pTracker->fMask matches the
 * per-bit usage counters and that no counter exceeds the valid range.
 *
 * @param   pTracker    The tracker.
 * @param   cMax        Max valid usage value.
 * @param   pszWhat     Identifies the tracker in assertions.
 */
static void vgdrvBitUsageTrackerCheckMask(PCVBOXGUESTBITUSAGETRACER pTracker, uint32_t cMax, const char *pszWhat)
{
    uint32_t fExpected = 0;
    uint32_t iBit;
    AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));

    /* Recompute the mask from the counters, checking each counter's range. */
    for (iBit = 0; iBit < 32; iBit++)
    {
        uint32_t const cUsage = pTracker->acPerBitUsage[iBit];
        if (cUsage)
        {
            fExpected |= RT_BIT_32(iBit);
            AssertMsg(cUsage <= cMax,
                      ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, cUsage, cMax));
        }
    }

    AssertMsg(fExpected == pTracker->fMask, ("%s: %#x vs %#x\n", pszWhat, fExpected, pTracker->fMask));
}
#endif
2524
2525
2526/**
2527 * Applies a change to the bit usage tracker.
2528 *
2529 *
2530 * @returns true if the mask changed, false if not.
2531 * @param pTracker The bit usage tracker.
2532 * @param fChanged The bits to change.
2533 * @param fPrevious The previous value of the bits.
2534 * @param cMax The max valid usage value for assertions.
2535 * @param pszWhat Identifies the tracker in assertions.
2536 */
2537static bool vgdrvBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker, uint32_t fChanged, uint32_t fPrevious,
2538 uint32_t cMax, const char *pszWhat)
2539{
2540 bool fGlobalChange = false;
2541 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2542
2543 while (fChanged)
2544 {
2545 uint32_t const iBit = ASMBitFirstSetU32(fChanged) - 1;
2546 uint32_t const fBitMask = RT_BIT_32(iBit);
2547 Assert(iBit < 32); Assert(fBitMask & fChanged);
2548
2549 if (fBitMask & fPrevious)
2550 {
2551 pTracker->acPerBitUsage[iBit] -= 1;
2552 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
2553 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2554 if (pTracker->acPerBitUsage[iBit] == 0)
2555 {
2556 fGlobalChange = true;
2557 pTracker->fMask &= ~fBitMask;
2558 }
2559 }
2560 else
2561 {
2562 pTracker->acPerBitUsage[iBit] += 1;
2563 AssertMsg(pTracker->acPerBitUsage[iBit] > 0 && pTracker->acPerBitUsage[iBit] <= cMax,
2564 ("pTracker->acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2565 if (pTracker->acPerBitUsage[iBit] == 1)
2566 {
2567 fGlobalChange = true;
2568 pTracker->fMask |= fBitMask;
2569 }
2570 }
2571
2572 fChanged &= ~fBitMask;
2573 }
2574
2575#ifdef VBOX_STRICT
2576 vgdrvBitUsageTrackerCheckMask(pTracker, cMax, pszWhat);
2577#endif
2578 NOREF(pszWhat); NOREF(cMax);
2579 return fGlobalChange;
2580}
2581
2582
2583/**
2584 * Init and termination worker for resetting the (host) event filter on the host
2585 *
2586 * @returns VBox status code.
2587 * @param pDevExt The device extension.
2588 * @param fFixedEvents Fixed events (init time).
2589 */
2590static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents)
2591{
2592 VMMDevCtlGuestFilterMask *pReq;
2593 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
2594 if (RT_SUCCESS(rc))
2595 {
2596 pReq->u32NotMask = UINT32_MAX & ~fFixedEvents;
2597 pReq->u32OrMask = fFixedEvents;
2598 rc = VbglGRPerform(&pReq->header);
2599 if (RT_FAILURE(rc))
2600 LogRelFunc(("failed with rc=%Rrc\n", rc));
2601 VbglGRFree(&pReq->header);
2602 }
2603 return rc;
2604}
2605
2606
2607/**
2608 * Changes the event filter mask for the given session.
2609 *
2610 * This is called in response to VBOXGUEST_IOCTL_CTL_FILTER_MASK as well as to
2611 * do session cleanup.
2612 *
2613 * @returns VBox status code.
2614 * @param pDevExt The device extension.
2615 * @param pSession The session.
2616 * @param fOrMask The events to add.
2617 * @param fNotMask The events to remove.
2618 * @param fSessionTermination Set if we're called by the session cleanup code.
2619 * This tweaks the error handling so we perform
2620 * proper session cleanup even if the host
2621 * misbehaves.
2622 *
2623 * @remarks Takes the session spinlock.
2624 */
2625static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2626 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
2627{
2628 VMMDevCtlGuestFilterMask *pReq;
2629 uint32_t fChanged;
2630 uint32_t fPrevious;
2631 int rc;
2632
2633 /*
2634 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
2635 */
2636 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
2637 if (RT_SUCCESS(rc))
2638 { /* nothing */ }
2639 else if (!fSessionTermination)
2640 {
2641 LogRel(("vgdrvSetSessionFilterMask: VbglGRAlloc failure: %Rrc\n", rc));
2642 return rc;
2643 }
2644 else
2645 pReq = NULL; /* Ignore failure, we must do session cleanup. */
2646
2647
2648 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2649
2650 /*
2651 * Apply the changes to the session mask.
2652 */
2653 fPrevious = pSession->fEventFilter;
2654 pSession->fEventFilter |= fOrMask;
2655 pSession->fEventFilter &= ~fNotMask;
2656
2657 /*
2658 * If anything actually changed, update the global usage counters.
2659 */
2660 fChanged = fPrevious ^ pSession->fEventFilter;
2661 if (fChanged)
2662 {
2663 bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, fPrevious,
2664 pDevExt->cSessions, "EventFilterTracker");
2665
2666 /*
2667 * If there are global changes, update the event filter on the host.
2668 */
2669 if (fGlobalChange || pDevExt->fEventFilterHost == UINT32_MAX)
2670 {
2671 Assert(pReq || fSessionTermination);
2672 if (pReq)
2673 {
2674 pReq->u32OrMask = pDevExt->fFixedEvents | pDevExt->EventFilterTracker.fMask;
2675 if (pReq->u32OrMask == pDevExt->fEventFilterHost)
2676 rc = VINF_SUCCESS;
2677 else
2678 {
2679 pDevExt->fEventFilterHost = pReq->u32OrMask;
2680 pReq->u32NotMask = ~pReq->u32OrMask;
2681 rc = VbglGRPerform(&pReq->header);
2682 if (RT_FAILURE(rc))
2683 {
2684 /*
2685 * Failed, roll back (unless it's session termination time).
2686 */
2687 pDevExt->fEventFilterHost = UINT32_MAX;
2688 if (!fSessionTermination)
2689 {
2690 vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, pSession->fEventFilter,
2691 pDevExt->cSessions, "EventFilterTracker");
2692 pSession->fEventFilter = fPrevious;
2693 }
2694 }
2695 }
2696 }
2697 else
2698 rc = VINF_SUCCESS;
2699 }
2700 }
2701
2702 RTSpinlockRelease(pDevExt->SessionSpinlock);
2703 if (pReq)
2704 VbglGRFree(&pReq->header);
2705 return rc;
2706}
2707
2708
2709/**
2710 * Handle VBOXGUEST_IOCTL_CTL_FILTER_MASK.
2711 *
2712 * @returns VBox status code.
2713 *
2714 * @param pDevExt The device extension.
2715 * @param pSession The session.
2716 * @param pInfo The request.
2717 */
2718static int vgdrvIoCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestFilterMaskInfo *pInfo)
2719{
2720 LogFlow(("VBOXGUEST_IOCTL_CTL_FILTER_MASK: or=%#x not=%#x\n", pInfo->u32OrMask, pInfo->u32NotMask));
2721
2722 if ((pInfo->u32OrMask | pInfo->u32NotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
2723 {
2724 Log(("VBOXGUEST_IOCTL_CTL_FILTER_MASK: or=%#x not=%#x: Invalid masks!\n", pInfo->u32OrMask, pInfo->u32NotMask));
2725 return VERR_INVALID_PARAMETER;
2726 }
2727
2728 return vgdrvSetSessionEventFilter(pDevExt, pSession, pInfo->u32OrMask, pInfo->u32NotMask, false /*fSessionTermination*/);
2729}
2730
2731
2732/**
2733 * Init and termination worker for set mouse feature status to zero on the host.
2734 *
2735 * @returns VBox status code.
2736 * @param pDevExt The device extension.
2737 */
2738static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt)
2739{
2740 VMMDevReqMouseStatus *pReq;
2741 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2742 if (RT_SUCCESS(rc))
2743 {
2744 pReq->mouseFeatures = 0;
2745 pReq->pointerXPos = 0;
2746 pReq->pointerYPos = 0;
2747 rc = VbglGRPerform(&pReq->header);
2748 if (RT_FAILURE(rc))
2749 LogRelFunc(("failed with rc=%Rrc\n", rc));
2750 VbglGRFree(&pReq->header);
2751 }
2752 return rc;
2753}
2754
2755
2756/**
2757 * Changes the mouse status mask for the given session.
2758 *
2759 * This is called in response to VBOXGUEST_IOCTL_SET_MOUSE_STATUS as well as to
2760 * do session cleanup.
2761 *
2762 * @returns VBox status code.
2763 * @param pDevExt The device extension.
2764 * @param pSession The session.
2765 * @param fOrMask The status flags to add.
2766 * @param fNotMask The status flags to remove.
2767 * @param fSessionTermination Set if we're called by the session cleanup code.
2768 * This tweaks the error handling so we perform
2769 * proper session cleanup even if the host
2770 * misbehaves.
2771 *
2772 * @remarks Takes the session spinlock.
2773 */
2774static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2775 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
2776{
2777 VMMDevReqMouseStatus *pReq;
2778 uint32_t fChanged;
2779 uint32_t fPrevious;
2780 int rc;
2781
2782 /*
2783 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
2784 */
2785 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2786 if (RT_SUCCESS(rc))
2787 { /* nothing */ }
2788 else if (!fSessionTermination)
2789 {
2790 LogRel(("vgdrvSetSessionMouseStatus: VbglGRAlloc failure: %Rrc\n", rc));
2791 return rc;
2792 }
2793 else
2794 pReq = NULL; /* Ignore failure, we must do session cleanup. */
2795
2796
2797 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2798
2799 /*
2800 * Apply the changes to the session mask.
2801 */
2802 fPrevious = pSession->fMouseStatus;
2803 pSession->fMouseStatus |= fOrMask;
2804 pSession->fMouseStatus &= ~fNotMask;
2805
2806 /*
2807 * If anything actually changed, update the global usage counters.
2808 */
2809 fChanged = fPrevious ^ pSession->fMouseStatus;
2810 if (fChanged)
2811 {
2812 bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, fPrevious,
2813 pDevExt->cSessions, "MouseStatusTracker");
2814
2815 /*
2816 * If there are global changes, update the event filter on the host.
2817 */
2818 if (fGlobalChange || pDevExt->fMouseStatusHost == UINT32_MAX)
2819 {
2820 Assert(pReq || fSessionTermination);
2821 if (pReq)
2822 {
2823 pReq->mouseFeatures = pDevExt->MouseStatusTracker.fMask;
2824 if (pReq->mouseFeatures == pDevExt->fMouseStatusHost)
2825 rc = VINF_SUCCESS;
2826 else
2827 {
2828 pDevExt->fMouseStatusHost = pReq->mouseFeatures;
2829 pReq->pointerXPos = 0;
2830 pReq->pointerYPos = 0;
2831 rc = VbglGRPerform(&pReq->header);
2832 if (RT_FAILURE(rc))
2833 {
2834 /*
2835 * Failed, roll back (unless it's session termination time).
2836 */
2837 pDevExt->fMouseStatusHost = UINT32_MAX;
2838 if (!fSessionTermination)
2839 {
2840 vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, pSession->fMouseStatus,
2841 pDevExt->cSessions, "MouseStatusTracker");
2842 pSession->fMouseStatus = fPrevious;
2843 }
2844 }
2845 }
2846 }
2847 else
2848 rc = VINF_SUCCESS;
2849 }
2850 }
2851
2852 RTSpinlockRelease(pDevExt->SessionSpinlock);
2853 if (pReq)
2854 VbglGRFree(&pReq->header);
2855 return rc;
2856}
2857
2858
2859/**
2860 * Sets the mouse status features for this session and updates them globally.
2861 *
2862 * @returns VBox status code.
2863 *
2864 * @param pDevExt The device extention.
2865 * @param pSession The session.
2866 * @param fFeatures New bitmap of enabled features.
2867 */
2868static int vgdrvIoCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
2869{
2870 LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_STATUS: features=%#x\n", fFeatures));
2871
2872 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
2873 return VERR_INVALID_PARAMETER;
2874
2875 return vgdrvSetSessionMouseStatus(pDevExt, pSession, fFeatures, ~fFeatures, false /*fSessionTermination*/);
2876}
2877
2878
2879/**
2880 * Return the mask of VMM device events that this session is allowed to see (wrt
2881 * to "acquire" mode guest capabilities).
2882 *
2883 * The events associated with guest capabilities in "acquire" mode will be
2884 * restricted to sessions which has acquired the respective capabilities.
2885 * If someone else tries to wait for acquired events, they won't be woken up
2886 * when the event becomes pending. Should some other thread in the session
2887 * acquire the capability while the corresponding event is pending, the waiting
2888 * thread will woken up.
2889 *
2890 * @returns Mask of events valid for the given session.
2891 * @param pDevExt The device extension.
2892 * @param pSession The session.
2893 *
2894 * @remarks Needs only be called when dispatching events in the
2895 * VBOXGUEST_ACQUIRE_STYLE_EVENTS mask.
2896 */
2897static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2898{
2899 uint32_t fAcquireModeGuestCaps;
2900 uint32_t fAcquiredGuestCaps;
2901 uint32_t fAllowedEvents;
2902
2903 /*
2904 * Note! Reads pSession->fAcquiredGuestCaps and pDevExt->fAcquireModeGuestCaps
2905 * WITHOUT holding VBOXGUESTDEVEXT::SessionSpinlock.
2906 */
2907 fAcquireModeGuestCaps = ASMAtomicUoReadU32(&pDevExt->fAcquireModeGuestCaps);
2908 if (fAcquireModeGuestCaps == 0)
2909 return VMMDEV_EVENT_VALID_EVENT_MASK;
2910 fAcquiredGuestCaps = ASMAtomicUoReadU32(&pSession->fAcquiredGuestCaps);
2911
2912 /*
2913 * Calculate which events to allow according to the cap config and caps
2914 * acquired by the session.
2915 */
2916 fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK;
2917 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
2918 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
2919 fAllowedEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
2920
2921 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
2922 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
2923 fAllowedEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
2924
2925 return fAllowedEvents;
2926}
2927
2928
2929/**
2930 * Init and termination worker for set guest capabilities to zero on the host.
2931 *
2932 * @returns VBox status code.
2933 * @param pDevExt The device extension.
2934 */
2935static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt)
2936{
2937 VMMDevReqGuestCapabilities2 *pReq;
2938 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
2939 if (RT_SUCCESS(rc))
2940 {
2941 pReq->u32NotMask = UINT32_MAX;
2942 pReq->u32OrMask = 0;
2943 rc = VbglGRPerform(&pReq->header);
2944
2945 if (RT_FAILURE(rc))
2946 LogRelFunc(("failed with rc=%Rrc\n", rc));
2947 VbglGRFree(&pReq->header);
2948 }
2949 return rc;
2950}
2951
2952
2953/**
2954 * Sets the guest capabilities to the host while holding the lock.
2955 *
2956 * This will ASSUME that we're the ones in charge of the mask, so
2957 * we'll simply clear all bits we don't set.
2958 *
2959 * @returns VBox status code.
2960 * @param pDevExt The device extension.
2961 * @param pReq The request.
2962 */
2963static int vgdrvUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT pDevExt, VMMDevReqGuestCapabilities2 *pReq)
2964{
2965 int rc;
2966
2967 pReq->u32OrMask = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
2968 if (pReq->u32OrMask == pDevExt->fGuestCapsHost)
2969 rc = VINF_SUCCESS;
2970 else
2971 {
2972 pDevExt->fGuestCapsHost = pReq->u32OrMask;
2973 pReq->u32NotMask = ~pReq->u32OrMask;
2974 rc = VbglGRPerform(&pReq->header);
2975 if (RT_FAILURE(rc))
2976 pDevExt->fGuestCapsHost = UINT32_MAX;
2977 }
2978
2979 return rc;
2980}
2981
2982
2983/**
2984 * Switch a set of capabilities into "acquire" mode and (maybe) acquire them for
2985 * the given session.
2986 *
2987 * This is called in response to VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE as well as
2988 * to do session cleanup.
2989 *
2990 * @returns VBox status code.
2991 * @param pDevExt The device extension.
2992 * @param pSession The session.
2993 * @param fOrMask The capabilities to add .
2994 * @param fNotMask The capabilities to remove. Ignored in
2995 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE.
2996 * @param enmFlags Confusing operation modifier.
2997 * VBOXGUESTCAPSACQUIRE_FLAGS_NONE means to both
2998 * configure and acquire/release the capabilities.
2999 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
3000 * means only configure capabilities in the
3001 * @a fOrMask capabilities for "acquire" mode.
3002 * @param fSessionTermination Set if we're called by the session cleanup code.
3003 * This tweaks the error handling so we perform
3004 * proper session cleanup even if the host
3005 * misbehaves.
3006 *
3007 * @remarks Takes both the session and event spinlocks.
3008 */
3009static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3010 uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags,
3011 bool fSessionTermination)
3012{
3013 uint32_t fCurrentOwnedCaps;
3014 uint32_t fSessionRemovedCaps;
3015 uint32_t fSessionAddedCaps;
3016 uint32_t fOtherConflictingCaps;
3017 VMMDevReqGuestCapabilities2 *pReq = NULL;
3018 int rc;
3019
3020
3021 /*
3022 * Validate and adjust input.
3023 */
3024 if (fOrMask & ~( VMMDEV_GUEST_SUPPORTS_SEAMLESS
3025 | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING
3026 | VMMDEV_GUEST_SUPPORTS_GRAPHICS ) )
3027 {
3028 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x -- invalid fOrMask\n",
3029 pSession, fOrMask, fNotMask, enmFlags));
3030 return VERR_INVALID_PARAMETER;
3031 }
3032
3033 if ( enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
3034 && enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_NONE)
3035 {
3036 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: invalid enmFlags %d\n",
3037 pSession, fOrMask, fNotMask, enmFlags));
3038 return VERR_INVALID_PARAMETER;
3039 }
3040 Assert(!fOrMask || !fSessionTermination);
3041
3042 /* The fNotMask no need to have all values valid, invalid ones will simply be ignored. */
3043 fNotMask &= ~fOrMask;
3044
3045 /*
3046 * Preallocate a update request if we're about to do more than just configure
3047 * the capability mode.
3048 */
3049 if (enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
3050 {
3051 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3052 if (RT_SUCCESS(rc))
3053 { /* do nothing */ }
3054 else if (!fSessionTermination)
3055 {
3056 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: VbglGRAlloc failure: %Rrc\n",
3057 pSession, fOrMask, fNotMask, enmFlags, rc));
3058 return rc;
3059 }
3060 else
3061 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3062 }
3063
3064 /*
3065 * Try switch the capabilities in the OR mask into "acquire" mode.
3066 *
3067 * Note! We currently ignore anyone which may already have "set" the capabilities
3068 * in fOrMask. Perhaps not the best way to handle it, but it's simple...
3069 */
3070 RTSpinlockAcquire(pDevExt->EventSpinlock);
3071
3072 if (!(pDevExt->fSetModeGuestCaps & fOrMask))
3073 pDevExt->fAcquireModeGuestCaps |= fOrMask;
3074 else
3075 {
3076 RTSpinlockRelease(pDevExt->EventSpinlock);
3077
3078 if (pReq)
3079 VbglGRFree(&pReq->header);
3080 AssertMsgFailed(("Trying to change caps mode: %#x\n", fOrMask));
3081 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: calling caps acquire for set caps\n",
3082 pSession, fOrMask, fNotMask, enmFlags));
3083 return VERR_INVALID_STATE;
3084 }
3085
3086 /*
3087 * If we only wanted to switch the capabilities into "acquire" mode, we're done now.
3088 */
3089 if (enmFlags & VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
3090 {
3091 RTSpinlockRelease(pDevExt->EventSpinlock);
3092
3093 Assert(!pReq);
3094 Log(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: configured acquire caps: 0x%x\n",
3095 pSession, fOrMask, fNotMask, enmFlags));
3096 return VINF_SUCCESS;
3097 }
3098 Assert(pReq || fSessionTermination);
3099
3100 /*
3101 * Caller wants to acquire/release the capabilities too.
3102 *
3103 * Note! The mode change of the capabilities above won't be reverted on
3104 * failure, this is intentional.
3105 */
3106 fCurrentOwnedCaps = pSession->fAcquiredGuestCaps;
3107 fSessionRemovedCaps = fCurrentOwnedCaps & fNotMask;
3108 fSessionAddedCaps = fOrMask & ~fCurrentOwnedCaps;
3109 fOtherConflictingCaps = pDevExt->fAcquiredGuestCaps & ~fCurrentOwnedCaps;
3110 fOtherConflictingCaps &= fSessionAddedCaps;
3111
3112 if (!fOtherConflictingCaps)
3113 {
3114 if (fSessionAddedCaps)
3115 {
3116 pSession->fAcquiredGuestCaps |= fSessionAddedCaps;
3117 pDevExt->fAcquiredGuestCaps |= fSessionAddedCaps;
3118 }
3119
3120 if (fSessionRemovedCaps)
3121 {
3122 pSession->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3123 pDevExt->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3124 }
3125
3126 /*
3127 * If something changes (which is very likely), tell the host.
3128 */
3129 if (fSessionAddedCaps || fSessionRemovedCaps || pDevExt->fGuestCapsHost == UINT32_MAX)
3130 {
3131 Assert(pReq || fSessionTermination);
3132 if (pReq)
3133 {
3134 rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3135 if (RT_FAILURE(rc) && !fSessionTermination)
3136 {
3137 /* Failed, roll back. */
3138 if (fSessionAddedCaps)
3139 {
3140 pSession->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3141 pDevExt->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3142 }
3143 if (fSessionRemovedCaps)
3144 {
3145 pSession->fAcquiredGuestCaps |= fSessionRemovedCaps;
3146 pDevExt->fAcquiredGuestCaps |= fSessionRemovedCaps;
3147 }
3148
3149 RTSpinlockRelease(pDevExt->EventSpinlock);
3150 LogRel(("vgdrvAcquireSessionCapabilities: vgdrvUpdateCapabilitiesOnHostWithReqAndLock failed: rc=%Rrc\n", rc));
3151 VbglGRFree(&pReq->header);
3152 return rc;
3153 }
3154 }
3155 }
3156 }
3157 else
3158 {
3159 RTSpinlockRelease(pDevExt->EventSpinlock);
3160
3161 Log(("vgdrvAcquireSessionCapabilities: Caps %#x were busy\n", fOtherConflictingCaps));
3162 VbglGRFree(&pReq->header);
3163 return VERR_RESOURCE_BUSY;
3164 }
3165
3166 RTSpinlockRelease(pDevExt->EventSpinlock);
3167 if (pReq)
3168 VbglGRFree(&pReq->header);
3169
3170 /*
3171 * If we added a capability, check if that means some other thread in our
3172 * session should be unblocked because there are events pending.
3173 *
3174 * HACK ALERT! When the seamless support capability is added we generate a
3175 * seamless change event so that the ring-3 client can sync with
3176 * the seamless state. Although this introduces a spurious
3177 * wakeups of the ring-3 client, it solves the problem of client
3178 * state inconsistency in multiuser environment (on Windows).
3179 */
3180 if (fSessionAddedCaps)
3181 {
3182 uint32_t fGenFakeEvents = 0;
3183 if (fSessionAddedCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3184 fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3185
3186 RTSpinlockAcquire(pDevExt->EventSpinlock);
3187 if (fGenFakeEvents || pDevExt->f32PendingEvents)
3188 vgdrvDispatchEventsLocked(pDevExt, fGenFakeEvents);
3189 RTSpinlockRelease(pDevExt->EventSpinlock);
3190
3191#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3192 VGDrvCommonWaitDoWakeUps(pDevExt);
3193#endif
3194 }
3195
3196 return VINF_SUCCESS;
3197}
3198
3199
3200/**
3201 * Handle VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE.
3202 *
3203 * @returns VBox status code.
3204 *
3205 * @param pDevExt The device extension.
3206 * @param pSession The session.
3207 * @param pAcquire The request.
3208 */
3209static int vgdrvIoCtl_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestCapsAquire *pAcquire)
3210{
3211 int rc;
3212 LogFlow(("VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE: or=%#x not=%#x flags=%#x\n",
3213 pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags));
3214
3215 rc = vgdrvAcquireSessionCapabilities(pDevExt, pSession, pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags,
3216 false /*fSessionTermination*/);
3217 if (RT_FAILURE(rc))
3218 LogRel(("VGDrvCommonIoCtl: GUEST_CAPS_ACQUIRE failed rc=%Rrc\n", rc));
3219 pAcquire->rc = rc;
3220 return VINF_SUCCESS;
3221}
3222
3223
/**
 * Sets the guest capabilities for a session.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   fOrMask             The capabilities to add.
 * @param   fNotMask            The capabilities to remove.
 * @param   fSessionTermination Set if we're called by the session cleanup code.
 *                              This tweaks the error handling so we perform
 *                              proper session cleanup even if the host
 *                              misbehaves.
 *
 * @remarks Takes the session spinlock.
 */
static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                       uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
{
    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     */
    VMMDevReqGuestCapabilities2 *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
    if (RT_SUCCESS(rc))
    { /* nothing */ }
    else if (!fSessionTermination)
    {
        LogRel(("vgdrvSetSessionCapabilities: VbglGRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
    /*
     * Capabilities in "acquire" mode cannot be set via this API.
     * (Acquire mode is only used on windows at the time of writing.)
     */
    if (!(fOrMask & pDevExt->fAcquireModeGuestCaps))
#endif
    {
        /*
         * Apply the changes to the session mask.
         */
        uint32_t fChanged;
        uint32_t fPrevious = pSession->fCapabilities;
        pSession->fCapabilities |= fOrMask;
        pSession->fCapabilities &= ~fNotMask;

        /*
         * If anything actually changed, update the global usage counters.
         */
        fChanged = fPrevious ^ pSession->fCapabilities;
        if (fChanged)
        {
            bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, fPrevious,
                                                           pDevExt->cSessions, "SetGuestCapsTracker");

            /*
             * If there are global changes, update the capabilities on the host.
             * (fGuestCapsHost == UINT32_MAX means the host state is unknown and
             * must be resent regardless of whether the global mask changed.)
             */
            if (fGlobalChange || pDevExt->fGuestCapsHost == UINT32_MAX)
            {
                Assert(pReq || fSessionTermination);
                if (pReq)
                {
                    rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);

                    /* On failure, roll back (unless it's session termination time).
                       The tracker change is undone by applying the same change
                       relative to the new (current) session mask. */
                    if (RT_FAILURE(rc) && !fSessionTermination)
                    {
                        vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, pSession->fCapabilities,
                                                   pDevExt->cSessions, "SetGuestCapsTracker");
                        pSession->fCapabilities = fPrevious;
                    }
                }
            }
        }
    }
#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
    else
        rc = VERR_RESOURCE_BUSY;
#endif

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglGRFree(&pReq->header);
    return rc;
}
3316
3317
3318/**
3319 * Handle VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES.
3320 *
3321 * @returns VBox status code.
3322 *
3323 * @param pDevExt The device extension.
3324 * @param pSession The session.
3325 * @param pInfo The request.
3326 */
3327static int vgdrvIoCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestSetCapabilitiesInfo *pInfo)
3328{
3329 int rc;
3330 LogFlow(("VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES: or=%#x not=%#x\n", pInfo->u32OrMask, pInfo->u32NotMask));
3331
3332 if (!((pInfo->u32OrMask | pInfo->u32NotMask) & ~VMMDEV_GUEST_CAPABILITIES_MASK))
3333 rc = vgdrvSetSessionCapabilities(pDevExt, pSession, pInfo->u32OrMask, pInfo->u32NotMask, false /*fSessionTermination*/);
3334 else
3335 rc = VERR_INVALID_PARAMETER;
3336
3337 return rc;
3338}
3339
3340/** @} */
3341
3342
3343/**
3344 * Common IOCtl for user to kernel and kernel to kernel communication.
3345 *
3346 * This function only does the basic validation and then invokes
3347 * worker functions that takes care of each specific function.
3348 *
3349 * @returns VBox status code.
3350 *
3351 * @param iFunction The requested function.
3352 * @param pDevExt The device extension.
3353 * @param pSession The client session.
3354 * @param pvData The input/output data buffer. Can be NULL depending on the function.
3355 * @param cbData The max size of the data buffer.
3356 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
3357 */
3358int VGDrvCommonIoCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3359 void *pvData, size_t cbData, size_t *pcbDataReturned)
3360{
3361 int rc;
3362 LogFlow(("VGDrvCommonIoCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
3363 iFunction, pDevExt, pSession, pvData, cbData));
3364
3365 /*
3366 * Make sure the returned data size is set to zero.
3367 */
3368 if (pcbDataReturned)
3369 *pcbDataReturned = 0;
3370
3371 /*
3372 * Define some helper macros to simplify validation.
3373 */
3374#define CHECKRET_RING0(mnemonic) \
3375 do { \
3376 if (pSession->R0Process != NIL_RTR0PROCESS) \
3377 { \
3378 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
3379 pSession->Process, (uintptr_t)pSession->R0Process)); \
3380 return VERR_PERMISSION_DENIED; \
3381 } \
3382 } while (0)
3383#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
3384 do { \
3385 if (cbData < (cbMin)) \
3386 { \
3387 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
3388 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
3389 return VERR_BUFFER_OVERFLOW; \
3390 } \
3391 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
3392 { \
3393 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
3394 return VERR_INVALID_POINTER; \
3395 } \
3396 } while (0)
3397#define CHECKRET_SIZE(mnemonic, cb) \
3398 do { \
3399 if (cbData != (cb)) \
3400 { \
3401 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
3402 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
3403 return VERR_BUFFER_OVERFLOW; \
3404 } \
3405 if ((cb) != 0 && !VALID_PTR(pvData)) \
3406 { \
3407 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
3408 return VERR_INVALID_POINTER; \
3409 } \
3410 } while (0)
3411
3412
3413 /*
3414 * Deal with variably sized requests first.
3415 */
3416 rc = VINF_SUCCESS;
3417 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
3418 {
3419 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
3420 rc = vgdrvIoCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
3421 }
3422#ifdef VBOX_WITH_HGCM
3423 /*
3424 * These ones are a bit tricky.
3425 */
3426 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
3427 {
3428 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
3429 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
3430 rc = vgdrvIoCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
3431 fInterruptible, false /*f32bit*/, false /* fUserData */,
3432 0, cbData, pcbDataReturned);
3433 }
3434 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
3435 {
3436 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
3437 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
3438 rc = vgdrvIoCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
3439 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
3440 false /*f32bit*/, false /* fUserData */,
3441 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
3442 }
3443 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
3444 {
3445 bool fInterruptible = true;
3446 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
3447 rc = vgdrvIoCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
3448 fInterruptible, false /*f32bit*/, true /* fUserData */,
3449 0, cbData, pcbDataReturned);
3450 }
3451# ifdef RT_ARCH_AMD64
3452 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
3453 {
3454 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
3455 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
3456 rc = vgdrvIoCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
3457 fInterruptible, true /*f32bit*/, false /* fUserData */,
3458 0, cbData, pcbDataReturned);
3459 }
3460 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
3461 {
3462 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
3463 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
3464 rc = vgdrvIoCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
3465 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
3466 true /*f32bit*/, false /* fUserData */,
3467 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
3468 }
3469# endif
3470#endif /* VBOX_WITH_HGCM */
3471 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
3472 {
3473 CHECKRET_MIN_SIZE("LOG", 1);
3474 rc = vgdrvIoCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned, pSession->fUserSession);
3475 }
3476 else
3477 {
3478 switch (iFunction)
3479 {
3480 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
3481 CHECKRET_RING0("GETVMMDEVPORT");
3482 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
3483 rc = vgdrvIoCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
3484 break;
3485
3486#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
3487 case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
3488 CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
3489 CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
3490 rc = vgdrvIoCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
3491 break;
3492#endif
3493
3494 case VBOXGUEST_IOCTL_WAITEVENT:
3495 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
3496 rc = vgdrvIoCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
3497 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
3498 break;
3499
3500 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
3501 CHECKRET_SIZE("CANCEL_ALL_WAITEVENTS", 0);
3502 rc = vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
3503 break;
3504
3505 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
3506 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
3507 rc = vgdrvIoCtl_CtlFilterMask(pDevExt, pSession, (VBoxGuestFilterMaskInfo *)pvData);
3508 break;
3509
3510#ifdef VBOX_WITH_HGCM
3511 case VBOXGUEST_IOCTL_HGCM_CONNECT:
3512# ifdef RT_ARCH_AMD64
3513 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
3514# endif
3515 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
3516 rc = vgdrvIoCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
3517 break;
3518
3519 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
3520# ifdef RT_ARCH_AMD64
3521 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
3522# endif
3523 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
3524 rc = vgdrvIoCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
3525 break;
3526#endif /* VBOX_WITH_HGCM */
3527
3528 case VBOXGUEST_IOCTL_CHECK_BALLOON:
3529 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
3530 rc = vgdrvIoCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
3531 break;
3532
3533 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
3534 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
3535 rc = vgdrvIoCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
3536 break;
3537
3538 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
3539 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
3540 rc = vgdrvIoCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
3541 break;
3542
3543 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
3544 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
3545 rc = vgdrvIoCtl_SetMouseStatus(pDevExt, pSession, *(uint32_t *)pvData);
3546 break;
3547
3548#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
3549 case VBOXGUEST_IOCTL_DPC_LATENCY_CHECKER:
3550 CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
3551 rc = VGDrvNtIOCtl_DpcLatencyChecker();
3552 break;
3553#endif
3554
3555 case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
3556 CHECKRET_SIZE("GUEST_CAPS_ACQUIRE", sizeof(VBoxGuestCapsAquire));
3557 rc = vgdrvIoCtl_GuestCapsAcquire(pDevExt, pSession, (VBoxGuestCapsAquire *)pvData);
3558 *pcbDataReturned = sizeof(VBoxGuestCapsAquire);
3559 break;
3560
3561 case VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES:
3562 CHECKRET_MIN_SIZE("SET_GUEST_CAPABILITIES", sizeof(VBoxGuestSetCapabilitiesInfo));
3563 rc = vgdrvIoCtl_SetCapabilities(pDevExt, pSession, (VBoxGuestSetCapabilitiesInfo *)pvData);
3564 break;
3565
3566 default:
3567 {
3568 LogRel(("VGDrvCommonIoCtl: Unknown request iFunction=%#x stripped size=%#x\n",
3569 iFunction, VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
3570 rc = VERR_NOT_SUPPORTED;
3571 break;
3572 }
3573 }
3574 }
3575
3576 LogFlow(("VGDrvCommonIoCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
3577 return rc;
3578}
3579
3580
/**
 * Used by VGDrvCommonISR as well as the acquire guest capability code.
 *
 * @returns VINF_SUCCESS on success. On failure, ORed together
 *          RTSemEventMultiSignal errors (completes processing despite errors).
 * @param   pDevExt             The VBoxGuest device extension.
 * @param   fEvents             The events to dispatch.
 *
 * @remarks Caller holds VBOXGUESTDEVEXT::EventSpinlock (both call sites in
 *          this file acquire it first); the wait list and the pending-event
 *          mask are manipulated without further locking here.
 */
static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = VINF_SUCCESS;

    /* Merge in events that were posted earlier with nobody waiting for them. */
    fEvents |= pDevExt->f32PendingEvents;

    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        uint32_t fHandledEvents = pWait->fReqEvents & fEvents;
        if (    fHandledEvents != 0
            && !pWait->fResEvents)
        {
            /* Does this one wait on any of the events we're dispatching?  We do a quick
               check first, then deal with VBOXGUEST_ACQUIRE_STYLE_EVENTS as applicable. */
            if (fHandledEvents & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
                /* Acquire-mode events may only be seen by the session owning the caps. */
                fHandledEvents &= vgdrvGetAllowedEventMaskForSession(pDevExt, pWait->pSession);
            if (fHandledEvents)
            {
                /* Hand the events to this waiter and consume them, so each
                   event bit wakes at most one waiter. */
                pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
                fEvents &= ~pWait->fResEvents;
                RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                /* Defer the semaphore signalling; see VGDrvCommonWaitDoWakeUps. */
                RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
                RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                rc |= RTSemEventMultiSignal(pWait->Event);
#endif
                /* All dispatched events consumed - no point scanning further. */
                if (!fEvents)
                    break;
            }
        }
    }

    /* Park any leftover events for future waiters. */
    ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
    return rc;
}
3627
3628
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VGDrvCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    /* Preallocated acknowledge request; its physical address is in PhysIrqAckEvents. */
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    bool fMousePositionChanged = false;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowlegde events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR; /* Poison so we can tell whether the host filled it in. */
        pReq->events    = 0;
        ASMCompilerBarrier();
        /* Writing the physical address of the request to the port makes the
           host process it and fill in rc/events in place. */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;

            Log3(("VGDrvCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
#ifndef RT_OS_WINDOWS
                if (pDevExt->MouseNotifyCallback.pfnNotify)
                    pDevExt->MouseNotifyCallback.pfnNotify(pDevExt->MouseNotifyCallback.pvUser);
#endif
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                PVBOXGUESTWAIT pWait;
                PVBOXGUESTWAIT pSafe;
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    /* Only waiters whose request the host has marked done get woken. */
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             */
            rc |= vgdrvDispatchEventsLocked(pDevExt, fEvents);
        }
        else /* something is serious wrong... */
            Log(("VGDrvCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        Log3(("VGDrvCommonISR: not ours\n"));

    RTSpinlockRelease(pDevExt->EventSpinlock);

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     *       care of it.  Same on darwin, doing it in the work loop callback.
     */
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VGDrvNativeISRMousePollEvent(pDevExt);
    }

    /* rc only accumulates RTSemEventMultiSignal failures, which are not expected. */
    Assert(rc == 0);
    NOREF(rc);
    return fOurIrq;
}
3748
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette