VirtualBox

source: vbox/trunk/src/VBox/Additions/os2/VBoxGuest/VBoxGuest.cpp@4753

Last change on this file since 4753 was 4071, checked in by vboxsync, 17 years ago

Biggest check-in ever. New source code headers for all (C) innotek files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 47.7 KB
1/** $Id: */
2/** @file
3 * VBoxGuest - Guest Additions Driver.
4 */
5
6/*
7 * Copyright (C) 2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_DEFAULT
24#include "VBoxGuestInternal.h"
25#include <VBox/VBoxDev.h> /* for VMMDEV_RAM_SIZE */
26#include <VBox/log.h>
27#include <iprt/mem.h>
28#include <iprt/time.h>
29#include <iprt/memobj.h>
30#include <iprt/asm.h>
31#include <iprt/string.h>
32#include <iprt/process.h>
33#include <iprt/assert.h>
34#include <iprt/param.h>
35#ifdef VBOX_HGCM
36# include <iprt/thread.h>
37#endif
38
39
40/*******************************************************************************
41* Internal Functions *
42*******************************************************************************/
43#ifdef VBOX_HGCM
44static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
45#endif
46
47
48
49/**
50 * Reserves memory in which the VMM can relocate any guest mappings
51 * that are floating around.
52 *
53 * This operation is a little bit tricky since the VMM might not accept
54 * just any address because of address clashes between the three contexts
55 * it operates in, so use a small stack to perform this operation.
56 *
57 * @returns VBox status code (ignored).
58 * @param pDevExt The device extension.
59 */
60static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
61{
62 /** @todo implement this using RTR0MemObjReserveKernel() (it needs to be implemented everywhere too). */
63 return VINF_SUCCESS;
64}
65
66
67/**
68 * Initializes the interrupt filter mask.
69 *
70 * This will ASSUME that we're the ones in charge of the mask, so
71 * we'll simply clear all bits we don't set.
72 *
73 * @returns VBox status code (ignored).
74 * @param pDevExt The device extension.
75 * @param fMask The new mask.
76 */
77static int vboxGuestInitFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
78{
79 VMMDevCtlGuestFilterMask *pReq;
80 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
81 if (RT_SUCCESS(rc))
82 {
83 pReq->u32OrMask = fMask;
84 pReq->u32NotMask = ~fMask; /* It's an AND mask. */
85 rc = VbglGRPerform(&pReq->header);
86 if ( RT_FAILURE(rc)
87 || RT_FAILURE(pReq->header.rc))
88 LogRel(("vboxGuestInitFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
89 rc, pReq->header.rc));
90 VbglGRFree(&pReq->header);
91 }
92 return rc;
93}
94
95
96/**
97 * Report guest information to the VMMDev.
98 *
99 * @returns VBox status code.
100 * @param pDevExt The device extension.
101 * @param enmOSType The OS type to report.
102 */
103static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
104{
105 VMMDevReportGuestInfo *pReq;
106 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
107 if (RT_SUCCESS(rc))
108 {
109 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
110 pReq->guestInfo.osType = enmOSType;
111 rc = VbglGRPerform(&pReq->header);
112 if ( RT_FAILURE(rc)
113 || RT_FAILURE(pReq->header.rc))
114 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
115 rc, pReq->header.rc));
116 VbglGRFree(&pReq->header);
117 }
118 return rc;
119}
120
121
122
123/**
124 * Maps the VMMDev memory.
125 *
126 * @returns VBox status code.
127 * @retval VERR_VERSION_MISMATCH The VMMDev memory didn't meet our expectations.
128 *
129 * @param pDevExt The device extension.
130 */
131static int vboxGuestInitMapMemory(PVBOXGUESTDEVEXT pDevExt)
132{
133 const RTCCPHYS PhysMMIOBase = pDevExt->PhysMMIOBase;
134
135 /*
136 * Create a physical memory object for it.
137 *
138 * Since we don't know the actual size (at least OS/2 doesn't tell us), we
139 * make a qualified guess using VMMDEV_RAM_SIZE.
140 */
141 size_t cb = RT_ALIGN_Z(VMMDEV_RAM_SIZE, PAGE_SIZE);
142 int rc = RTR0MemObjEnterPhys(&pDevExt->MemObjMMIO, PhysMMIOBase, cb);
143 if (RT_FAILURE(rc))
144 {
145 cb = _4K;
146 rc = RTR0MemObjEnterPhys(&pDevExt->MemObjMMIO, PhysMMIOBase, cb);
147 }
148 if (RT_FAILURE(rc))
149 {
150 Log(("vboxGuestInitMapMemory: RTR0MemObjEnterPhys(,%RCp,%zx) -> %Rrc\n",
151 PhysMMIOBase, cb, rc));
152 return rc;
153 }
154
155 /*
156 * Map the object into kernel space.
157 *
158 * We want a normal mapping with normal caching, which is good in two ways. First,
159 * the API doesn't have any flags indicating how the mapping should be cached.
160 * Second, PGM doesn't necessarily respect the cache/write-through bits for
161 * normal RAM anyway.
162 */
163 rc = RTR0MemObjMapKernel(&pDevExt->MemMapMMIO, pDevExt->MemObjMMIO, (void *)-1, 0,
164 RTMEM_PROT_READ | RTMEM_PROT_WRITE);
165 if (RT_SUCCESS(rc))
166 {
167 /*
168 * Validate the VMM memory.
169 */
170 VMMDevMemory *pVMMDev = (VMMDevMemory *)RTR0MemObjAddress(pDevExt->MemMapMMIO);
171 Assert(pVMMDev);
172 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
173 && pVMMDev->u32Size >= 32 /* just for checking sanity */)
174 {
175 /*
176 * Did we hit the correct size? If not, we'll have to
177 * redo the mapping using the correct size.
178 */
179 if (RT_ALIGN_32(pVMMDev->u32Size, PAGE_SIZE) == cb)
180 {
181 pDevExt->pVMMDevMemory = pVMMDev;
182 return VINF_SUCCESS;
183 }
184
185 Log(("vboxGuestInitMapMemory: Actual size %#RX32 (tried %#zx)\n", pVMMDev->u32Size, cb));
186 cb = RT_ALIGN_32(pVMMDev->u32Size, PAGE_SIZE);
187
188 rc = RTR0MemObjFree(pDevExt->MemObjMMIO, true); AssertRC(rc);
189 pDevExt->MemObjMMIO = pDevExt->MemMapMMIO = NIL_RTR0MEMOBJ;
190
191 rc = RTR0MemObjEnterPhys(&pDevExt->MemObjMMIO, PhysMMIOBase, cb);
192 if (RT_SUCCESS(rc))
193 {
194 rc = RTR0MemObjMapKernel(&pDevExt->MemMapMMIO, pDevExt->MemObjMMIO, (void *)-1, 0,
195 RTMEM_PROT_READ | RTMEM_PROT_WRITE);
196 if (RT_SUCCESS(rc))
197 {
198 pDevExt->pVMMDevMemory = (VMMDevMemory *)RTR0MemObjAddress(pDevExt->MemMapMMIO);
199 Assert(pDevExt->pVMMDevMemory);
200 return VINF_SUCCESS;
201 }
202
203 Log(("vboxGuestInitMapMemory: RTR0MemObjMapKernel [%RCp,%zx] -> %Rrc (2nd)\n",
204 PhysMMIOBase, cb, rc));
205 }
206 else
207 Log(("vboxGuestInitMapMemory: RTR0MemObjEnterPhys(,%RCp,%zx) -> %Rrc (2nd)\n",
208 PhysMMIOBase, cb, rc));
209 }
210 else
211 {
212 rc = VERR_VERSION_MISMATCH;
213 LogRel(("vboxGuestInitMapMemory: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32\n",
214 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size));
215 }
216
217 }
218 else
219 Log(("vboxGuestInitMapMemory: RTR0MemObjMapKernel [%RCp,%zx] -> %Rrc\n",
220 PhysMMIOBase, cb, rc));
221
222 int rc2 = RTR0MemObjFree(pDevExt->MemObjMMIO, true); AssertRC(rc2);
223 return rc;
224}
225
226
227/**
228 * Initializes the VBoxGuest device extension when the
229 * device driver is loaded.
230 *
231 * The native code locates the VMMDev on the PCI bus and retrieves
232 * the MMIO and I/O port ranges; this function will take care of
233 * mapping the MMIO memory (if present). Upon successful return
234 * the native code should set up the interrupt handler.
235 *
236 * @returns VBox status code.
237 *
238 * @param pDevExt The device extension. Allocated by the native code.
239 * @param IOPortBase The base of the I/O port range.
240 * @param PhysMMIOBase The base of the MMIO memory range.
241 * This is optional, pass NIL_RTCCPHYS if not present.
242 * @param enmOSType The guest OS type to report to the VMMDev.
243 */
244int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase, RTCCPHYS PhysMMIOBase,
245 VBOXOSTYPE enmOSType)
246{
247 int rc, rc2;
248
249 /*
250 * Initialize the data.
251 */
252 pDevExt->PhysMMIOBase = PhysMMIOBase;
253 pDevExt->IOPortBase = IOPortBase;
254 pDevExt->MemObjMMIO = NIL_RTR0MEMOBJ;
255 pDevExt->pVMMDevMemory = NULL;
256 pDevExt->pIrqAckEvents = NULL;
257 pDevExt->WaitList.pHead = NULL;
258 pDevExt->WaitList.pTail = NULL;
259#ifdef VBOX_HGCM
260 pDevExt->HGCMWaitList.pHead = NULL;
261 pDevExt->HGCMWaitList.pTail = NULL;
262#endif
263 pDevExt->FreeList.pHead = NULL;
264 pDevExt->FreeList.pTail = NULL;
265 pDevExt->f32PendingEvents = 0;
266 pDevExt->u32ClipboardClientId = 0;
267
268 /*
269 * If there is an MMIO region map it into kernel memory.
270 */
271 if (PhysMMIOBase != NIL_RTCCPHYS)
272 {
273 AssertMsgReturn(PhysMMIOBase >= _1M, ("%RCp\n", PhysMMIOBase), VERR_INTERNAL_ERROR);
274 rc = vboxGuestInitMapMemory(pDevExt);
275 if (RT_SUCCESS(rc))
276 Log(("VBoxGuestInitDevExt: VMMDevMemory: phys=%RCp mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
277 PhysMMIOBase, pDevExt->pVMMDevMemory, pDevExt->pVMMDevMemory->u32Size,
278 RT_ALIGN_32(pDevExt->pVMMDevMemory->u32Size, PAGE_SIZE), pDevExt->pVMMDevMemory->u32Version));
279 else if (rc == VERR_VERSION_MISMATCH)
280 Assert(!pDevExt->pVMMDevMemory); /* We can live without it (I think). */
281 else
282 return rc;
283 }
284
285 /*
286 * Create the wait and session spinlocks.
287 */
288 rc = RTSpinlockCreate(&pDevExt->WaitSpinlock);
289 if (RT_SUCCESS(rc))
290 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
291 if (RT_FAILURE(rc))
292 {
293 Log(("VBoxGuestInitDevExt: failed to create spinlock, rc=%d!\n", rc));
294 if (pDevExt->WaitSpinlock != NIL_RTSPINLOCK)
295 RTSpinlockDestroy(pDevExt->WaitSpinlock);
296 rc2 = RTR0MemObjFree(pDevExt->MemObjMMIO, true); AssertRC(rc2);
297 return rc;
298 }
299
300 /*
301 * Initialize the guest library and report the guest info back to VMMDev,
302 * set the interrupt control filter mask, and fixate the guest mappings
303 * made by the VMM.
304 */
305 rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
306 if (RT_SUCCESS(rc))
307 {
308 rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
309 if (RT_SUCCESS(rc))
310 {
311 rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
312 if (RT_SUCCESS(rc))
313 {
314#ifdef VBOX_HGCM
315 rc = vboxGuestInitFilterMask(pDevExt, VMMDEV_EVENT_HGCM);
316#else
317 rc = vboxGuestInitFilterMask(pDevExt, 0);
318#endif
319 if (RT_SUCCESS(rc))
320 {
321 vboxGuestInitFixateGuestMappings(pDevExt);
322 Log(("VBoxGuestInitDevExt: returns success\n"));
323 return VINF_SUCCESS;
324 }
325 }
326
327 /* failure cleanup */
328 }
329 else
330 Log(("VBoxGuestInitDevExt: VbglGRAlloc failed, rc=%Rrc\n", rc));
331
332 VbglTerminate();
333 }
334 else
335 Log(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));
336
337 rc2 = RTR0MemObjFree(pDevExt->MemObjMMIO, true); AssertRC(rc2);
338 rc2 = RTSpinlockDestroy(pDevExt->WaitSpinlock); AssertRC(rc2);
339 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
340 return rc; /* (failed) */
341}
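
/*
 * Illustration only, not part of the driver: a minimal sketch of the init
 * sequence an OS specific wrapper is expected to perform, based on the
 * contract described in the comments above. The vgdrvNativeInstallIrqHandler
 * name and the VBOXOSTYPE_OS2 value are illustrative assumptions, not code
 * from this file.
 *
 *     static VBOXGUESTDEVEXT g_DevExt;
 *
 *     int vgdrvNativeAttach(uint16_t uIOPortBase, RTCCPHYS PhysMMIOBase)
 *     {
 *         // Map the VMMDev memory and report the guest to the host.
 *         int rc = VBoxGuestInitDevExt(&g_DevExt, uIOPortBase, PhysMMIOBase, VBOXOSTYPE_OS2);
 *         if (RT_FAILURE(rc))
 *             return rc;
 *
 *         // Only after successful init: install the IRQ handler that
 *         // forwards to VBoxGuestCommonISR(&g_DevExt).
 *         rc = vgdrvNativeInstallIrqHandler();
 *         if (RT_FAILURE(rc))
 *             VBoxGuestDeleteDevExt(&g_DevExt);
 *         return rc;
 *     }
 */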
342
343
344/**
345 * Deletes all the items in a wait chain.
346 * @param pList The wait list whose entries should be deleted.
347 */
348static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
349{
350 while (pList->pHead)
351 {
352 PVBOXGUESTWAIT pWait = pList->pHead;
353 pList->pHead = pWait->pNext;
354
355 pWait->pNext = NULL;
356 pWait->pPrev = NULL;
357 int rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
358 pWait->Event = NIL_RTSEMEVENTMULTI;
359 RTMemFree(pWait);
360 }
361 pList->pHead = NULL;
362 pList->pTail = NULL;
363}
364
365
366/**
367 * Destroys the VBoxGuest device extension.
368 *
369 * The native code should call this before the driver is unloaded,
370 * but don't call this on shutdown.
371 *
372 * @param pDevExt The device extension.
373 */
374void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
375{
376 int rc2;
377 Log(("VBoxGuestDeleteDevExt:\n"));
378
379 rc2 = RTSpinlockDestroy(pDevExt->WaitSpinlock); AssertRC(rc2);
380
381 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
382#ifdef VBOX_HGCM
383 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
384#endif
385 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
386
387 VbglTerminate();
388
389 rc2 = RTR0MemObjFree(pDevExt->MemObjMMIO, true); AssertRC(rc2);
390 pDevExt->MemObjMMIO = pDevExt->MemMapMMIO = NIL_RTR0MEMOBJ;
391 pDevExt->pVMMDevMemory = NULL;
392
393 pDevExt->IOPortBase = 0;
394 pDevExt->pIrqAckEvents = NULL;
395}
396
397
398/**
399 * Creates a VBoxGuest user session.
400 *
401 * The native code calls this when a ring-3 client opens the device.
402 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
403 *
404 * @returns VBox status code.
405 * @param pDevExt The device extension.
406 * @param ppSession Where to store the session on success.
407 */
408int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
409{
410 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
411 if (RT_UNLIKELY(!pSession))
412 {
413 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
414 return VERR_NO_MEMORY;
415 }
416
417 pSession->Process = RTProcSelf();
418 pSession->R0Process = RTR0ProcHandleSelf();
419 pSession->pDevExt = pDevExt;
420
421 *ppSession = pSession;
422 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
423 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
424 return VINF_SUCCESS;
425}
426
427
428/**
429 * Creates a VBoxGuest kernel session.
430 *
431 * The native code calls this when a ring-0 client connects to the device.
432 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
433 *
434 * @returns VBox status code.
435 * @param pDevExt The device extension.
436 * @param ppSession Where to store the session on success.
437 */
438int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
439{
440 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
441 if (RT_UNLIKELY(!pSession))
442 {
443 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
444 return VERR_NO_MEMORY;
445 }
446
447 pSession->Process = NIL_RTPROCESS;
448 pSession->R0Process = NIL_RTR0PROCESS;
449 pSession->pDevExt = pDevExt;
450
451 *ppSession = pSession;
452 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
453 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
454 return VINF_SUCCESS;
455}
456
457
458
459/**
460 * Closes a VBoxGuest session.
461 *
462 * @param pDevExt The device extension.
463 * @param pSession The session to close (and free).
464 */
465void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
466{
467 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
468 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
469
470#ifdef VBOX_HGCM
471 for (unsigned i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
472 if (pSession->aHGCMClientIds[i])
473 {
474 VBoxGuestHGCMDisconnectInfo Info;
475 Info.result = 0;
476 Info.u32ClientID = pSession->aHGCMClientIds[i];
477 pSession->aHGCMClientIds[i] = 0;
478 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
479 VbglHGCMDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
480 }
481#endif
482
483 pSession->pDevExt = NULL;
484 pSession->Process = NIL_RTPROCESS;
485 pSession->R0Process = NIL_RTR0PROCESS;
486 RTMemFree(pSession);
487}
488
489
490/**
491 * Links the wait-for-event entry into the tail of the given list.
492 *
493 * @param pList The list to link it into.
494 * @param pWait The wait for event entry to append.
495 */
496DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
497{
498 const PVBOXGUESTWAIT pTail = pList->pTail;
499 pWait->pNext = NULL;
500 pWait->pPrev = pTail;
501 if (pTail)
502 pTail->pNext = pWait;
503 else
504 pList->pHead = pWait;
505 pList->pTail = pWait;
506}
507
508
509/**
510 * Unlinks the wait-for-event entry.
511 *
512 * @param pList The list to unlink it from.
513 * @param pWait The wait for event entry to unlink.
514 */
515DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
516{
517 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
518 const PVBOXGUESTWAIT pNext = pWait->pNext;
519 if (pNext)
520 pNext->pPrev = pPrev;
521 else
522 pList->pTail = pPrev;
523 if (pPrev)
524 pPrev->pNext = pNext;
525 else
526 pList->pHead = pNext;
527}
528
529
530/**
531 * Allocates a wait-for-event entry.
532 *
533 * @returns The wait-for-event entry.
534 * @param pDevExt The device extension.
535 */
536static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt)
537{
538 /*
539 * Allocate it one way or the other.
540 */
541 PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
542 if (pWait)
543 {
544 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
545 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
546
547 pWait = pDevExt->FreeList.pTail;
548 if (pWait)
549 VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);
550
551 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
552 }
553 if (!pWait)
554 {
555 static unsigned s_cErrors = 0;
556
557 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
558 if (!pWait)
559 {
560 if (s_cErrors++ < 32)
561 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
562 return NULL;
563 }
564
565 int rc = RTSemEventMultiCreate(&pWait->Event);
566 if (RT_FAILURE(rc))
567 {
568 if (s_cErrors++ < 32)
569 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
570 RTMemFree(pWait);
571 return NULL;
572 }
573 }
574
575 /*
576 * Zero the members just as a precaution.
577 */
578 pWait->pNext = NULL;
579 pWait->pPrev = NULL;
580 pWait->fReqEvents = 0;
581 pWait->fResEvents = 0;
582#ifdef VBOX_HGCM
583 pWait->pHGCMReq = NULL;
584#endif
585 RTSemEventMultiReset(pWait->Event);
586 return pWait;
587}
588
589
590/**
591 * Frees the wait-for-event entry.
592 * The caller must own the wait spinlock!
593 *
594 * @param pDevExt The device extension.
595 * @param pWait The wait-for-event entry to free.
596 */
597static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
598{
599 pWait->fReqEvents = 0;
600 pWait->fResEvents = 0;
601#ifdef VBOX_HGCM
602 pWait->pHGCMReq = NULL;
603#endif
604 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
605}
606
607
608/**
609 * Frees the wait-for-event entry.
610 *
611 * @param pDevExt The device extension.
612 * @param pWait The wait-for-event entry to free.
613 */
614static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
615{
616 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
617 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
618 VBoxGuestWaitFreeLocked(pDevExt, pWait);
619 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
620}
621
622
623/**
624 * Implements the fast (no input or output) type of IOCtls.
625 *
626 * This is currently just a placeholder stub inherited from the support driver code.
627 *
628 * @returns VBox status code.
629 * @param iFunction The IOCtl function number.
630 * @param pDevExt The device extension.
631 * @param pSession The session.
632 */
633int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
634{
635 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
636
637 return VERR_NOT_SUPPORTED;
638}
639
640
641
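/**
 * Handles the GETVMMDEVPORT IOCtl; returns the VMMDev I/O port base and the
 * kernel mapping of the VMMDev memory to the (ring-0 only) caller.
 *
 * @returns VBox status code.
 * @param pDevExt The device extension.
 * @param pInfo Where to store the port and memory mapping info.
 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */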
642static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
643{
644 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
645 pInfo->portAddress = pDevExt->IOPortBase;
646 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
647 if (pcbDataReturned)
648 *pcbDataReturned = sizeof(*pInfo);
649 return VINF_SUCCESS;
650}
651
652
653/**
654 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
655 * The caller enters the spinlock; we may or may not leave it.
656 *
657 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
658 */
659DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
660 int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
661{
662 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
663 if (fMatches)
664 {
665 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
666 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, pTmp);
667
668 pInfo->u32EventFlagsOut = fMatches;
669 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
670 if (fReqEvents & ~((uint32_t)1 << iEvent))
671 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
672 else
673 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
674 return VINF_SUCCESS;
675 }
676 return VERR_TIMEOUT;
677}
678
679
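/**
 * Handles the WAITEVENT IOCtl; waits for one of the requested events to be
 * posted by the interrupt handler or for the given timeout to expire.
 *
 * @returns VBox status code.
 * @param pDevExt The device extension.
 * @param pInfo The wait request; also used to return the result.
 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
 * @param fInterruptible Whether the wait can be interrupted (ring-3 callers).
 */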
680static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned,
681 bool fInterruptible)
682{
683 pInfo->u32EventFlagsOut = 0;
684 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
685 if (pcbDataReturned)
686 *pcbDataReturned = sizeof(*pInfo);
687
688 /*
689 * Copy and verify the input mask.
690 */
691 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
692 int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
693 if (RT_UNLIKELY(iEvent < 0))
694 {
695 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
696 return VERR_INVALID_PARAMETER;
697 }
698
699 /*
700 * Check the condition up front, before doing the wait-for-event allocations.
701 */
702 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
703 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
704 int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
705 if (rc == VINF_SUCCESS)
706 return rc;
707 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
708
709 if (!pInfo->u32TimeoutIn)
710 {
711 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
712 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
713 return VERR_TIMEOUT;
714 }
715
716 PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt);
717 if (!pWait)
718 return VERR_NO_MEMORY;
719 pWait->fReqEvents = fReqEvents;
720
721 /*
722 * We've got the wait entry now, re-enter the spinlock and check for the condition.
723 * If the wait condition is met, return.
724 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
725 */
726 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
727 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
728 if (rc == VINF_SUCCESS)
729 {
730 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
731 return rc;
732 }
733 VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
734 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
735
736 if (fInterruptible)
737 rc = RTSemEventMultiWaitNoResume(pWait->Event,
738 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
739 else
740 rc = RTSemEventMultiWait(pWait->Event,
741 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
742
743 /*
744 * There is one special case here and that's when the semaphore is
745 * destroyed upon device driver unload. This shouldn't happen of course,
746 * but in case it does, just get out of here ASAP.
747 */
748 if (rc == VERR_SEM_DESTROYED)
749 return rc;
750
751 /*
752 * Unlink the wait item and dispose of it.
753 */
754 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
755 VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
756 const uint32_t fResEvents = pWait->fResEvents;
757 VBoxGuestWaitFreeLocked(pDevExt, pWait);
758 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
759
760 /*
761 * Now deal with the return code.
762 */
763 if (fResEvents)
764 {
765 pInfo->u32EventFlagsOut = fResEvents;
766 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
767 if (fReqEvents & ~((uint32_t)1 << iEvent))
768 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
769 else
770 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
771 rc = VINF_SUCCESS;
772 }
773 else if (rc == VERR_TIMEOUT)
774 {
775 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
776 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
777 }
778 else if (rc == VERR_INTERRUPTED)
779 {
780 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
781 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
782 }
783 else
784 {
785 if (RT_SUCCESS(rc))
786 {
787 static unsigned s_cErrors = 0;
788 if (s_cErrors++ < 32)
789 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
790 rc = VERR_INTERNAL_ERROR;
791 }
792 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
793 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
794 }
795
796 return rc;
797}
798
799
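/**
 * Handles the VMMREQUEST IOCtl; copies the request into a buffer on the
 * physical memory heap, performs it via VbglGRPerform, and copies the result
 * back into the caller's buffer.
 *
 * @returns VBox status code.
 * @param pDevExt The device extension.
 * @param pReqHdr The request header (input and output buffer).
 * @param cbData The size of the data buffer.
 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */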
800static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, VMMDevRequestHeader *pReqHdr,
801 size_t cbData, size_t *pcbDataReturned)
802{
803 Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));
804
805 /*
806 * Validate the header and request size.
807 */
808 const uint32_t cbReq = pReqHdr->size;
809 const uint32_t cbMinSize = vmmdevGetRequestSize(pReqHdr->requestType);
810 if (cbReq < cbMinSize)
811 {
812 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
813 cbReq, cbMinSize, pReqHdr->requestType));
814 return VERR_INVALID_PARAMETER;
815 }
816 if (cbReq > cbData)
817 {
818 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
819 cbData, cbReq, pReqHdr->requestType));
820 return VERR_INVALID_PARAMETER;
821 }
822
823 /*
824 * Make a copy of the request in the physical memory heap so
825 * the VBoxGuestLibrary can more easily deal with the request.
826 * (This is really a waste of time since the OS or the OS specific
827 * code has already buffered or locked the input/output buffer, but
828 * it does make things a bit simpler wrt the physical address.)
829 */
830 VMMDevRequestHeader *pReqCopy;
831 int rc = VbglGRAlloc(&pReqCopy, cbReq, pReqHdr->requestType);
832 if (RT_FAILURE(rc))
833 {
834 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
835 cbReq, cbReq, rc));
836 return rc;
837 }
838
839 memcpy(pReqCopy, pReqHdr, cbReq);
840 rc = VbglGRPerform(pReqCopy);
841 if ( RT_SUCCESS(rc)
842 && RT_SUCCESS(pReqCopy->rc))
843 {
844 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
845 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
846
847 memcpy(pReqHdr, pReqCopy, cbReq);
848 if (pcbDataReturned)
849 *pcbDataReturned = cbReq;
850 }
851 else if (RT_FAILURE(rc))
852 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
853 else
854 {
855 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
856 rc = pReqCopy->rc;
857 }
858
859 VbglGRFree(pReqCopy);
860 return rc;
861}
862
863
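/**
 * Handles the CTL_FILTER_MASK IOCtl; applies the caller supplied OR and NOT
 * masks to the event filter mask kept by the VMMDev.
 *
 * @returns VBox status code.
 * @param pDevExt The device extension.
 * @param pInfo The OR and NOT masks to apply.
 */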
864static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
865{
866 VMMDevCtlGuestFilterMask *pReq;
867 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
868 if (RT_FAILURE(rc))
869 {
870 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
871 sizeof(*pReq), sizeof(*pReq), rc));
872 return rc;
873 }
874
875 pReq->u32OrMask = pInfo->u32OrMask;
876 pReq->u32NotMask = pInfo->u32NotMask;
877
878 rc = VbglGRPerform(&pReq->header);
879 if (RT_FAILURE(rc))
880 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
881 else if (RT_FAILURE(pReq->header.rc))
882 {
883 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
884 rc = pReq->header.rc;
885 }
886
887 VbglGRFree(&pReq->header);
888 return rc;
889}
890
891
892#ifdef VBOX_HGCM
893
894/**
895 * This is a callback for dealing with async waits.
896 *
897 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
898 */
899static DECLCALLBACK(void)
900VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User)
901{
902 VMMDevHGCMRequestHeader volatile *pHdr = (VMMDevHGCMRequestHeader volatile *)pHdrNonVolatile;
903 const bool fInterruptible = (bool)u32User;
904 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
905 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
906
907 /*
908 * Check to see if the condition was met by the time we got here.
909 *
910 * We create a simple poll loop here for dealing with out-of-memory
911 * conditions since the caller isn't necessarily able to deal with
912 * us returning too early.
913 */
914 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
915 PVBOXGUESTWAIT pWait;
916 for (;;)
917 {
918 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
919 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
920 {
921 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
922 return;
923 }
924 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
925
926 pWait = VBoxGuestWaitAlloc(pDevExt);
927 if (pWait)
928 break;
929 if (fInterruptible)
930 return;
931 RTThreadSleep(1);
932 }
933 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
934 pWait->pHGCMReq = pHdr;
935
936 /*
937 * Re-enter the spinlock and re-check for the condition.
938 * If the condition is met, return.
939 * Otherwise link us into the HGCM wait list and go to sleep.
940 */
941 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
942 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
943 {
944 VBoxGuestWaitFreeLocked(pDevExt, pWait);
945 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
946 return;
947 }
948 VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
949 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
950
951 int rc;
952 if (fInterruptible)
953 rc = RTSemEventMultiWaitNoResume(pWait->Event, RT_INDEFINITE_WAIT);
954 else
955 rc = RTSemEventMultiWait(pWait->Event, RT_INDEFINITE_WAIT);
956
957 /*
958 * This shouldn't ever return failure...
959 * Unlink, free and return.
960 */
961 if (rc == VERR_SEM_DESTROYED)
962 return;
963 if (RT_FAILURE(rc))
964 LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));
965
966 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
967 VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
968 VBoxGuestWaitFreeLocked(pDevExt, pWait);
969 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
970}
971
972
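/**
 * Handles the HGCM_CONNECT IOCtl; connects to an HGCM service and records the
 * new client id in the session so it can be disconnected when the session is
 * closed.
 *
 * @returns VBox status code.
 * @param pDevExt The device extension.
 * @param pSession The session the connection is tracked in.
 * @param pInfo The connect request and result buffer.
 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */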
973static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
974 size_t *pcbDataReturned)
975{
976 /*
977 * The VbglHGCMConnect call will invoke the callback if the HGCM
978 * call is performed in an ASYNC fashion. The function is not able
979 * to deal with cancelled requests.
980 */
981 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
982 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
983 ? pInfo->Loc.u.host.achName : "<not local host>"));
984
985 int rc = VbglHGCMConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
986 if (RT_SUCCESS(rc))
987 {
988 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
989 pInfo->u32ClientID, pInfo->result, rc));
990 if (RT_SUCCESS(pInfo->result))
991 {
992 /*
993 * Append the client id to the client id table.
994 * If the table has somehow become filled up, we'll disconnect the session.
995 */
996 unsigned i;
997 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
998 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
999 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1000 if (!pSession->aHGCMClientIds[i])
1001 {
1002 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
1003 break;
1004 }
1005 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1006 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1007 {
1008 static unsigned s_cErrors = 0;
1009 if (s_cErrors++ < 32)
1010 LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
1011
1012 VBoxGuestHGCMDisconnectInfo Info;
1013 Info.result = 0;
1014 Info.u32ClientID = pInfo->u32ClientID;
1015 VbglHGCMDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
1016 return VERR_TOO_MANY_OPEN_FILES;
1017 }
1018 }
1019 if (pcbDataReturned)
1020 *pcbDataReturned = sizeof(*pInfo);
1021 }
1022 return rc;
1023}
1024
1025
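/**
 * Handles the HGCM_DISCONNECT IOCtl; validates the client id against the
 * session and disconnects it from the HGCM service.
 *
 * @returns VBox status code.
 * @param pDevExt The device extension.
 * @param pSession The session owning the client id.
 * @param pInfo The disconnect request and result buffer.
 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */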
1026static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1027 size_t *pcbDataReturned)
1028{
1029 /*
1030 * Validate the client id and invalidate its entry while we're in the call.
1031 */
1032 const uint32_t u32ClientId = pInfo->u32ClientID;
1033 unsigned i;
1034 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1035 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1036 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1037 if (pSession->aHGCMClientIds[i] == u32ClientId)
1038 {
1039 pSession->aHGCMClientIds[i] = UINT32_MAX;
1040 break;
1041 }
1042 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1043 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1044 {
1045 static unsigned s_cErrors = 0;
1046 if (s_cErrors++ < 32)
1047 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1048 return VERR_INVALID_HANDLE;
1049 }
1050
1051 /*
1052 * The VbglHGCMDisconnect call will invoke the callback if the HGCM
1053 * call is performed in an ASYNC fashion. The function is not able
1054 * to deal with cancelled requests.
1055 */
1056 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1057 int rc = VbglHGCMDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
1058 if (RT_SUCCESS(rc))
1059 {
1060 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1061 if (pcbDataReturned)
1062 *pcbDataReturned = sizeof(*pInfo);
1063 }
1064
1065 /* Update the client id array according to the result. */
1066 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1067 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1068 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1069 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1070
1071 return rc;
1072}
1073
1074
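/**
 * Handles the HGCM_CALL IOCtl; validates the parameter count, buffer size and
 * client id before passing the call on to VbglHGCMCall.
 *
 * @returns VBox status code.
 * @param pDevExt The device extension.
 * @param pSession The session owning the client id.
 * @param pInfo The call request, including the parameter array.
 * @param cbData The size of the data buffer.
 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */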
1075static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMCallInfo *pInfo,
1076 size_t cbData, size_t *pcbDataReturned)
1077{
1078 /*
1079 * Some more validations.
1080 */
1081 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1082 {
1083 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1084 return VERR_INVALID_PARAMETER;
1085 }
1086 const size_t cbActual = sizeof(*pInfo) + pInfo->cParms * sizeof(HGCMFunctionParameter);
1087 if (cbData < cbActual)
1088 {
1089 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1090 cbData, cbData, cbActual, cbActual));
1091 return VERR_INVALID_PARAMETER;
1092 }
1093
1094 /*
1095 * Validate the client id.
1096 */
1097 const uint32_t u32ClientId = pInfo->u32ClientID;
1098 unsigned i;
1099 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1100 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1101 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1102 if (pSession->aHGCMClientIds[i] == u32ClientId)
1103 break;
1104 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1105 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1106 {
1107 static unsigned s_cErrors = 0;
1108 if (s_cErrors++ < 32)
1109 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", u32ClientId));
1110 return VERR_INVALID_HANDLE;
1111 }
1112
1113 /*
1114 * The VbglHGCMCall call will invoke the callback if the HGCM
1115 * call is performed in an ASYNC fashion. This function can
1116 * deal with cancelled requests, so we let user-mode requests
1117 * be interruptible (should add a flag for this later, I guess).
1118 */
1119 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1120 int rc = VbglHGCMCall(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, pSession->R0Process != NIL_RTR0PROCESS);
1121 if (RT_SUCCESS(rc))
1122 {
1123 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1124 if (pcbDataReturned)
1125 *pcbDataReturned = cbActual;
1126 }
1127 return rc;
1128}
1129
1130
1131/**
1132 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1133 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1134 */
1135static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1136{
1137 int rc;
1138 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1139
1140
1141 /*
1142 * If there is an old client, try disconnect it first.
1143 */
1144 if (pDevExt->u32ClipboardClientId != 0)
1145 {
1146 VBoxGuestHGCMDisconnectInfo Info;
1147 Info.result = (uint32_t)VERR_WRONG_ORDER; /** @todo Vitali, why is this member unsigned? */
1148 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1149 rc = VbglHGCMDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, 0);
1150 if (RT_FAILURE(rc))
1151 {
1152 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1153 return rc;
1154 }
1155 if (RT_FAILURE((int32_t)Info.result))
1156 {
1157 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", Info.result));
1158 return Info.result;
1159 }
1160 pDevExt->u32ClipboardClientId = 0;
1161 }
1162
1163 /*
1164 * Try connect.
1165 */
1166 VBoxGuestHGCMConnectInfo Info;
1167 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1168 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1169 Info.u32ClientID = 0;
1170 Info.result = (uint32_t)VERR_WRONG_ORDER;
1171
1172 rc = VbglHGCMConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, 0);
1173 if (RT_FAILURE(rc))
1174 {
1175 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnect -> rc=%Rrc\n", rc));
1176 return rc;
1177 }
1178 if (RT_FAILURE((int32_t)Info.result))
1179 {
1180 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnect -> Info.result=%Rrc\n", Info.result));
1181 return Info.result;
1182 }
1183
1184 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1185
1186 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1187 *pu32ClientId = Info.u32ClientID;
1188 if (pcbDataReturned)
1189 *pcbDataReturned = sizeof(uint32_t);
1190
1191 return VINF_SUCCESS;
1192}
1193
1194#endif /* VBOX_HGCM */
1195
1196
1197/**
1198 * Common IOCtl for user to kernel and kernel to kernel communication.
1199 *
1200 * This function only does the basic validation and then invokes
1201 * worker functions that take care of each specific function.
1202 *
1203 * @returns VBox status code.
1204 *
1205 * @param iFunction The requested function.
1206 * @param pDevExt The device extension.
1207 * @param pSession The client session.
1208 * @param pvData The input/output data buffer. Can be NULL depending on the function.
1209 * @param cbData The max size of the data buffer.
1210 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1211 */
1212int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1213 void *pvData, size_t cbData, size_t *pcbDataReturned)
1214{
1215 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
1216 iFunction, pDevExt, pSession, pvData, cbData));
1217
1218 /*
1219 * Define some helper macros to simplify validation.
1220 */
1221#define CHECKRET_RING0(mnemonic) \
1222 do { \
1223 if (pSession->R0Process != NIL_RTR0PROCESS) \
1224 { \
1225 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
1226 pSession->Process, (uintptr_t)pSession->R0Process)); \
1227 return VERR_PERMISSION_DENIED; \
1228 } \
1229 } while (0)
1230#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
1231 do { \
1232 if (cbData < (cbMin)) \
1233 { \
1234 Log(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
1235 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
1236 return VERR_BUFFER_OVERFLOW; \
1237 } \
1238 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
1239 { \
1240 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
1241 return VERR_INVALID_POINTER; \
1242 } \
1243 } while (0)
1244
1245
1246 /*
1247 * Deal with variably sized requests first.
1248 */
1249 int rc = VINF_SUCCESS;
1250 if ( iFunction >= VBOXGUEST_IOCTL_VMMREQUEST(0)
1251 && iFunction <= VBOXGUEST_IOCTL_VMMREQUEST(0xfff)) /** @todo find a better way to do this*/
1252 {
1253 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
1254 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
1255 }
1256#ifdef VBOX_HGCM
1257 /*
1258 * This one is tricky and can be done later.
1259 */
1260 else if ( iFunction >= VBOXGUEST_IOCTL_HGCM_CALL(0)
1261 && iFunction <= VBOXGUEST_IOCTL_HGCM_CALL(0xfff))
1262 {
1263 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1264 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, cbData, pcbDataReturned);
1265 }
1266#endif /* VBOX_HGCM */
1267 else
1268 {
1269 switch (iFunction)
1270 {
1271 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
1272 CHECKRET_RING0("GETVMMDEVPORT");
1273 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
1274 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
1275 break;
1276
1277 case VBOXGUEST_IOCTL_WAITEVENT:
1278 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
1279 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, (VBoxGuestWaitEventInfo *)pvData, pcbDataReturned,
1280 pSession->R0Process != NIL_RTR0PROCESS);
1281 break;
1282
1283 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
1284 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
1285 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
1286 break;
1287
1288#ifdef VBOX_HGCM
1289 case VBOXGUEST_IOCTL_HGCM_CONNECT:
1290 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
1291 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
1292 break;
1293
1294 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
1295 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
1296 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
1297 break;
1298
1299
1300 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
1301 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
1302 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
1303 break;
1304#endif /* VBOX_HGCM */
1305
1306 default:
1307 {
1308 Log(("VBoxGuestCommonIOCtl: Unknown request %#x\n", iFunction));
1309 rc = VERR_NOT_SUPPORTED;
1310 break;
1311 }
1312 }
1313 }
1314
1315 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
1316 return rc;
1317}
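
/*
 * Illustration only, not part of the driver: a rough sketch of how a native
 * ioctl entry point forwards to VBoxGuestCommonIOCtl once the OS specific
 * code has copied or locked the user buffer into kernel space. The
 * vgdrvNativeIOCtl name is an illustrative assumption.
 *
 *     int vgdrvNativeIOCtl(PVBOXGUESTSESSION pSession, unsigned iFunction,
 *                          void *pvData, size_t cbData, size_t *pcbReturned)
 *     {
 *         // pvData must be kernel addressable; validating the user pointer
 *         // is the native code's job, the rest is common code.
 *         return VBoxGuestCommonIOCtl(iFunction, pSession->pDevExt, pSession,
 *                                     pvData, cbData, pcbReturned);
 *     }
 */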
1318
1319
1320
1321/**
1322 * Common interrupt service routine.
1323 *
1324 * This deals with events and with waking up threads waiting for those events.
1325 *
1326 * @returns true if it was our interrupt, false if it wasn't.
1327 * @param pDevExt The VBoxGuest device extension.
1328 */
1329bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
1330{
1331 /*
1332 * Now we have to find out whether it was our IRQ. Read the event mask
1333 * from our device to see if there are any pending events.
1334 */
1335 bool fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
1336 if (fOurIrq)
1337 {
1338 /* Acknowledge events. */
1339 VMMDevEvents *pReq = pDevExt->pIrqAckEvents;
1340 int rc = VbglGRPerform(&pReq->header);
1341 if ( RT_SUCCESS(rc)
1342 && RT_SUCCESS(pReq->header.rc))
1343 {
1344 uint32_t fEvents = pReq->events;
1345 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
1346
1347 /*
1348 * Enter the spinlock and examine the waiting threads.
1349 */
1350 int rc2 = 0;
1351 PVBOXGUESTWAIT pWait;
1352 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1353 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
1354
1355#ifdef VBOX_HGCM
1356 /* The HGCM event/list is kind of different in that we evaluate all entries. */
1357 if (fEvents & VMMDEV_EVENT_HGCM)
1358 for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
1359 if ( !pWait->fResEvents
1360 && (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
1361 {
1362 pWait->fResEvents = VMMDEV_EVENT_HGCM;
1363 rc2 |= RTSemEventMultiSignal(pWait->Event);
1364 }
1365#endif
1366
1367 /* Normal FIFO evaluation. */
1368 fEvents |= pDevExt->f32PendingEvents;
1369 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1370 if (!pWait->fResEvents)
1371 {
1372 pWait->fResEvents = pWait->fReqEvents & fEvents;
1373 fEvents &= ~pWait->fResEvents;
1374 rc2 |= RTSemEventMultiSignal(pWait->Event);
1375 if (!fEvents)
1376 break;
1377 }
1378
1379 ASMAtomicXchgU32(&pDevExt->f32PendingEvents, fEvents);
1380 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
1381 Assert(rc2 == 0);
1382 }
1383 else /* something is seriously wrong... */
1384 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d, header rc=%d (events=%#x)!!\n",
1385 rc, pReq->header.rc, pReq->events));
1386 }
1387 else
1388 LogFlow(("VBoxGuestCommonISR: not ours\n"));
1389
1390 return fOurIrq;
1391}
1392