VirtualBox

source: vbox/trunk/src/VBox/Additions/WINNT/VBoxGuest/VBoxGuest.cpp@10552

Last change on this file since 10552 was 10552, checked in by vboxsync, 16 years ago

More IOCTLs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 54.2 KB
1/** @file
2 *
3 * VBoxGuest -- VirtualBox Win32 guest support driver
4 *
5 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
6 *
7 * This file is part of VirtualBox Open Source Edition (OSE), as
8 * available from http://www.virtualbox.org. This file is free software;
9 * you can redistribute it and/or modify it under the terms of the GNU
10 * General Public License (GPL) as published by the Free Software
11 * Foundation, in version 2 as it comes in the "COPYING" file of the
12 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
13 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
14 *
15 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
16 * Clara, CA 95054 USA or visit http://www.sun.com if you need
17 * additional information or have any questions.
18 */
19
20// enable backdoor logging
21//#define LOG_ENABLED
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#include "VBoxGuest_Internal.h"
27#ifdef TARGET_NT4
28#include "NTLegacy.h"
29#else
30#include "VBoxGuestPnP.h"
31#endif
32#include "Helper.h"
33#include <excpt.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/asm.h>
37#include <stdio.h>
38#include <VBox/VBoxGuestLib.h>
39#include <VBoxGuestInternal.h>
40
41#ifdef TARGET_NT4
42/* XP DDK #defines ExFreePool to ExFreePoolWithTag. The latter does not exist on NT4, so...
43 * The same for ExAllocatePool.
44 */
45#undef ExAllocatePool
46#undef ExFreePool
47#endif
48
49/*******************************************************************************
50* Defined Constants And Macros *
51*******************************************************************************/
52
53
54/*******************************************************************************
55* Internal Functions *
56*******************************************************************************/
57extern "C"
58{
59static NTSTATUS VBoxGuestAddDevice(PDRIVER_OBJECT pDrvObj, PDEVICE_OBJECT pDevObj);
60static void VBoxGuestUnload(PDRIVER_OBJECT pDrvObj);
61static NTSTATUS VBoxGuestCreate(PDEVICE_OBJECT pDevObj, PIRP pIrp);
62static NTSTATUS VBoxGuestClose(PDEVICE_OBJECT pDevObj, PIRP pIrp);
63static NTSTATUS VBoxGuestDeviceControl(PDEVICE_OBJECT pDevObj, PIRP pIrp);
64static NTSTATUS VBoxGuestSystemControl(PDEVICE_OBJECT pDevObj, PIRP pIrp);
65static NTSTATUS VBoxGuestShutdown(PDEVICE_OBJECT pDevObj, PIRP pIrp);
66static NTSTATUS VBoxGuestNotSupportedStub(PDEVICE_OBJECT pDevObj, PIRP pIrp);
67static VOID vboxWorkerThread(PVOID context);
68static VOID reserveHypervisorMemory(PVBOXGUESTDEVEXT pDevExt);
69static VOID vboxIdleThread(PVOID context);
70}
71
72
73/*******************************************************************************
74* Exported Functions *
75*******************************************************************************/
76__BEGIN_DECLS
77ULONG DriverEntry(PDRIVER_OBJECT pDrvObj, PUNICODE_STRING pRegPath);
78__END_DECLS
79
80#ifdef ALLOC_PRAGMA
81#pragma alloc_text (INIT, DriverEntry)
82#pragma alloc_text (PAGE, createThreads)
83#pragma alloc_text (PAGE, unreserveHypervisorMemory)
84#pragma alloc_text (PAGE, VBoxGuestAddDevice)
85#pragma alloc_text (PAGE, VBoxGuestUnload)
86#pragma alloc_text (PAGE, VBoxGuestCreate)
87#pragma alloc_text (PAGE, VBoxGuestClose)
88#pragma alloc_text (PAGE, VBoxGuestDeviceControl)
89#pragma alloc_text (PAGE, VBoxGuestShutdown)
90#pragma alloc_text (PAGE, VBoxGuestNotSupportedStub)
91/* Note: at least the isr handler should be in non-pageable memory! */
92/*#pragma alloc_text (PAGE, VBoxGuestDpcHandler)
93 #pragma alloc_text (PAGE, VBoxGuestIsrHandler) */
94#pragma alloc_text (PAGE, vboxWorkerThread)
95#pragma alloc_text (PAGE, reserveHypervisorMemory)
96#pragma alloc_text (PAGE, vboxIdleThread)
97#endif
98
99winVersion_t winVersion;
100
101/**
102 * Driver entry point.
103 *
104 * @returns appropriate status code.
105 * @param pDrvObj Pointer to driver object.
106 * @param pRegPath Registry base path.
107 */
108ULONG DriverEntry(PDRIVER_OBJECT pDrvObj, PUNICODE_STRING pRegPath)
109{
110 NTSTATUS rc = STATUS_SUCCESS;
111
112 dprintf(("VBoxGuest::DriverEntry. Driver built: %s %s\n", __DATE__, __TIME__));
113
114 ULONG majorVersion;
115 ULONG minorVersion;
116 ULONG buildNumber;
117 PsGetVersion(&majorVersion, &minorVersion, &buildNumber, NULL);
118 dprintf(("VBoxGuest::DriverEntry: running on Windows NT version %d.%d, build %d\n", majorVersion, minorVersion, buildNumber));
119 switch (majorVersion)
120 {
121 case 6:
122 winVersion = WINVISTA;
123 break;
124 case 5:
125 switch (minorVersion)
126 {
127 case 2:
128 winVersion = WIN2K3;
129 break;
130 case 1:
131 winVersion = WINXP;
132 break;
133 case 0:
134 winVersion = WIN2K;
135 break;
136 default:
137 dprintf(("VBoxGuest::DriverEntry: unknown version of Windows, refusing!\n"));
138 return STATUS_DRIVER_UNABLE_TO_LOAD;
139 }
140 break;
141 case 4:
142 winVersion = WINNT4;
143 break;
144 default:
145 dprintf(("VBoxGuest::DriverEntry: unsupported Windows version, at least NT4 is required!\n"));
146 return STATUS_DRIVER_UNABLE_TO_LOAD;
147 }
148
149 /*
150 * Setup the driver entry points in pDrvObj.
151 */
152 pDrvObj->DriverUnload = VBoxGuestUnload;
153 pDrvObj->MajorFunction[IRP_MJ_CREATE] = VBoxGuestCreate;
154 pDrvObj->MajorFunction[IRP_MJ_CLOSE] = VBoxGuestClose;
155 pDrvObj->MajorFunction[IRP_MJ_DEVICE_CONTROL] = VBoxGuestDeviceControl;
156 pDrvObj->MajorFunction[IRP_MJ_INTERNAL_DEVICE_CONTROL] = VBoxGuestDeviceControl;
157 pDrvObj->MajorFunction[IRP_MJ_SHUTDOWN] = VBoxGuestShutdown;
158 pDrvObj->MajorFunction[IRP_MJ_READ] = VBoxGuestNotSupportedStub;
159 pDrvObj->MajorFunction[IRP_MJ_WRITE] = VBoxGuestNotSupportedStub;
160#ifdef TARGET_NT4
161 rc = ntCreateDevice(pDrvObj, NULL, pRegPath);
162#else
163 pDrvObj->MajorFunction[IRP_MJ_PNP] = VBoxGuestPnP;
164 pDrvObj->MajorFunction[IRP_MJ_POWER] = VBoxGuestPower;
165 pDrvObj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = VBoxGuestSystemControl;
166 pDrvObj->DriverExtension->AddDevice = (PDRIVER_ADD_DEVICE)VBoxGuestAddDevice;
167#endif
168
169 dprintf(("VBoxGuest::DriverEntry returning %#x\n", rc));
170 return rc;
171}
172
173#ifndef TARGET_NT4
174/**
175 * Handle request from the Plug & Play subsystem
176 *
177 * @returns NT status code
178 * @param pDrvObj Driver object
179 * @param pDevObj Device object
180 */
181static NTSTATUS VBoxGuestAddDevice(PDRIVER_OBJECT pDrvObj, PDEVICE_OBJECT pDevObj)
182{
183 NTSTATUS rc;
184 dprintf(("VBoxGuest::VBoxGuestAddDevice\n"));
185
186 /*
187 * Create device.
188 */
189 PDEVICE_OBJECT deviceObject = NULL;
190 UNICODE_STRING devName;
191 RtlInitUnicodeString(&devName, VBOXGUEST_DEVICE_NAME_NT);
192 rc = IoCreateDevice(pDrvObj, sizeof(VBOXGUESTDEVEXT), &devName, FILE_DEVICE_UNKNOWN, 0, FALSE, &deviceObject);
193 if (!NT_SUCCESS(rc))
194 {
195 dprintf(("VBoxGuest::VBoxGuestAddDevice: IoCreateDevice failed with rc=%#x!\n", rc));
196 return rc;
197 }
198 UNICODE_STRING win32Name;
199 RtlInitUnicodeString(&win32Name, VBOXGUEST_DEVICE_NAME_DOS);
200 rc = IoCreateSymbolicLink(&win32Name, &devName);
201 if (!NT_SUCCESS(rc))
202 {
203 dprintf(("VBoxGuest::VBoxGuestAddDevice: IoCreateSymbolicLink failed with rc=%#x!\n", rc));
204 IoDeleteDevice(deviceObject);
205 return rc;
206 }
207
208 /*
209 * Setup the device extension.
210 */
211 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)deviceObject->DeviceExtension;
212 RtlZeroMemory(pDevExt, sizeof(VBOXGUESTDEVEXT));
213
214 pDevExt->deviceObject = deviceObject;
215 pDevExt->devState = STOPPED;
216
217 pDevExt->nextLowerDriver = IoAttachDeviceToDeviceStack(deviceObject, pDevObj);
218 if (pDevExt->nextLowerDriver == NULL)
219 {
220 dprintf(("VBoxGuest::VBoxGuestAddDevice: IoAttachDeviceToDeviceStack did not return a nextLowerDriver\n"));
221 IoDeleteSymbolicLink(&win32Name);
222 IoDeleteDevice(deviceObject);
223 return STATUS_DEVICE_NOT_CONNECTED;
224 }
225
226 // driver is ready now
227 deviceObject->Flags &= ~DO_DEVICE_INITIALIZING;
228
229 dprintf(("VBoxGuest::VBoxGuestAddDevice: returning with rc = 0x%x\n", rc));
230 return rc;
231}
232#endif
233
234
235/**
236 * Unload the driver.
237 *
238 * @param pDrvObj Driver object.
239 */
240void VBoxGuestUnload(PDRIVER_OBJECT pDrvObj)
241{
242 dprintf(("VBoxGuest::VBoxGuestUnload\n"));
243#ifdef TARGET_NT4
244 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pDrvObj->DeviceObject->DeviceExtension;
245 unreserveHypervisorMemory(pDevExt);
246 if (pDevExt->workerThread)
247 {
248 dprintf(("VBoxGuest::VBoxGuestUnload: waiting for the worker thread to terminate...\n"));
249 pDevExt->stopThread = TRUE;
250 KeSetEvent(&pDevExt->workerThreadRequest, 0, FALSE);
251 KeWaitForSingleObject(pDevExt->workerThread,
252 Executive, KernelMode, FALSE, NULL);
253 dprintf(("VBoxGuest::VBoxGuestUnload: returned from KeWaitForSingleObject for worker thread\n"));
254 }
255 if (pDevExt->idleThread)
256 {
257 dprintf(("VBoxGuest::VBoxGuestUnload: waiting for the idle thread to terminate...\n"));
258 pDevExt->stopThread = TRUE;
259 KeWaitForSingleObject(pDevExt->idleThread,
260 Executive, KernelMode, FALSE, NULL);
261 dprintf(("VBoxGuest::VBoxGuestUnload: returned from KeWaitForSingleObject for idle thread\n"));
262 }
263
264 hlpVBoxUnmapVMMDevMemory (pDevExt);
265
266 VBoxCleanupMemBalloon(pDevExt);
267
268 /*
269 * I don't think it's possible to unload a driver that processes still have
270 * open; at least we'll blindly assume that here.
271 */
272 UNICODE_STRING win32Name;
273 RtlInitUnicodeString(&win32Name, VBOXGUEST_DEVICE_NAME_DOS);
274 NTSTATUS rc = IoDeleteSymbolicLink(&win32Name);
275 IoDeleteDevice(pDrvObj->DeviceObject);
276#endif
277 dprintf(("VBoxGuest::VBoxGuestUnload: returning\n"));
278}
279
280
281/**
282 * Create (i.e. Open) file entry point.
283 *
284 * @param pDevObj Device object.
285 * @param pIrp Request packet.
286 */
287NTSTATUS VBoxGuestCreate(PDEVICE_OBJECT pDevObj, PIRP pIrp)
288{
289 dprintf(("VBoxGuest::VBoxGuestCreate\n"));
290
291 PIO_STACK_LOCATION pStack = IoGetCurrentIrpStackLocation(pIrp);
292 PFILE_OBJECT pFileObj = pStack->FileObject;
293 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pDevObj->DeviceExtension;
294
295 /*
296 * We are not remotely similar to a directory...
297 * (But this is possible.)
298 */
299 if (pStack->Parameters.Create.Options & FILE_DIRECTORY_FILE)
300 {
301 dprintf(("VBoxGuest::VBoxGuestCreate: we're not a directory!\n"));
302 pIrp->IoStatus.Status = STATUS_NOT_A_DIRECTORY;
303 pIrp->IoStatus.Information = 0;
304 IoCompleteRequest(pIrp, IO_NO_INCREMENT);
305 return STATUS_NOT_A_DIRECTORY;
306 }
307
308 NTSTATUS rcNt = pIrp->IoStatus.Status = STATUS_SUCCESS;
309 pIrp->IoStatus.Information = 0;
310 IoCompleteRequest(pIrp, IO_NO_INCREMENT);
311
312 dprintf(("VBoxGuest::VBoxGuestCreate: returning 0x%x\n", rcNt));
313 return rcNt;
314}
315
316
317/**
318 * Close file entry point.
319 *
320 * @param pDevObj Device object.
321 * @param pIrp Request packet.
322 */
323NTSTATUS VBoxGuestClose(PDEVICE_OBJECT pDevObj, PIRP pIrp)
324{
325 dprintf(("VBoxGuest::VBoxGuestClose\n"));
326
327 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pDevObj->DeviceExtension;
328 PIO_STACK_LOCATION pStack = IoGetCurrentIrpStackLocation(pIrp);
329 PFILE_OBJECT pFileObj = pStack->FileObject;
330 dprintf(("VBoxGuest::VBoxGuestClose: pDevExt=%p pFileObj=%p pSession=%p\n",
331 pDevExt, pFileObj, pFileObj->FsContext));
332
333 pFileObj->FsContext = NULL;
334 pIrp->IoStatus.Information = 0;
335 pIrp->IoStatus.Status = STATUS_SUCCESS;
336 IoCompleteRequest(pIrp, IO_NO_INCREMENT);
337
338 return STATUS_SUCCESS;
339}
340
341#ifdef VBOX_HGCM
342DECLVBGL(void) VBoxHGCMCallback (VMMDevHGCMRequestHeader *pHeader, void *pvData, uint32_t u32Data)
343{
344 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvData;
345
346 dprintf(("VBoxHGCMCallback\n"));
347
348 /* Possible problem with request completion right between the fu32Flags check and KeWaitForSingleObject
349 * call; introduce a timeout to make sure we don't wait indefinitely.
350 */
351
352 while ((pHeader->fu32Flags & VBOX_HGCM_REQ_DONE) == 0)
353 {
354 /* Specifying UserMode so killing the user process will abort the wait.
355 * @todo Since VbglGRCancel is not yet implemented, the wait itself must
356 * not be interruptible. The wait can be interrupted only when the
357 * calling process is being killed.
358 * When alertable is TRUE, the wait sometimes ends with STATUS_USER_APC.
359 */
360 NTSTATUS rc = KeWaitForSingleObject (&pDevExt->keventNotification, Executive,
361 UserMode,
362 FALSE, /* Not Alertable */
363 &pDevExt->HGCMWaitTimeout
364 );
365 dprintf(("VBoxHGCMCallback: Wait returned %d fu32Flags=%x\n", rc, pHeader->fu32Flags));
366
367 if (rc == STATUS_TIMEOUT)
368 continue;
369
370 if (rc != STATUS_WAIT_0)
371 {
372 dprintf(("VBoxHGCMCallback: The external event was signalled or the wait timed out or terminated rc = 0x%08X.\n", rc));
373 break;
374 }
375
376 dprintf(("VBoxHGCMCallback: fu32Flags = %08X\n", pHeader->fu32Flags));
377 }
378 return;
379}
380
381NTSTATUS vboxHGCMVerifyIOBuffers (PIO_STACK_LOCATION pStack, unsigned cb)
382{
383 if (pStack->Parameters.DeviceIoControl.OutputBufferLength < cb)
384 {
385 dprintf(("VBoxGuest::vboxHGCMVerifyIOBuffers: OutputBufferLength %d < %d\n",
386 pStack->Parameters.DeviceIoControl.OutputBufferLength, cb));
387 return STATUS_INVALID_PARAMETER;
388 }
389
390 if (pStack->Parameters.DeviceIoControl.InputBufferLength < cb)
391 {
392 dprintf(("VBoxGuest::vboxHGCMVerifyIOBuffers: InputBufferLength %d < %d\n",
393 pStack->Parameters.DeviceIoControl.InputBufferLength, cb));
394 return STATUS_INVALID_PARAMETER;
395 }
396
397 return STATUS_SUCCESS;
398}
399
400#endif /* VBOX_HGCM */
401
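/**
 * Returns true when at most one bit is set in the value: clearing the lowest set bit
 * via (val & (val - 1)) leaves zero only for powers of two (and for zero itself, which
 * the callers reject separately). E.g. 0x20 & 0x1F == 0, while 0x30 & 0x2F == 0x20.
 */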
402static bool IsPowerOfTwo (uint32_t val)
403{
404 return (val & (val - 1)) == 0;
405}
406
407static bool CtlGuestFilterMask (uint32_t u32OrMask, uint32_t u32NotMask)
408{
409 bool result = false;
410 VMMDevCtlGuestFilterMask *req;
411 int rc = VbglGRAlloc ((VMMDevRequestHeader **) &req, sizeof (*req),
412 VMMDevReq_CtlGuestFilterMask);
413
414 if (VBOX_SUCCESS (rc))
415 {
416 req->u32OrMask = u32OrMask;
417 req->u32NotMask = u32NotMask;
418
419 rc = VbglGRPerform (&req->header);
420 if (VBOX_FAILURE (rc) || VBOX_FAILURE (req->header.rc))
421 {
422 dprintf (("VBoxGuest::VBoxGuestDeviceControl: error issuing request to VMMDev! "
423 "rc = %d, VMMDev rc = %Vrc\n", rc, req->header.rc));
424 }
425 else
426 {
427 result = true;
428 }
429 VbglGRFree (&req->header);
430 }
431
432 return result;
433}
434
435#ifdef VBOX_WITH_MANAGEMENT
436static int VBoxGuestSetBalloonSize(PVBOXGUESTDEVEXT pDevExt, uint32_t u32BalloonSize)
437{
438 VMMDevChangeMemBalloon *req = NULL;
439 int rc = VINF_SUCCESS;
440
441 if (u32BalloonSize > pDevExt->MemBalloon.cMaxBalloons)
442 {
443 AssertMsgFailed(("VBoxGuestSetBalloonSize illegal balloon size %d (max=%d)\n", u32BalloonSize, pDevExt->MemBalloon.cMaxBalloons));
444 return VERR_INVALID_PARAMETER;
445 }
446
447 if (u32BalloonSize == pDevExt->MemBalloon.cBalloons)
448 return VINF_SUCCESS; /* nothing to do */
449
450 /* Allocate request packet */
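/* The request is variable-sized: RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES])
 * gives the size of the fixed part plus a full array of page entries, so a single
 * request can describe one complete balloon chunk. */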
451 rc = VbglGRAlloc((VMMDevRequestHeader **)&req, RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]), VMMDevReq_ChangeMemBalloon);
452 if (VBOX_FAILURE(rc))
453 return rc;
454
455 vmmdevInitRequest(&req->header, VMMDevReq_ChangeMemBalloon);
456
457 if (u32BalloonSize > pDevExt->MemBalloon.cBalloons)
458 {
459 /* inflate */
460 for (uint32_t i=pDevExt->MemBalloon.cBalloons;i<u32BalloonSize;i++)
461 {
462#ifndef TARGET_NT4
463 /*
464 * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
465 */
466 PHYSICAL_ADDRESS Zero;
467 PHYSICAL_ADDRESS HighAddr;
468 Zero.QuadPart = 0;
469 HighAddr.QuadPart = _4G - 1;
470 PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE);
471 if (!pMdl || MmGetMdlByteCount(pMdl) < VMMDEV_MEMORY_BALLOON_CHUNK_SIZE)
472 {
473 if (pMdl)
474 {
475 MmFreePagesFromMdl(pMdl);
476 ExFreePool(pMdl);
477 }
478 rc = VERR_NO_MEMORY;
479 goto end;
480 }
481#else
482 PVOID pvBalloon;
483 pvBalloon = ExAllocatePool(PagedPool, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE);
484 if (!pvBalloon)
485 {
486 rc = VERR_NO_MEMORY;
487 goto end;
488 }
489
490 PMDL pMdl = IoAllocateMdl (pvBalloon, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, FALSE, FALSE, NULL);
491 if (pMdl == NULL)
492 {
493 rc = VERR_NO_MEMORY;
494 ExFreePool(pvBalloon);
495 AssertMsgFailed(("IoAllocateMdl %VGv %x failed!!\n", pvBalloon, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE));
496 goto end;
497 }
498 else
499 {
500 __try {
501 /* Calls to MmProbeAndLockPages must be enclosed in a try/except block. */
502 MmProbeAndLockPages (pMdl, KernelMode, IoModifyAccess);
503 }
504 __except(EXCEPTION_EXECUTE_HANDLER)
505 {
506 dprintf(("MmProbeAndLockPages failed!\n"));
507 rc = VERR_NO_MEMORY;
508 IoFreeMdl (pMdl);
509 ExFreePool(pvBalloon);
510 goto end;
511 }
512 }
513#endif
514
515 PPFN_NUMBER pPageDesc = MmGetMdlPfnArray(pMdl);
516
517 /* Copy manually as RTGCPHYS is always 64 bits */
518 for (uint32_t j=0;j<VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;j++)
519 req->aPhysPage[j] = pPageDesc[j];
520
521 req->header.size = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
522 req->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
523 req->fInflate = true;
524
525 rc = VbglGRPerform(&req->header);
526 if (VBOX_FAILURE(rc) || VBOX_FAILURE(req->header.rc))
527 {
528 dprintf(("VBoxGuest::VBoxGuestSetBalloonSize: error issuing request to VMMDev!"
529 "rc = %d, VMMDev rc = %Vrc\n", rc, req->header.rc));
530
531#ifndef TARGET_NT4
532 MmFreePagesFromMdl(pMdl);
533 ExFreePool(pMdl);
534#else
535 IoFreeMdl (pMdl);
536 ExFreePool(pvBalloon);
537#endif
538 goto end;
539 }
540 else
541 {
542#ifndef TARGET_NT4
543 dprintf(("VBoxGuest::VBoxGuestSetBalloonSize %d MB added chunk at %x\n", i, pMdl));
544#else
545 dprintf(("VBoxGuest::VBoxGuestSetBalloonSize %d MB added chunk at %x\n", i, pvBalloon));
546#endif
547 pDevExt->MemBalloon.paMdlMemBalloon[i] = pMdl;
548 pDevExt->MemBalloon.cBalloons++;
549 }
550 }
551 }
552 else
553 {
554 /* deflate */
555 for (uint32_t _i=pDevExt->MemBalloon.cBalloons;_i>u32BalloonSize;_i--)
556 {
557 uint32_t index = _i - 1;
558 PMDL pMdl = pDevExt->MemBalloon.paMdlMemBalloon[index];
559
560 Assert(pMdl);
561 if (pMdl)
562 {
563#ifdef TARGET_NT4
564 PVOID pvBalloon = MmGetMdlVirtualAddress(pMdl);
565#endif
566
567 PPFN_NUMBER pPageDesc = MmGetMdlPfnArray(pMdl);
568
569 /* Copy manually as RTGCPHYS is always 64 bits */
570 for (uint32_t j=0;j<VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;j++)
571 req->aPhysPage[j] = pPageDesc[j];
572
573 req->header.size = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
574 req->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
575 req->fInflate = false;
576
577 rc = VbglGRPerform(&req->header);
578 if (VBOX_FAILURE(rc) || VBOX_FAILURE(req->header.rc))
579 {
580 AssertMsgFailed(("VBoxGuest::VBoxGuestSetBalloonSize: error issuing request to VMMDev! rc = %d, VMMDev rc = %Vrc\n", rc, req->header.rc));
581 break;
582 }
583
584 /* Free the ballooned memory */
585#ifndef TARGET_NT4
586 dprintf(("VBoxGuest::VBoxGuestSetBalloonSize %d MB free chunk at %x\n", index, pMdl));
587 MmFreePagesFromMdl(pMdl);
588 ExFreePool(pMdl);
589#else
590 dprintf(("VBoxGuest::VBoxGuestSetBalloonSize %d MB free chunk at %x\n", index, pvBalloon));
591 MmUnlockPages (pMdl);
592 IoFreeMdl (pMdl);
593 ExFreePool(pvBalloon);
594#endif
595
596 pDevExt->MemBalloon.paMdlMemBalloon[index] = NULL;
597 pDevExt->MemBalloon.cBalloons--;
598 }
599 }
600 }
601 Assert(pDevExt->MemBalloon.cBalloons <= pDevExt->MemBalloon.cMaxBalloons);
602
603end:
604 VbglGRFree(&req->header);
605 return rc;
606}
607
608static int VBoxGuestQueryMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, ULONG *pMemBalloonSize)
609{
610 /* just perform the request */
611 VMMDevGetMemBalloonChangeRequest *req = NULL;
612
613 dprintf(("VBoxGuestQueryMemoryBalloon\n"));
614
615 int rc = VbglGRAlloc((VMMDevRequestHeader **)&req, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
616
617 if (VBOX_SUCCESS(rc))
618 {
619 vmmdevInitRequest(&req->header, VMMDevReq_GetMemBalloonChangeRequest);
620 req->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
621 rc = VbglGRPerform(&req->header);
622
623 if (VBOX_FAILURE(rc) || VBOX_FAILURE(req->header.rc))
624 {
625 dprintf(("VBoxGuest::VBoxGuestDeviceControl VBOXGUEST_IOCTL_CTL_CHECK_BALLOON: error issuing request to VMMDev!"
626 "rc = %d, VMMDev rc = %Vrc\n", rc, req->header.rc));
627 }
628 else
629 {
630 if (!pDevExt->MemBalloon.paMdlMemBalloon)
631 {
632 pDevExt->MemBalloon.cMaxBalloons = req->u32PhysMemSize;
633 pDevExt->MemBalloon.paMdlMemBalloon = (PMDL *)ExAllocatePool(PagedPool, req->u32PhysMemSize * sizeof(PMDL));
634 Assert(pDevExt->MemBalloon.paMdlMemBalloon);
635 if (!pDevExt->MemBalloon.paMdlMemBalloon)
636 return VERR_NO_MEMORY;
637 }
638 Assert(pDevExt->MemBalloon.cMaxBalloons == req->u32PhysMemSize);
639
640 rc = VBoxGuestSetBalloonSize(pDevExt, req->u32BalloonSize);
641 /* ignore out of memory failures */
642 if (rc == VERR_NO_MEMORY)
643 rc = VINF_SUCCESS;
644
645 if (pMemBalloonSize)
646 *pMemBalloonSize = pDevExt->MemBalloon.cBalloons;
647 }
648
649 VbglGRFree(&req->header);
650 }
651 return rc;
652}
653#endif
654
655void VBoxInitMemBalloon(PVBOXGUESTDEVEXT pDevExt)
656{
657#ifdef VBOX_WITH_MANAGEMENT
658 ULONG dummy;
659
660 pDevExt->MemBalloon.cBalloons = 0;
661 pDevExt->MemBalloon.cMaxBalloons = 0;
662 pDevExt->MemBalloon.paMdlMemBalloon = NULL;
663
664 VBoxGuestQueryMemoryBalloon(pDevExt, &dummy);
665#endif
666}
667
668void VBoxCleanupMemBalloon(PVBOXGUESTDEVEXT pDevExt)
669{
670#ifdef VBOX_WITH_MANAGEMENT
671 if (pDevExt->MemBalloon.paMdlMemBalloon)
672 {
673 /* Clean up the memory balloon leftovers */
674 VBoxGuestSetBalloonSize(pDevExt, 0);
675 ExFreePool(pDevExt->MemBalloon.paMdlMemBalloon);
676 pDevExt->MemBalloon.paMdlMemBalloon = NULL;
677 }
678 Assert(pDevExt->MemBalloon.cBalloons == 0);
679#endif
680}
681
682/**
683 * Device I/O Control entry point.
684 *
685 * @param pDevObj Device object.
686 * @param pIrp Request packet.
687 */
688NTSTATUS VBoxGuestDeviceControl(PDEVICE_OBJECT pDevObj, PIRP pIrp)
689{
690 dprintf(("VBoxGuest::VBoxGuestDeviceControl\n"));
691
692 NTSTATUS Status = STATUS_SUCCESS;
693
694 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pDevObj->DeviceExtension;
695
696 PIO_STACK_LOCATION pStack = IoGetCurrentIrpStackLocation(pIrp);
697
698 char *pBuf = (char *)pIrp->AssociatedIrp.SystemBuffer; /* all requests are buffered. */
699
700 unsigned cbOut = 0;
701
702 switch (pStack->Parameters.DeviceIoControl.IoControlCode)
703 {
704 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
705 {
706 dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_GETVMMDEVPORT\n"));
707
708 if (pStack->Parameters.DeviceIoControl.OutputBufferLength < sizeof (VBoxGuestPortInfo))
709 {
710 Status = STATUS_BUFFER_TOO_SMALL;
711 break;
712 }
713
714 VBoxGuestPortInfo *portInfo = (VBoxGuestPortInfo*)pBuf;
715
716 portInfo->portAddress = pDevExt->startPortAddress;
717 portInfo->pVMMDevMemory = pDevExt->pVMMDevMemory;
718
719 cbOut = sizeof(VBoxGuestPortInfo);
720
721 break;
722 }
723
724 case VBOXGUEST_IOCTL_WAITEVENT:
725 {
726 /* Needs to be extended to support multiple waiters for an event:
727 * an array of counters for each event, with the event mask computed
728 * each time a wait event arrives.
729 */
730 dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_WAITEVENT\n"));
731
732 if (pStack->Parameters.DeviceIoControl.OutputBufferLength < sizeof(VBoxGuestWaitEventInfo))
733 {
734 dprintf(("VBoxGuest::VBoxGuestDeviceControl: OutputBufferLength %d < sizeof(VBoxGuestWaitEventInfo) %d\n",
735 pStack->Parameters.DeviceIoControl.OutputBufferLength, sizeof(VBoxGuestWaitEventInfo)));
736 Status = STATUS_BUFFER_TOO_SMALL;
737 break;
738 }
739
740 if (pStack->Parameters.DeviceIoControl.InputBufferLength < sizeof(VBoxGuestWaitEventInfo)) {
741 dprintf(("VBoxGuest::VBoxGuestDeviceControl: InputBufferLength %d < sizeof(VBoxGuestWaitEventInfo) %d\n",
742 pStack->Parameters.DeviceIoControl.InputBufferLength, sizeof(VBoxGuestWaitEventInfo)));
743 Status = STATUS_BUFFER_TOO_SMALL;
744 break;
745 }
746
747 VBoxGuestWaitEventInfo *eventInfo = (VBoxGuestWaitEventInfo *)pBuf;
748
749 if (!eventInfo->u32EventMaskIn || !IsPowerOfTwo (eventInfo->u32EventMaskIn)) {
750 dprintf (("VBoxGuest::VBoxGuestDeviceControl: Invalid input mask %#x\n",
751 eventInfo->u32EventMaskIn));
752 Status = STATUS_INVALID_PARAMETER;
753 break;
754 }
755
756 eventInfo->u32EventFlagsOut = 0;
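/* The input mask has exactly one bit set (checked above). ASMBitFirstSetU32 returns
 * the 1-based position of that bit, so subtracting 1 yields the 0-based bit index
 * used with ASMAtomicBitTestAndClear below. */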
757 int iBitOffset = ASMBitFirstSetU32 (eventInfo->u32EventMaskIn) - 1;
758 bool fTimeout = (eventInfo->u32TimeoutIn != ~0L);
759
760 dprintf (("mask = %d, iBitOffset = %d\n", eventInfo->u32EventMaskIn, iBitOffset));
761
762 /* Possible problem with request completion right between the pending event check and KeWaitForSingleObject
763 * call; introduce a timeout (if none was specified) to make sure we don't wait indefinitely.
764 */
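/* KeWaitForSingleObject interprets a negative timeout as a relative interval in
 * 100ns units, hence the millisecond value below is multiplied by -10000. */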
765 LARGE_INTEGER timeout;
766 timeout.QuadPart = (fTimeout) ? eventInfo->u32TimeoutIn : 250;
767 timeout.QuadPart *= -10000;
768
769 NTSTATUS rc = STATUS_SUCCESS;
770
771 for (;;)
772 {
773 bool fEventPending = ASMAtomicBitTestAndClear(&pDevExt->u32Events, iBitOffset);
774 if (fEventPending)
775 {
776 eventInfo->u32EventFlagsOut = 1 << iBitOffset;
777 break;
778 }
779
780 rc = KeWaitForSingleObject (&pDevExt->keventNotification, Executive /** @todo UserRequest? */,
781 KernelMode, TRUE, &timeout);
782 dprintf(("VBOXGUEST_IOCTL_WAITEVENT: Wait returned %d -> event %x\n", rc, eventInfo->u32EventFlagsOut));
783
784 if (!fTimeout && rc == STATUS_TIMEOUT)
785 continue;
786
787 if (rc != STATUS_SUCCESS)
788 {
789 /* There was a timeout or wait was interrupted, etc. */
790 break;
791 }
792 }
793
794 dprintf (("u32EventFlagsOut = %#x\n", eventInfo->u32EventFlagsOut));
795 cbOut = sizeof(VBoxGuestWaitEventInfo);
796 break;
797 }
798
799 case VBOXGUEST_IOCTL_VMMREQUEST(sizeof(VMMDevRequestHeader)):
800 {
801 dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_VMMREQUEST\n"));
802
803#define CHECK_SIZE(s) \
804 if (pStack->Parameters.DeviceIoControl.OutputBufferLength < s) \
805 { \
806 dprintf(("VBoxGuest::VBoxGuestDeviceControl: OutputBufferLength %d < %d\n", \
807 pStack->Parameters.DeviceIoControl.OutputBufferLength, s)); \
808 Status = STATUS_BUFFER_TOO_SMALL; \
809 break; \
810 } \
811 if (pStack->Parameters.DeviceIoControl.InputBufferLength < s) { \
812 dprintf(("VBoxGuest::VBoxGuestDeviceControl: InputBufferLength %d < %d\n", \
813 pStack->Parameters.DeviceIoControl.InputBufferLength, s)); \
814 Status = STATUS_BUFFER_TOO_SMALL; \
815 break; \
816 }
817
818 /* get the request header */
819 CHECK_SIZE(sizeof(VMMDevRequestHeader));
820 VMMDevRequestHeader *requestHeader = (VMMDevRequestHeader *)pBuf;
821 if (!vmmdevGetRequestSize(requestHeader->requestType))
822 {
823 Status = STATUS_INVALID_PARAMETER;
824 break;
825 }
826 /* make sure the buffers suit the request */
827 CHECK_SIZE(vmmdevGetRequestSize(requestHeader->requestType));
828
829 /* just perform the request */
830 VMMDevRequestHeader *req = NULL;
831
832 int rc = VbglGRAlloc((VMMDevRequestHeader **)&req, requestHeader->size, requestHeader->requestType);
833
834 if (VBOX_SUCCESS(rc))
835 {
836 /* copy the request information */
837 memcpy((void*)req, (void*)pBuf, requestHeader->size);
838 rc = VbglGRPerform(req);
839
840 if (VBOX_FAILURE(rc) || VBOX_FAILURE(req->rc))
841 {
842 dprintf(("VBoxGuest::VBoxGuestDeviceControl VBOXGUEST_IOCTL_VMMREQUEST: error issuing request to VMMDev!"
843 "rc = %d, VMMDev rc = %Vrc\n", rc, req->rc));
844 Status = STATUS_UNSUCCESSFUL;
845 }
846 else
847 {
848 /* copy result */
849 memcpy((void*)pBuf, (void*)req, requestHeader->size);
850 cbOut = requestHeader->size;
851 }
852
853 VbglGRFree(req);
854 }
855 else
856 {
857 Status = STATUS_UNSUCCESSFUL;
858 }
859#undef CHECK_SIZE
860 break;
861 }
862
863 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
864 {
865 VBoxGuestFilterMaskInfo *maskInfo;
866
867 if (pStack->Parameters.DeviceIoControl.InputBufferLength < sizeof(VBoxGuestFilterMaskInfo)) {
868 dprintf (("VBoxGuest::VBoxGuestDeviceControl: InputBufferLength %d < %d\n",
869 pStack->Parameters.DeviceIoControl.InputBufferLength,
870 sizeof (VBoxGuestFilterMaskInfo)));
871 Status = STATUS_BUFFER_TOO_SMALL;
872 break;
873
874 }
875
876 maskInfo = (VBoxGuestFilterMaskInfo *) pBuf;
877 if (!CtlGuestFilterMask (maskInfo->u32OrMask, maskInfo->u32NotMask))
878 {
879 Status = STATUS_UNSUCCESSFUL;
880 }
881 break;
882 }
883
884#ifdef VBOX_HGCM
885 /* HGCM offers blocking IOCTLs just like waitevent and actually
886 * uses the same waiting code.
887 */
888 case VBOXGUEST_IOCTL_HGCM_CONNECT:
889 {
890 dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_HGCM_CONNECT\n"));
891
892 if (pStack->Parameters.DeviceIoControl.OutputBufferLength != sizeof(VBoxGuestHGCMConnectInfo))
893 {
894 dprintf(("VBoxGuest::VBoxGuestDeviceControl: OutputBufferLength %d != sizeof(VBoxGuestHGCMConnectInfo) %d\n",
895 pStack->Parameters.DeviceIoControl.OutputBufferLength, sizeof(VBoxGuestHGCMConnectInfo)));
896 Status = STATUS_INVALID_PARAMETER;
897 break;
898 }
899
900 if (pStack->Parameters.DeviceIoControl.InputBufferLength != sizeof(VBoxGuestHGCMConnectInfo)) {
901 dprintf(("VBoxGuest::VBoxGuestDeviceControl: InputBufferLength %d != sizeof(VBoxGuestHGCMConnectInfo) %d\n",
902 pStack->Parameters.DeviceIoControl.InputBufferLength, sizeof(VBoxGuestHGCMConnectInfo)));
903 Status = STATUS_INVALID_PARAMETER;
904 break;
905 }
906
907 VBoxGuestHGCMConnectInfo *ptr = (VBoxGuestHGCMConnectInfo *)pBuf;
908
909 /* If the request is processed asynchronously, execution will
910 * go to VBoxHGCMCallback. There it will wait for the request event, signalled from the IRQ.
911 * On IRQ arrival, VBoxHGCMCallback checks the request memory and, if the completion
912 * flag is set, returns.
913 */
914
915 dprintf(("a) ptr->u32ClientID = %d\n", ptr->u32ClientID));
916
917 int rc = VbglHGCMConnect (ptr, VBoxHGCMCallback, pDevExt, 0);
918
919 dprintf(("b) ptr->u32ClientID = %d\n", ptr->u32ClientID));
920
921 if (VBOX_FAILURE(rc))
922 {
923 dprintf(("VBOXGUEST_IOCTL_HGCM_CONNECT: vbox rc = %Vrc\n", rc));
924 Status = STATUS_UNSUCCESSFUL;
925 }
926 else
927 {
928 cbOut = pStack->Parameters.DeviceIoControl.OutputBufferLength;
929 }
930
931 } break;
932
933 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
934 {
935 dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_HGCM_DISCONNECT\n"));
936
937 if (pStack->Parameters.DeviceIoControl.OutputBufferLength != sizeof(VBoxGuestHGCMDisconnectInfo))
938 {
939 dprintf(("VBoxGuest::VBoxGuestDeviceControl: OutputBufferLength %d != sizeof(VBoxGuestHGCMDisconnectInfo) %d\n",
940 pStack->Parameters.DeviceIoControl.OutputBufferLength, sizeof(VBoxGuestHGCMDisconnectInfo)));
941 Status = STATUS_INVALID_PARAMETER;
942 break;
943 }
944
945 if (pStack->Parameters.DeviceIoControl.InputBufferLength != sizeof(VBoxGuestHGCMDisconnectInfo)) {
946 dprintf(("VBoxGuest::VBoxGuestDeviceControl: InputBufferLength %d != sizeof(VBoxGuestHGCMDisconnectInfo) %d\n",
947 pStack->Parameters.DeviceIoControl.InputBufferLength, sizeof(VBoxGuestHGCMDisconnectInfo)));
948 Status = STATUS_INVALID_PARAMETER;
949 break;
950 }
951
952 VBoxGuestHGCMDisconnectInfo *ptr = (VBoxGuestHGCMDisconnectInfo *)pBuf;
953
954 /* If the request is processed asynchronously, execution will
955 * go to VBoxHGCMCallback. There it will wait for the request event, signalled from the IRQ.
956 * On IRQ arrival, VBoxHGCMCallback checks the request memory and, if the completion
957 * flag is set, returns.
958 */
959
960 int rc = VbglHGCMDisconnect (ptr, VBoxHGCMCallback, pDevExt, 0);
961
962 if (VBOX_FAILURE(rc))
963 {
964 dprintf(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: vbox rc = %Vrc\n", rc));
965 Status = STATUS_UNSUCCESSFUL;
966 }
967 else
968 {
969 cbOut = pStack->Parameters.DeviceIoControl.OutputBufferLength;
970 }
971
972 } break;
973
974 case VBOXGUEST_IOCTL_HGCM_CALL(sizeof(VBoxGuestHGCMCallInfo)):
975 {
976 dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_HGCM_CALL\n"));
977
978 Status = vboxHGCMVerifyIOBuffers (pStack,
979 sizeof (VBoxGuestHGCMCallInfo));
980
981 if (Status != STATUS_SUCCESS)
982 {
983 dprintf(("VBoxGuest::VBoxGuestDeviceControl: invalid parameter. Status: %p\n", Status));
984 break;
985 }
986
987 VBoxGuestHGCMCallInfo *ptr = (VBoxGuestHGCMCallInfo *)pBuf;
988
989 int rc = VbglHGCMCall (ptr, VBoxHGCMCallback, pDevExt, 0);
990
991 if (VBOX_FAILURE(rc))
992 {
993 dprintf(("VBOXGUEST_IOCTL_HGCM_CALL: vbox rc = %Vrc\n", rc));
994 Status = STATUS_UNSUCCESSFUL;
995 }
996 else
997 {
998 cbOut = pStack->Parameters.DeviceIoControl.OutputBufferLength;
999 }
1000
1001 } break;
1002#endif /* VBOX_HGCM */
1003
1004#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
1005 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
1006 {
1007 if (!pDevExt->fVRDPEnabled)
1008 {
1009 KUSER_SHARED_DATA *pSharedUserData = (KUSER_SHARED_DATA *)KI_USER_SHARED_DATA;
1010
1011 pDevExt->fVRDPEnabled = TRUE;
1012 pDevExt->ulOldActiveConsoleId = pSharedUserData->ActiveConsoleId;
1013 pSharedUserData->ActiveConsoleId = 2;
1014 }
1015 break;
1016 }
1017
1018 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
1019 {
1020 if (pDevExt->fVRDPEnabled)
1021 {
1022 KUSER_SHARED_DATA *pSharedUserData = (KUSER_SHARED_DATA *)KI_USER_SHARED_DATA;
1023
1024 pDevExt->fVRDPEnabled = FALSE;
1025 pSharedUserData->ActiveConsoleId = pDevExt->ulOldActiveConsoleId;
1026 pDevExt->ulOldActiveConsoleId = 0;
1027 }
1028 break;
1029 }
1030#endif
1031
1032#ifdef VBOX_WITH_MANAGEMENT
1033 case VBOXGUEST_IOCTL_CTL_CHECK_BALLOON_MASK:
1034 {
1035 ULONG *pMemBalloonSize = (ULONG *) pBuf;
1036
1037 if (pStack->Parameters.DeviceIoControl.OutputBufferLength != sizeof(ULONG))
1038 {
1039 dprintf(("VBoxGuest::VBoxGuestDeviceControl: OutputBufferLength %d != sizeof(ULONG) %d\n",
1040 pStack->Parameters.DeviceIoControl.OutputBufferLength, sizeof(ULONG)));
1041 Status = STATUS_INVALID_PARAMETER;
1042 break;
1043 }
1044
1045 int rc = VBoxGuestQueryMemoryBalloon(pDevExt, pMemBalloonSize);
1046 if (VBOX_FAILURE(rc))
1047 {
1048 dprintf(("VBOXGUEST_IOCTL_CTL_CHECK_BALLOON: vbox rc = %Vrc\n", rc));
1049 Status = STATUS_UNSUCCESSFUL;
1050 }
1051 else
1052 {
1053 cbOut = pStack->Parameters.DeviceIoControl.OutputBufferLength;
1054 }
1055 break;
1056 }
1057#endif
1058
1059 default:
1060 Status = STATUS_INVALID_PARAMETER;
1061 break;
1062 }
1063
1064 pIrp->IoStatus.Status = Status;
1065 pIrp->IoStatus.Information = cbOut;
1066
1067 IoCompleteRequest(pIrp, IO_NO_INCREMENT);
1068
1069 dprintf(("VBoxGuest::VBoxGuestDeviceControl: returned cbOut=%d rc=%#x\n", cbOut, Status));
1070
1071 return Status;
1072}
1073
1074
1075/**
1076 * IRP_MJ_SYSTEM_CONTROL handler
1077 *
1078 * @returns NT status code
1079 * @param pDevObj Device object.
1080 * @param pIrp IRP.
1081 */
1082NTSTATUS VBoxGuestSystemControl(PDEVICE_OBJECT pDevObj, PIRP pIrp)
1083{
1084 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pDevObj->DeviceExtension;
1085
1086 dprintf(("VBoxGuest::VBoxGuestSystemControl\n"));
1087
1088 /* Always pass it on to the next driver. */
1089 IoSkipCurrentIrpStackLocation(pIrp);
1090
1091 return IoCallDriver(pDevExt->nextLowerDriver, pIrp);
1092}
1093
1094/**
1095 * IRP_MJ_SHUTDOWN handler
1096 *
1097 * @returns NT status code
1098 * @param pDevObj Device object.
1099 * @param pIrp IRP.
1100 */
1101NTSTATUS VBoxGuestShutdown(PDEVICE_OBJECT pDevObj, PIRP pIrp)
1102{
1103 VMMDevPowerStateRequest *req = NULL;
1104
1105 dprintf(("VBoxGuest::VBoxGuestShutdown\n"));
1106
1107 int rc = VbglGRAlloc ((VMMDevRequestHeader **)&req, sizeof (VMMDevPowerStateRequest), VMMDevReq_SetPowerStatus);
1108
1109 if (VBOX_SUCCESS(rc))
1110 {
1111 req->powerState = VMMDevPowerState_PowerOff;
1112
1113 rc = VbglGRPerform (&req->header);
1114
1115 if (VBOX_FAILURE(rc) || VBOX_FAILURE(req->header.rc))
1116 {
1117 dprintf(("VBoxGuest::PowerStateRequest: error performing request to VMMDev."
1118 "rc = %d, VMMDev rc = %Vrc\n", rc, req->header.rc));
1119 }
1120
1121 VbglGRFree (&req->header);
1122 }
1123
1124 return STATUS_SUCCESS;
1125}
1126
1127/**
1128 * Stub function for the functions we don't implement.
1129 *
1130 * @returns STATUS_NOT_SUPPORTED
1131 * @param pDevObj Device object.
1132 * @param pIrp IRP.
1133 */
1134NTSTATUS VBoxGuestNotSupportedStub(PDEVICE_OBJECT pDevObj, PIRP pIrp)
1135{
1136 dprintf(("VBoxGuest::VBoxGuestNotSupportedStub\n"));
1137 pDevObj = pDevObj;
1138
1139 pIrp->IoStatus.Information = 0;
1140 pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED;
1141 IoCompleteRequest(pIrp, IO_NO_INCREMENT);
1142
1143 return STATUS_NOT_SUPPORTED;
1144}
1145
1146/**
1147 * DPC handler
1148 *
1149 * @param dpc DPC descriptor.
1150 * @param pDevObj Device object.
1151 * @param irp I/O request packet.
1152 * @param context Context specific pointer.
1153 */
1154VOID VBoxGuestDpcHandler(PKDPC dpc, PDEVICE_OBJECT pDevObj,
1155 PIRP irp, PVOID context)
1156{
1157 /* Unblock handlers waiting for arrived events.
1158 *
1159 * Events are very low-level things; there is one event flag (one or more bits)
1160 * for each event. Each event is processed by exactly one handler.
1161 *
1162 * Assume that we trust the additions and that other drivers will
1163 * handle their respective events without trying to fetch all events.
1164 *
1165 * In any case the design ensures that wrong event processing will affect only the guest.
1166 *
1167 * An event handler calls the VMMDev IOCTL to wait for an event.
1168 * It supplies an event mask. The IOCTL blocks on the EventNotification.
1169 * Here we just signal the EventNotification to all waiting
1170 * threads; the IOCTL handler analyzes the events and either
1171 * returns to the caller or blocks again.
1172 *
1173 * If we do not have too many events this is a simple and good
1174 * approach. The other way is to have as many Event objects as there are callers
1175 * and wake up only the callers waiting for the specific event.
1176 *
1177 * Now with the 'wake up all' approach we probably do not need the DPC
1178 * handler and can signal the event directly from the ISR.
1179 *
1180 */
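/* KePulseEvent releases every thread currently waiting on keventNotification and
 * immediately resets the event, which is the 'wake up all' behaviour described above. */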
1181
1182 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pDevObj->DeviceExtension;
1183
1184 dprintf(("VBoxGuest::VBoxGuestDpcHandler\n"));
1185
1186 KePulseEvent(&pDevExt->keventNotification, 0, FALSE);
1187
1188}
1189
1190/**
1191 * ISR handler
1192 *
1193 * @return BOOLEAN indicates whether the IRQ came from us (TRUE) or not (FALSE)
1194 * @param interrupt Interrupt that was triggered.
1195 * @param serviceContext Context specific pointer.
1196 */
1197BOOLEAN VBoxGuestIsrHandler(PKINTERRUPT interrupt, PVOID serviceContext)
1198{
1199 NTSTATUS rc;
1200 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)serviceContext;
1201 BOOLEAN fIRQTaken = FALSE;
1202
1203 dprintf(("VBoxGuest::VBoxGuestIsrHandler haveEvents = %d\n",
1204 pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents));
1205
1206 /*
1207 * now we have to find out whether it was our IRQ. Read the event mask
1208 * from our device to see if there are any pending events
1209 */
1210 if (pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents)
1211 {
1212 /* Acknowledge events. */
1213 VMMDevEvents *req = pDevExt->irqAckEvents;
1214
1215 rc = VbglGRPerform (&req->header);
1216 if (VBOX_SUCCESS(rc) && VBOX_SUCCESS(req->header.rc))
1217 {
1218 dprintf(("VBoxGuest::VBoxGuestIsrHandler: acknowledge events succeeded %#x\n",
1219 req->events));
1220
1221 ASMAtomicOrU32((uint32_t *)&pDevExt->u32Events, req->events);
1222 IoRequestDpc(pDevExt->deviceObject, pDevExt->currentIrp, NULL);
1223 }
1224 else
1225 {
1226 /* This cannot actually happen; it is a sign of a serious problem. */
1227 dprintf(("VBoxGuest::VBoxGuestIsrHandler: "
1228 "acknowledge events failed rc = %d, header rc = %d\n",
1229 rc, req->header.rc));
1230 }
1231
1232 /* Mark IRQ as taken, there were events for us. */
1233 fIRQTaken = TRUE;
1234 }
1235
1236 return fIRQTaken;
1237}
1238
1239/**
1240 * Worker thread to do periodic things such as synchronize the
1241 * system time and notify other drivers of events.
1242 *
1243 * @param pDevExt device extension pointer
1244 */
1245VOID vboxWorkerThread(PVOID context)
1246{
1247 PVBOXGUESTDEVEXT pDevExt;
1248
1249 pDevExt = (PVBOXGUESTDEVEXT)context;
1250 dprintf(("VBoxGuest::vboxWorkerThread entered\n"));
1251
1252 VMMDevReqHostTime *req = NULL;
1253
1254 int rc = VbglGRAlloc ((VMMDevRequestHeader **)&req, sizeof (VMMDevReqHostTime), VMMDevReq_GetHostTime);
1255
1256 if (VBOX_FAILURE(rc))
1257 {
1258 dprintf(("VBoxGuest::vboxWorkerThread: could not allocate request buffer, exiting rc = %d!\n", rc));
1259 return;
1260 }
1261
1262 /* perform the hypervisor address space reservation */
1263 reserveHypervisorMemory(pDevExt);
1264
1265 do
1266 {
1267 /*
1268 * Do the time sync
1269 */
1270 {
1271 LARGE_INTEGER systemTime;
1272 #define TICKSPERSEC 10000000
1273 #define TICKSPERMSEC 10000
1274 #define SECSPERDAY 86400
1275 #define SECS_1601_TO_1970 ((369 * 365 + 89) * (uint64_t)SECSPERDAY)
1276 #define TICKS_1601_TO_1970 (SECS_1601_TO_1970 * TICKSPERSEC)
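/* Derivation of the epoch offset: Windows system time counts 100ns ticks since
 * 1601-01-01, while the host reports UTC milliseconds since 1970-01-01. The 369
 * years in between contain 89 leap days (92 years divisible by 4 minus the
 * non-leap centuries 1700, 1800 and 1900), so SECS_1601_TO_1970 = (369 * 365 + 89)
 * * SECSPERDAY, and the conversion below is hostTime * TICKSPERMSEC + TICKS_1601_TO_1970. */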
1277
1278
1279 req->header.rc = VERR_GENERAL_FAILURE;
1280
1281 rc = VbglGRPerform (&req->header);
1282
1283 if (VBOX_SUCCESS(rc) && VBOX_SUCCESS(req->header.rc))
1284 {
1285 uint64_t hostTime = req->time;
1286
1287 // Windows was originally designed in 1601...
1288 systemTime.QuadPart = hostTime * (uint64_t)TICKSPERMSEC + (uint64_t)TICKS_1601_TO_1970;
1289 dprintf(("VBoxGuest::vboxWorkerThread: synching time with host time (msec/UTC): %llu\n", hostTime));
1290 ZwSetSystemTime(&systemTime, NULL);
1291 }
1292 else
1293 {
1294 dprintf(("VBoxGuest::PowerStateRequest: error performing request to VMMDev."
1295 "rc = %d, VMMDev rc = %Vrc\n", rc, req->header.rc));
1296 }
1297 }
1298
1299 /*
1300 * Go asleep unless we're supposed to terminate
1301 */
1302 if (!pDevExt->stopThread)
1303 {
1304 ULONG secWait = 60;
1305 dprintf(("VBoxGuest::vboxWorkerThread: waiting for %u seconds...\n", secWait));
1306 LARGE_INTEGER dueTime;
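/* Relative wait: a negative due time in 100ns units, i.e. -10000 * 1000 * secWait
 * corresponds to secWait seconds. */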
1307 dueTime.QuadPart = -10000 * 1000 * (int)secWait;
1308 if (KeWaitForSingleObject(&pDevExt->workerThreadRequest, Executive,
1309 KernelMode, FALSE, &dueTime) == STATUS_SUCCESS)
1310 {
1311 KeResetEvent(&pDevExt->workerThreadRequest);
1312 }
1313 }
1314 } while (!pDevExt->stopThread);
1315
1316 dprintf(("VBoxGuest::vboxWorkerThread: we've been asked to terminate!\n"));
1317
1318 /* free our request buffer */
1319 VbglGRFree (&req->header);
1320
1321 if (pDevExt->workerThread)
1322 {
1323 ObDereferenceObject(pDevExt->workerThread);
1324 pDevExt->workerThread = NULL;
1325 }
1326 dprintf(("VBoxGuest::vboxWorkerThread: now really gone!\n"));
1327}
1328
1329/**
1330 * Create driver worker threads
1331 *
1332 * @returns NTSTATUS NT status code
1333 * @param pDevExt VBoxGuest device extension
1334 */
1335NTSTATUS createThreads(PVBOXGUESTDEVEXT pDevExt)
1336{
1337 NTSTATUS rc;
1338 HANDLE threadHandle;
1339 OBJECT_ATTRIBUTES objAttributes;
1340
1341 dprintf(("VBoxGuest::createThreads\n"));
1342
1343 // first setup the request semaphore
1344 KeInitializeEvent(&pDevExt->workerThreadRequest, SynchronizationEvent, FALSE);
1345
1346// the API has slightly changed after NT4
1347#ifdef TARGET_NT4
1348#ifdef OBJ_KERNEL_HANDLE
1349#undef OBJ_KERNEL_HANDLE
1350#endif
1351#define OBJ_KERNEL_HANDLE 0
1352#endif
1353
1354 /*
1355 * The worker thread
1356 */
1357 InitializeObjectAttributes(&objAttributes,
1358 NULL,
1359 OBJ_KERNEL_HANDLE,
1360 NULL,
1361 NULL);
1362
1363 rc = PsCreateSystemThread(&threadHandle,
1364 THREAD_ALL_ACCESS,
1365 &objAttributes,
1366 (HANDLE)0L,
1367 NULL,
1368 vboxWorkerThread,
1369 pDevExt);
1370 dprintf(("VBoxGuest::createThreads: PsCreateSystemThread for worker thread returned: 0x%x\n", rc));
1371 rc = ObReferenceObjectByHandle(threadHandle,
1372 THREAD_ALL_ACCESS,
1373 NULL,
1374 KernelMode,
1375 (PVOID*)&pDevExt->workerThread,
1376 NULL);
1377 ZwClose(threadHandle);
1378
1379 /*
1380 * The idle thread
1381 */
1382#if 0 /// @todo Windows "sees" that time is lost and reports 100% usage
1383 rc = PsCreateSystemThread(&threadHandle,
1384 THREAD_ALL_ACCESS,
1385 &objAttributes,
1386 (HANDLE)0L,
1387 NULL,
1388 vboxIdleThread,
1389 pDevExt);
1390 dprintf(("VBoxGuest::createThreads: PsCreateSystemThread for idle thread returned: 0x%x\n", rc));
1391 rc = ObReferenceObjectByHandle(threadHandle,
1392 THREAD_ALL_ACCESS,
1393 NULL,
1394 KernelMode,
1395 (PVOID*)&pDevExt->idleThread,
1396 NULL);
1397 ZwClose(threadHandle);
1398#endif
1399
1400 return rc;
1401}
1402
1403/**
1404 * Helper routine to reserve address space for the hypervisor
1405 * and communicate its position.
1406 *
1407 * @param pDevExt Device extension structure.
1408 */
1409VOID reserveHypervisorMemory(PVBOXGUESTDEVEXT pDevExt)
1410{
1411 // @todo rc handling
1412 uint32_t hypervisorSize;
1413
1414 VMMDevReqHypervisorInfo *req = NULL;
1415
1416 int rc = VbglGRAlloc ((VMMDevRequestHeader **)&req, sizeof (VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
1417
1418 if (VBOX_SUCCESS(rc))
1419 {
1420 req->hypervisorStart = 0;
1421 req->hypervisorSize = 0;
1422
1423 rc = VbglGRPerform (&req->header);
1424
1425 if (VBOX_SUCCESS(rc) && VBOX_SUCCESS(req->header.rc))
1426 {
1427 hypervisorSize = req->hypervisorSize;
1428
1429 if (!hypervisorSize)
1430 {
1431 dprintf(("VBoxGuest::reserveHypervisorMemory: host returned 0, not doing anything\n"));
1432 return;
1433 }
1434
1435 dprintf(("VBoxGuest::reserveHypervisorMemory: host wants %u bytes of hypervisor address space\n", hypervisorSize));
1436
1437 // Map fictitious physical memory into the kernel address space to reserve virtual
1438 // address space. This API does not perform any checks but just allocates the
1439 // PTEs (which we don't really need/want, but there isn't any other clean method).
1440 // The hypervisor only accepts 4MB-aligned virtual addresses, so we have to allocate
1441 // 4MB more than we actually need in order to guarantee that. Maybe we
1442 // can come up with a less lavish algorithm later on.
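// For example, if the host asks for hypervisorSize = 8MB and MmMapIoSpace happens to
// return 0xE0123000 (a hypothetical address), the mapping covers 8MB + 4MB; rounding
// the start up to the next 4MB boundary (0xE0400000) still leaves at least 8MB inside
// the mapped range, whatever the initial misalignment.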
1443 PHYSICAL_ADDRESS physAddr;
1444 physAddr.QuadPart = HYPERVISOR_PHYSICAL_START;
1445 pDevExt->hypervisorMappingSize = hypervisorSize + 0x400000;
1446 pDevExt->hypervisorMapping = MmMapIoSpace(physAddr,
1447 pDevExt->hypervisorMappingSize,
1448 MmNonCached);
1449 if (!pDevExt->hypervisorMapping)
1450 {
1451 dprintf(("VBoxGuest::reserveHypervisorMemory: MmMapIoSpace returned NULL!\n"));
1452 return;
1453 }
1454
1455 dprintf(("VBoxGuest::reserveHypervisorMemory: MmMapIoSpace returned %p\n", pDevExt->hypervisorMapping));
1456 dprintf(("VBoxGuest::reserveHypervisorMemory: communicating %p to host\n",
1457 RT_ALIGN_P(pDevExt->hypervisorMapping, 0x400000)));
1458
1459 /* align at 4MB */
1460 req->hypervisorStart = (RTGCPTR)RT_ALIGN_P(pDevExt->hypervisorMapping, 0x400000);
1461
1462 req->header.requestType = VMMDevReq_SetHypervisorInfo;
1463 req->header.rc = VERR_GENERAL_FAILURE;
1464
1465 /* issue request */
1466 rc = VbglGRPerform (&req->header);
1467
1468 if (VBOX_FAILURE(rc) || VBOX_FAILURE(req->header.rc))
1469 {
1470 dprintf(("VBoxGuest::reserveHypervisorMemory: error communicating physical address to VMMDev!"
1471 "rc = %d, VMMDev rc = %Vrc\n", rc, req->header.rc));
1472 }
1473 }
1474 else
1475 {
1476 dprintf(("VBoxGuest::reserveHypervisorMemory: request failed with rc %d, VMMDev rc = %Vrc\n", rc, req->header.rc));
1477 }
1478 VbglGRFree (&req->header);
1479 }
1480
1481 return;
1482}
1483
1484/**
1485 * Helper function to unregister a virtual address space mapping
1486 *
1487 * @param pDevExt Device extension
1488 */
1489VOID unreserveHypervisorMemory(PVBOXGUESTDEVEXT pDevExt)
1490{
1491 VMMDevReqHypervisorInfo *req = NULL;
1492
1493 int rc = VbglGRAlloc ((VMMDevRequestHeader **)&req, sizeof (VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
1494
1495 if (VBOX_SUCCESS(rc))
1496 {
1497 /* tell the hypervisor that the mapping is no longer available */
1498
1499 req->hypervisorStart = 0;
1500 req->hypervisorSize = 0;
1501
1502 rc = VbglGRPerform (&req->header);
1503
1504 if (VBOX_FAILURE(rc) || VBOX_FAILURE(req->header.rc))
1505 {
1506 dprintf(("VBoxGuest::unreserveHypervisorMemory: error communicating physical address to VMMDev!"
1507 "rc = %d, VMMDev rc = %Vrc\n", rc, req->header.rc));
1508 }
1509
1510 VbglGRFree (&req->header);
1511 }
1512
1513 if (!pDevExt->hypervisorMapping)
1514 {
1515 dprintf(("VBoxGuest::unreserveHypervisorMemory: there is no mapping, returning\n"));
1516 return;
1517 }
1518
1519 // unmap the fictitious IO space
1520 MmUnmapIoSpace(pDevExt->hypervisorMapping, pDevExt->hypervisorMappingSize);
1521 dprintf(("VBoxGuest::unreserveHypervisorMemory: done\n"));
1522}
1523
1524/**
1525 * Idle thread that runs at the lowest priority possible
1526 * and whenever scheduled, makes a VMMDev call to give up
1527 * timeslices. This is to prevent Windows from thinking that
1528 * nothing is happening on the machine and doing stupid things
1529 * that would steal time from other VMs it doesn't know of.
1530 *
1531 * @param pDevExt device extension pointer
1532 */
1533VOID vboxIdleThread(PVOID context)
1534{
1535 PVBOXGUESTDEVEXT pDevExt;
1536
1537 pDevExt = (PVBOXGUESTDEVEXT)context;
1538 dprintf(("VBoxGuest::vboxIdleThread entered\n"));
1539
1540 /* set priority as low as possible */
1541 KeSetPriorityThread(KeGetCurrentThread(), LOW_PRIORITY);
1542
1543 /* allocate VMMDev request structure */
1544 VMMDevReqIdle *req;
1545 int rc = VbglGRAlloc((VMMDevRequestHeader **)&req, sizeof (VMMDevReqIdle), VMMDevReq_Idle);
1546 if (VBOX_FAILURE(rc))
1547 {
1548 dprintf(("VBoxGuest::vboxIdleThread: error %Vrc allocating request structure!\n", rc));
1549 return;
1550 }
1551
1552 do
1553 {
1554 //dprintf(("VBoxGuest: performing idle request..\n"));
1555 /* perform idle request */
1556 VbglGRPerform(&req->header);
1557
1558 } while (!pDevExt->stopThread);
1559
1560 VbglGRFree(&req->header);
1561
1562 dprintf(("VBoxGuest::vboxIdleThread leaving\n"));
1563}