VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuestLib/HGCMInternal.cpp@40415

Last change on this file since 40415 was 40359, checked in by vboxsync, 13 years ago

VBoxGuest: disable page list HGCM transfers for Locked buffers to prevent crashes on Win guests

/* $Revision: 40359 $ */
/** @file
 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

/* Entire file is ifdef'ed with VBGL_VBOXGUEST */
#ifdef VBGL_VBOXGUEST

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HGCM

#include "VBGLInternal.h"
#include <iprt/alloca.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/time.h>

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The max parameter buffer size for a user request. */
#define VBGLR0_MAX_HGCM_USER_PARM       (24*_1M)
/** The max parameter buffer size for a kernel request. */
#define VBGLR0_MAX_HGCM_KERNEL_PARM     (16*_1M)
#ifdef RT_OS_LINUX
/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
 *  side effects. */
# define USE_BOUNCE_BUFFERS
#endif


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 */
struct VbglR0ParmInfo
{
    uint32_t cLockBufs;
    struct
    {
        uint32_t    iParm;
        RTR0MEMOBJ  hObj;
#ifdef USE_BOUNCE_BUFFERS
        void       *pvSmallBuf;
#endif
    } aLockBufs[10];
};


/* These functions can only be used by VBoxGuest. */

DECLR0VBGL(int) VbglR0HGCMInternalConnect(VBoxGuestHGCMConnectInfo *pConnectInfo,
                                          PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VMMDevHGCMConnect *pHGCMConnect;
    int rc;

    if (!pConnectInfo || !pfnAsyncCallback)
        return VERR_INVALID_PARAMETER;

    pHGCMConnect = NULL;

    /* Allocate request */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pHGCMConnect, sizeof(VMMDevHGCMConnect), VMMDevReq_HGCMConnect);

    if (RT_SUCCESS(rc))
    {
        /* Initialize request memory */
        pHGCMConnect->header.fu32Flags = 0;

        memcpy(&pHGCMConnect->loc, &pConnectInfo->Loc, sizeof(HGCMServiceLocation));
        pHGCMConnect->u32ClientID = 0;

        /* Issue request */
        rc = VbglGRPerform(&pHGCMConnect->header.header);

        if (RT_SUCCESS(rc))
        {
            /* Check if host decides to process the request asynchronously. */
            if (rc == VINF_HGCM_ASYNC_EXECUTE)
            {
                /* Wait for request completion interrupt notification from host */
                pfnAsyncCallback(&pHGCMConnect->header, pvAsyncData, u32AsyncData);
            }

            pConnectInfo->result = pHGCMConnect->header.result;

            if (RT_SUCCESS(pConnectInfo->result))
                pConnectInfo->u32ClientID = pHGCMConnect->u32ClientID;
        }

        VbglGRFree(&pHGCMConnect->header.header);
    }

    return rc;
}

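
/*
 * For illustration: a minimal sketch of how a VBoxGuest-internal caller might
 * drive the connect API above.  The service name, the waiter routine and its
 * polling behavior are assumptions for the example; a real caller supplies a
 * driver-specific callback that blocks on the host interrupt which sets
 * VBOX_HGCM_REQ_DONE, rather than polling.
 *
 * @code
 *  static DECLCALLBACK(int) exampleWaiter(VMMDevHGCMRequestHeader *pHeader, void *pvData, uint32_t u32Data)
 *  {
 *      NOREF(pvData); NOREF(u32Data);
 *      while (!(pHeader->fu32Flags & VBOX_HGCM_REQ_DONE))  // sketch only; real code waits on an event
 *          RTThreadSleep(1);
 *      return VINF_SUCCESS;
 *  }
 *
 *  static int exampleConnect(uint32_t *pu32ClientID)
 *  {
 *      VBoxGuestHGCMConnectInfo Info;
 *      RT_ZERO(Info);
 *      Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
 *      strcpy(Info.Loc.u.host.achName, "VBoxSomeSvc");     // hypothetical service name
 *      int rc = VbglR0HGCMInternalConnect(&Info, exampleWaiter, NULL /*pvAsyncData*/, 0 /*u32AsyncData*/);
 *      if (RT_SUCCESS(rc))
 *          rc = Info.result;                               // the HGCM-level status
 *      if (RT_SUCCESS(rc))
 *          *pu32ClientID = Info.u32ClientID;
 *      return rc;
 *  }
 * @endcode
 */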

DECLR0VBGL(int) VbglR0HGCMInternalDisconnect(VBoxGuestHGCMDisconnectInfo *pDisconnectInfo,
                                             PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VMMDevHGCMDisconnect *pHGCMDisconnect;
    int rc;

    if (!pDisconnectInfo || !pfnAsyncCallback)
        return VERR_INVALID_PARAMETER;

    pHGCMDisconnect = NULL;

    /* Allocate request */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof(VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);

    if (RT_SUCCESS(rc))
    {
        /* Initialize request memory */
        pHGCMDisconnect->header.fu32Flags = 0;

        pHGCMDisconnect->u32ClientID = pDisconnectInfo->u32ClientID;

        /* Issue request */
        rc = VbglGRPerform(&pHGCMDisconnect->header.header);

        if (RT_SUCCESS(rc))
        {
            /* Check if host decides to process the request asynchronously. */
            if (rc == VINF_HGCM_ASYNC_EXECUTE)
            {
                /* Wait for request completion interrupt notification from host */
                pfnAsyncCallback(&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
            }

            pDisconnectInfo->result = pHGCMDisconnect->header.result;
        }

        VbglGRFree(&pHGCMDisconnect->header.header);
    }

    return rc;
}


/**
 * Preprocesses the HGCM call, validating and locking/buffering parameters.
 *
 * @returns VBox status code.
 *
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Where to store info about the locked/buffered
 *                          parameters.
 * @param   pcbExtra        Where to return the extra request space needed for
 *                          physical page lists.
 */
static int vbglR0HGCMInternalPreprocessCall(VBoxGuestHGCMCallInfo const *pCallInfo, uint32_t cbCallInfo,
                                            bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
{
    HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    uint32_t    cParms = pCallInfo->cParms;
    uint32_t    iParm;
    uint32_t    cb;

    /*
     * Lock down any linear buffers so we can get their addresses
     * and figure out how much extra storage we need for page lists.
     *
     * Note! With kernel mode users we can be assertive. For user mode users
     *       we should just (debug) log it and fail without any fanfare.
     */
    *pcbExtra = 0;
    pParmInfo->cLockBufs = 0;
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
                Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
                break;

            case VMMDevHGCMParmType_64bit:
                Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm, pSrcParm->u.value64));
                break;

            case VMMDevHGCMParmType_PageList:
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                cb = pSrcParm->u.PageList.size;
                if (cb)
                {
                    uint32_t            off = pSrcParm->u.PageList.offset;
                    HGCMPageListInfo   *pPgLst;
                    uint32_t            cPages;
                    uint32_t            u32;

                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    AssertMsgReturn(   off >= pCallInfo->cParms * sizeof(HGCMFunctionParameter)
                                    && off <= cbCallInfo - sizeof(HGCMPageListInfo),
                                    ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, pCallInfo->cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);

                    pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
                    cPages = pPgLst->cPages;
                    u32    = RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]) + off;
                    AssertMsgReturn(u32 <= cbCallInfo,
                                    ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("%#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
                    u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
                    AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
                    AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
                          iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
                    u32 = cPages;
                    while (u32-- > 0)
                    {
                        Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
                        AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
                                        ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
                                        VERR_INVALID_PARAMETER);
                    }

                    *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[pPgLst->cPages]);
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                /* Always treat locked buffers as if !VBGLR0_CAN_USE_PHYS_PAGE_LIST():
                 * otherwise we would create an RTR0MEMOBJ and lock the pages a second
                 * time, which leads to undefined behavior and possible BSODs on Windows. */
                //if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    cb = pSrcParm->u.Pointer.size;
                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    if (cb != 0)
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
                    else
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                cb = pSrcParm->u.Pointer.size;
                if (cb != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void       *pvSmallBuf = NULL;
#endif
                    uint32_t    iLockBuf   = pParmInfo->cLockBufs;
                    RTR0MEMOBJ  hObj;
                    int         rc;
                    uint32_t    fAccess    =    pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
                                             || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
                                           ? RTMEM_PROT_READ
                                           : RTMEM_PROT_READ | RTMEM_PROT_WRITE;

                    AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
                    if (!fIsUser)
                    {
                        AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                        VERR_OUT_OF_RANGE);
                        rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                    }
                    else if (cb > VBGLR0_MAX_HGCM_USER_PARM)
                    {
                        Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
                             pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
                             cb, VBGLR0_MAX_HGCM_USER_PARM));
                        return VERR_OUT_OF_RANGE;
                    }
                    else
                    {
#ifndef USE_BOUNCE_BUFFERS
                        rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));

#else  /* USE_BOUNCE_BUFFERS */
                        /*
                         * This is a bit massive, but we don't want to waste a
                         * whole page for a 3 byte string buffer (guest props).
                         *
                         * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
                         * the system is using some power of two allocator.
                         */
                        /** @todo A more efficient strategy would be to combine buffers. However it
                         *        is probably going to be more massive than the current code, so
                         *        it can wait till later. */
                        bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
                                    && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
                        if (cb <= PAGE_SIZE / 2 - 16)
                        {
                            pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
                            if (RT_UNLIKELY(!pvSmallBuf))
                                return VERR_NO_MEMORY;
                            if (fCopyIn)
                            {
                                rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTMemTmpFree(pvSmallBuf);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
                            if (RT_FAILURE(rc))
                            {
                                RTMemTmpFree(pvSmallBuf);
                                Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
                                     rc, pvSmallBuf, cb));
                                return rc;
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
                        }
                        else
                        {
                            rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
                            if (RT_FAILURE(rc))
                                return rc;
                            if (!fCopyIn)
                                memset(RTR0MemObjAddress(hObj), '\0', cb);
                            else
                            {
                                rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTR0MemObjFree(hObj, false /*fFreeMappings*/);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                        }
#endif /* USE_BOUNCE_BUFFERS */
                    }

                    pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
                    pParmInfo->aLockBufs[iLockBuf].hObj  = hObj;
#ifdef USE_BOUNCE_BUFFERS
                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
#endif
                    pParmInfo->cLockBufs = iLockBuf + 1;

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                    {
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                break;

            default:
                return VERR_INVALID_PARAMETER;
        }
    }

    return VINF_SUCCESS;
}
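
/*
 * For illustration: a sketch of the VMMDevHGCMParmType_PageList layout the
 * validation above expects.  The HGCMPageListInfo block lives in the same
 * buffer as the call info, 'offset' bytes from the start of pCallInfo and
 * after the parameter array.  The helper name and the single-page assumption
 * (offFirstPage + cb <= PAGE_SIZE) are hypothetical.
 *
 * @code
 *  static int exampleBuildPageListCall(RTHCPHYS HCPhysPage, uint32_t off, uint32_t cb, VBoxGuestHGCMCallInfo **ppInfo)
 *  {
 *      uint32_t const offPgLst = sizeof(VBoxGuestHGCMCallInfo) + 1 * sizeof(HGCMFunctionParameter);
 *      uint32_t const cbBuf    = offPgLst + RT_OFFSETOF(HGCMPageListInfo, aPages[1]);
 *      VBoxGuestHGCMCallInfo *pInfo = (VBoxGuestHGCMCallInfo *)RTMemTmpAllocZ(cbBuf);
 *      if (!pInfo)
 *          return VERR_NO_TMP_MEMORY;
 *
 *      HGCMFunctionParameter *pParm  = VBOXGUEST_HGCM_CALL_PARMS(pInfo);
 *      HGCMPageListInfo      *pPgLst = (HGCMPageListInfo *)((uint8_t *)pInfo + offPgLst);
 *      pInfo->cParms            = 1;
 *      pParm->type              = VMMDevHGCMParmType_PageList;
 *      pParm->u.PageList.size   = cb;          // bytes covered by the page list
 *      pParm->u.PageList.offset = offPgLst;    // relative to pInfo, as checked above
 *      pPgLst->flags            = VBOX_HGCM_F_PARM_DIRECTION_BOTH;
 *      pPgLst->offFirstPage     = off;         // must be < PAGE_SIZE
 *      pPgLst->cPages           = 1;           // must equal RT_ALIGN_32(off + cb, PAGE_SIZE) >> PAGE_SHIFT
 *      pPgLst->aPages[0]        = HCPhysPage;  // page-aligned physical address
 *
 *      *ppInfo = pInfo;
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */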


/**
 * Translates a locked linear address type to the normal type.
 * The locked types are only for the guest side and not handled by the host.
 *
 * @returns normal linear address type.
 * @param   enmType     The type.
 */
static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
{
    switch (enmType)
    {
        case VMMDevHGCMParmType_LinAddr_Locked_In:
            return VMMDevHGCMParmType_LinAddr_In;
        case VMMDevHGCMParmType_LinAddr_Locked_Out:
            return VMMDevHGCMParmType_LinAddr_Out;
        case VMMDevHGCMParmType_LinAddr_Locked:
            return VMMDevHGCMParmType_LinAddr;
        default:
            return enmType;
    }
}


/**
 * Translates linear address types to page list direction flags.
 *
 * @returns page list flags.
 * @param   enmType     The type.
 */
static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
{
    switch (enmType)
    {
        case VMMDevHGCMParmType_LinAddr_In:
        case VMMDevHGCMParmType_LinAddr_Locked_In:
            return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;

        case VMMDevHGCMParmType_LinAddr_Out:
        case VMMDevHGCMParmType_LinAddr_Locked_Out:
            return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;

        default: AssertFailed();
        case VMMDevHGCMParmType_LinAddr:
        case VMMDevHGCMParmType_LinAddr_Locked:
            return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
    }
}


/**
 * Initializes the call request that we're sending to the host.
 *
 * @param   pHGCMCall       The request to initialize.
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Parameter locking/buffering info from the
 *                          preprocessing pass.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, VBoxGuestHGCMCallInfo const *pCallInfo,
                                       uint32_t cbCallInfo, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter       *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t    cParms   = pCallInfo->cParms;
    uint32_t    offExtra = (uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall;
    uint32_t    iLockBuf = 0;
    uint32_t    iParm;


    /*
     * The call request headers.
     */
    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result    = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms      = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                pDstParm->type = VMMDevHGCMParmType_PageList;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo       *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const          cPages    = pSrcPgLst->cPages;
                    uint32_t                iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags            = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage     = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages           = cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                /* Always treat locked buffers as if !VBGLR0_CAN_USE_PHYS_PAGE_LIST():
                 * otherwise we would create an RTR0MEMOBJ and lock the pages a second
                 * time, which leads to undefined behavior and possible BSODs on Windows. */
//              if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    *pDstParm = *pSrcParm;
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void      *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ hObj       = pParmInfo->aLockBufs[iLockBuf].hObj;
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                    {
                        HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const      cPages    = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t            iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size   = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags            = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK;
                        pDstPgLst->cPages           = cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size         = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
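
/*
 * For reference, the request produced above is one contiguous allocation
 * (see the VbglGRAlloc call in VbglR0HGCMInternalCall further down):
 *
 * @verbatim
 *   offset 0                 : VMMDevHGCMCall header
 *   sizeof(VMMDevHGCMCall)   : HGCMFunctionParameter[cParms]
 *   offExtra (initial value) : HGCMPageListInfo blocks, if any, each referenced
 *                              by a parameter's u.PageList.offset
 *   total size               : sizeof(VMMDevHGCMCall)
 *                              + cParms * sizeof(HGCMFunctionParameter) + cbExtra
 * @endverbatim
 */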


/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 * @param   pfLeakIt            Where to return the leak it / free it
 *                              indicator. Cancellation fun.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    Log(("calling VbglGRPerform\n"));
    rc = VbglGRPerform(&pHGCMCall->header.header);
    Log(("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try to cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             *        waiting in case of a completion race. If it wasn't for WINNT having its own
             *        version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglGRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                pCancelReq->physReqToCancel = VbglPhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglGRPerform(&pCancelReq->header);
                VbglGRFree(&pCancelReq->header);
            }
#if 1 /** @todo ADDVER: Remove this on next minor version change. */
            if (rc2 == VERR_NOT_IMPLEMENTED)
            {
                /* host is too old, or we're out of heap. */
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
                pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                rc2 = VbglGRPerform(&pHGCMCall->header.header);
                if (rc2 == VERR_INVALID_PARAMETER)
                    rc2 = VERR_NOT_FOUND;
                else if (RT_SUCCESS(rc))
                    RTThreadSleep(1);
            }
#endif
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start       = RTTimeSystemMilliTS();
                uint32_t cMilliesToWait = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed       = 0;
                if (rc2 != VERR_NOT_FOUND)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                }
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                do
                {
                    ASMCompilerBarrier(); /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier(); /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}
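
/*
 * To make the "not necessarily the call" remark in the function comment
 * concrete, a sketch of how the two status levels relate (the caller below
 * does exactly this, reading the service status via
 * vbglR0HGCMInternalCopyBackResult):
 *
 * @code
 *  bool fLeakIt;
 *  rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
 *  if (RT_SUCCESS(rc))                         // transport level: the request reached the host and completed
 *      Log(("service status: %Rrc\n", pHGCMCall->header.result)); // service level: what the HGCM service returned
 *  if (!fLeakIt)                               // never free a request the host may still be writing to
 *      VbglGRFree(&pHGCMCall->header.header);
 * @endcode
 */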


/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo           Call info structure to update.
 * @param   pHGCMCall           HGCM call request.
 * @param   pParmInfo           Parameter locking/buffering info.
 * @param   fIsUser             Is it a user (true) or kernel request.
 * @param   rc                  The current result code. Passed along to
 *                              preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(VBoxGuestHGCMCallInfo *pCallInfo, VMMDevHGCMCall const *pHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter       *pDstParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    uint32_t    cParms   = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    uint32_t    iLockBuf = 0;
#endif
    uint32_t    iParm;

    /*
     * The call result.
     */
    pCallInfo->result = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                /* Always treat locked buffers as if !VBGLR0_CAN_USE_PHYS_PAGE_LIST():
                 * otherwise we would create an RTR0MEMOBJ and lock the pages a second
                 * time, which leads to undefined behavior and possible BSODs on Windows. */
//              if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                if (fIsUser)
                {
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        int rc2;
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}


DECLR0VBGL(int) VbglR0HGCMInternalCall(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
                                       PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    bool                    fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
    struct VbglR0ParmInfo   ParmInfo;
    size_t                  cbExtra;
    int                     rc;

    /*
     * Basic validation.
     */
    AssertMsgReturn(   pCallInfo
                    && pfnAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(   cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
                 && cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
                 VERR_INVALID_PARAMETER);

    Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
         pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));

    /*
     * Validate, lock and buffer the parameters for the call.
     * This will calculate the amount of extra space for physical page list.
     */
    rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate the request buffer and recreate the call request.
         */
        VMMDevHGCMCall *pHGCMCall;
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pHGCMCall,
                         sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra,
                         VMMDevReq_HGCMCall);
        if (RT_SUCCESS(rc))
        {
            bool fLeakIt;
            vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fIsUser, &ParmInfo);

            /*
             * Perform the call.
             */
            rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Copy back the result (parameters and buffers that changed).
                 */
                rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall, &ParmInfo, fIsUser, rc);
            }
            else
            {
                if (   rc != VERR_INTERRUPTED
                    && rc != VERR_TIMEOUT)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
                }
            }

            if (!fLeakIt)
                VbglGRFree(&pHGCMCall->header.header);
        }
    }
    else
        LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));

    /*
     * Release locks and free bounce buffers.
     */
    if (ParmInfo.cLockBufs)
        while (ParmInfo.cLockBufs-- > 0)
        {
            RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
#ifdef USE_BOUNCE_BUFFERS
            RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
#endif
        }

    return rc;
}
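
/*
 * For illustration: a hedged sketch of a kernel-mode caller issuing a call
 * with one buffer parameter through the entry point above.  The function
 * number, the struct-with-trailing-parameters layout and exampleWaiter (see
 * the connect sketch further up) are assumptions for the example.
 *
 * @code
 *  static int exampleCall(uint32_t u32ClientID, void *pvBuf, uint32_t cbBuf)
 *  {
 *      struct
 *      {
 *          VBoxGuestHGCMCallInfo Hdr;
 *          HGCMFunctionParameter aParms[1];
 *      } Req;
 *      RT_ZERO(Req);
 *      Req.Hdr.u32ClientID                  = u32ClientID;
 *      Req.Hdr.u32Function                  = 1;                   // hypothetical function number
 *      Req.Hdr.cParms                       = 1;
 *      Req.aParms[0].type                   = VMMDevHGCMParmType_LinAddr;
 *      Req.aParms[0].u.Pointer.size         = cbBuf;
 *      Req.aParms[0].u.Pointer.u.linearAddr = (uintptr_t)pvBuf;
 *
 *      int rc = VbglR0HGCMInternalCall(&Req.Hdr, sizeof(Req), VBGLR0_HGCMCALL_F_KERNEL,
 *                                      exampleWaiter, NULL /*pvAsyncData*/, 0 /*u32AsyncData*/);
 *      if (RT_SUCCESS(rc))
 *          rc = Req.Hdr.result;                                    // the HGCM service's own status
 *      return rc;
 *  }
 * @endcode
 */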


#if ARCH_BITS == 64
DECLR0VBGL(int) VbglR0HGCMInternalCall32(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
                                         PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VBoxGuestHGCMCallInfo   *pCallInfo64 = NULL;
    HGCMFunctionParameter   *pParm64     = NULL;
    HGCMFunctionParameter32 *pParm32     = NULL;
    uint32_t                 cParms      = 0;
    uint32_t                 iParm       = 0;
    int                      rc          = VINF_SUCCESS;

    /*
     * Input validation.
     */
    AssertMsgReturn(   pCallInfo
                    && pfnAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(   cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
                 && cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
                 VERR_INVALID_PARAMETER);

    /* This Assert does not work on Solaris/Windows 64/32 mixed mode, not sure why, skipping for now */
#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_WINDOWS)
    AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
#endif

    cParms = pCallInfo->cParms;
    Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));

    /*
     * The simple approach, allocate a temporary request and convert the parameters.
     */
    pCallInfo64 = (VBoxGuestHGCMCallInfo *)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
    if (!pCallInfo64)
        return VERR_NO_TMP_MEMORY;

    *pCallInfo64 = *pCallInfo;
    pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
    pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
    for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
    {
        switch (pParm32->type)
        {
            case VMMDevHGCMParmType_32bit:
                pParm64->type      = VMMDevHGCMParmType_32bit;
                pParm64->u.value32 = pParm32->u.value32;
                break;

            case VMMDevHGCMParmType_64bit:
                pParm64->type      = VMMDevHGCMParmType_64bit;
                pParm64->u.value64 = pParm32->u.value64;
                break;

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            case VMMDevHGCMParmType_LinAddr_In:
                pParm64->type                   = pParm32->type;
                pParm64->u.Pointer.size         = pParm32->u.Pointer.size;
                pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
                break;

            default:
                rc = VERR_INVALID_PARAMETER;
                LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
    {
        rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
                                    pfnAsyncCallback, pvAsyncData, u32AsyncData);

        if (RT_SUCCESS(rc))
        {
            *pCallInfo = *pCallInfo64;

            /*
             * Copy back.
             */
            pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
            pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
            for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
            {
                switch (pParm64->type)
                {
                    case VMMDevHGCMParmType_32bit:
                        pParm32->u.value32 = pParm64->u.value32;
                        break;

                    case VMMDevHGCMParmType_64bit:
                        pParm32->u.value64 = pParm64->u.value64;
                        break;

                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                    case VMMDevHGCMParmType_LinAddr_In:
                        pParm32->u.Pointer.size = pParm64->u.Pointer.size;
                        break;

                    default:
                        LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
                        rc = VERR_INTERNAL_ERROR_3;
                        break;
                }
            }
        }
        else
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
        }
    }
    else
    {
        static unsigned s_cErrors = 0;
        if (s_cErrors++ < 32)
            LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
    }

    RTMemTmpFree(pCallInfo64);
    return rc;
}
#endif /* ARCH_BITS == 64 */

#endif /* VBGL_VBOXGUEST */