VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuestLib/HGCMInternal.cpp@ 26425

Last change on this file since 26425 was 26425, checked in by vboxsync, 15 years ago

alternative license for VBoxGuestLib is CDDL

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 43.6 KB
Line 
1/* $Revision: 26425 $ */
2/** @file
3 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/* Entire file is ifdef'ed with VBGL_VBOXGUEST */
32#ifdef VBGL_VBOXGUEST
33
34/*******************************************************************************
35* Header Files *
36*******************************************************************************/
37#include "VBGLInternal.h"
38#include <iprt/alloca.h>
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/mem.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44#include <iprt/thread.h>
45#include <iprt/time.h>
46
47
48/*******************************************************************************
49* Defined Constants And Macros *
50*******************************************************************************/
51/** The max parameter buffer size for a user request. */
52#define VBGLR0_MAX_HGCM_USER_PARM (16*_1M)
53/** The max parameter buffer size for a kernel request. */
54#define VBGLR0_MAX_HGCM_KERNEL_PARM (16*_1M)
55#ifdef RT_OS_LINUX
56/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
57 * side effects. */
58# define USE_BOUNCE_BUFFERS
59#endif
60
61
62/*******************************************************************************
63* Structures and Typedefs *
64*******************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 *
 * Records, per locked parameter, the resources that must be released after
 * the call completes (memory object locks and optional bounce buffers).
 */
struct VbglR0ParmInfo
{
    /* Number of aLockBufs entries currently in use. */
    uint32_t cLockBufs;
    struct
    {
        /* Index of the HGCM parameter this entry belongs to. */
        uint32_t iParm;
        /* Memory object backing the parameter (kernel lock, user lock, or bounce buffer pages). */
        RTR0MEMOBJ hObj;
#ifdef USE_BOUNCE_BUFFERS
        /* Small bounce buffer (RTMemTmpAlloc) or NULL when hObj holds the (big) bounce pages. */
        void *pvSmallBuf;
#endif
    } aLockBufs[10];
};
80
81
82
83/* These functions can be only used by VBoxGuest. */
84
85DECLVBGL(int) VbglR0HGCMInternalConnect (VBoxGuestHGCMConnectInfo *pConnectInfo,
86 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
87{
88 VMMDevHGCMConnect *pHGCMConnect;
89 int rc;
90
91 if (!pConnectInfo || !pfnAsyncCallback)
92 return VERR_INVALID_PARAMETER;
93
94 pHGCMConnect = NULL;
95
96 /* Allocate request */
97 rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMConnect, sizeof (VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
98
99 if (RT_SUCCESS(rc))
100 {
101 /* Initialize request memory */
102 pHGCMConnect->header.fu32Flags = 0;
103
104 memcpy (&pHGCMConnect->loc, &pConnectInfo->Loc, sizeof (HGCMServiceLocation));
105 pHGCMConnect->u32ClientID = 0;
106
107 /* Issue request */
108 rc = VbglGRPerform (&pHGCMConnect->header.header);
109
110 if (RT_SUCCESS(rc))
111 {
112 /* Check if host decides to process the request asynchronously. */
113 if (rc == VINF_HGCM_ASYNC_EXECUTE)
114 {
115 /* Wait for request completion interrupt notification from host */
116 pfnAsyncCallback (&pHGCMConnect->header, pvAsyncData, u32AsyncData);
117 }
118
119 pConnectInfo->result = pHGCMConnect->header.result;
120
121 if (RT_SUCCESS (pConnectInfo->result))
122 pConnectInfo->u32ClientID = pHGCMConnect->u32ClientID;
123 }
124
125 VbglGRFree (&pHGCMConnect->header.header);
126 }
127
128 return rc;
129}
130
131
132DECLR0VBGL(int) VbglR0HGCMInternalDisconnect (VBoxGuestHGCMDisconnectInfo *pDisconnectInfo,
133 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
134{
135 VMMDevHGCMDisconnect *pHGCMDisconnect;
136 int rc;
137
138 if (!pDisconnectInfo || !pfnAsyncCallback)
139 return VERR_INVALID_PARAMETER;
140
141 pHGCMDisconnect = NULL;
142
143 /* Allocate request */
144 rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof (VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
145
146 if (RT_SUCCESS(rc))
147 {
148 /* Initialize request memory */
149 pHGCMDisconnect->header.fu32Flags = 0;
150
151 pHGCMDisconnect->u32ClientID = pDisconnectInfo->u32ClientID;
152
153 /* Issue request */
154 rc = VbglGRPerform (&pHGCMDisconnect->header.header);
155
156 if (RT_SUCCESS(rc))
157 {
158 /* Check if host decides to process the request asynchronously. */
159 if (rc == VINF_HGCM_ASYNC_EXECUTE)
160 {
161 /* Wait for request completion interrupt notification from host */
162 pfnAsyncCallback (&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
163 }
164
165 pDisconnectInfo->result = pHGCMDisconnect->header.result;
166 }
167
168 VbglGRFree (&pHGCMDisconnect->header.header);
169 }
170
171 return rc;
172}
173
174
175/**
176 * Preprocesses the HGCM call, validating and locking/buffering parameters.
177 *
178 * @returns VBox status code.
179 *
180 * @param pCallInfo The call info.
181 * @param cbCallInfo The size of the call info structure.
182 * @param fIsUser Is it a user request or kernel request.
183 * @param pcbExtra Where to return the extra request space needed for
184 * physical page lists.
185 */
186static int vbglR0HGCMInternalPreprocessCall(VBoxGuestHGCMCallInfo const *pCallInfo, uint32_t cbCallInfo,
187 bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
188{
189 HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
190 uint32_t cParms = pCallInfo->cParms;
191 uint32_t iParm;
192 uint32_t cb;
193
194 /*
195 * Lock down the any linear buffers so we can get their addresses
196 * and figure out how much extra storage we need for page lists.
197 *
198 * Note! With kernel mode users we can be assertive. For user mode users
199 * we should just (debug) log it and fail without any fanfare.
200 */
201 *pcbExtra = 0;
202 pParmInfo->cLockBufs = 0;
203 for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
204 {
205 switch (pSrcParm->type)
206 {
207 case VMMDevHGCMParmType_32bit:
208 Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
209 break;
210
211 case VMMDevHGCMParmType_64bit:
212 Log4(("GstHGCMCall: parm=%u type=64bit: %#018x\n", iParm, pSrcParm->u.value64));
213 break;
214
215 case VMMDevHGCMParmType_PageList:
216 if (fIsUser)
217 return VERR_INVALID_PARAMETER;
218 cb = pSrcParm->u.PageList.size;
219 if (cb)
220 {
221 uint32_t off = pSrcParm->u.PageList.offset;
222 HGCMPageListInfo *pPgLst;
223 uint32_t cPages;
224 uint32_t u32;
225
226 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
227 VERR_OUT_OF_RANGE);
228 AssertMsgReturn( off >= pCallInfo->cParms * sizeof(HGCMFunctionParameter)
229 && off <= cbCallInfo - sizeof(HGCMPageListInfo),
230 ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, pCallInfo->cParms, cbCallInfo),
231 VERR_INVALID_PARAMETER);
232
233 pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
234 cPages = pPgLst->cPages;
235 u32 = RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]) + off;
236 AssertMsgReturn(u32 <= cbCallInfo,
237 ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
238 VERR_INVALID_PARAMETER);
239 AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
240 u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
241 AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
242 AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
243 Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
244 iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
245 u32 = cPages;
246 while (u32-- > 0)
247 {
248 Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
249 AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
250 ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
251 VERR_INVALID_PARAMETER);
252 }
253
254 *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[pPgLst->cPages]);
255 }
256 else
257 Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
258 break;
259
260 case VMMDevHGCMParmType_LinAddr_Locked_In:
261 case VMMDevHGCMParmType_LinAddr_Locked_Out:
262 case VMMDevHGCMParmType_LinAddr_Locked:
263 if (fIsUser)
264 return VERR_INVALID_PARAMETER;
265 if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
266 {
267 cb = pSrcParm->u.Pointer.size;
268 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
269 VERR_OUT_OF_RANGE);
270 if (cb != 0)
271 Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
272 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
273 else
274 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
275 break;
276 }
277 /* fall thru */
278
279 case VMMDevHGCMParmType_LinAddr_In:
280 case VMMDevHGCMParmType_LinAddr_Out:
281 case VMMDevHGCMParmType_LinAddr:
282 cb = pSrcParm->u.Pointer.size;
283 if (cb != 0)
284 {
285#ifdef USE_BOUNCE_BUFFERS
286 void *pvSmallBuf = NULL;
287#endif
288 uint32_t iLockBuf = pParmInfo->cLockBufs;
289 RTR0MEMOBJ hObj;
290 int rc;
291 uint32_t fAccess = pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
292 || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
293 ? RTMEM_PROT_READ
294 : RTMEM_PROT_READ | RTMEM_PROT_WRITE;
295
296 AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
297 if (!fIsUser)
298 {
299 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
300 VERR_OUT_OF_RANGE);
301 rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
302 if (RT_FAILURE(rc))
303 {
304 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
305 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
306 return rc;
307 }
308 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
309 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
310 }
311 else
312 {
313 if (cb > VBGLR0_MAX_HGCM_USER_PARM)
314 {
315 Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
316 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
317 cb, VBGLR0_MAX_HGCM_USER_PARM));
318 return VERR_OUT_OF_RANGE;
319 }
320
321#ifndef USE_BOUNCE_BUFFERS
322 rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
323 if (RT_FAILURE(rc))
324 {
325 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
326 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
327 return rc;
328 }
329 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
330 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
331
332#else /* USE_BOUNCE_BUFFERS */
333 /*
334 * This is a bit massive, but we don't want to waste a
335 * whole page for a 3 byte string buffer (guest props).
336 *
337 * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
338 * the system is using some power of two allocator.
339 */
340 /** @todo A more efficient strategy would be to combine buffers. However it
341 * is probably going to be more massive than the current code, so
342 * it can wait till later. */
343 bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
344 && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
345 if (cb <= PAGE_SIZE / 2 - 16)
346 {
347 pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
348 if (RT_UNLIKELY(!pvSmallBuf))
349 return VERR_NO_MEMORY;
350 if (fCopyIn)
351 {
352 rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
353 if (RT_FAILURE(rc))
354 {
355 RTMemTmpFree(pvSmallBuf);
356 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
357 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
358 pSrcParm->u.Pointer.u.linearAddr, cb, rc));
359 return rc;
360 }
361 }
362 rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
363 if (RT_FAILURE(rc))
364 {
365 RTMemTmpFree(pvSmallBuf);
366 Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
367 rc, pvSmallBuf, cb));
368 return rc;
369 }
370 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
371 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
372 }
373 else
374 {
375 rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
376 if (RT_FAILURE(rc))
377 return rc;
378 if (!fCopyIn)
379 memset(RTR0MemObjAddress(hObj), '\0', cb);
380 else
381 {
382 rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
383 if (RT_FAILURE(rc))
384 {
385 RTR0MemObjFree(hObj, false /*fFreeMappings*/);
386 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
387 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
388 pSrcParm->u.Pointer.u.linearAddr, cb, rc));
389 return rc;
390 }
391 }
392 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
393 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
394 }
395#endif /* USE_BOUNCE_BUFFERS */
396 }
397
398 pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
399 pParmInfo->aLockBufs[iLockBuf].hObj = hObj;
400#ifdef USE_BOUNCE_BUFFERS
401 pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
402#endif
403 pParmInfo->cLockBufs = iLockBuf + 1;
404
405 if (VBGLR0_CAN_USE_PHYS_PAGE_LIST())
406 {
407 size_t cPages = RTR0MemObjSize(hObj);
408 *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
409 }
410 }
411 else
412 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
413 break;
414
415 default:
416 return VERR_INVALID_PARAMETER;
417 }
418 }
419
420 return VINF_SUCCESS;
421}
422
423
424/**
425 * Translates locked linear address to the normal type.
426 * The locked types are only for the guest side and not handled by the host.
427 *
428 * @returns normal linear address type.
429 * @param enmType The type.
430 */
431static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
432{
433 switch (enmType)
434 {
435 case VMMDevHGCMParmType_LinAddr_Locked_In:
436 return VMMDevHGCMParmType_LinAddr_In;
437 case VMMDevHGCMParmType_LinAddr_Locked_Out:
438 return VMMDevHGCMParmType_LinAddr_Out;
439 case VMMDevHGCMParmType_LinAddr_Locked:
440 return VMMDevHGCMParmType_LinAddr;
441 default:
442 return enmType;
443 }
444}
445
446
447/**
448 * Translates linear address types to page list direction flags.
449 *
450 * @returns page list flags.
451 * @param enmType The type.
452 */
453static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
454{
455 switch (enmType)
456 {
457 case VMMDevHGCMParmType_LinAddr_In:
458 case VMMDevHGCMParmType_LinAddr_Locked_In:
459 return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
460
461 case VMMDevHGCMParmType_LinAddr_Out:
462 case VMMDevHGCMParmType_LinAddr_Locked_Out:
463 return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
464
465 default: AssertFailed();
466 case VMMDevHGCMParmType_LinAddr:
467 case VMMDevHGCMParmType_LinAddr_Locked:
468 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
469 }
470}
471
472
/**
 * Initializes the call request that we're sending to the host.
 *
 * @param   pHGCMCall   The request to initialize.  Following the parameter
 *                      array there must be room for the extra page list space
 *                      calculated by vbglR0HGCMInternalPreprocessCall.
 * @param   pCallInfo   The call info.
 * @param   cbCallInfo  The size of the call info structure.
 * @param   fIsUser     Is it a user request or kernel request.
 * @param   pParmInfo   The buffer lock/bounce info recorded by
 *                      vbglR0HGCMInternalPreprocessCall.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, VBoxGuestHGCMCallInfo const *pCallInfo,
                                       uint32_t cbCallInfo, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t cParms = pCallInfo->cParms;
    /* The extra space (page lists) starts right after the parameter array. */
    uint32_t offExtra = (uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall;
    uint32_t iLockBuf = 0;
    uint32_t iParm;


    /*
     * The call request headers.
     */
    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Copy the caller supplied page list into the request's extra
                   space, making the offset relative to the request. */
                pDstParm->type = VMMDevHGCMParmType_PageList;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const cPages = pSrcPgLst->cPages;
                    uint32_t iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages = cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    /* The host can access the buffer directly; just drop the
                       'locked' attribute, which is a guest-only concept. */
                    *pDstParm = *pSrcParm;
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    /* Non-empty buffers were locked during preprocessing; the
                       entries appear in aLockBufs in parameter order. */
                    RTR0MEMOBJ hObj = pParmInfo->aLockBufs[iLockBuf].hObj;
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                    {
                        /* Build a physical page list in the extra space from
                           the memory object created during preprocessing. */
                        HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK;
                        pDstPgLst->cPages = cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        /* No page list support: pass a linear address, which
                           for user requests is the bounce buffer address. */
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    /* Zero sized buffers are not locked; just translate the type. */
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
617
618
619/**
620 * Performs the call and completion wait.
621 *
622 * @returns VBox status code of this operation, not necessarily the call.
623 *
624 * @param pHGCMCall The HGCM call info.
625 * @param pfnAsyncCallback The async callback that will wait for the call
626 * to complete.
627 * @param pvAsyncData Argument for the callback.
628 * @param u32AsyncData Argument for the callback.
629 * @param pfLeakIt Where to return the leak it / free it,
630 * indicator. Cancellation fun.
631 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    /* Submit the prepared request to the host. */
    Log(("calling VbglGRPerform\n"));
    rc = VbglGRPerform(&pHGCMCall->header.header);
    Log(("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            /* Completed while we were waiting - the callback rc no longer matters. */
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             * waiting in case of a completion race. If it wasn't for WINNT having its own
             * version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglGRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                pCancelReq->physReqToCancel = VbglPhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglGRPerform(&pCancelReq->header);
                VbglGRFree(&pCancelReq->header);
            }
#if 1 /** @todo ADDVER: Remove this on next minor version change. */
            if (rc2 == VERR_NOT_IMPLEMENTED)
            {
                /* host is too old, or we're out of heap. */
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
                pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                rc2 = VbglGRPerform(&pHGCMCall->header.header);
                if (rc2 == VERR_INVALID_PARAMETER)
                    rc2 = VERR_NOT_FOUND;
                else if (RT_SUCCESS(rc))
                    RTThreadSleep(1);
            }
#endif
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                /* Cancellation succeeded; the request buffer can be freed as usual. */
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start = RTTimeSystemMilliTS();
                /* VERR_NOT_FOUND / VERR_SEM_DESTROYED suggest completion is imminent,
                   so wait a shorter while before giving up. */
                uint32_t cMilliesToWait = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed = 0;
                if (rc2 != VERR_NOT_FOUND)
                    LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                do
                {
                    ASMCompilerBarrier(); /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier(); /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    /* Still pending: the host may write to the request later, so the
                       caller must leak the buffer instead of freeing it. */
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}
744
745
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo   Call info structure to update.
 * @param   pHGCMCall   HGCM call request.
 * @param   pParmInfo   Parameter locking/buffering info.
 * @param   fIsUser     Is it a user (true) or kernel request.
 * @param   rc          The current result code. Passed along to
 *                      preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(VBoxGuestHGCMCallInfo *pCallInfo, VMMDevHGCMCall const *pHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter *pDstParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    uint32_t cParms = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    uint32_t iLockBuf = 0;
#endif
    uint32_t iParm;

    /*
     * The call result.
     */
    pCallInfo->result = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     */
    for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Only the size can change for a page list parameter. */
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                /* Input-only buffers have nothing to copy back, but iLockBuf
                   must be kept in sync with the aLockBufs entries. */
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    /* Host wrote directly into the locked buffer; only the size changes. */
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                /* Copy the bounce buffer contents back to the user buffer. */
                if (fIsUser)
                {
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        int rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                    ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                    : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                    cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    /* All recorded lock buffers must have been accounted for above. */
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}
850
851
852DECLR0VBGL(int) VbglR0HGCMInternalCall(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
853 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
854{
855 bool fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
856 struct VbglR0ParmInfo ParmInfo;
857 size_t cbExtra;
858 int rc;
859
860 /*
861 * Basic validation.
862 */
863 AssertMsgReturn( !pCallInfo
864 || !pfnAsyncCallback
865 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
866 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
867 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
868 VERR_INVALID_PARAMETER);
869 AssertReturn( cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
870 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
871 VERR_INVALID_PARAMETER);
872
873 Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
874 pCallInfo->u32ClientID, pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));
875
876 /*
877 * Validate, lock and buffer the parameters for the call.
878 * This will calculate the amount of extra space for physical page list.
879 */
880 rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
881 if (RT_SUCCESS(rc))
882 {
883 /*
884 * Allocate the request buffer and recreate the call request.
885 */
886 VMMDevHGCMCall *pHGCMCall;
887 rc = VbglGRAlloc((VMMDevRequestHeader **)&pHGCMCall,
888 sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra,
889 VMMDevReq_HGCMCall);
890 if (RT_SUCCESS(rc))
891 {
892 bool fLeakIt;
893 vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fIsUser, &ParmInfo);
894
895 /*
896 * Perform the call.
897 */
898 rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
899 if (RT_SUCCESS(rc))
900 {
901 /*
902 * Copy back the result (parameters and buffers that changed).
903 */
904 rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall, &ParmInfo, fIsUser, rc);
905 }
906 else
907 {
908 if (rc != VERR_INTERRUPTED)
909 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
910 }
911
912 if (!fLeakIt)
913 VbglGRFree(&pHGCMCall->header.header);
914 }
915 }
916 else
917 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));
918
919 /*
920 * Release locks and free bounce buffers.
921 */
922 if (ParmInfo.cLockBufs)
923 while (ParmInfo.cLockBufs-- > 0)
924 {
925 RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
926#ifdef USE_BOUNCE_BUFFERS
927 RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
928#endif
929 }
930
931 return rc;
932}
933
934
935#if ARCH_BITS == 64
936DECLR0VBGL(int) VbglR0HGCMInternalCall32(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
937 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
938{
939 VBoxGuestHGCMCallInfo *pCallInfo64 = NULL;
940 HGCMFunctionParameter *pParm64 = NULL;
941 HGCMFunctionParameter32 *pParm32 = NULL;
942 uint32_t cParms = 0;
943 uint32_t iParm = 0;
944 int rc = VINF_SUCCESS;
945
946 /*
947 * Input validation.
948 */
949 AssertMsgReturn( !pCallInfo
950 || !pfnAsyncCallback
951 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
952 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
953 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
954 VERR_INVALID_PARAMETER);
955 AssertReturn( cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
956 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
957 VERR_INVALID_PARAMETER);
958
959 /* This Assert does not work on Solaris 64/32 mixed mode, not sure why, skipping for now */
960#ifndef RT_OS_SOLARIS
961 AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
962#endif
963
964 cParms = pCallInfo->cParms;
965 Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));
966
967 /*
968 * The simple approach, allocate a temporary request and convert the parameters.
969 */
970 pCallInfo64 = (VBoxGuestHGCMCallInfo *)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
971 if (!pCallInfo64)
972 return VERR_NO_TMP_MEMORY;
973
974 *pCallInfo64 = *pCallInfo;
975 pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
976 pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
977 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
978 {
979 switch (pParm32->type)
980 {
981 case VMMDevHGCMParmType_32bit:
982 pParm64->type = VMMDevHGCMParmType_32bit;
983 pParm64->u.value32 = pParm32->u.value32;
984 break;
985
986 case VMMDevHGCMParmType_64bit:
987 pParm64->type = VMMDevHGCMParmType_64bit;
988 pParm64->u.value64 = pParm32->u.value64;
989 break;
990
991 case VMMDevHGCMParmType_LinAddr_Out:
992 case VMMDevHGCMParmType_LinAddr:
993 case VMMDevHGCMParmType_LinAddr_In:
994 pParm64->type = pParm32->type;
995 pParm64->u.Pointer.size = pParm32->u.Pointer.size;
996 pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
997 break;
998
999 default:
1000 rc = VERR_INVALID_PARAMETER;
1001 LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
1002 break;
1003 }
1004 if (RT_FAILURE(rc))
1005 break;
1006 }
1007 if (RT_SUCCESS(rc))
1008 {
1009 rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
1010 pfnAsyncCallback, pvAsyncData, u32AsyncData);
1011
1012 if (RT_SUCCESS(rc))
1013 {
1014 *pCallInfo = *pCallInfo64;
1015
1016 /*
1017 * Copy back.
1018 */
1019 pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
1020 pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
1021 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
1022 {
1023 switch (pParm64->type)
1024 {
1025 case VMMDevHGCMParmType_32bit:
1026 LogRel(("pParm32->u.value32=%d\n", pParm32->u.value32));
1027 pParm32->u.value32 = pParm64->u.value32;
1028 break;
1029
1030 case VMMDevHGCMParmType_64bit:
1031 pParm32->u.value64 = pParm64->u.value64;
1032 break;
1033
1034 case VMMDevHGCMParmType_LinAddr_Out:
1035 case VMMDevHGCMParmType_LinAddr:
1036 case VMMDevHGCMParmType_LinAddr_In:
1037 pParm32->u.Pointer.size = pParm64->u.Pointer.size;
1038 break;
1039
1040 default:
1041 LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
1042 rc = VERR_INTERNAL_ERROR_3;
1043 break;
1044 }
1045 }
1046 }
1047 else
1048 LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
1049 }
1050 else
1051 LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
1052
1053 RTMemTmpFree(pCallInfo64);
1054 return rc;
1055}
1056#endif /* ARCH_BITS == 64 */
1057
1058#endif /* VBGL_VBOXGUEST */
1059
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette