VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibHGCMInternal.cpp@ 75547

Last change on this file since 75547 was 75126, checked in by vboxsync, 6 years ago

VBoxGuestR0LibHGCMInternal.cpp: We no longer seem to need alloca.h.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 45.7 KB
Line 
1/* $Id: VBoxGuestR0LibHGCMInternal.cpp 75126 2018-10-28 14:53:12Z vboxsync $ */
2/** @file
3 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
4 */
5
6/*
7 * Copyright (C) 2006-2018 Oracle Corporation
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#define LOG_GROUP LOG_GROUP_HGCM
36
37#include "VBoxGuestR0LibInternal.h"
38#include <iprt/asm.h>
39#include <iprt/assert.h>
40#include <iprt/mem.h>
41#include <iprt/memobj.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44#include <iprt/time.h>
45
46#ifndef VBGL_VBOXGUEST
47# error "This file should only be part of the VBoxGuestR0LibBase library that is linked into VBoxGuest."
48#endif
49
50
51/*********************************************************************************************************************************
52* Defined Constants And Macros *
53*********************************************************************************************************************************/
54/** The max parameter buffer size for a user request. */
55#define VBGLR0_MAX_HGCM_USER_PARM (24*_1M)
56/** The max parameter buffer size for a kernel request. */
57#define VBGLR0_MAX_HGCM_KERNEL_PARM (16*_1M)
58#if defined(RT_OS_LINUX) || defined(RT_OS_DARWIN)
59/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
60 * side effects.
61 * Darwin 32bit & 64bit also needs this because of 4GB/4GB user/kernel space. */
62# define USE_BOUNCE_BUFFERS
63#endif
64
65
66/*********************************************************************************************************************************
67* Structures and Typedefs *
68*********************************************************************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 *
 * Records the memory objects (and, with USE_BOUNCE_BUFFERS, the small bounce
 * buffers) created while preprocessing the call parameters, so they can be
 * consulted when building the request / copying back results and released
 * when the call completes.
 */
struct VbglR0ParmInfo
{
    /** Number of entries in use in aLockBufs. */
    uint32_t cLockBufs;
    struct
    {
        /** Index of the call parameter this entry belongs to. */
        uint32_t iParm;
        /** Memory object locking the parameter buffer (or backing the bounce buffer). */
        RTR0MEMOBJ hObj;
#ifdef USE_BOUNCE_BUFFERS
        /** Small bounce buffer allocated with RTMemTmpAlloc/RTMemTmpAllocZ,
         *  NULL when hObj is a whole-page allocation or locks the caller's pages. */
        void *pvSmallBuf;
#endif
    } aLockBufs[10];
};
84
85
86
87/* These functions can be only used by VBoxGuest. */
88
89DECLR0VBGL(int) VbglR0HGCMInternalConnect(HGCMServiceLocation const *pLoc, uint32_t fRequestor, HGCMCLIENTID *pidClient,
90 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
91{
92 int rc;
93 if ( RT_VALID_PTR(pLoc)
94 && RT_VALID_PTR(pidClient)
95 && RT_VALID_PTR(pfnAsyncCallback))
96 {
97 /* Allocate request */
98 VMMDevHGCMConnect *pHGCMConnect = NULL;
99 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMConnect, sizeof(VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
100 if (RT_SUCCESS(rc))
101 {
102 /* Initialize request memory */
103 pHGCMConnect->header.header.fRequestor = fRequestor;
104
105 pHGCMConnect->header.fu32Flags = 0;
106
107 memcpy(&pHGCMConnect->loc, pLoc, sizeof(pHGCMConnect->loc));
108 pHGCMConnect->u32ClientID = 0;
109
110 /* Issue request */
111 rc = VbglR0GRPerform (&pHGCMConnect->header.header);
112 if (RT_SUCCESS(rc))
113 {
114 /* Check if host decides to process the request asynchronously. */
115 if (rc == VINF_HGCM_ASYNC_EXECUTE)
116 {
117 /* Wait for request completion interrupt notification from host */
118 pfnAsyncCallback(&pHGCMConnect->header, pvAsyncData, u32AsyncData);
119 }
120
121 rc = pHGCMConnect->header.result;
122 if (RT_SUCCESS(rc))
123 *pidClient = pHGCMConnect->u32ClientID;
124 }
125 VbglR0GRFree(&pHGCMConnect->header.header);
126 }
127 }
128 else
129 rc = VERR_INVALID_PARAMETER;
130 return rc;
131}
132
133
134DECLR0VBGL(int) VbglR0HGCMInternalDisconnect(HGCMCLIENTID idClient, uint32_t fRequestor,
135 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
136{
137 int rc;
138 if ( idClient != 0
139 && pfnAsyncCallback)
140 {
141 /* Allocate request */
142 VMMDevHGCMDisconnect *pHGCMDisconnect = NULL;
143 rc = VbglR0GRAlloc ((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof (VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
144 if (RT_SUCCESS(rc))
145 {
146 /* Initialize request memory */
147 pHGCMDisconnect->header.header.fRequestor = fRequestor;
148
149 pHGCMDisconnect->header.fu32Flags = 0;
150
151 pHGCMDisconnect->u32ClientID = idClient;
152
153 /* Issue request */
154 rc = VbglR0GRPerform(&pHGCMDisconnect->header.header);
155 if (RT_SUCCESS(rc))
156 {
157 /* Check if host decides to process the request asynchronously. */
158 if (rc == VINF_HGCM_ASYNC_EXECUTE)
159 {
160 /* Wait for request completion interrupt notification from host */
161 pfnAsyncCallback(&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
162 }
163
164 rc = pHGCMDisconnect->header.result;
165 }
166
167 VbglR0GRFree(&pHGCMDisconnect->header.header);
168 }
169 }
170 else
171 rc = VERR_INVALID_PARAMETER;
172 return rc;
173}
174
175
/**
 * Preprocesses the HGCM call, validating and locking/buffering parameters.
 *
 * Kernel callers may use any parameter type; user mode callers are restricted
 * to the non-locked linear address types (page lists and locked buffers fail
 * with VERR_INVALID_PARAMETER).  Linear address buffers are locked down, or
 * bounce buffered when USE_BOUNCE_BUFFERS is defined, and recorded in
 * pParmInfo for the later stages and for cleanup.
 *
 * @returns VBox status code.
 *
 * @param pCallInfo     The call info.
 * @param cbCallInfo    The size of the call info structure.
 * @param fIsUser       Is it a user request or kernel request.
 * @param pParmInfo     Where to record locked buffers / bounce buffers (at
 *                      most 10 entries, see VbglR0ParmInfo::aLockBufs).
 * @param pcbExtra      Where to return the extra request space needed for
 *                      physical page lists.
 */
static int vbglR0HGCMInternalPreprocessCall(PCVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo,
                                            bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const               cParms   = pCallInfo->cParms;
    uint32_t                     iParm;
    uint32_t                     cb;

    /*
     * Lock down any linear buffers so we can get their addresses
     * and figure out how much extra storage we need for page lists.
     *
     * Note! With kernel mode users we can be assertive. For user mode users
     *       we should just (debug) log it and fail without any fanfare.
     */
    *pcbExtra = 0;
    pParmInfo->cLockBufs = 0;
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
    {
        switch (pSrcParm->type)
        {
            /* Immediate values need no preprocessing; just log them. */
            case VMMDevHGCMParmType_32bit:
                Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
                break;

            case VMMDevHGCMParmType_64bit:
                Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm, pSrcParm->u.value64));
                break;

            /* Caller supplied page list: validate that it lies entirely inside
               the call info structure and that its page data is sane. */
            case VMMDevHGCMParmType_PageList:
                if (fIsUser) /* kernel-only parameter type */
                    return VERR_INVALID_PARAMETER;
                cb = pSrcParm->u.PageList.size;
                if (cb)
                {
                    uint32_t off = pSrcParm->u.PageList.offset;
                    HGCMPageListInfo *pPgLst;
                    uint32_t cPages;
                    uint32_t u32;

                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    /* The page list must come after the parameter array and fit in cbCallInfo. */
                    AssertMsgReturn(   off >= cParms * sizeof(HGCMFunctionParameter)
                                    && off <= cbCallInfo - sizeof(HGCMPageListInfo),
                                    ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);

                    pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
                    cPages = pPgLst->cPages;
                    u32 = RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]) + off;
                    AssertMsgReturn(u32 <= cbCallInfo,
                                    ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    /* NOTE(review): the message below is missing a '%' before 'x', so it
                       logs the literal string "#x" instead of offFirstPage — consider fixing. */
                    AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
                    /* The page count must exactly match the byte size + first-page offset. */
                    u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
                    AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
                    AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
                          iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
                    /* Every page address must be page aligned and within the supported range. */
                    u32 = cPages;
                    while (u32-- > 0)
                    {
                        Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
                        AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
                                        ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
                                        VERR_INVALID_PARAMETER);
                    }

                    /* Reserve space for a copy of this page list in the host request. */
                    *pcbExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[pPgLst->cPages]);
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (fIsUser) /* kernel-only parameter types */
                    return VERR_INVALID_PARAMETER;
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* Already locked and usable as-is; only range-check and log. */
                    cb = pSrcParm->u.Pointer.size;
                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    if (cb != 0)
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
                    else
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                cb = pSrcParm->u.Pointer.size;
                if (cb != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void *pvSmallBuf = NULL;
#endif
                    uint32_t iLockBuf = pParmInfo->cLockBufs;
                    RTR0MEMOBJ hObj;
                    int rc;
                    /* Input-only parameters need read access; the rest get read+write. */
                    uint32_t fAccess =    pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
                                       || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
                                     ? RTMEM_PROT_READ
                                     : RTMEM_PROT_READ | RTMEM_PROT_WRITE;

                    AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
                    if (!fIsUser)
                    {
                        /* Kernel buffer: lock the pages in place. */
                        AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                        VERR_OUT_OF_RANGE);
                        rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                    }
                    else if (cb > VBGLR0_MAX_HGCM_USER_PARM)
                    {
                        Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
                             pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
                             cb, VBGLR0_MAX_HGCM_USER_PARM));
                        return VERR_OUT_OF_RANGE;
                    }
                    else
                    {
#ifndef USE_BOUNCE_BUFFERS
                        /* User buffer: lock the user pages in place. */
                        rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));

#else  /* USE_BOUNCE_BUFFERS */
                        /*
                         * This is a bit massive, but we don't want to waste a
                         * whole page for a 3 byte string buffer (guest props).
                         *
                         * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
                         * the system is using some power of two allocator.
                         */
                        /** @todo A more efficient strategy would be to combine buffers. However it
                         *        is probably going to be more massive than the current code, so
                         *        it can wait till later. */
                        bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
                                    && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
                        if (cb <= PAGE_SIZE / 2 - 16)
                        {
                            /* Small bounce buffer; output-only buffers are zeroed (AllocZ) so no
                               uninitialized kernel memory is exposed. */
                            pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
                            if (RT_UNLIKELY(!pvSmallBuf))
                                return VERR_NO_MEMORY;
                            if (fCopyIn)
                            {
                                rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTMemTmpFree(pvSmallBuf);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
                            if (RT_FAILURE(rc))
                            {
                                RTMemTmpFree(pvSmallBuf);
                                Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
                                     rc, pvSmallBuf, cb));
                                return rc;
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
                        }
                        else
                        {
                            /* Big bounce buffer: whole pages, pvSmallBuf stays NULL. */
                            rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
                            if (RT_FAILURE(rc))
                                return rc;
                            if (!fCopyIn)
                                memset(RTR0MemObjAddress(hObj), '\0', cb);
                            else
                            {
                                rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTR0MemObjFree(hObj, false /*fFreeMappings*/);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                        }
#endif /* USE_BOUNCE_BUFFERS */
                    }

                    /* Record the lock/bounce buffer for the later stages and for cleanup. */
                    pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
                    pParmInfo->aLockBufs[iLockBuf].hObj = hObj;
#ifdef USE_BOUNCE_BUFFERS
                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
#endif
                    pParmInfo->cLockBufs = iLockBuf + 1;

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        /* This buffer will be sent as a physical page list; reserve space for it. */
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        *pcbExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
                    }
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                break;

            default:
                return VERR_INVALID_PARAMETER;
        }
    }

    return VINF_SUCCESS;
}
422
423
424/**
425 * Translates locked linear address to the normal type.
426 * The locked types are only for the guest side and not handled by the host.
427 *
428 * @returns normal linear address type.
429 * @param enmType The type.
430 */
431static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
432{
433 switch (enmType)
434 {
435 case VMMDevHGCMParmType_LinAddr_Locked_In:
436 return VMMDevHGCMParmType_LinAddr_In;
437 case VMMDevHGCMParmType_LinAddr_Locked_Out:
438 return VMMDevHGCMParmType_LinAddr_Out;
439 case VMMDevHGCMParmType_LinAddr_Locked:
440 return VMMDevHGCMParmType_LinAddr;
441 default:
442 return enmType;
443 }
444}
445
446
447/**
448 * Translates linear address types to page list direction flags.
449 *
450 * @returns page list flags.
451 * @param enmType The type.
452 */
453static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
454{
455 switch (enmType)
456 {
457 case VMMDevHGCMParmType_LinAddr_In:
458 case VMMDevHGCMParmType_LinAddr_Locked_In:
459 return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
460
461 case VMMDevHGCMParmType_LinAddr_Out:
462 case VMMDevHGCMParmType_LinAddr_Locked_Out:
463 return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
464
465 default: AssertFailed();
466 case VMMDevHGCMParmType_LinAddr:
467 case VMMDevHGCMParmType_LinAddr_Locked:
468 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
469 }
470}
471
472
/**
 * Initializes the call request that we're sending to the host.
 *
 * Copies the parameters from the caller's structure into the VMMDev request,
 * demoting locked linear-address types and, where the platform supports it,
 * converting locked/bounced buffers into physical page lists appended after
 * the parameter array (space reserved by the preprocessing step).
 *
 * @param pHGCMCall   The VMMDev request to initialize; must be large enough
 *                    for the parameter array plus the page-list extra space.
 * @param pCallInfo   The call info.
 * @param cbCallInfo  The size of the call info structure.
 * @param fRequestor  VMMDEV_REQUESTOR_XXX.
 * @param fIsUser     Is it a user request or kernel request.
 * @param pParmInfo   Lock/bounce buffer info from vbglR0HGCMInternalPreprocessCall.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, PCVBGLIOCHGCMCALL pCallInfo,
                                       uint32_t cbCallInfo, uint32_t fRequestor, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter       *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t const               cParms   = pCallInfo->cParms;
    /* Page lists are appended after the parameter array; offExtra is the write cursor. */
    uint32_t                     offExtra = (uint32_t)((uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall);
    uint32_t                     iLockBuf = 0;
    uint32_t                     iParm;
    RT_NOREF1(cbCallInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call request headers.
     */
    /* User-mode requests without an explicit requestor get conservative
       "not given" defaults so the host never sees them as kernel requests. */
    pHGCMCall->header.header.fRequestor = !fIsUser || (fRequestor & VMMDEV_REQUESTOR_USERMODE) ? fRequestor
                                        :   VMMDEV_REQUESTOR_USERMODE | VMMDEV_REQUESTOR_USR_NOT_GIVEN
                                          | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN | VMMDEV_REQUESTOR_CON_DONT_KNOW;

    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result    = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms      = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Copy the caller's page list into the request's extra space. */
                pDstParm->type = VMMDevHGCMParmType_PageList;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo       *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const          cPages    = pSrcPgLst->cPages;
                    uint32_t                iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags            = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage     = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages           = (uint16_t)cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* Host can use the buffer as-is; just demote the locked type. */
                    *pDstParm = *pSrcParm;
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ hObj = pParmInfo->aLockBufs[iLockBuf].hObj;
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        /* Convert the locked buffer into a physical page list parameter. */
                        HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const      cPages    = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t            iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size   = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags            = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            /* Bounce buffers: small ones keep their sub-page offset, page
                               allocations (pvSmallBuf == NULL) start at offset zero. */
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = (uint16_t)(pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK);
                        pDstPgLst->cPages           = (uint16_t)cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        /* Pass a linear address: the bounce buffer's for user requests,
                           the caller's own address for kernel requests. */
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    /* Empty buffer: send a NULL pointer of the demoted type. */
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
625
626
/**
 * Performs the call and completion wait.
 *
 * Issues the prepared request, waits (via the caller supplied callback) when
 * the host processes it asynchronously, and tries to cancel the request when
 * the wait is interrupted or times out.  Because cancellation races request
 * completion, the request may have to be leaked (see @a pfLeakIt).
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param pHGCMCall        The HGCM call info.
 * @param pfnAsyncCallback The async callback that will wait for the call
 *                         to complete.
 * @param pvAsyncData      Argument for the callback.
 * @param u32AsyncData     Argument for the callback.
 * @param pfLeakIt         Where to return the leak it / free it,
 *                         indicator. Cancellation fun.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    Log(("calling VbglR0GRPerform\n"));
    rc = VbglR0GRPerform(&pHGCMCall->header.header);
    Log(("VbglR0GRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            /* Completed while we waited - done. */
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             *  waiting in case of a completion race. If it wasn't for WINNT having its own
             *  version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglR0GRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                pCancelReq->physReqToCancel = VbglR0PhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglR0GRPerform(&pCancelReq->header);
                VbglR0GRFree(&pCancelReq->header);
            }
#if 1 /** @todo ADDVER: Remove this on next minor version change. */
            if (rc2 == VERR_NOT_IMPLEMENTED)
            {
                /* host is too old, or we're out of heap. */
                /* Fall back to the legacy in-place cancel request. */
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
                pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                rc2 = VbglR0GRPerform(&pHGCMCall->header.header);
                if (rc2 == VERR_INVALID_PARAMETER)
                    rc2 = VERR_NOT_FOUND;
                else if (RT_SUCCESS(rc))
                    RTThreadSleep(1);
            }
#endif
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start        = RTTimeSystemMilliTS();
                uint32_t cMilliesToWait  = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed        = 0;
                if (rc2 != VERR_NOT_FOUND)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                }
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                /* Poll the DONE flag for up to cMilliesToWait ms. */
                do
                {
                    ASMCompilerBarrier(); /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier(); /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    /* Neither cancelled nor completed: report the request as leaked so the
                       caller skips VbglR0GRFree (presumably because the host may still
                       touch the memory - NOTE(review): confirm). */
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}
756
757
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param pCallInfo Call info structure to update.
 * @param pHGCMCall HGCM call request.
 * @param pParmInfo Parameter locking/buffering info.
 * @param fIsUser   Is it a user (true) or kernel request.
 * @param rc        The current result code. Passed along to
 *                  preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(PVBGLIOCHGCMCALL pCallInfo, VMMDevHGCMCall const *pHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter       *pDstParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const               cParms   = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    uint32_t                     iLockBuf = 0;
#endif
    uint32_t                     iParm;
    RT_NOREF1(pParmInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call result.
     */
    pCallInfo->Hdr.rc = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     */
    /* Note: dispatch is on the CALLER's parameter type (pDstParm); the request
       side may have been translated to page lists by the init step. */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Only the actual size travels back; data was written in place. */
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                /* Input-only bounce buffers need no copy-back; just keep iLockBuf in step. */
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* Host wrote directly into the locked pages; only the size changes. */
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                if (fIsUser)
                {
                    /* Copy the bounce buffer contents back into the user buffer. */
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        int rc2;
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    /* All recorded lock buffers must have been consumed for user requests. */
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}
867
868
869DECLR0VBGL(int) VbglR0HGCMInternalCall(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
870 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
871{
872 bool fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
873 struct VbglR0ParmInfo ParmInfo;
874 size_t cbExtra;
875 int rc;
876
877 /*
878 * Basic validation.
879 */
880 AssertMsgReturn( !pCallInfo
881 || !pfnAsyncCallback
882 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
883 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
884 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
885 VERR_INVALID_PARAMETER);
886 AssertReturn( cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
887 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
888 VERR_INVALID_PARAMETER);
889
890 Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
891 pCallInfo->u32ClientID, pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));
892
893 /*
894 * Validate, lock and buffer the parameters for the call.
895 * This will calculate the amount of extra space for physical page list.
896 */
897 rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
898 if (RT_SUCCESS(rc))
899 {
900 /*
901 * Allocate the request buffer and recreate the call request.
902 */
903 VMMDevHGCMCall *pHGCMCall;
904 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMCall,
905 sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra,
906 VMMDevReq_HGCMCall);
907 if (RT_SUCCESS(rc))
908 {
909 bool fLeakIt;
910 vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fRequestor, fIsUser, &ParmInfo);
911
912 /*
913 * Perform the call.
914 */
915 rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
916 if (RT_SUCCESS(rc))
917 {
918 /*
919 * Copy back the result (parameters and buffers that changed).
920 */
921 rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall, &ParmInfo, fIsUser, rc);
922 }
923 else
924 {
925 if ( rc != VERR_INTERRUPTED
926 && rc != VERR_TIMEOUT)
927 {
928 static unsigned s_cErrors = 0;
929 if (s_cErrors++ < 32)
930 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
931 }
932 }
933
934 if (!fLeakIt)
935 VbglR0GRFree(&pHGCMCall->header.header);
936 }
937 }
938 else
939 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));
940
941 /*
942 * Release locks and free bounce buffers.
943 */
944 if (ParmInfo.cLockBufs)
945 while (ParmInfo.cLockBufs-- > 0)
946 {
947 RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
948#ifdef USE_BOUNCE_BUFFERS
949 RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
950#endif
951 }
952
953 return rc;
954}
955
956
#if ARCH_BITS == 64
/**
 * 32-bit compatibility thunk for VbglR0HGCMInternalCall on 64-bit hosts.
 *
 * Converts a request with 32-bit HGCMFunctionParameter32 entries into a
 * temporary 64-bit request, performs the call, and copies the results back.
 *
 * @returns VBox status code.
 * @param   pCallInfo           Call request header followed by 32-bit
 *                              parameter structures.
 * @param   cbCallInfo          Size of the request (header + 32-bit parameters).
 * @param   fFlags              VBGLR0_HGCMCALL_F_XXX.
 * @param   fRequestor          VMMDEV_REQUESTOR_XXX value passed to the host.
 * @param   pfnAsyncCallback    Callback that waits for the host to complete
 *                              the request; must not be NULL.
 * @param   pvAsyncData         Argument for @a pfnAsyncCallback.
 * @param   u32AsyncData        Argument for @a pfnAsyncCallback.
 */
DECLR0VBGL(int) VbglR0HGCMInternalCall32(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
                                         PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    PVBGLIOCHGCMCALL         pCallInfo64 = NULL;
    HGCMFunctionParameter   *pParm64 = NULL;
    HGCMFunctionParameter32 *pParm32 = NULL;
    uint32_t                 cParms = 0;
    uint32_t                 iParm = 0;
    int                      rc = VINF_SUCCESS;

    /*
     * Input validation.
     *
     * The condition must hold for a *valid* call (non-NULL pointers, bounded
     * parameter count, no stray flag bits); it also guards the pCallInfo
     * derefs below against NULL.
     */
    AssertMsgReturn(   pCallInfo
                    && pfnAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    /* The buffer must hold both the header and the declared 32-bit parameter array. */
    AssertReturn(   cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
                 && cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
                 VERR_INVALID_PARAMETER);

    /* This Assert does not work on Solaris/Windows 64/32 mixed mode, not sure why, skipping for now */
#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_WINDOWS)
    AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
#endif

    cParms = pCallInfo->cParms;
    Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));

    /*
     * The simple approach, allocate a temporary request and convert the parameters.
     */
    pCallInfo64 = (PVBGLIOCHGCMCALL)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
    if (!pCallInfo64)
        return VERR_NO_TMP_MEMORY;

    *pCallInfo64 = *pCallInfo;  /* Copies the header; parameters are widened below. */
    pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
    pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
    for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
    {
        switch (pParm32->type)
        {
            case VMMDevHGCMParmType_32bit:
                pParm64->type      = VMMDevHGCMParmType_32bit;
                pParm64->u.value32 = pParm32->u.value32;
                break;

            case VMMDevHGCMParmType_64bit:
                pParm64->type      = VMMDevHGCMParmType_64bit;
                pParm64->u.value64 = pParm32->u.value64;
                break;

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            case VMMDevHGCMParmType_LinAddr_In:
                pParm64->type                   = pParm32->type;
                pParm64->u.Pointer.size         = pParm32->u.Pointer.size;
                pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
                break;

            default:
                rc = VERR_INVALID_PARAMETER;
                LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
    {
        rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
                                    fRequestor, pfnAsyncCallback, pvAsyncData, u32AsyncData);

        if (RT_SUCCESS(rc))
        {
            *pCallInfo = *pCallInfo64;  /* Copy back the (possibly updated) header. */

            /*
             * Copy back the parameter values/sizes the host may have changed.
             */
            pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
            pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
            for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
            {
                switch (pParm64->type)
                {
                    case VMMDevHGCMParmType_32bit:
                        pParm32->u.value32 = pParm64->u.value32;
                        break;

                    case VMMDevHGCMParmType_64bit:
                        pParm32->u.value64 = pParm64->u.value64;
                        break;

                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                    case VMMDevHGCMParmType_LinAddr_In:
                        pParm32->u.Pointer.size = pParm64->u.Pointer.size;
                        break;

                    default:
                        LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
                        rc = VERR_INTERNAL_ERROR_3;
                        break;
                }
            }
        }
        else
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
        }
    }
    else
    {
        static unsigned s_cErrors = 0;
        if (s_cErrors++ < 32)
            LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
    }

    RTMemTmpFree(pCallInfo64);
    return rc;
}
#endif /* ARCH_BITS == 64 */
1086
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette