VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibHGCMInternal.cpp

Last change on this file was 107091, checked in by vboxsync on 2024-11-21

iprt/cdefs.h, SUPDrvIOC.h, VBoxGuestR0Lib: Added an RT_UOFFSETOF_FLEX_ARRAY macro for calculating the size of structures ending with a variable-length array. Only tested on Windows... bugref:10585

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 50.4 KB
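
The RT_UOFFSETOF_FLEX_ARRAY macro named in the change above is what this file now uses to size structures that end in a flexible array member (see the page-list handling below). A minimal sketch of the calculation, with an assumed page count of 2 (illustrative only):

    /* Size of a HGCMPageListInfo carrying cPages page addresses:
       offset of aPages[] plus cPages * sizeof(RTGCPHYS64). */
    size_t cPages  = 2;
    size_t cbPgLst = RT_UOFFSETOF_FLEX_ARRAY(HGCMPageListInfo, aPages, cPages);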
/* $Id: VBoxGuestR0LibHGCMInternal.cpp 107091 2024-11-21 16:12:02Z vboxsync $ */
/** @file
 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
 */

/*
 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HGCM

#include "VBoxGuestR0LibInternal.h"
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/time.h>
#include <VBox/err.h>

#ifndef VBGL_VBOXGUEST
# error "This file should only be part of the VBoxGuestR0LibBase library that is linked into VBoxGuest."
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** The max parameter buffer size for a user request. */
#define VBGLR0_MAX_HGCM_USER_PARM       (24*_1M)
/** The max parameter buffer size for a kernel request. */
#define VBGLR0_MAX_HGCM_KERNEL_PARM     (16*_1M)
/** The max embedded buffer size. */
#define VBGLR0_MAX_HGCM_EMBEDDED_BUFFER _64K

#if defined(RT_OS_LINUX) || defined(RT_OS_DARWIN)
/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
 * side effects.
 * Darwin 32bit & 64bit also need this because of the 4GB/4GB user/kernel space split. */
# define USE_BOUNCE_BUFFERS
#endif


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 */
struct VbglR0ParmInfo
{
    /** Number of entries in use in aLockBufs. */
    uint32_t cLockBufs;
    struct
    {
        /** The index of the parameter this entry belongs to. */
        uint32_t    iParm;
        /** Memory object locking down the buffer (kernel, user or bounce buffer). */
        RTR0MEMOBJ  hObj;
#ifdef USE_BOUNCE_BUFFERS
        /** Small bounce buffer (RTMemTmpAlloc), NULL if the memory object is used. */
        void       *pvSmallBuf;
#endif
    } aLockBufs[10];
};



/* These functions can only be used by VBoxGuest. */

DECLR0VBGL(int) VbglR0HGCMInternalConnect(HGCMServiceLocation const *pLoc, uint32_t fRequestor, HGCMCLIENTID *pidClient,
                                          PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    int rc;
    if (   RT_VALID_PTR(pLoc)
        && RT_VALID_PTR(pidClient)
        && RT_VALID_PTR(pfnAsyncCallback))
    {
        /* Allocate request */
        VMMDevHGCMConnect *pHGCMConnect = NULL;
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMConnect, sizeof(VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
        if (RT_SUCCESS(rc))
        {
            /* Initialize request memory */
            pHGCMConnect->header.header.fRequestor = fRequestor;

            pHGCMConnect->header.fu32Flags = 0;

            memcpy(&pHGCMConnect->loc, pLoc, sizeof(pHGCMConnect->loc));
            pHGCMConnect->u32ClientID = 0;

            /* Issue request */
            rc = VbglR0GRPerform(&pHGCMConnect->header.header);
            if (RT_SUCCESS(rc))
            {
                /* Check if host decides to process the request asynchronously. */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    /* Wait for request completion interrupt notification from host */
                    pfnAsyncCallback(&pHGCMConnect->header, pvAsyncData, u32AsyncData);
                }

                rc = pHGCMConnect->header.result;
                if (RT_SUCCESS(rc))
                    *pidClient = pHGCMConnect->u32ClientID;
            }
            VbglR0GRFree(&pHGCMConnect->header.header);
        }
    }
    else
        rc = VERR_INVALID_PARAMETER;
    return rc;
}


DECLR0VBGL(int) VbglR0HGCMInternalDisconnect(HGCMCLIENTID idClient, uint32_t fRequestor,
                                             PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    int rc;
    if (   idClient != 0
        && pfnAsyncCallback)
    {
        /* Allocate request */
        VMMDevHGCMDisconnect *pHGCMDisconnect = NULL;
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof(VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
        if (RT_SUCCESS(rc))
        {
            /* Initialize request memory */
            pHGCMDisconnect->header.header.fRequestor = fRequestor;

            pHGCMDisconnect->header.fu32Flags = 0;

            pHGCMDisconnect->u32ClientID = idClient;

            /* Issue request */
            rc = VbglR0GRPerform(&pHGCMDisconnect->header.header);
            if (RT_SUCCESS(rc))
            {
                /* Check if host decides to process the request asynchronously. */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    /* Wait for request completion interrupt notification from host */
                    pfnAsyncCallback(&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
                }

                rc = pHGCMDisconnect->header.result;
            }

            VbglR0GRFree(&pHGCMDisconnect->header.header);
        }
    }
    else
        rc = VERR_INVALID_PARAMETER;
    return rc;
}


/**
 * Preprocesses the HGCM call, validating and locking/buffering parameters.
 *
 * @returns VBox status code.
 *
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Where to store locking and bounce buffer info for
 *                          the parameters.
 * @param   pcbExtra        Where to return the extra request space needed for
 *                          physical page lists.
 */
static int vbglR0HGCMInternalPreprocessCall(PCVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo,
                                            bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const               cParms   = pCallInfo->cParms;
    uint32_t                     iParm;
    uint32_t                     cb;
    /*
     * Lock down any linear buffers so we can get their addresses
     * and figure out how much extra storage we need for page lists.
     *
     * Note! With kernel mode users we can be assertive. For user mode users
     *       we should just (debug) log it and fail without any fanfare.
     */
    *pcbExtra = 0;
    pParmInfo->cLockBufs = 0;
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
                Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
                break;

            case VMMDevHGCMParmType_64bit:
                Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm, pSrcParm->u.value64));
                break;

            case VMMDevHGCMParmType_PageList:
            case VMMDevHGCMParmType_ContiguousPageList:
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                cb = pSrcParm->u.PageList.size;
                if (cb)
                {
                    uint32_t          off = pSrcParm->u.PageList.offset;
                    HGCMPageListInfo *pPgLst;
                    uint32_t          cPages;
                    uint32_t          u32;

                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    AssertMsgReturn(   off >= cParms * sizeof(HGCMFunctionParameter)
                                    && off <= cbCallInfo - sizeof(HGCMPageListInfo),
                                    ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);

                    pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
                    cPages = pPgLst->cPages;
                    u32    = RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]) + off;
                    AssertMsgReturn(u32 <= cbCallInfo,
                                    ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("%#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
                    u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
                    AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
                    AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
                          iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
                    u32 = cPages;
                    while (u32-- > 0)
                    {
                        Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
                        AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
                                        ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
                                        VERR_INVALID_PARAMETER);
                    }

                    *pcbExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[pPgLst->cPages]);
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
                break;

            case VMMDevHGCMParmType_Embedded:
                if (fIsUser) /// @todo relax this.
                    return VERR_INVALID_PARAMETER;
                cb = pSrcParm->u.Embedded.cbData;
                if (cb)
                {
                    uint32_t off = pSrcParm->u.Embedded.offData;
                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_EMBEDDED_BUFFER, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_EMBEDDED_BUFFER),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(cb <= cbCallInfo - cParms * sizeof(HGCMFunctionParameter),
                                    ("cb=%#x cParms=%#x cbCallInfo=%#x\n", cb, cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(   off >= cParms * sizeof(HGCMFunctionParameter)
                                    && off <= cbCallInfo - cb,
                                    ("offData=%#x cParms=%#x cbCallInfo=%#x\n", off, cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pSrcParm->u.Embedded.fFlags),
                                    ("%#x\n", pSrcParm->u.Embedded.fFlags), VERR_INVALID_PARAMETER);

                    *pcbExtra += RT_ALIGN_32(cb, 8);
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=embed: cb=0\n", iParm));
                break;


            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    cb = pSrcParm->u.Pointer.size;
                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    if (cb != 0)
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
                    else
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                cb = pSrcParm->u.Pointer.size;
                if (cb != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void       *pvSmallBuf = NULL;
#endif
                    uint32_t    iLockBuf   = pParmInfo->cLockBufs;
                    RTR0MEMOBJ  hObj;
                    int         rc;
                    uint32_t    fAccess    =    pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
                                             || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
                                           ? RTMEM_PROT_READ
                                           : RTMEM_PROT_READ | RTMEM_PROT_WRITE;

                    AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
                    if (!fIsUser)
                    {
                        AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                        VERR_OUT_OF_RANGE);
                        rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                    }
                    else if (cb > VBGLR0_MAX_HGCM_USER_PARM)
                    {
                        Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
                             pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
                             cb, VBGLR0_MAX_HGCM_USER_PARM));
                        return VERR_OUT_OF_RANGE;
                    }
                    else
                    {
#ifndef USE_BOUNCE_BUFFERS
                        rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));

#else  /* USE_BOUNCE_BUFFERS */
                        /*
                         * This is a bit massive, but we don't want to waste a
                         * whole page for a 3 byte string buffer (guest props).
                         *
                         * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
                         * the system is using some power of two allocator.
                         */
                        /** @todo A more efficient strategy would be to combine buffers. However it
                         *        is probably going to be more massive than the current code, so
                         *        it can wait till later. */
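                        /* With 4K pages the small-buffer path below covers requests up to 2048 - 16 = 2032 bytes. */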
                        bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
                                    && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
                        if (cb <= PAGE_SIZE / 2 - 16)
                        {
                            pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
                            if (RT_UNLIKELY(!pvSmallBuf))
                                return VERR_NO_MEMORY;
                            if (fCopyIn)
                            {
                                rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTMemTmpFree(pvSmallBuf);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
                            if (RT_FAILURE(rc))
                            {
                                RTMemTmpFree(pvSmallBuf);
                                Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
                                     rc, pvSmallBuf, cb));
                                return rc;
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
                        }
                        else
                        {
                            rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
                            if (RT_FAILURE(rc))
                                return rc;
                            if (!fCopyIn)
                                memset(RTR0MemObjAddress(hObj), '\0', cb);
                            else
                            {
                                rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTR0MemObjFree(hObj, false /*fFreeMappings*/);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                        }
#endif /* USE_BOUNCE_BUFFERS */
                    }

                    pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
                    pParmInfo->aLockBufs[iLockBuf].hObj  = hObj;
#ifdef USE_BOUNCE_BUFFERS
                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
#endif
                    pParmInfo->cLockBufs = iLockBuf + 1;
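
                    /* If the host can take a physical page list for this buffer, reserve
                       extra request space for a HGCMPageListInfo with one entry per locked page. */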
                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        *pcbExtra += RT_UOFFSETOF_FLEX_ARRAY(HGCMPageListInfo, aPages, cPages);
                    }
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                break;

            default:
                return VERR_INVALID_PARAMETER;
        }
    }

    return VINF_SUCCESS;
}
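
/*
 * Worked example of the page-list validation above (illustrative numbers
 * only): a buffer with cb=0x1800 and offFirstPage=0x800 spans
 * RT_ALIGN_32(0x800 + 0x1800, PAGE_SIZE) >> PAGE_SHIFT = 2 pages with 4K
 * pages, so the supplied cPages must be exactly 2.
 */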


/**
 * Translates locked linear address to the normal type.
 * The locked types are only for the guest side and not handled by the host.
 *
 * @returns normal linear address type.
 * @param   enmType     The type.
 */
static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
{
    switch (enmType)
    {
        case VMMDevHGCMParmType_LinAddr_Locked_In:
            return VMMDevHGCMParmType_LinAddr_In;
        case VMMDevHGCMParmType_LinAddr_Locked_Out:
            return VMMDevHGCMParmType_LinAddr_Out;
        case VMMDevHGCMParmType_LinAddr_Locked:
            return VMMDevHGCMParmType_LinAddr;
        default:
            return enmType;
    }
}


/**
 * Translates linear address types to page list direction flags.
 *
 * @returns page list flags.
 * @param   enmType     The type.
 */
static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
{
    switch (enmType)
    {
        case VMMDevHGCMParmType_LinAddr_In:
        case VMMDevHGCMParmType_LinAddr_Locked_In:
            return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;

        case VMMDevHGCMParmType_LinAddr_Out:
        case VMMDevHGCMParmType_LinAddr_Locked_Out:
            return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;

        default: AssertFailed(); RT_FALL_THRU();
        case VMMDevHGCMParmType_LinAddr:
        case VMMDevHGCMParmType_LinAddr_Locked:
            return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
    }
}


/**
 * Initializes the call request that we're sending to the host.
 *
 * @param   pHGCMCall       The request to initialize.
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fRequestor      VMMDEV_REQUESTOR_XXX.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Parameter locking and bounce buffer info.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, PCVBGLIOCHGCMCALL pCallInfo,
                                       uint32_t cbCallInfo, uint32_t fRequestor, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter       *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t const               cParms   = pCallInfo->cParms;
    uint32_t                     offExtra = (uint32_t)((uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall);
    uint32_t                     iLockBuf = 0;
    uint32_t                     iParm;
    RT_NOREF1(cbCallInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call request headers.
     */
    pHGCMCall->header.header.fRequestor = !fIsUser || (fRequestor & VMMDEV_REQUESTOR_USERMODE) ? fRequestor
                                        :   VMMDEV_REQUESTOR_USERMODE        | VMMDEV_REQUESTOR_USR_NOT_GIVEN
                                          | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN | VMMDEV_REQUESTOR_CON_DONT_KNOW;

    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result    = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms      = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
            case VMMDevHGCMParmType_ContiguousPageList:
                pDstParm->type = pSrcParm->type;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo       *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const          cPages    = pSrcPgLst->cPages;
                    uint32_t                iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags            = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage     = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages           = (uint16_t)cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0; /** @todo will fail on the host side now */
                break;

            case VMMDevHGCMParmType_Embedded:
            {
                uint32_t const cb = pSrcParm->u.Embedded.cbData;
                pDstParm->type = VMMDevHGCMParmType_Embedded;
                pDstParm->u.Embedded.cbData  = cb;
                pDstParm->u.Embedded.offData = offExtra;
                if (cb > 0)
                {
                    uint8_t *pbDst = (uint8_t *)pHGCMCall + offExtra;
                    if (pSrcParm->u.Embedded.fFlags & VBOX_HGCM_F_PARM_DIRECTION_TO_HOST)
                    {
                        memcpy(pbDst, (uint8_t const *)pCallInfo + pSrcParm->u.Embedded.offData, cb);
                        if (RT_ALIGN(cb, 8) != cb)
                            memset(pbDst + cb, 0, RT_ALIGN(cb, 8) - cb);
                    }
                    else
                        RT_BZERO(pbDst, RT_ALIGN(cb, 8));
                    offExtra += RT_ALIGN(cb, 8);
                }
                break;
            }

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    *pDstParm = *pSrcParm;
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void      *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ hObj       = pParmInfo->aLockBufs[iLockBuf].hObj;
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const      cPages    = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t            iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size   = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags            = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = (uint16_t)(pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK);
                        pDstPgLst->cPages           = (uint16_t)cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            RTGCPHYS64 *paPages = pDstPgLst->aPages;
                            paPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(paPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += (uint32_t)RT_UOFFSETOF_FLEX_ARRAY(HGCMPageListInfo, aPages, cPages);
                    }
                    else
                    {
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
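
/*
 * Illustrative layout of the request assembled above:
 *
 *      VMMDevHGCMCall header
 *      HGCMFunctionParameter[cParms]
 *      extra space: HGCMPageListInfo structures and 8-byte aligned embedded
 *                   data buffers, appended in parameter order and addressed
 *                   via offsets (offExtra) from the start of the request.
 */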


/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 * @param   pfLeakIt            Where to return the leak-it / free-it indicator;
 *                              set when cancellation raced request completion.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    Log(("calling VbglR0GRPerform\n"));
    rc = VbglR0GRPerform(&pHGCMCall->header.header);
    Log(("VbglR0GRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             *
             * Note! The VMMDevHGCMCancel2 request size was increased in 7.1 to fully
             *       handle 64-bit addresses.  Using the new structure size with older
             *       VBox host versions will work, though, since they check for a
             *       minimum request size and will ignore any extra stuff they don't
             *       understand.  (Both ARM and x86 guests are little endian; for a
             *       big-endian guest this trick naturally wouldn't work.)
             *
             *       Of course, older hosts won't find requests with addresses above
             *       4GB, but that's not a real issue since such addresses are used
             *       by ARM guests only.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             *  waiting in case of a completion race. If it wasn't for WINNT having its own
             *  version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglR0GRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                pCancelReq->physReqToCancel = VbglR0PhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglR0GRPerform(&pCancelReq->header);
                VbglR0GRFree(&pCancelReq->header);
            }
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start       = RTTimeSystemMilliTS();
                uint32_t cMilliesToWait = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed       = 0;
                if (rc2 != VERR_NOT_FOUND)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                }
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                do
                {
                    ASMCompilerBarrier(); /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier(); /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}


/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo           Call info structure to update.
 * @param   cbCallInfo          The size of the client request.
 * @param   pHGCMCall           HGCM call request.
 * @param   cbHGCMCall          The size of the HGCM call request.
 * @param   pParmInfo           Parameter locking/buffering info.
 * @param   fIsUser             Is it a user (true) or kernel request.
 * @param   rc                  The current result code. Passed along to
 *                              preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo,
                                            VMMDevHGCMCall const *pHGCMCall, uint32_t cbHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter       *pDstParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const               cParms   = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    uint32_t                     iLockBuf = 0;
#endif
    uint32_t                     iParm;
    RT_NOREF1(pParmInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call result.
     */
    pCallInfo->Hdr.rc = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     */
    /** @todo This is assuming user data (pDstParm) is buffered.  Not true
     *        on OS/2, though I'm not sure we care... */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
            case VMMDevHGCMParmType_ContiguousPageList:
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_Embedded:
            {
                uint32_t const cbDst = pDstParm->u.Embedded.cbData;
                uint32_t       cbSrc;
                pDstParm->u.Embedded.cbData = cbSrc = pSrcParm->u.Embedded.cbData;
                if (   cbSrc > 0
                    && (pDstParm->u.Embedded.fFlags & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
                {
                    uint32_t const offDst = pDstParm->u.Embedded.offData;
                    uint32_t const offSrc = pSrcParm->u.Embedded.offData;

                    AssertReturn(offDst < cbCallInfo, VERR_INTERNAL_ERROR_2);
                    AssertReturn(offDst >= sizeof(*pCallInfo) + cParms * sizeof(*pDstParm), VERR_INTERNAL_ERROR_2);
                    AssertReturn(cbDst <= cbCallInfo - offDst, VERR_INTERNAL_ERROR_2);

                    AssertReturn(offSrc < cbCallInfo, VERR_INTERNAL_ERROR_2);
                    AssertReturn(offSrc >= sizeof(*pHGCMCall) + cParms * sizeof(*pSrcParm), VERR_INTERNAL_ERROR_2);
                    if (cbSrc <= cbHGCMCall - offSrc)
                    { /* likely */ }
                    else
                    {
                        /* Special case: Buffer overflow w/ correct size given. */
                        AssertReturn(RT_FAILURE_NP(rc), VERR_INTERNAL_ERROR_2);
                        cbSrc = cbHGCMCall - offSrc;
                    }
                    memcpy((uint8_t *)pCallInfo + offDst, (uint8_t const *)pHGCMCall + offSrc, RT_MIN(cbSrc, cbDst));
                }
                break;
            }

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                if (fIsUser)
                {
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        int rc2;
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}


DECLR0VBGL(int) VbglR0HGCMInternalCall(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
                                       PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    bool                  fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
    struct VbglR0ParmInfo ParmInfo;
    size_t                cbExtra;
    int                   rc;

    /*
     * Basic validation.
     */
    AssertMsgReturn(   RT_VALID_PTR(pCallInfo)
                    && RT_VALID_PTR(pfnAsyncCallback)
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(   cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
                 && cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
                 VERR_INVALID_PARAMETER);

    Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
         pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));

    /*
     * Validate, lock and buffer the parameters for the call.
     * This will calculate the amount of extra space for physical page list.
     */
    rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate the request buffer and recreate the call request.
         */
        VMMDevHGCMCall *pHGCMCall;
        uint32_t const  cbHGCMCall = sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + (uint32_t)cbExtra;
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMCall, cbHGCMCall, VMMDevReq_HGCMCall);
        if (RT_SUCCESS(rc))
        {
            bool fLeakIt;
            vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fRequestor, fIsUser, &ParmInfo);

            /*
             * Perform the call.
             */
            rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Copy back the result (parameters and buffers that changed).
                 */
                rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, cbCallInfo, pHGCMCall, cbHGCMCall, &ParmInfo, fIsUser, rc);
            }
            else
            {
                if (   rc != VERR_INTERRUPTED
                    && rc != VERR_TIMEOUT)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
                }
            }

            if (!fLeakIt)
                VbglR0GRFree(&pHGCMCall->header.header);
        }
    }
    else
        LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));

    /*
     * Release locks and free bounce buffers.
     */
    if (ParmInfo.cLockBufs)
        while (ParmInfo.cLockBufs-- > 0)
        {
            RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
#ifdef USE_BOUNCE_BUFFERS
            RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
#endif
        }

    return rc;
}
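
/*
 * Illustrative kernel-mode usage sketch (not part of this file; idClient,
 * MY_FUNCTION and the async callback arguments are hypothetical):
 *
 *      struct
 *      {
 *          VBGLIOCHGCMCALL       Hdr;
 *          HGCMFunctionParameter aParms[1];
 *      } Req;
 *      VBGL_HGCM_HDR_INIT(&Req.Hdr, idClient, MY_FUNCTION, 1);
 *      Req.aParms[0].type      = VMMDevHGCMParmType_32bit;
 *      Req.aParms[0].u.value32 = 42;
 *      rc = VbglR0HGCMInternalCall(&Req.Hdr, sizeof(Req), VBGLR0_HGCMCALL_F_KERNEL,
 *                                  fRequestor, pfnAsyncCallback, pvAsyncData, u32AsyncData);
 */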


#if ARCH_BITS == 64
DECLR0VBGL(int) VbglR0HGCMInternalCall32(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
                                         PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    PVBGLIOCHGCMCALL         pCallInfo64 = NULL;
    HGCMFunctionParameter   *pParm64     = NULL;
    HGCMFunctionParameter32 *pParm32     = NULL;
    uint32_t                 cParms      = 0;
    uint32_t                 iParm       = 0;
    int                      rc          = VINF_SUCCESS;

    /*
     * Input validation.
     */
    AssertMsgReturn(   RT_VALID_PTR(pCallInfo)
                    && RT_VALID_PTR(pfnAsyncCallback)
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(   cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
                 && cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
                 VERR_INVALID_PARAMETER);

    /* This Assert does not work on Solaris/Windows 64/32 mixed mode, not sure why, skipping for now */
#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_WINDOWS)
    AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
#endif

    cParms = pCallInfo->cParms;
    Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));

    /*
     * The simple approach, allocate a temporary request and convert the parameters.
     */
    pCallInfo64 = (PVBGLIOCHGCMCALL)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
    if (!pCallInfo64)
        return VERR_NO_TMP_MEMORY;

    *pCallInfo64 = *pCallInfo;
    pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
    pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
    for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
    {
        switch (pParm32->type)
        {
            case VMMDevHGCMParmType_32bit:
                pParm64->type      = VMMDevHGCMParmType_32bit;
                pParm64->u.value32 = pParm32->u.value32;
                break;

            case VMMDevHGCMParmType_64bit:
                pParm64->type      = VMMDevHGCMParmType_64bit;
                pParm64->u.value64 = pParm32->u.value64;
                break;

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            case VMMDevHGCMParmType_LinAddr_In:
                pParm64->type                   = pParm32->type;
                pParm64->u.Pointer.size         = pParm32->u.Pointer.size;
                pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
                break;

            default:
                rc = VERR_INVALID_PARAMETER;
                LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
    {
        rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
                                    fRequestor, pfnAsyncCallback, pvAsyncData, u32AsyncData);

        if (RT_SUCCESS(rc))
        {
            *pCallInfo = *pCallInfo64;

            /*
             * Copy back.
             */
            pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
            pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
            for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
            {
                switch (pParm64->type)
                {
                    case VMMDevHGCMParmType_32bit:
                        pParm32->u.value32 = pParm64->u.value32;
                        break;

                    case VMMDevHGCMParmType_64bit:
                        pParm32->u.value64 = pParm64->u.value64;
                        break;

                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                    case VMMDevHGCMParmType_LinAddr_In:
                        pParm32->u.Pointer.size = pParm64->u.Pointer.size;
                        break;

                    default:
                        LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
                        rc = VERR_INTERNAL_ERROR_3;
                        break;
                }
            }
        }
        else
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
        }
    }
    else
    {
        static unsigned s_cErrors = 0;
        if (s_cErrors++ < 32)
            LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
    }

    RTMemTmpFree(pCallInfo64);
    return rc;
}
#endif /* ARCH_BITS == 64 */