VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 64733

Last change on this file since 64733 was 63839, checked in by vboxsync, 8 years ago

DevVGA: bugref:8387: keep u32ViewIndex for VBVA_SCREEN_F_BLANK2

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 115.3 KB
Line 
1/* $Id: DevVGA_VDMA.cpp 63839 2016-09-14 17:47:30Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#include <VBox/VMMDev.h>
23#include <VBox/vmm/pdmdev.h>
24#include <VBox/vmm/pgm.h>
25#include <VBox/VBoxVideo.h>
26#include <iprt/semaphore.h>
27#include <iprt/thread.h>
28#include <iprt/mem.h>
29#include <iprt/asm.h>
30#include <iprt/list.h>
31#include <iprt/param.h>
32
33#include "DevVGA.h"
34#include "HGSMI/SHGSMIHost.h"
35
36#include <VBox/VBoxVideo3D.h>
37#include <VBox/VBoxVideoHost3D.h>
38
39#ifdef DEBUG_misha
40# define VBOXVDBG_MEMCACHE_DISABLE
41#endif
42
43#ifndef VBOXVDBG_MEMCACHE_DISABLE
44# include <iprt/memcache.h>
45#endif
46
47
48/*********************************************************************************************************************************
49* Defined Constants And Macros *
50*********************************************************************************************************************************/
/* WARN_BP() breaks into the debugger (via assertion) in misha's debug builds only;
 * it is a no-op everywhere else. */
#ifdef DEBUG_misha
# define WARN_BP() do { AssertFailed(); } while (0)
#else
# define WARN_BP() do { } while (0)
#endif
/** Logs a release-log warning and optionally triggers a debug breakpoint. */
#define WARN(_msg) do { \
        LogRel(_msg); \
        WARN_BP(); \
    } while (0)

/* Worker thread life-cycle states kept in VBOXVDMATHREAD::u32State.
 * Note: the value 2 is intentionally unused (historical). */
#define VBOXVDMATHREAD_STATE_TERMINATED 0
#define VBOXVDMATHREAD_STATE_CREATING 1
#define VBOXVDMATHREAD_STATE_CREATED 3
#define VBOXVDMATHREAD_STATE_TERMINATING 4
65
66
67/*********************************************************************************************************************************
68* Structures and Typedefs *
69*********************************************************************************************************************************/
70struct VBOXVDMATHREAD;
71
72typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
73
74#ifdef VBOX_WITH_CRHGSMI
75static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
76#endif
77
78
/**
 * VDMA worker thread bookkeeping.
 */
typedef struct VBOXVDMATHREAD
{
    RTTHREAD hWorkerThread;                 /**< The worker thread handle. */
    RTSEMEVENT hEvent;                      /**< Event the worker waits on / is signalled through. */
    volatile uint32_t u32State;             /**< One of the VBOXVDMATHREAD_STATE_XXX values. */
    PFNVBOXVDMATHREAD_CHANGED pfnChanged;   /**< One-shot callback invoked on create/terminate completion. */
    void *pvChanged;                        /**< User context for pfnChanged. */
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
87
88
89/* state transformations:
90 *
91 * submitter | processor
92 *
93 * LISTENING ---> PROCESSING
94 *
95 * */
96#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
97#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
98
99#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
100#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
101#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
102
/**
 * Extended host VBVA context: ring buffer plus host/guest control queues.
 */
typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER *pVBVA;                  /**< Guest-shared VBVA buffer; NULL until enabled. */
    volatile int32_t i32State;          /**< VBVAEXHOSTCONTEXT_STATE_LISTENING or _PROCESSING. */
    volatile int32_t i32EnableState;    /**< VBVAEXHOSTCONTEXT_ESTATE_DISABLED/PAUSED/ENABLED. */
    volatile uint32_t u32cCtls;         /**< Total count of queued controls (both lists). */
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    RTLISTANCHOR GuestCtlList;          /**< Pending guest-originated controls. */
    RTLISTANCHOR HostCtlList;           /**< Pending host-originated controls. */
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMEMCACHE CtlCache;                /**< Allocation cache for VBVAEXHOSTCTL instances. */
#endif
} VBVAEXHOSTCONTEXT;
117
/**
 * Control command types.  HH_* are host-internal, GHH_* originate from the guest.
 */
typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,       /**< Processed internally; pauses the VBVA. */
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,      /**< Processed internally; resumes the VBVA. */
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
    VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
    VBVAEXHOSTCTL_TYPE_GHH_RESIZE
} VBVAEXHOSTCTL_TYPE;
134
135struct VBVAEXHOSTCTL;
136
137typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
138
/**
 * A single queued control command.
 */
typedef struct VBVAEXHOSTCTL
{
    RTLISTNODE Node;                        /**< List linkage (GuestCtlList/HostCtlList). */
    VBVAEXHOSTCTL_TYPE enmType;             /**< Selects the active union member. */
    union
    {
        /** Opaque command payload (BE_OPAQUE and similar types). */
        struct
        {
            uint8_t * pu8Cmd;               /**< Points into guest VRAM for guest controls. */
            uint32_t cbCmd;
        } cmd;

        /** Saved-state operations (HH_SAVESTATE / HH_LOADSTATE). */
        struct
        {
            PSSMHANDLE pSSM;
            uint32_t u32Version;
        } state;
    } u;
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;  /**< Completion callback; if NULL the ctl is simply freed. */
    void *pvComplete;                       /**< User context for pfnComplete. */
} VBVAEXHOSTCTL;
160
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands.
 * See more detailed comments in the headers for the function definitions. */
/** What kind of item VBoxVBVAExHPDataGet returned. */
typedef enum
{
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0,   /**< Nothing pending; processor state was released. */
    VBVAEXHOST_DATA_TYPE_CMD,           /**< A command record from the VBVA ring buffer. */
    VBVAEXHOST_DATA_TYPE_HOSTCTL,       /**< A host-originated VBVAEXHOSTCTL. */
    VBVAEXHOST_DATA_TYPE_GUESTCTL       /**< A guest-originated VBVAEXHOSTCTL. */
} VBVAEXHOST_DATA_TYPE;
172
173
174#ifdef VBOX_WITH_CRHGSMI
/** Per-source screen info and target-screen bitmap. */
typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;
180#endif
181
/**
 * The VDMA host device state.
 */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;                      /**< HGSMI instance used for command completion. */
    PVGASTATE pVGAState;                        /**< Owning VGA device state. */
#ifdef VBOX_WITH_CRHGSMI
    VBVAEXHOSTCONTEXT CmdVbva;                  /**< Command VBVA context. */
    VBOXVDMATHREAD Thread;                      /**< Worker thread processing CmdVbva. */
    VBOXCRCMD_SVRINFO CrSrvInfo;
    VBVAEXHOSTCTL* pCurRemainingHostCtl;
    RTSEMEVENTMULTI HostCrCtlCompleteEvent;
    int32_t volatile i32cHostCrCtlCompleted;
    RTCRITSECT CalloutCritSect;                 /**< Protects the callout list. */
// VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
200
201
202/*********************************************************************************************************************************
203* Internal Functions *
204*********************************************************************************************************************************/
205#ifdef VBOX_WITH_CRHGSMI
206static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
207static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
208
209static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
210static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
211
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently */
214static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
215
216static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
217static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
218static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
219static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
220static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
221static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
222
223#endif /* VBOX_WITH_CRHGSMI */
224
225
226
227#ifdef VBOX_WITH_CRHGSMI
228
229static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
230{
231# ifndef VBOXVDBG_MEMCACHE_DISABLE
232 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
233# else
234 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
235# endif
236}
237
238static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
239{
240# ifndef VBOXVDBG_MEMCACHE_DISABLE
241 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
242# else
243 RTMemFree(pCtl);
244# endif
245}
246
247static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
248{
249 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
250 if (!pCtl)
251 {
252 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
253 return NULL;
254 }
255
256 pCtl->enmType = enmType;
257 return pCtl;
258}
259
260static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
261{
262 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
263
264 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
265 return VINF_SUCCESS;
266 return VERR_SEM_BUSY;
267}
268
/**
 * Dequeues the next pending control, preferring host controls over guest ones.
 *
 * Must be called by the current processor (i32State == PROCESSING).
 *
 * @param   pCmdVbva       The context.
 * @param   pfHostCtl      Set to true if the returned control came from the host list,
 *                         false if from the guest list.  Untouched when NULL is returned.
 * @param   fHostOnlyMode  When true, only host controls are considered (used on disable),
 *                         and the quick u32cCtls pre-check is skipped.
 * @returns The dequeued control or NULL if none / lock failure.
 */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* Cheap unlocked pre-check: nothing queued at all. */
    if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        /* Host controls always take priority. */
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* Guest controls are only handed out while not paused. */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands */
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            /* Unlink and account for the dequeued control while still holding the lock. */
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
309
310static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
311{
312 bool fHostCtl = false;
313 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
314 Assert(!pCtl || fHostCtl);
315 return pCtl;
316}
317
318static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
319{
320 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
321 {
322 WARN(("Invalid state\n"));
323 return VERR_INVALID_STATE;
324 }
325
326 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
327 return VINF_SUCCESS;
328}
329
330static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
331{
332 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
333 {
334 WARN(("Invalid state\n"));
335 return VERR_INVALID_STATE;
336 }
337
338 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
339 return VINF_SUCCESS;
340}
341
342static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
343{
344 switch (pCtl->enmType)
345 {
346 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
347 {
348 VBoxVBVAExHPPause(pCmdVbva);
349 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
350 return true;
351 }
352 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
353 {
354 VBoxVBVAExHPResume(pCmdVbva);
355 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
356 return true;
357 }
358 default:
359 return false;
360 }
361}
362
363static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
364{
365 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
366
367 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
368}
369
370static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
371{
372 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
373 if (pCmdVbva->pVBVA)
374 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
375}
376
377static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
378{
379 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
380 if (pCmdVbva->pVBVA)
381 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
382}
383
/**
 * Fetches the next complete command record from the guest VBVA ring buffer.
 *
 * Must be called by the current processor while enabled (not paused/disabled).
 *
 * @returns VINF_SUCCESS with *ppCmd/*pcbCmd pointing into the ring buffer,
 *          VINF_EOF when no records are pending (outputs untouched),
 *          VINF_TRY_AGAIN when the head record is still being written by the guest,
 *          VERR_INVALID_STATE for a record crossing the ring-buffer wrap boundary
 *          (not supported by this device).
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
         indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record need to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
441
442static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
443{
444 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
445 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
446
447 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
448}
449
450static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
451{
452 if (pCtl->pfnComplete)
453 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
454 else
455 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
456}
457
458
/**
 * Worker for VBoxVBVAExHPDataGet: returns the next item to process.
 *
 * Priority order: queued controls first (internal pause/resume handled inline),
 * then ring-buffer commands.  Must be called as the current processor.
 *
 * @returns The item type; *ppCmd/*pcbCmd are only set for CMD/HOSTCTL/GUESTCTL.
 */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL*pCtl;
    bool fHostClt;

    for (;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    /* Not an internal pause/resume: hand the host control to the caller. */
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                /* Internal control was consumed inline; look for the next item. */
                continue;
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* Ring-buffer commands are only fetched while fully enabled. */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                /* Guest is still writing the record; back off briefly and retry. */
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }
    /* not reached */
}
509
/**
 * Fetches the next item to process, releasing the processor role when drained.
 *
 * On NO_DATA the "processing" guest flag is cleared and the processor state is
 * released, then the queue is re-checked to close the race window described below.
 *
 * @returns Item type; on anything but NO_DATA the caller remains the processor.
 */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                /* Still empty: release again and report no data. */
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            /* Something arrived in the window: keep the processor role and re-set the flag. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
542
543DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
544{
545 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
546
547 if (pVBVA)
548 {
549 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
550 uint32_t indexRecordFree = pVBVA->indexRecordFree;
551
552 if (indexRecordFirst != indexRecordFree)
553 return true;
554 }
555
556 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
557}
558
559/** Checks whether the new commands are ready for processing
560 * @returns
561 * VINF_SUCCESS - there are commands are in a queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
562 * VINF_EOF - no commands in a queue
563 * VINF_ALREADY_INITIALIZED - another thread already processing the commands
564 * VERR_INVALID_STATE - the VBVA is paused or pausing */
565static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
566{
567 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
568 if (RT_SUCCESS(rc))
569 {
570 /* we are the processor now */
571 if (vboxVBVAExHSHasCommands(pCmdVbva))
572 {
573 vboxVBVAExHPHgEventSet(pCmdVbva);
574 return VINF_SUCCESS;
575 }
576
577 vboxVBVAExHPProcessorRelease(pCmdVbva);
578 return VINF_EOF;
579 }
580 if (rc == VERR_SEM_BUSY)
581 return VINF_ALREADY_INITIALIZED;
582 return VERR_INVALID_STATE;
583}
584
/**
 * One-time initialization of a VBVAEXHOSTCONTEXT.
 *
 * Leaves the context DISABLED with the new owner holding the processor state
 * (i32State is deliberately set to PROCESSING).
 *
 * @returns VBox status code; on failure the critsect (and cache) are not leaked
 *          only because RTCritSectInit failure is the sole path without cleanup
 *          — NOTE(review): on RTMemCacheCreate failure the critsect is not
 *          deleted; confirm callers treat init failure as fatal.
 */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
# ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                              0, /* size_t cbAlignment */
                              UINT32_MAX, /* uint32_t cMaxObjects */
                              NULL, /* PFNMEMCACHECTOR pfnCtor*/
                              NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                              NULL, /* void *pvUser*/
                              0 /* uint32_t fFlags*/
                              );
        if (RT_SUCCESS(rc))
# endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
# ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
# endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
619
620DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
621{
622 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
623}
624
625DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
626{
627 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
628}
629
630static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
631{
632 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
633 {
634 WARN(("VBVAEx is enabled already\n"));
635 return VERR_INVALID_STATE;
636 }
637
638 pCmdVbva->pVBVA = pVBVA;
639 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
640 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
641 return VINF_SUCCESS;
642}
643
644static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
645{
646 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
647 return VINF_SUCCESS;
648
649 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
650 return VINF_SUCCESS;
651}
652
/**
 * Tears down a VBVAEXHOSTCONTEXT initialized by VBoxVBVAExHSInit.
 *
 * Both control lists must already be empty (asserted) and the processor must
 * have been stopped by the caller.
 */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

# ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
# endif

    /* Scrub the structure so stale pointers cannot be used after termination. */
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
673
674static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
675{
676 RT_NOREF(pCmdVbva);
677 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
678 AssertRCReturn(rc, rc);
679 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
680 AssertRCReturn(rc, rc);
681 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
682 AssertRCReturn(rc, rc);
683
684 return VINF_SUCCESS;
685}
686
687static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
688{
689 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
690 {
691 WARN(("vbva not paused\n"));
692 return VERR_INVALID_STATE;
693 }
694
695 VBVAEXHOSTCTL* pCtl;
696 int rc;
697 RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
698 {
699 rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
700 AssertRCReturn(rc, rc);
701 }
702
703 rc = SSMR3PutU32(pSSM, 0);
704 AssertRCReturn(rc, rc);
705
706 return VINF_SUCCESS;
707}
708
709
710/** Saves state
711 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
712 */
713static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
714{
715 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
716 if (RT_FAILURE(rc))
717 {
718 WARN(("RTCritSectEnter failed %d\n", rc));
719 return rc;
720 }
721
722 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
723 if (RT_FAILURE(rc))
724 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
725
726 RTCritSectLeave(&pCmdVbva->CltCritSect);
727
728 return rc;
729}
730
731static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
732{
733 RT_NOREF(u32Version);
734 uint32_t u32;
735 int rc = SSMR3GetU32(pSSM, &u32);
736 AssertLogRelRCReturn(rc, rc);
737
738 if (!u32)
739 return VINF_EOF;
740
741 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
742 if (!pHCtl)
743 {
744 WARN(("VBoxVBVAExHCtlCreate failed\n"));
745 return VERR_NO_MEMORY;
746 }
747
748 rc = SSMR3GetU32(pSSM, &u32);
749 AssertLogRelRCReturn(rc, rc);
750 pHCtl->u.cmd.cbCmd = u32;
751
752 rc = SSMR3GetU32(pSSM, &u32);
753 AssertLogRelRCReturn(rc, rc);
754 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
755
756 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
757 ++pCmdVbva->u32cCtls;
758
759 return VINF_SUCCESS;
760}
761
762
763static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
764{
765 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
766 {
767 WARN(("vbva not stopped\n"));
768 return VERR_INVALID_STATE;
769 }
770
771 int rc;
772
773 do {
774 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
775 AssertLogRelRCReturn(rc, rc);
776 } while (VINF_EOF != rc);
777
778 return VINF_SUCCESS;
779}
780
781/** Loads state
782 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
783 */
784static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
785{
786 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
787 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
788 if (RT_FAILURE(rc))
789 {
790 WARN(("RTCritSectEnter failed %d\n", rc));
791 return rc;
792 }
793
794 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
795 if (RT_FAILURE(rc))
796 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
797
798 RTCritSectLeave(&pCmdVbva->CltCritSect);
799
800 return rc;
801}
802
/** Who submitted a control: determines which queue it lands in. */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    VBVAEXHOSTCTL_SOURCE_HOST
} VBVAEXHOSTCTL_SOURCE;
808
809
/**
 * Queues a control for processing and kicks the command check.
 *
 * @param   enmSource     Selects the host or guest queue.
 * @param   pfnComplete   Completion callback stored on the control (may be NULL,
 *                        in which case completion frees the control).
 * @returns VERR_INVALID_STATE when the VBVA is disabled; otherwise the
 *          VBoxVBVAExHSCheckCommands status.
 */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    /* Cheap unlocked pre-check; rechecked below under the lock. */
    if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        Log(("cmd vbva not enabled\n"));
        return VERR_INVALID_STATE;
    }

    pCtl->pfnComplete = pfnComplete;
    pCtl->pvComplete = pvComplete;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        /* Re-check now that we hold the lock: state may have flipped meanwhile. */
        if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
        {
            Log(("cmd vbva not enabled\n"));
            RTCritSectLeave(&pCmdVbva->CltCritSect);
            return VERR_INVALID_STATE;
        }

        /* Anything above GUEST is a host source. */
        if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
        {
            RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
        }
        else
            RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);

        ASMAtomicIncU32(&pCmdVbva->u32cCtls);

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        /* Wake up / become the processor so the new control gets handled. */
        rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return rc;
}
849
850void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
851{
852 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
853 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
854 void *pvChanged = pThread->pvChanged;
855
856 pThread->pfnChanged = NULL;
857 pThread->pvChanged = NULL;
858
859 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
860
861 if (pfnChanged)
862 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
863}
864
865void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
866{
867 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
868 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
869 void *pvChanged = pThread->pvChanged;
870
871 pThread->pfnChanged = NULL;
872 pThread->pvChanged = NULL;
873
874 if (pfnChanged)
875 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
876}
877
878DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
879{
880 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
881}
882
883void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
884{
885 memset(pThread, 0, sizeof (*pThread));
886 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
887}
888
889int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
890{
891 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
892 switch (u32State)
893 {
894 case VBOXVDMATHREAD_STATE_TERMINATED:
895 return VINF_SUCCESS;
896 case VBOXVDMATHREAD_STATE_TERMINATING:
897 {
898 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
899 if (!RT_SUCCESS(rc))
900 {
901 WARN(("RTThreadWait failed %d\n", rc));
902 return rc;
903 }
904
905 RTSemEventDestroy(pThread->hEvent);
906
907 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
908 return VINF_SUCCESS;
909 }
910 default:
911 WARN(("invalid state"));
912 return VERR_INVALID_STATE;
913 }
914}
915
/**
 * Creates the VDMA worker thread.
 *
 * Reaps any previous TERMINATING thread first, creates the wakeup event and
 * the waitable "VDMA" IO thread, and parks the structure in CREATING until
 * the worker calls VBoxVDMAThreadNotifyConstructSucceeded.
 *
 * @param   pfnCreated  One-shot callback invoked once the worker is up.
 * @returns VBox status code; on failure the state is rolled back to TERMINATED.
 */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
{
    int rc = VBoxVDMAThreadCleanup(pThread);
    if (RT_FAILURE(rc))
    {
        WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
        return rc;
    }

    rc = RTSemEventCreate(&pThread->hEvent);
    if (RT_SUCCESS(rc))
    {
        /* Stage the change callback before the worker can observe the state. */
        pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
        pThread->pfnChanged = pfnCreated;
        pThread->pvChanged = pvCreated;
        rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        else
            WARN(("RTThreadCreate failed %d\n", rc));

        /* Roll back the event semaphore on thread-creation failure. */
        RTSemEventDestroy(pThread->hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %d\n", rc));

    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;

    return rc;
}
946
947DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
948{
949 int rc = RTSemEventSignal(pThread->hEvent);
950 AssertRC(rc);
951 return rc;
952}
953
954DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
955{
956 int rc = RTSemEventWait(pThread->hEvent, cMillies);
957 AssertRC(rc);
958 return rc;
959}
960
961int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
962{
963 int rc;
964 do
965 {
966 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
967 switch (u32State)
968 {
969 case VBOXVDMATHREAD_STATE_CREATED:
970 pThread->pfnChanged = pfnTerminated;
971 pThread->pvChanged = pvTerminated;
972 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
973 if (fNotify)
974 {
975 rc = VBoxVDMAThreadEventNotify(pThread);
976 AssertRC(rc);
977 }
978 return VINF_SUCCESS;
979 case VBOXVDMATHREAD_STATE_TERMINATING:
980 case VBOXVDMATHREAD_STATE_TERMINATED:
981 {
982 WARN(("thread is marked to termination or terminated\nn"));
983 return VERR_INVALID_STATE;
984 }
985 case VBOXVDMATHREAD_STATE_CREATING:
986 {
987 /* wait till the thread creation is completed */
988 WARN(("concurrent thread create/destron\n"));
989 RTThreadYield();
990 continue;
991 }
992 default:
993 WARN(("invalid state"));
994 return VERR_INVALID_STATE;
995 }
996 } while (1);
997
998 WARN(("should never be here\n"));
999 return VERR_INTERNAL_ERROR;
1000}
1001
1002static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
1003
1004typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
1005typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
1006
/**
 * Reference-counted private header prepended to every VBOXVDMACMD_CHROMIUM_CTL
 * allocated by vboxVDMACrCtlCreate.
 */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                             /**< Reference count; freed when it drops to zero. */
    int32_t rc;                                 /**< Completion status, VERR_NOT_IMPLEMENTED until set. */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;    /**< Optional completion callback. */
    void *pvCompletion;                         /**< User context for pfnCompletion. */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;               /**< The public command; payload follows in memory. */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;

/** Recovers the private header from a public command pointer (container-of). */
# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
1017
1018static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
1019{
1020 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
1021 Assert(pHdr);
1022 if (pHdr)
1023 {
1024 pHdr->cRefs = 1;
1025 pHdr->rc = VERR_NOT_IMPLEMENTED;
1026 pHdr->Cmd.enmType = enmCmd;
1027 pHdr->Cmd.cbCmd = cbCmd;
1028 return &pHdr->Cmd;
1029 }
1030
1031 return NULL;
1032}
1033
1034DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1035{
1036 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1037 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1038 if (!cRefs)
1039 RTMemFree(pHdr);
1040}
1041
#if 0 /* unused */
/** Counterpart of vboxVDMACrCtlRelease: adds one reference to the command. */
DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    ASMAtomicIncU32(&pHdr->cRefs);
}
#endif /* unused */
1049
1050DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1051{
1052 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1053 return pHdr->rc;
1054}
1055
1056static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1057{
1058 RT_NOREF(pVGAState, pCmd);
1059 RTSemEventSignal((RTSEMEVENT)pvContext);
1060}
1061
# if 0 /** @todo vboxVDMACrCtlCbReleaseCmd is unused */
/** Completion callback which just drops the command reference. */
static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    RT_NOREF(pVGAState, pvContext);
    vboxVDMACrCtlRelease(pCmd);
}
# endif
1069
1070static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1071{
1072 if ( pVGAState->pDrv
1073 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1074 {
1075 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1076 pHdr->pfnCompletion = pfnCompletion;
1077 pHdr->pvCompletion = pvCompletion;
1078 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1079 return VINF_SUCCESS;
1080 }
1081# ifdef DEBUG_misha
1082 Assert(0);
1083# endif
1084 return VERR_NOT_SUPPORTED;
1085}
1086
/**
 * Submits a chromium control command and blocks until its completion callback
 * signals the local event.
 *
 * @returns VBox status code from the submission or the wait.
 *
 * NOTE(review): if RTSemEventWaitNoResume fails (e.g. interrupted), the event
 * handle is NOT destroyed here.  Presumably this is deliberate: the completion
 * callback may still fire later and signal the handle, so destroying it now
 * could mean signalling a dead handle; the handle is leaked instead.  Confirm
 * this trade-off before "fixing" it.
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
# ifdef DEBUG_misha
        AssertRC(rc);
# endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1115
/** Completion context for vboxVDMACrHgcmSubmitSync.  ('CYNC' is a historical
 *  typo for 'SYNC'; the name is kept to avoid churning users.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;            /**< Status reported by the completion callback. */
    RTSEMEVENT hEvent; /**< Signalled when the control call completes. */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1121
1122static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1123{
1124 RT_NOREF(pCmd, cbCmd);
1125 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1126 pData->rc = rc;
1127 rc = RTSemEventSignal(pData->hEvent);
1128 if (!RT_SUCCESS(rc))
1129 WARN(("RTSemEventSignal failed %d\n", rc));
1130}
1131
1132static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1133{
1134 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1135 Data.rc = VERR_NOT_IMPLEMENTED;
1136 int rc = RTSemEventCreate(&Data.hEvent);
1137 if (!RT_SUCCESS(rc))
1138 {
1139 WARN(("RTSemEventCreate failed %d\n", rc));
1140 return rc;
1141 }
1142
1143 pCtl->CalloutList.List.pNext = NULL;
1144
1145 PVGASTATE pVGAState = pVdma->pVGAState;
1146 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1147 if (RT_SUCCESS(rc))
1148 {
1149 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1150 if (RT_SUCCESS(rc))
1151 {
1152 rc = Data.rc;
1153 if (!RT_SUCCESS(rc))
1154 {
1155 WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
1156 }
1157
1158 }
1159 else
1160 WARN(("RTSemEventWait failed %d\n", rc));
1161 }
1162 else
1163 WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));
1164
1165
1166 RTSemEventDestroy(Data.hEvent);
1167
1168 return rc;
1169}
1170
1171static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1172{
1173 VBVAEXHOSTCTL HCtl;
1174 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1175 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1176 if (RT_FAILURE(rc))
1177 {
1178 Log(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1179 return rc;
1180 }
1181
1182 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1183
1184 return VINF_SUCCESS;
1185}
1186
1187static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
1188{
1189 struct VBOXVDMAHOST *pVdma = hClient;
1190 if (!pVdma->pCurRemainingHostCtl)
1191 {
1192 /* disable VBVA, all subsequent host commands will go HGCM way */
1193 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1194 }
1195 else
1196 {
1197 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
1198 }
1199
1200 pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
1201 if (pVdma->pCurRemainingHostCtl)
1202 {
1203 *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
1204 return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
1205 }
1206
1207 *pcbCtl = 0;
1208 return NULL;
1209}
1210
/**
 * Called when the chromium service has finished handling the terminating
 * notification; in strict builds this verifies the VBVA context is still in
 * the processing state and the worker thread is marked terminating.
 */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
# ifdef VBOX_STRICT
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
# else
    RT_NOREF(hClient);
# endif
}
1221
1222static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1223{
1224 struct VBOXVDMAHOST *pVdma = hClient;
1225 VBVAEXHOSTCTL HCtl;
1226 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1227 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1228
1229 pHgcmEnableData->hRHCmd = pVdma;
1230 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1231
1232 if (RT_FAILURE(rc))
1233 {
1234 if (rc == VERR_INVALID_STATE)
1235 rc = VINF_SUCCESS;
1236 else
1237 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1238 }
1239
1240 return rc;
1241}
1242
1243static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1244{
1245 VBOXCRCMDCTL_ENABLE Enable;
1246 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1247 Enable.Data.hRHCmd = pVdma;
1248 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1249
1250 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1251 Assert(!pVdma->pCurRemainingHostCtl);
1252 if (RT_SUCCESS(rc))
1253 {
1254 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1255 return VINF_SUCCESS;
1256 }
1257
1258 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1259 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1260
1261 return rc;
1262}
1263
1264static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1265{
1266 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1267 {
1268 WARN(("vdma VBVA is already enabled\n"));
1269 return VERR_INVALID_STATE;
1270 }
1271
1272 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1273 if (!pVBVA)
1274 {
1275 WARN(("invalid offset %d\n", u32Offset));
1276 return VERR_INVALID_PARAMETER;
1277 }
1278
1279 if (!pVdma->CrSrvInfo.pfnEnable)
1280 {
1281# ifdef DEBUG_misha
1282 WARN(("pfnEnable is NULL\n"));
1283 return VERR_NOT_SUPPORTED;
1284# endif
1285 }
1286
1287 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1288 if (RT_SUCCESS(rc))
1289 {
1290 VBOXCRCMDCTL_DISABLE Disable;
1291 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1292 Disable.Data.hNotifyTerm = pVdma;
1293 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1294 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1295 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1296 if (RT_SUCCESS(rc))
1297 {
1298 PVGASTATE pVGAState = pVdma->pVGAState;
1299 VBOXCRCMD_SVRENABLE_INFO Info;
1300 Info.hCltScr = pVGAState->pDrv;
1301 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1302 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1303 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1304 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1305 if (RT_SUCCESS(rc))
1306 return VINF_SUCCESS;
1307 else
1308 WARN(("pfnEnable failed %d\n", rc));
1309
1310 vboxVDMACrHgcmHandleEnable(pVdma);
1311 }
1312 else
1313 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1314
1315 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1316 }
1317 else
1318 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1319
1320 return rc;
1321}
1322
/**
 * Handles disabling of the command VBVA.
 *
 * Stops the chromium command server and, when @a fDoHgcmEnable is set,
 * switches control traffic back to the HGCM channel and notifies the
 * frontend that VBVA commands are no longer processed.
 *
 * @returns VBox status code.
 * @param   pVdma           The VDMA instance.
 * @param   fDoHgcmEnable   Whether to re-enable the HGCM command path after
 *                          disabling VBVA (false when HGCM itself unloads).
 *
 * NOTE(review): pfnDisable is invoked without a NULL check, unlike pfnEnable
 * in vdmaVBVAEnableProcess -- presumably it is always set once VBVA got
 * enabled; confirm against the 3D service setup.
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            /* Rollback: HGCM enable failed, so re-enable the chromium command
             * server with the usual screen update callbacks. */
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1361
1362static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1363{
1364 *pfContinue = true;
1365
1366 switch (pCmd->enmType)
1367 {
1368 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1369 {
1370 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1371 {
1372 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1373 return VERR_INVALID_STATE;
1374 }
1375 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1376 }
1377 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1378 {
1379 int rc = vdmaVBVADisableProcess(pVdma, true);
1380 if (RT_FAILURE(rc))
1381 {
1382 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1383 return rc;
1384 }
1385
1386 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1387 }
1388 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1389 {
1390 int rc = vdmaVBVADisableProcess(pVdma, false);
1391 if (RT_FAILURE(rc))
1392 {
1393 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1394 return rc;
1395 }
1396
1397 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1398 if (RT_FAILURE(rc))
1399 {
1400 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1401 return rc;
1402 }
1403
1404 *pfContinue = false;
1405 return VINF_SUCCESS;
1406 }
1407 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1408 {
1409 PVGASTATE pVGAState = pVdma->pVGAState;
1410 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1411 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1412 if (RT_FAILURE(rc))
1413 {
1414 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1415 return rc;
1416 }
1417 VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
1418
1419 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1420 }
1421 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1422 {
1423 PVGASTATE pVGAState = pVdma->pVGAState;
1424 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1425
1426 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1427 if (RT_FAILURE(rc))
1428 {
1429 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1430 return rc;
1431 }
1432
1433 VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
1434 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1435 if (RT_FAILURE(rc))
1436 {
1437 WARN(("pfnLoadState failed %d\n", rc));
1438 return rc;
1439 }
1440
1441 return VINF_SUCCESS;
1442 }
1443 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1444 {
1445 PVGASTATE pVGAState = pVdma->pVGAState;
1446
1447 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1448 {
1449 VBVAINFOSCREEN CurScreen;
1450 VBVAINFOVIEW CurView;
1451
1452 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1453 if (RT_FAILURE(rc))
1454 {
1455 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1456 return rc;
1457 }
1458
1459 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1460 if (RT_FAILURE(rc))
1461 {
1462 WARN(("VBVAInfoScreen failed %d\n", rc));
1463 return rc;
1464 }
1465 }
1466
1467 return VINF_SUCCESS;
1468 }
1469 default:
1470 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1471 return VERR_INVALID_PARAMETER;
1472 }
1473}
1474
/**
 * Validates and normalizes a guest-supplied VBVAINFOSCREEN in place.
 *
 * Three cases are handled:
 *  - F_DISABLED: the structure is zeroed except for the view index and is
 *    marked ACTIVE|DISABLED.
 *  - F_BLANK2 (blank using current mode): the structure is zeroed except for
 *    the view index and flags.
 *  - otherwise: all geometry fields are range-checked against the monitor
 *    count and VRAM size.
 *
 * @returns VINF_SUCCESS if the (possibly rewritten) screen data is usable,
 *          VERR_INVALID_PARAMETER otherwise.
 */
static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
{
    const uint32_t u32ViewIndex = pScreen->u32ViewIndex;
    const uint16_t u16Flags = pScreen->u16Flags;

    if (u16Flags & VBVA_SCREEN_F_DISABLED)
    {
        /* 0xFFFFFFFF means "all screens". */
        if (   u32ViewIndex < pVGAState->cMonitors
            || u32ViewIndex == UINT32_C(0xFFFFFFFF))
        {
            RT_ZERO(*pScreen);
            pScreen->u32ViewIndex = u32ViewIndex;
            pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
            return VINF_SUCCESS;
        }
    }
    else
    {
        if (u16Flags & VBVA_SCREEN_F_BLANK2)
        {
            if (   u32ViewIndex >= pVGAState->cMonitors
                && u32ViewIndex != UINT32_C(0xFFFFFFFF))
            {
                return VERR_INVALID_PARAMETER;
            }

            /* Special case for blanking using current video mode.
             * Only 'u16Flags' and 'u32ViewIndex' field are relevant.
             */
            RT_ZERO(*pScreen);
            pScreen->u32ViewIndex = u32ViewIndex;
            pScreen->u16Flags = u16Flags;
            return VINF_SUCCESS;
        }

        /* Bound the individual fields first so the arithmetic below cannot
         * overflow 32 bits. */
        if (   u32ViewIndex < pVGAState->cMonitors
            && pScreen->u16BitsPerPixel <= 32
            && pScreen->u32Width <= UINT16_MAX
            && pScreen->u32Height <= UINT16_MAX
            && pScreen->u32LineSize <= UINT16_MAX * 4)
        {
            const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
            if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
            {
                /* 64-bit screen size; u64ScreenSize <= vram_size is checked
                 * before the (uint32_t) truncation in the last comparison. */
                const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
                if (   pScreen->u32StartOffset <= pVGAState->vram_size
                    && u64ScreenSize <= pVGAState->vram_size
                    && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
                {
                    return VINF_SUCCESS;
                }
            }
        }
    }

    return VERR_INVALID_PARAMETER;
}
1532
/**
 * Applies one resize entry from a guest GHH_RESIZE control.
 *
 * Validates the screen data, forwards the resize to the chromium command
 * server and then updates the 2D view/screen information for every monitor
 * in the entry's target map.
 *
 * @returns VBox status code.
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;

    /* Verify and cleanup local copy of the input data. */
    int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
    if (RT_FAILURE(rc))
    {
        WARN(("invalid screen data\n"));
        return rc;
    }

    /* Mask out target bits beyond the configured monitor count. */
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    memcpy(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
    {
        WARN(("pfnResize failed %d\n", rc));
        return rc;
    }

    /* A fake view which contains the current screen for the 2D VBVAInfoView. */
    VBVAINFOVIEW View;
    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;

    const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);

    /* Iterate all set bits in the target map. */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* Skip monitors whose screen data is already up to date. */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        /* The view does not change if _BLANK2 is set. */
        if (   (!fDisable || !CurView.u32ViewSize)
            && !RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_BLANK2))
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    return rc;
}
1604
1605static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1606{
1607 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1608 switch (enmType)
1609 {
1610 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1611 {
1612 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1613 {
1614 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1615 return VERR_INVALID_STATE;
1616 }
1617 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1618 }
1619 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1620 {
1621 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1622 {
1623 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1624 return VERR_INVALID_STATE;
1625 }
1626
1627 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1628
1629 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1630 {
1631 WARN(("invalid buffer size\n"));
1632 return VERR_INVALID_PARAMETER;
1633 }
1634
1635 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1636 if (!cElements)
1637 {
1638 WARN(("invalid buffer size\n"));
1639 return VERR_INVALID_PARAMETER;
1640 }
1641
1642 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1643
1644 int rc = VINF_SUCCESS;
1645
1646 for (uint32_t i = 0; i < cElements; ++i)
1647 {
1648 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1649 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1650 if (RT_FAILURE(rc))
1651 {
1652 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1653 break;
1654 }
1655 }
1656 return rc;
1657 }
1658 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1659 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1660 {
1661 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1662 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1663 uint32_t u32Offset = pEnable->u32Offset;
1664 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1665 if (!RT_SUCCESS(rc))
1666 {
1667 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1668 return rc;
1669 }
1670
1671 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1672 {
1673 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1674 if (!RT_SUCCESS(rc))
1675 {
1676 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1677 return rc;
1678 }
1679 }
1680
1681 return VINF_SUCCESS;
1682 }
1683 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1684 {
1685 int rc = vdmaVBVADisableProcess(pVdma, true);
1686 if (RT_FAILURE(rc))
1687 {
1688 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1689 return rc;
1690 }
1691
1692 /* do vgaUpdateDisplayAll right away */
1693 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
1694 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
1695
1696 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1697 }
1698 default:
1699 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1700 return VERR_INVALID_PARAMETER;
1701 }
1702}
1703
1704/**
1705 * @param fIn - whether this is a page in or out op.
1706 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1707 */
1708static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1709{
1710 RTGCPHYS phPage = (RTGCPHYS)iPage << PAGE_SHIFT;
1711 PGMPAGEMAPLOCK Lock;
1712 int rc;
1713
1714 if (fIn)
1715 {
1716 const void * pvPage;
1717 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1718 if (!RT_SUCCESS(rc))
1719 {
1720 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1721 return rc;
1722 }
1723
1724 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1725
1726 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1727 }
1728 else
1729 {
1730 void * pvPage;
1731 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1732 if (!RT_SUCCESS(rc))
1733 {
1734 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1735 return rc;
1736 }
1737
1738 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1739
1740 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1741 }
1742
1743 return VINF_SUCCESS;
1744}
1745
1746static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1747{
1748 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1749 {
1750 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1751 if (!RT_SUCCESS(rc))
1752 {
1753 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1754 return rc;
1755 }
1756 }
1757
1758 return VINF_SUCCESS;
1759}
1760
/**
 * Validates a paging transfer command and derives the page list, the VRAM
 * address and the transfer direction from it.
 *
 * @returns 0 on success, -1 on invalid input (int8_t status).
 * @param   pVGAState   The VGA state (for VRAM base/size).
 * @param   pHdr        The command header (holds the direction flag).
 * @param   pData       The transfer payload to validate.
 * @param   cbCmd       Total size of the command in bytes.
 * @param   ppPages     Where to return the guest page index array.
 * @param   pcPages     Where to return the page count.
 * @param   ppu8Vram    Where to return the VRAM address to transfer to/from.
 * @param   pfIn        Where to return the direction (true: into VRAM).
 */
static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
                            const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
                            uint8_t **ppu8Vram, bool *pfIn)
{
    if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
    {
        WARN(("cmd too small"));
        return -1;
    }

    /* cPages initially holds the SIZE of the page-index array in bytes;
     * it becomes a count only after the division below. */
    VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
    if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
    {
        WARN(("invalid cmd size"));
        return -1;
    }
    cPages /= sizeof (VBOXCMDVBVAPAGEIDX);

    VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
    if (offVRAM & PAGE_OFFSET_MASK)
    {
        WARN(("offVRAM address is not on page boundary\n"));
        return -1;
    }
    const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;

    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
    if (offVRAM >= pVGAState->vram_size)
    {
        WARN(("invalid vram offset"));
        return -1;
    }

    /* Reject page counts whose high bits would be shifted out below, i.e.
     * guarantees cPages << PAGE_SHIFT cannot overflow. */
    if (~(~(VBOXCMDVBVAPAGEIDX)0 >> PAGE_SHIFT) & cPages)
    {
        WARN(("invalid cPages %d", cPages));
        return -1;
    }

    if (offVRAM + ((VBOXCMDVBVAOFFSET)cPages << PAGE_SHIFT) >= pVGAState->vram_size)
    {
        WARN(("invalid cPages %d, exceeding vram size", cPages));
        return -1;
    }

    uint8_t *pu8Vram = pu8VramBase + offVRAM;
    /* Direction is encoded in the command header flags. */
    bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);

    *ppPages = pPages;
    *pcPages = cPages;
    *ppu8Vram = pu8Vram;
    *pfIn = fIn;
    return 0;
}
1815
1816static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1817{
1818 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1819 if (offVRAM & PAGE_OFFSET_MASK)
1820 {
1821 WARN(("offVRAM address is not on page boundary\n"));
1822 return -1;
1823 }
1824
1825 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1826 if (offVRAM >= pVGAState->vram_size)
1827 {
1828 WARN(("invalid vram offset"));
1829 return -1;
1830 }
1831
1832 uint32_t cbFill = pFill->u32CbFill;
1833
1834 if (offVRAM + cbFill >= pVGAState->vram_size)
1835 {
1836 WARN(("invalid cPages"));
1837 return -1;
1838 }
1839
1840 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1841 uint32_t u32Color = pFill->u32Pattern;
1842
1843 Assert(!(cbFill % 4));
1844 for (uint32_t i = 0; i < cbFill / 4; ++i)
1845 {
1846 pu32Vram[i] = u32Color;
1847 }
1848
1849 return 0;
1850}
1851
/**
 * Processes a fully assembled (linear) guest command.
 *
 * NOP and paging commands are handled inline; everything else is forwarded
 * to the chromium command server.
 *
 * @returns 0 on success, negative int8_t status on failure.
 */
static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_NOPCMD:
            return 0;
        case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            const VBOXCMDVBVAPAGEIDX *pPages;
            uint32_t cPages;
            uint8_t *pu8Vram;
            bool fIn;
            /* Validate the request and resolve the page list / VRAM target. */
            int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
            if (i8Result < 0)
            {
                WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                return i8Result;
            }

            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
            if (!RT_SUCCESS(rc))
            {
                WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                return -1;
            }

            return 0;
        }
        case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
            {
                WARN(("cmd too small"));
                return -1;
            }

            return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
        }
        default:
            /* Everything else goes to the chromium command server. */
            return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
    }
}
1899
# if 0
/* Disabled historical definition, kept for reference only; the live
 * VBOXCMDVBVA_PAGING_TRANSFER comes from the shared video headers. */
typedef struct VBOXCMDVBVA_PAGING_TRANSFER
{
    VBOXCMDVBVA_HDR Hdr;
    /* for now can only contain offVRAM.
     * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
    VBOXCMDVBVA_ALLOCINFO Alloc;
    uint32_t u32Reserved;
    VBOXCMDVBVA_SYSMEMEL aSysMem[1];
} VBOXCMDVBVA_PAGING_TRANSFER;
# endif
1911
/* Compile-time layout checks for structures shared with the guest. */
AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));

/** Number of VBOXCMDVBVA_SYSMEMEL descriptors that fit into one guest page. */
# define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1918
/**
 * Recursive dispatcher for guest VBVA commands.
 *
 * Handles commands whose real body lives in guest system memory (SYSMEMCMD),
 * container commands (COMPLEXCMD, processed entry by entry via recursion) and
 * forwards everything else to vboxVDMACrCmdVbvaProcessCmdData.
 *
 * @returns 0 on success, negative int8_t status on failure.
 */
static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
        {
            if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
            {
                WARN(("invalid command size"));
                return -1;
            }
            VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
            const VBOXCMDVBVA_HDR *pRealCmdHdr;
            /* The size of the real command is split over two 8-bit fields of
             * the wrapper header: u8Flags (low byte), u.u8PrimaryID (high byte). */
            uint32_t cbRealCmd = pCmd->u8Flags;
            cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
            if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
            {
                WARN(("invalid sysmem cmd size"));
                return -1;
            }

            RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;

            PGMPAGEMAPLOCK Lock;
            PVGASTATE pVGAState = pVdma->pVGAState;
            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            const void * pvCmd;
            int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
            if (!RT_SUCCESS(rc))
            {
                WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                return -1;
            }

            Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));

            /* Bytes of the real command available in the first mapped page. */
            uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);

            if (cbRealCmd <= cbCmdPart)
            {
                /* Fully contained in one guest page: process directly.
                 * NOTE(review): i8Result is declared uint8_t although the callee
                 * returns int8_t; the value round-trips through the int8_t
                 * return type, so negative statuses survive on two's complement. */
                pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
                uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
                PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
                return i8Result;
            }

            /* The command spans guest pages. */
            VBOXCMDVBVA_HDR Hdr;
            const void *pvCurCmdTail;
            uint32_t cbCurCmdTail;
            if (cbCmdPart >= sizeof (*pRealCmdHdr))
            {
                pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
                pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
                cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
            }
            else
            {
                /* Even the header is split: reassemble it in Hdr from the
                 * first two pages. */
                memcpy(&Hdr, pvCmd, cbCmdPart);
                PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
                phCmd += cbCmdPart;
                Assert(!(phCmd & PAGE_OFFSET_MASK));
                rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
                if (!RT_SUCCESS(rc))
                {
                    WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                    return -1;
                }

                cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
                memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
                pRealCmdHdr = &Hdr;
                pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
                cbCurCmdTail = PAGE_SIZE - cbCmdPart;
            }

            /* Clip the tail to what the real command actually contains. */
            if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
                cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);

            int8_t i8Result = 0;

            switch (pRealCmdHdr->u8OpCode)
            {
                case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
                {
                    const uint32_t *pPages;
                    uint32_t cPages;
                    uint8_t *pu8Vram;
                    bool fIn;
                    i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
                    if (i8Result < 0)
                    {
                        WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                        /* we need to break, not return, to ensure currently locked page is released */
                        break;
                    }

                    if (cbCurCmdTail & 3)
                    {
                        WARN(("command is not alligned properly %d", cbCurCmdTail));
                        i8Result = -1;
                        /* we need to break, not return, to ensure currently locked page is released */
                        break;
                    }

                    /* Process the page list chunk-wise, remapping one guest
                     * page of indices at a time. */
                    uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
                    /* NOTE(review): strict '<' -- an exactly page-aligned final
                     * chunk would equal cPages; confirm guests never submit that. */
                    Assert(cCurPages < cPages);

                    do
                    {
                        rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
                        if (!RT_SUCCESS(rc))
                        {
                            WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                            i8Result = -1;
                            /* we need to break, not return, to ensure currently locked page is released */
                            break;
                        }

                        Assert(cPages >= cCurPages);
                        cPages -= cCurPages;

                        if (!cPages)
                            break;

                        PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);

                        Assert(!(phCmd & PAGE_OFFSET_MASK));

                        phCmd += PAGE_SIZE;
                        pu8Vram += (VBOXCMDVBVAOFFSET)cCurPages << PAGE_SHIFT;

                        rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
                        if (!RT_SUCCESS(rc))
                        {
                            WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                            /* the page is not locked, return */
                            return -1;
                        }

                        cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
                        if (cCurPages > cPages)
                            cCurPages = cPages;
                    } while (1);
                    break;
                }
                default:
                    WARN(("command can not be splitted"));
                    i8Result = -1;
                    break;
            }

            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
            return i8Result;
        }
        case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
        {
            /* Container: iterate the embedded sub-commands and recurse. */
            Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
            ++pCmd;
            cbCmd -= sizeof (*pCmd);
            uint32_t cbCurCmd = 0;
            for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
            {
                if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
                {
                    WARN(("invalid command size"));
                    return -1;
                }

                cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
                if (cbCmd < cbCurCmd)
                {
                    WARN(("invalid command size"));
                    return -1;
                }

                int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
                if (i8Result < 0)
                {
                    WARN(("vboxVDMACrCmdVbvaProcess failed"));
                    return i8Result;
                }
            }
            return 0;
        }
        default:
            return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
    }
}
2109
2110static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
2111{
2112 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
2113 return;
2114
2115 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2116 {
2117 WARN(("invalid command size"));
2118 return;
2119 }
2120
2121 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
2122
2123 /* check if the command is cancelled */
2124 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2125 {
2126 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2127 return;
2128 }
2129
2130 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
2131}
2132
2133static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2134{
2135 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
2136 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
2137 int rc = VERR_NO_MEMORY;
2138 if (pCmd)
2139 {
2140 PVGASTATE pVGAState = pVdma->pVGAState;
2141 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2142 pCmd->cbVRam = pVGAState->vram_size;
2143 pCmd->pLed = &pVGAState->Led3D;
2144 pCmd->CrClientInfo.hClient = pVdma;
2145 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2146 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2147 if (RT_SUCCESS(rc))
2148 {
2149 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2150 if (RT_SUCCESS(rc))
2151 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2152 else if (rc != VERR_NOT_SUPPORTED)
2153 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
2154 }
2155 else
2156 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
2157
2158 vboxVDMACrCtlRelease(&pCmd->Hdr);
2159 }
2160
2161 if (!RT_SUCCESS(rc))
2162 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2163
2164 return rc;
2165}
2166
2167static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2168
/** Checks whether the guest command is an external (chromium / BPB transfer)
 * command and, if so, handles it right here.
 *
 * @returns VINF_SUCCESS when the command was recognized and handled (or its
 *          asynchronous completion was initiated), VINF_NOT_SUPPORTED when it
 *          should go through the regular processing pipeline instead, or
 *          VERR_INVALID_PARAMETER on malformed guest input.
 * @param   pVdma    The VDMA host state.
 * @param   pCmdDr   The guest command descriptor (SHGSMI buffer, guest data).
 * @param   cbCmdDr  Size of the descriptor buffer in bytes.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* The embedded command must fit behind the descriptor in the buffer. */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this bound rejects a too-small cbDmaCmd rather than a
         * too-large one - looks inverted; confirm against the guest contract. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* Command lives in VRAM; validate the range against the VRAM size. */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* Asynchronous path: the chromium backend completes the DR later. */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* No backend connector: complete the command right away. */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* Synchronous transfer; complete the DR here on success. */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                /* Not an external command; caller runs the generic pipeline. */
                break;
        }
    }
    return rc;
}
2264
2265int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2266{
2267 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2268 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2269 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2270 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2271 AssertRC(rc);
2272 pDr->rc = rc;
2273
2274 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2275 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2276 AssertRC(rc);
2277 return rc;
2278}
2279
2280int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2281{
2282 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2283 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2284 pCmdPrivate->rc = rc;
2285 if (pCmdPrivate->pfnCompletion)
2286 {
2287 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2288 }
2289 return VINF_SUCCESS;
2290}
2291
2292static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
2293 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2294 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
2295{
2296 RT_NOREF(pVdma);
2297 /* we do not support color conversion */
2298 Assert(pDstDesc->format == pSrcDesc->format);
2299 /* we do not support stretching */
2300 Assert(pDstRectl->height == pSrcRectl->height);
2301 Assert(pDstRectl->width == pSrcRectl->width);
2302 if (pDstDesc->format != pSrcDesc->format)
2303 return VERR_INVALID_FUNCTION;
2304 if (pDstDesc->width == pDstRectl->width
2305 && pSrcDesc->width == pSrcRectl->width
2306 && pSrcDesc->width == pDstDesc->width)
2307 {
2308 Assert(!pDstRectl->left);
2309 Assert(!pSrcRectl->left);
2310 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
2311 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
2312 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
2313 }
2314 else
2315 {
2316 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2317 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2318 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2319 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2320 Assert(cbDstLine <= pDstDesc->pitch);
2321 uint32_t cbDstSkip = pDstDesc->pitch;
2322 uint8_t * pvDstStart = pvDstSurf + offDstStart;
2323
2324 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2325# ifdef VBOX_STRICT
2326 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2327 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2328# endif
2329 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2330 Assert(cbSrcLine <= pSrcDesc->pitch);
2331 uint32_t cbSrcSkip = pSrcDesc->pitch;
2332 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
2333
2334 Assert(cbDstLine == cbSrcLine);
2335
2336 for (uint32_t i = 0; ; ++i)
2337 {
2338 memcpy (pvDstStart, pvSrcStart, cbDstLine);
2339 if (i == pDstRectl->height)
2340 break;
2341 pvDstStart += cbDstSkip;
2342 pvSrcStart += cbSrcSkip;
2343 }
2344 }
2345 return VINF_SUCCESS;
2346}
2347
2348static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2349{
2350 if (!pRectl1->width)
2351 *pRectl1 = *pRectl2;
2352 else
2353 {
2354 int16_t x21 = pRectl1->left + pRectl1->width;
2355 int16_t x22 = pRectl2->left + pRectl2->width;
2356 if (pRectl1->left > pRectl2->left)
2357 {
2358 pRectl1->left = pRectl2->left;
2359 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2360 }
2361 else if (x21 < x22)
2362 pRectl1->width = x22 - pRectl1->left;
2363
2364 x21 = pRectl1->top + pRectl1->height;
2365 x22 = pRectl2->top + pRectl2->height;
2366 if (pRectl1->top > pRectl2->top)
2367 {
2368 pRectl1->top = pRectl2->top;
2369 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2370 }
2371 else if (x21 < x22)
2372 pRectl1->height = x22 - pRectl1->top;
2373 }
2374}
2375
2376/*
2377 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2378 */
/** Executes a DMA present-blt command: copies the source surface rectangle(s)
 * to the destination surface, both located in VRAM.
 *
 * @returns On success the number of bytes the command occupied (cbBlt),
 *          otherwise VERR_INVALID_FUNCTION.
 * @param   pVdma     The VDMA host state (provides the VRAM mapping).
 * @param   pBlt      The blt command body (guest supplied).
 * @param   cbBuffer  Number of bytes available in the command buffer.
 */
static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
{
    /* Command size including the variable-length destination sub-rect array. */
    const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
    Assert(cbBlt <= cbBuffer);
    if (cbBuffer < cbBlt)
        return VERR_INVALID_FUNCTION;

    /* we do not support stretching for now */
    Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
    Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
    if (pBlt->srcRectl.width != pBlt->dstRectl.width)
        return VERR_INVALID_FUNCTION;
    if (pBlt->srcRectl.height != pBlt->dstRectl.height)
        return VERR_INVALID_FUNCTION;
    Assert(pBlt->cDstSubRects);

    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    /* Union of all destination rectangles blitted; accumulated below but not
     * consumed further in this function. */
    VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};

    if (pBlt->cDstSubRects)
    {
        VBOXVDMA_RECTL dstRectl, srcRectl;
        const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
        for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
        {
            pDstRectl = &pBlt->aDstSubRects[i];
            if (pBlt->dstRectl.left || pBlt->dstRectl.top)
            {
                /* Translate the sub-rect by the destination rect origin. */
                dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
                dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
                dstRectl.width = pDstRectl->width;
                dstRectl.height = pDstRectl->height;
                pDstRectl = &dstRectl;
            }

            /* NOTE(review): the source sub-rect is derived from the same
             * aDstSubRects entry (there is no separate source array) and then
             * translated by the source rect origin - presumably intentional. */
            pSrcRectl = &pBlt->aDstSubRects[i];
            if (pBlt->srcRectl.left || pBlt->srcRectl.top)
            {
                srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
                srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
                srcRectl.width = pSrcRectl->width;
                srcRectl.height = pSrcRectl->height;
                pSrcRectl = &srcRectl;
            }

            int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                                               &pBlt->dstDesc, &pBlt->srcDesc,
                                               pDstRectl,
                                               pSrcRectl);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
                return rc;

            vboxVDMARectlUnite(&updateRectl, pDstRectl);
        }
    }
    else
    {
        /* No sub-rectangles: blt the whole dst/src rectangles in one call. */
        int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                                           &pBlt->dstDesc, &pBlt->srcDesc,
                                           &pBlt->dstRectl,
                                           &pBlt->srcRectl);
        AssertRC(rc);
        if (!RT_SUCCESS(rc))
            return rc;

        vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
    }

    return cbBlt;
}
2450
2451static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2452{
2453 if (cbBuffer < sizeof (*pTransfer))
2454 return VERR_INVALID_PARAMETER;
2455
2456 PVGASTATE pVGAState = pVdma->pVGAState;
2457 uint8_t * pvRam = pVGAState->vram_ptrR3;
2458 PGMPAGEMAPLOCK SrcLock;
2459 PGMPAGEMAPLOCK DstLock;
2460 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2461 const void * pvSrc;
2462 void * pvDst;
2463 int rc = VINF_SUCCESS;
2464 uint32_t cbTransfer = pTransfer->cbTransferSize;
2465 uint32_t cbTransfered = 0;
2466 bool bSrcLocked = false;
2467 bool bDstLocked = false;
2468 do
2469 {
2470 uint32_t cbSubTransfer = cbTransfer;
2471 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2472 {
2473 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2474 }
2475 else
2476 {
2477 RTGCPHYS phPage = pTransfer->Src.phBuf;
2478 phPage += cbTransfered;
2479 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2480 AssertRC(rc);
2481 if (RT_SUCCESS(rc))
2482 {
2483 bSrcLocked = true;
2484 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2485 }
2486 else
2487 {
2488 break;
2489 }
2490 }
2491
2492 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2493 {
2494 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2495 }
2496 else
2497 {
2498 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2499 phPage += cbTransfered;
2500 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2501 AssertRC(rc);
2502 if (RT_SUCCESS(rc))
2503 {
2504 bDstLocked = true;
2505 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2506 }
2507 else
2508 {
2509 break;
2510 }
2511 }
2512
2513 if (RT_SUCCESS(rc))
2514 {
2515 memcpy(pvDst, pvSrc, cbSubTransfer);
2516 cbTransfer -= cbSubTransfer;
2517 cbTransfered += cbSubTransfer;
2518 }
2519 else
2520 {
2521 cbTransfer = 0; /* to break */
2522 }
2523
2524 if (bSrcLocked)
2525 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2526 if (bDstLocked)
2527 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2528 } while (cbTransfer);
2529
2530 if (RT_SUCCESS(rc))
2531 return sizeof (*pTransfer);
2532 return rc;
2533}
2534
2535static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2536{
2537 do
2538 {
2539 Assert(pvBuffer);
2540 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2541
2542 if (!pvBuffer)
2543 return VERR_INVALID_PARAMETER;
2544 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2545 return VERR_INVALID_PARAMETER;
2546
2547 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2548 switch (pCmd->enmType)
2549 {
2550 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2551 {
2552# ifdef VBOXWDDM_TEST_UHGSMI
2553 static int count = 0;
2554 static uint64_t start, end;
2555 if (count==0)
2556 {
2557 start = RTTimeNanoTS();
2558 }
2559 ++count;
2560 if (count==100000)
2561 {
2562 end = RTTimeNanoTS();
2563 float ems = (end-start)/1000000.f;
2564 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2565 }
2566# endif
2567 /** @todo post the buffer to chromium */
2568 return VINF_SUCCESS;
2569 }
2570 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2571 {
2572 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2573 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2574 Assert(cbBlt >= 0);
2575 Assert((uint32_t)cbBlt <= cbBuffer);
2576 if (cbBlt >= 0)
2577 {
2578 if ((uint32_t)cbBlt == cbBuffer)
2579 return VINF_SUCCESS;
2580 else
2581 {
2582 cbBuffer -= (uint32_t)cbBlt;
2583 pvBuffer -= cbBlt;
2584 }
2585 }
2586 else
2587 return cbBlt; /* error */
2588 break;
2589 }
2590 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2591 {
2592 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2593 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2594 Assert(cbTransfer >= 0);
2595 Assert((uint32_t)cbTransfer <= cbBuffer);
2596 if (cbTransfer >= 0)
2597 {
2598 if ((uint32_t)cbTransfer == cbBuffer)
2599 return VINF_SUCCESS;
2600 else
2601 {
2602 cbBuffer -= (uint32_t)cbTransfer;
2603 pvBuffer -= cbTransfer;
2604 }
2605 }
2606 else
2607 return cbTransfer; /* error */
2608 break;
2609 }
2610 case VBOXVDMACMD_TYPE_DMA_NOP:
2611 return VINF_SUCCESS;
2612 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2613 return VINF_SUCCESS;
2614 default:
2615 AssertBreakpoint();
2616 return VERR_INVALID_FUNCTION;
2617 }
2618 } while (1);
2619
2620 /* we should not be here */
2621 AssertBreakpoint();
2622 return VERR_INVALID_STATE;
2623}
2624
/** The command-VBVA worker thread: pulls commands and controls off the shared
 * ring and dispatches them until the thread is asked to terminate.
 *
 * @returns VINF_SUCCESS (thread exit code).
 * @param   hThreadSelf  The thread handle (unused).
 * @param   pvUser       Pointer to the VBOXVDMAHOST instance.
 */
static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
{
    RT_NOREF(hThreadSelf);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
    uint8_t *pCmd;
    uint32_t cbCmd;
    int rc;

    VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);

    while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    {
        VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
        switch (enmType)
        {
            case VBVAEXHOST_DATA_TYPE_CMD:
                /* Guest command: process, complete, then raise an IRQ so the
                 * guest notices the completion. */
                vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
                VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
                VBVARaiseIrq(pVGAState, 0);
                break;
            case VBVAEXHOST_DATA_TYPE_GUESTCTL:
                rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                break;
            case VBVAEXHOST_DATA_TYPE_HOSTCTL:
            {
                bool fContinue = true;
                rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                if (fContinue)
                    break;
            }
            /* Intentional fall-through when !fContinue: park the thread like
             * the no-data case until new work (or termination) is signalled. */
            case VBVAEXHOST_DATA_TYPE_NO_DATA:
                rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
                AssertRC(rc);
                break;
            default:
                WARN(("unexpected type %d\n", enmType));
                break;
        }
    }

    VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);

    return VINF_SUCCESS;
}
2673
/** Executes a generic VDMA command buffer (synchronous path).
 *
 * Resolves the command buffer location (tail of the DR, a VRAM offset, or a
 * guest-physical page that gets temporarily mapped), runs vboxVDMACmdExec()
 * on it, and completes the SHGSMI command with the resulting status.
 *
 * @param   pVdma  The VDMA host state.
 * @param   pCmd   The guest command descriptor.
 * @param   cbCmd  Size of the descriptor (unused).
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    RT_NOREF(cbCmd);
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Buffer is in guest physical memory; it must not cross a page
             * boundary, since only the single containing page is mapped. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /** @todo more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /** @todo if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            /* Remember to undo the page mapping once the command is done. */
            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    /* Report the status back to the guest and complete the SHGSMI command. */
    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2733
# if 0 /** @todo vboxVDMAControlProcess is unused */
/* Completes a VDMA control command with VINF_SUCCESS.  Compiled out because
 * it currently has no callers; kept for reference. */
static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    pCmd->i32Result = VINF_SUCCESS;
    int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
# endif
2743
2744#endif /* VBOX_WITH_CRHGSMI */
2745#ifdef VBOX_VDMA_WITH_WATCHDOG
2746
2747static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2748{
2749 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2750 PVGASTATE pVGAState = pVdma->pVGAState;
2751 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2752}
2753
2754static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2755{
2756 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2757 if (cMillis)
2758 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2759 else
2760 TMTimerStop(pVdma->WatchDogTimer);
2761 return VINF_SUCCESS;
2762}
2763
2764#endif /* VBOX_VDMA_WITH_WATCHDOG */
2765
/** Allocates and initialises the VDMA host state and hooks it up to the VGA
 * device state (pVGAState->pVdma).
 *
 * @returns VBox status code; on failure everything allocated so far is
 *          rolled back and pVGAState->pVdma is left unset.
 * @param   pVGAState      The VGA device state.
 * @param   cPipeElements  Unused (legacy parameter).
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    RT_NOREF(cPipeElements);
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        /* NOTE(review): a timer creation failure is only asserted, not
         * propagated - rc is overwritten below. */
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                    pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                    "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVdma->CalloutCritSect);
                if (RT_SUCCESS(rc))
                {
                    /* Fully initialised: publish and do the HGSMI setup. */
                    pVGAState->pVdma = pVdma;
                    int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                    return VINF_SUCCESS;
                }
                WARN(("RTCritSectInit failed %d\n", rc));

                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        /* Failure: undo the allocation (cleanups above ran in reverse order). */
        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2824
/** Resets the VDMA state for a VM reset: synchronously disables the command
 * VBVA (CRHGSMI builds only).
 *
 * @returns VINF_SUCCESS.
 */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#else
    RT_NOREF(pVdma);
#endif
    return VINF_SUCCESS;
}
2834
/** Destroys the VDMA state: disables the command VBVA, tears down the worker
 * thread and synchronisation objects, and frees the instance.
 *
 * @returns VINF_SUCCESS.
 * @param   pVdma  The VDMA host state; NULL is tolerated (no-op).
 */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
    if (!pVdma)
        return VINF_SUCCESS;
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
    RTCritSectDelete(&pVdma->CalloutCritSect);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2849
2850void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2851{
2852 RT_NOREF(cbCmd);
2853 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2854
2855 switch (pCmd->enmCtl)
2856 {
2857 case VBOXVDMA_CTL_TYPE_ENABLE:
2858 pCmd->i32Result = VINF_SUCCESS;
2859 break;
2860 case VBOXVDMA_CTL_TYPE_DISABLE:
2861 pCmd->i32Result = VINF_SUCCESS;
2862 break;
2863 case VBOXVDMA_CTL_TYPE_FLUSH:
2864 pCmd->i32Result = VINF_SUCCESS;
2865 break;
2866#ifdef VBOX_VDMA_WITH_WATCHDOG
2867 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2868 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2869 break;
2870#endif
2871 default:
2872 WARN(("cmd not supported"));
2873 pCmd->i32Result = VERR_NOT_SUPPORTED;
2874 }
2875
2876 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2877 AssertRC(rc);
2878}
2879
/** Entry point for a VDMA command buffer submitted by the guest via HGSMI.
 *
 * In CRHGSMI builds, chromium/external commands are detected and handled by
 * vboxVDMACmdCheckCrCmd(); only the remaining commands run through the
 * generic synchronous pipeline.  Without CRHGSMI the command is rejected.
 *
 * @param   pVdma  The VDMA host state.
 * @param   pCmd   The guest command descriptor.
 * @param   cbCmd  Size of the descriptor buffer in bytes.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by the chromium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return;

    if (RT_FAILURE(rc))
    {
        /* Malformed command: complete it with the failure status right away. */
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: not an external command, run the generic pipeline. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    RT_NOREF(cbCmd);
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2907
2908#ifdef VBOX_WITH_CRHGSMI
2909
2910static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2911
2912static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2913{
2914 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2915 if (RT_SUCCESS(rc))
2916 {
2917 if (rc == VINF_SUCCESS)
2918 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2919 else
2920 Assert(rc == VINF_ALREADY_INITIALIZED);
2921 }
2922 else
2923 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2924
2925 return rc;
2926}
2927
2928static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2929{
2930 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2931 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2932 AssertRC(rc);
2933 pGCtl->i32Result = rc;
2934
2935 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2936 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2937 AssertRC(rc);
2938
2939 VBoxVBVAExHCtlFree(pVbva, pCtl);
2940}
2941
2942static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2943{
2944 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2945 if (!pHCtl)
2946 {
2947 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2948 return VERR_NO_MEMORY;
2949 }
2950
2951 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2952 pHCtl->u.cmd.cbCmd = cbCmd;
2953 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2954 if (RT_FAILURE(rc))
2955 {
2956 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2957 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2958 return rc;;
2959 }
2960 return VINF_SUCCESS;
2961}
2962
2963static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2964{
2965 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2966 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2967 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2968 if (RT_SUCCESS(rc))
2969 return VINF_SUCCESS;
2970
2971 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2972 pCtl->i32Result = rc;
2973 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2974 AssertRC(rc);
2975 return VINF_SUCCESS;
2976}
2977
2978static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2979{
2980 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2981 if (pVboxCtl->u.pfnInternal)
2982 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2983 VBoxVBVAExHCtlFree(pVbva, pCtl);
2984}
2985
2986static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2987 PFNCRCTLCOMPLETION pfnCompletion,
2988 void *pvCompletion)
2989{
2990 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2991 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2992 if (RT_FAILURE(rc))
2993 {
2994 if (rc == VERR_INVALID_STATE)
2995 {
2996 pCmd->u.pfnInternal = NULL;
2997 PVGASTATE pVGAState = pVdma->pVGAState;
2998 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2999 if (!RT_SUCCESS(rc))
3000 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
3001
3002 return rc;
3003 }
3004 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
3005 return rc;
3006 }
3007
3008 return VINF_SUCCESS;
3009}
3010
3011static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
3012{
3013 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3014 {
3015 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
3016 if (!RT_SUCCESS(rc))
3017 {
3018 WARN(("pfnVBVAEnable failed %d\n", rc));
3019 for (uint32_t j = 0; j < i; j++)
3020 {
3021 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
3022 }
3023
3024 return rc;
3025 }
3026 }
3027 return VINF_SUCCESS;
3028}
3029
3030static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
3031{
3032 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3033 {
3034 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
3035 }
3036 return VINF_SUCCESS;
3037}
3038
/** VBoxVDMAThreadCreate completion callback, invoked once the worker thread is
 * up: processes the pending enable/disable control and, when a real state
 * change happened, notifies Main about it.  Completes the control in all
 * cases with the final status.
 *
 * @param   pThread          The worker thread (unused).
 * @param   rc               Thread creation status.
 * @param   pvThreadContext  The VBOXVDMAHOST instance.
 * @param   pvContext        The pending VBVAEXHOSTCTL.
 */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
                                                         void *pvThreadContext, void *pvContext)
{
    RT_NOREF(pThread);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occurred */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    /* Complete the control with the final status in all cases. */
    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
3070
3071static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3072{
3073 int rc;
3074 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3075 if (pHCtl)
3076 {
3077 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3078 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3079 pHCtl->pfnComplete = pfnComplete;
3080 pHCtl->pvComplete = pvComplete;
3081
3082 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3083 if (RT_SUCCESS(rc))
3084 return VINF_SUCCESS;
3085 else
3086 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
3087
3088 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3089 }
3090 else
3091 {
3092 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3093 rc = VERR_NO_MEMORY;
3094 }
3095
3096 return rc;
3097}
3098
3099static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3100{
3101 VBVAENABLE Enable = {0};
3102 Enable.u32Flags = VBVA_F_ENABLE;
3103 Enable.u32Offset = offVram;
3104
3105 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3106 Data.rc = VERR_NOT_IMPLEMENTED;
3107 int rc = RTSemEventCreate(&Data.hEvent);
3108 if (!RT_SUCCESS(rc))
3109 {
3110 WARN(("RTSemEventCreate failed %d\n", rc));
3111 return rc;
3112 }
3113
3114 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3115 if (RT_SUCCESS(rc))
3116 {
3117 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3118 if (RT_SUCCESS(rc))
3119 {
3120 rc = Data.rc;
3121 if (!RT_SUCCESS(rc))
3122 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3123 }
3124 else
3125 WARN(("RTSemEventWait failed %d\n", rc));
3126 }
3127 else
3128 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
3129
3130 RTSemEventDestroy(Data.hEvent);
3131
3132 return rc;
3133}
3134
3135static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3136{
3137 int rc;
3138 VBVAEXHOSTCTL* pHCtl;
3139 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3140 {
3141 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3142 return VINF_SUCCESS;
3143 }
3144
3145 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3146 if (!pHCtl)
3147 {
3148 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3149 return VERR_NO_MEMORY;
3150 }
3151
3152 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3153 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3154 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3155 if (RT_SUCCESS(rc))
3156 return VINF_SUCCESS;
3157
3158 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3159 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3160 return rc;
3161}
3162
3163static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3164{
3165 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3166 if (fEnable)
3167 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3168 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3169}
3170
3171static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3172{
3173 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3174 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3175 if (RT_SUCCESS(rc))
3176 return VINF_SUCCESS;
3177
3178 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3179 pEnable->Hdr.i32Result = rc;
3180 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3181 AssertRC(rc);
3182 return VINF_SUCCESS;
3183}
3184
3185static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3186 int rc, void *pvContext)
3187{
3188 RT_NOREF(pVbva, pCtl);
3189 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3190 pData->rc = rc;
3191 rc = RTSemEventSignal(pData->hEvent);
3192 if (!RT_SUCCESS(rc))
3193 WARN(("RTSemEventSignal failed %d\n", rc));
3194}
3195
3196static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3197{
3198 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3199 Data.rc = VERR_NOT_IMPLEMENTED;
3200 int rc = RTSemEventCreate(&Data.hEvent);
3201 if (!RT_SUCCESS(rc))
3202 {
3203 WARN(("RTSemEventCreate failed %d\n", rc));
3204 return rc;
3205 }
3206
3207 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3208 if (RT_SUCCESS(rc))
3209 {
3210 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3211 if (RT_SUCCESS(rc))
3212 {
3213 rc = Data.rc;
3214 if (!RT_SUCCESS(rc))
3215 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3216 }
3217 else
3218 WARN(("RTSemEventWait failed %d\n", rc));
3219 }
3220 else
3221 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3222
3223 RTSemEventDestroy(Data.hEvent);
3224
3225 return rc;
3226}
3227
3228static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3229{
3230 VBVAEXHOSTCTL Ctl;
3231 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3232 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3233}
3234
3235static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3236{
3237 VBVAEXHOSTCTL Ctl;
3238 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3239 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3240}
3241
3242static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3243{
3244 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3245 switch (rc)
3246 {
3247 case VINF_SUCCESS:
3248 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3249 case VINF_ALREADY_INITIALIZED:
3250 case VINF_EOF:
3251 case VERR_INVALID_STATE:
3252 return VINF_SUCCESS;
3253 default:
3254 Assert(!RT_FAILURE(rc));
3255 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3256 }
3257}
3258
3259
3260int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3261 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3262 PFNCRCTLCOMPLETION pfnCompletion,
3263 void *pvCompletion)
3264{
3265 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3266 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3267 if (pVdma == NULL)
3268 return VERR_INVALID_STATE;
3269 pCmd->CalloutList.List.pNext = NULL;
3270 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3271}
3272
/** Waiter context shared between vboxCmdVBVACmdHostCtlSync and its
 *  completion callback vboxCmdVBVACmdHostCtlSyncCb. */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma; /**< The VDMA host state (completion event + counter live here). */
    uint32_t fProcessing;       /**< Non-zero while the control is in flight; cleared by the callback. */
    int rc;                     /**< Status reported by the completion callback. */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3279
/**
 * Completion callback for vboxCmdVBVACmdHostCtlSync.
 *
 * Records the control status, bumps the host-control completion counter and
 * wakes the waiting thread.
 *
 * NOTE(review): pData->fProcessing is a plain uint32_t polled by the waiter
 * without explicit atomics; the statement order here (rc, counter increment,
 * fProcessing clear, then semaphore signal) appears deliberate -- confirm the
 * semaphore signal is what publishes these stores to the waiter.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    RT_NOREF(pCmd, cbCmd);
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;

    /* Status handed back to the synchronous caller. */
    pData->rc = rc;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    /* The waiter resets the event only when this counter drops back to zero. */
    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    pData->fProcessing = 0;

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}
3295
3296static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3297{
3298 pEntry->pfnCb = pfnCb;
3299 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3300 if (RT_SUCCESS(rc))
3301 {
3302 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3303 RTCritSectLeave(&pVdma->CalloutCritSect);
3304
3305 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3306 }
3307 else
3308 WARN(("RTCritSectEnter failed %d\n", rc));
3309
3310 return rc;
3311}
3312
3313
3314static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3315{
3316 int rc = VINF_SUCCESS;
3317 for (;;)
3318 {
3319 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3320 if (RT_SUCCESS(rc))
3321 {
3322 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3323 if (pEntry)
3324 RTListNodeRemove(&pEntry->Node);
3325 RTCritSectLeave(&pVdma->CalloutCritSect);
3326
3327 if (!pEntry)
3328 break;
3329
3330 pEntry->pfnCb(pEntry);
3331 }
3332 else
3333 {
3334 WARN(("RTCritSectEnter failed %d\n", rc));
3335 break;
3336 }
3337 }
3338
3339 return rc;
3340}
3341
/**
 * Submits a host control and blocks until it has completed, servicing
 * callouts (vboxCmdVBVACmdCallout) queued by the processing side meanwhile.
 *
 * @returns VBox status code: VERR_INVALID_STATE when VDMA is not set up,
 *          the submit status on submission failure, otherwise the status the
 *          completion callback recorded.
 */
DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
                                            struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    if (pVdma == NULL)
        return VERR_INVALID_STATE;

    /* Waiter context shared with vboxCmdVBVACmdHostCtlSyncCb. */
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    RTListInit(&pCmd->CalloutList.List);
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    /* Wait for the completion callback to clear fProcessing, running any
       callouts the processing side queues in the meantime. */
    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* extra check callouts */
    vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    /* Report the status recorded by the completion callback. */
    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3391
3392int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3393{
3394 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3395 int rc = VINF_SUCCESS;
3396 switch (pCtl->u32Type)
3397 {
3398 case VBOXCMDVBVACTL_TYPE_3DCTL:
3399 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3400 case VBOXCMDVBVACTL_TYPE_RESIZE:
3401 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3402 case VBOXCMDVBVACTL_TYPE_ENABLE:
3403 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3404 {
3405 WARN(("incorrect enable size\n"));
3406 rc = VERR_INVALID_PARAMETER;
3407 break;
3408 }
3409 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3410 default:
3411 WARN(("unsupported type\n"));
3412 rc = VERR_INVALID_PARAMETER;
3413 break;
3414 }
3415
3416 pCtl->i32Result = rc;
3417 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3418 AssertRC(rc);
3419 return VINF_SUCCESS;
3420}
3421
3422int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3423{
3424 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3425 {
3426 WARN(("vdma VBVA is disabled\n"));
3427 return VERR_INVALID_STATE;
3428 }
3429
3430 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3431}
3432
3433int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3434{
3435 WARN(("flush\n"));
3436 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3437 {
3438 WARN(("vdma VBVA is disabled\n"));
3439 return VERR_INVALID_STATE;
3440 }
3441 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3442}
3443
3444void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3445{
3446 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3447 return;
3448 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3449}
3450
3451bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3452{
3453 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3454}
3455
3456#endif /* VBOX_WITH_CRHGSMI */
3457
3458int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
3459{
3460#ifdef VBOX_WITH_CRHGSMI
3461 int rc = vdmaVBVAPause(pVdma);
3462 if (RT_SUCCESS(rc))
3463 return VINF_SUCCESS;
3464
3465 if (rc != VERR_INVALID_STATE)
3466 {
3467 WARN(("vdmaVBVAPause failed %d\n", rc));
3468 return rc;
3469 }
3470
3471# ifdef DEBUG_misha
3472 WARN(("debug prep"));
3473# endif
3474
3475 PVGASTATE pVGAState = pVdma->pVGAState;
3476 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3477 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
3478 Assert(pCmd);
3479 if (pCmd)
3480 {
3481 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3482 AssertRC(rc);
3483 if (RT_SUCCESS(rc))
3484 {
3485 rc = vboxVDMACrCtlGetRc(pCmd);
3486 }
3487 vboxVDMACrCtlRelease(pCmd);
3488 return rc;
3489 }
3490 return VERR_NO_MEMORY;
3491#else
3492 RT_NOREF(pVdma);
3493 return VINF_SUCCESS;
3494#endif
3495}
3496
3497int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
3498{
3499#ifdef VBOX_WITH_CRHGSMI
3500 int rc = vdmaVBVAResume(pVdma);
3501 if (RT_SUCCESS(rc))
3502 return VINF_SUCCESS;
3503
3504 if (rc != VERR_INVALID_STATE)
3505 {
3506 WARN(("vdmaVBVAResume failed %d\n", rc));
3507 return rc;
3508 }
3509
3510# ifdef DEBUG_misha
3511 WARN(("debug done"));
3512# endif
3513
3514 PVGASTATE pVGAState = pVdma->pVGAState;
3515 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3516 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
3517 Assert(pCmd);
3518 if (pCmd)
3519 {
3520 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3521 AssertRC(rc);
3522 if (RT_SUCCESS(rc))
3523 {
3524 rc = vboxVDMACrCtlGetRc(pCmd);
3525 }
3526 vboxVDMACrCtlRelease(pCmd);
3527 return rc;
3528 }
3529 return VERR_NO_MEMORY;
3530#else
3531 RT_NOREF(pVdma);
3532 return VINF_SUCCESS;
3533#endif
3534}
3535
/**
 * Saves the command-VBVA state to the saved state stream.
 *
 * Writes the UINT32_MAX marker when command VBVA is disabled (or CrHgsmi is
 * not compiled in); otherwise writes the VBVA buffer's VRAM offset and lets
 * the worker serialize its state via a synchronous HH_SAVESTATE control.
 *
 * Note the deliberate preprocessor splice: the brace block after the #endif
 * serves both the !VBOX_WITH_CRHGSMI build and the "disabled" runtime case.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host state.
 * @param   pSSM    The saved state handle.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;
#ifndef VBOX_WITH_CRHGSMI
    RT_NOREF(pVdma, pSSM);

#else
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Nothing to save: store the marker only. */
        rc = SSMR3PutU32(pSSM, UINT32_MAX);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    /* Store the VBVA buffer location as an offset relative to VRAM start. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    /* Hand the SSM handle to the worker for synchronous state serialization. */
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3565
/**
 * Loads the command-VBVA state written by vboxVDMASaveStateExecPerform.
 *
 * A stored UINT32_MAX marker means command VBVA was disabled at save time and
 * there is nothing further to restore. Otherwise the sequence is:
 * enable-paused at the saved VRAM offset, let the worker deserialize via a
 * synchronous HH_LOADSTATE control, then resume.
 *
 * @returns VBox status code.
 * @param   pVdma       The VDMA host state.
 * @param   pSSM        The saved state handle.
 * @param   u32Version  The saved state unit version.
 */
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    if (u32 != UINT32_MAX)
    {
#ifdef VBOX_WITH_CRHGSMI
        /* Re-enable command VBVA at the saved VRAM offset, in paused state. */
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertLogRelRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        /* Let the worker deserialize its own state synchronously. */
        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertLogRelRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertLogRelRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        /* Saved state contains VBVA data but this build cannot restore it. */
        RT_NOREF(pVdma, u32Version);
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}
3600
3601int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
3602{
3603#ifdef VBOX_WITH_CRHGSMI
3604 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3605 return VINF_SUCCESS;
3606
3607/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
3608 * the purpose of this code is. */
3609 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
3610 if (!pHCtl)
3611 {
3612 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3613 return VERR_NO_MEMORY;
3614 }
3615
3616 /* sanity */
3617 pHCtl->u.cmd.pu8Cmd = NULL;
3618 pHCtl->u.cmd.cbCmd = 0;
3619
3620 /* NULL completion will just free the ctl up */
3621 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
3622 if (RT_FAILURE(rc))
3623 {
3624 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3625 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3626 return rc;
3627 }
3628#else
3629 RT_NOREF(pVdma);
3630#endif
3631 return VINF_SUCCESS;
3632}
3633
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette