source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp @ 80651

Last change on this file since 80651 was 80478, checked in by vboxsync, 6 years ago

WDDM: remove unused VBOX_VDMA_WITH_WATCHDOG code. bugref:9529

/* $Id: DevVGA_VDMA.cpp 80478 2019-08-28 13:32:17Z vboxsync $ */
/** @file
 * Video DMA (VDMA) support.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_DEV_VGA
#include <VBox/VMMDev.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/pgm.h>
#include <VBoxVideo.h>
#include <VBox/AssertGuest.h>
#include <iprt/semaphore.h>
#include <iprt/thread.h>
#include <iprt/mem.h>
#include <iprt/asm.h>
#include <iprt/list.h>
#include <iprt/param.h>

#include "DevVGA.h"
#include "HGSMI/SHGSMIHost.h"

#include <VBoxVideo3D.h>
#include <VBoxVideoHost3D.h>

#ifdef DEBUG_misha
# define VBOXVDBG_MEMCACHE_DISABLE
#endif

#ifndef VBOXVDBG_MEMCACHE_DISABLE
# include <iprt/memcache.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef DEBUG_misha
# define WARN_BP() do { AssertFailed(); } while (0)
#else
# define WARN_BP() do { } while (0)
#endif
#define WARN(_msg) do { \
        LogRel(_msg); \
        WARN_BP(); \
    } while (0)

#define VBOXVDMATHREAD_STATE_TERMINATED     0
#define VBOXVDMATHREAD_STATE_CREATING       1
#define VBOXVDMATHREAD_STATE_CREATED        3
#define VBOXVDMATHREAD_STATE_TERMINATING    4

/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
struct VBOXVDMATHREAD;

typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);

static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);


typedef struct VBOXVDMATHREAD
{
    RTTHREAD hWorkerThread;
    RTSEMEVENT hEvent;
    volatile uint32_t u32State;
    PFNVBOXVDMATHREAD_CHANGED pfnChanged;
    void *pvChanged;
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;


/* state transitions:
 *
 * submitter   |   processor
 *
 *  LISTENING   --->  PROCESSING
 *
 * */
#define VBVAEXHOSTCONTEXT_STATE_LISTENING      0
#define VBVAEXHOSTCONTEXT_STATE_PROCESSING     1

#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED     -1
#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED        0
#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED       1

typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA;
    /** Maximum number of data bytes addressable relative to pVBVA. */
    uint32_t cbMaxData;
    volatile int32_t i32State;
    volatile int32_t i32EnableState;
    volatile uint32_t u32cCtls;
    /* Critical section for accessing the ctl lists. */
    RTCRITSECT CltCritSect;
    RTLISTANCHOR GuestCtlList;
    RTLISTANCHOR HostCtlList;
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMEMCACHE CtlCache;
#endif
} VBVAEXHOSTCONTEXT;

typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
    VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
    VBVAEXHOSTCTL_TYPE_GHH_RESIZE
} VBVAEXHOSTCTL_TYPE;

struct VBVAEXHOSTCTL;

typedef DECLCALLBACK(void) FNVBVAEXHOSTCTL_COMPLETE(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
typedef FNVBVAEXHOSTCTL_COMPLETE *PFNVBVAEXHOSTCTL_COMPLETE;

typedef struct VBVAEXHOSTCTL
{
    RTLISTNODE Node;
    VBVAEXHOSTCTL_TYPE enmType;
    union
    {
        struct
        {
            void RT_UNTRUSTED_VOLATILE_GUEST *pvCmd;
            uint32_t cbCmd;
        } cmd;

        struct
        {
            PSSMHANDLE pSSM;
            uint32_t u32Version;
        } state;
    } u;
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
    void *pvComplete;
} VBVAEXHOSTCTL;

/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * They can only be called by the processor, i.e. the entity that acquired the processor state
 * by a direct or indirect call to VBoxVBVAExHSCheckCommands.
 * See the more detailed comments in the headers of the function definitions. */
typedef enum
{
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
    VBVAEXHOST_DATA_TYPE_CMD,
    VBVAEXHOST_DATA_TYPE_HOSTCTL,
    VBVAEXHOST_DATA_TYPE_GUESTCTL
} VBVAEXHOST_DATA_TYPE;


typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;


typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi; /**< Same as VGASTATE::pHgsmi. */
    PVGASTATE pVGAState;
    VBVAEXHOSTCONTEXT CmdVbva;
    VBOXVDMATHREAD Thread;
    VBOXCRCMD_SVRINFO CrSrvInfo;
    VBVAEXHOSTCTL* pCurRemainingHostCtl;
    RTSEMEVENTMULTI HostCrCtlCompleteEvent;
    int32_t volatile i32cHostCrCtlCompleted;
    RTCRITSECT CalloutCritSect;
//    VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
} VBOXVDMAHOST, *PVBOXVDMAHOST;


/**
 * List selector for VBoxVBVAExHCtlSubmit(), vdmaVBVACtlSubmit().
 */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    VBVAEXHOSTCTL_SOURCE_HOST
} VBVAEXHOSTCTL_SOURCE;


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int vdmaVBVANotifyDisable(PVGASTATE pVGAState);
static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread);
static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer,
                                      uint32_t cbBuffer);
static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
                                                          int rc, void *pvContext);

/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently. */


/**
 * Creates a host control command.
 */
static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
{
# ifndef VBOXVDBG_MEMCACHE_DISABLE
    VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL *)RTMemCacheAlloc(pCmdVbva->CtlCache);
# else
    VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL *)RTMemAlloc(sizeof(VBVAEXHOSTCTL));
# endif
    if (pCtl)
    {
        RT_ZERO(*pCtl);
        pCtl->enmType = enmType;
    }
    else
        WARN(("VBoxVBVAExHCtlAlloc failed\n"));
    return pCtl;
}

/**
 * Destroys a host control command.
 */
static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
{
# ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
# else
    RTMemFree(pCtl);
# endif
}


/**
 * Works the VBVA state: tries to acquire the processor role by switching
 * the state from LISTENING to PROCESSING.
 *
 * @returns VINF_SUCCESS if we are now the processor, VERR_SEM_BUSY if
 *          somebody else already is.
 */
static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
        return VINF_SUCCESS;
    return VERR_SEM_BUSY;
}
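
/* Illustrative sketch, not part of the device code: the acquire/process/release
 * cycle that the VBoxVBVAExHP* processor functions below assume.  The helpers
 * named in the body are the real statics from this file.
 *
 * @code
 *     if (RT_SUCCESS(vboxVBVAExHSProcessorAcquire(pCmdVbva)))
 *     {
 *         // we are the one and only processor now
 *         // ... call the VBoxVBVAExHP* processing functions here ...
 *         vboxVBVAExHPProcessorRelease(pCmdVbva);
 *     }
 *     // else VERR_SEM_BUSY: another thread is the processor.
 * @endcode
 */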

/**
 * Worker for vboxVBVAExHPDataGetInner() and VBoxVBVAExHPCheckHostCtlOnDisable()
 * that gets the next control command.
 *
 * @returns Pointer to command if found, NULL if not.
 * @param   pCmdVbva        The VBVA command context.
 * @param   pfHostCtl       Where to indicate whether it's a host or guest
 *                          control command.
 * @param   fHostOnlyMode   Whether to only fetch host commands, or both.
 */
static VBVAEXHOSTCTL *vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        VBVAEXHOSTCTL *pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl cannot be NULL here since pCmdVbva->u32cCtls is not zero,
                 * and there are no HostCtl commands. */
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %Rrc\n", rc));

    return NULL;
}

/**
 * Worker for vboxVDMACrHgcmHandleEnableRemainingHostCommand().
 */
static VBVAEXHOSTCTL *VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    bool fHostCtl = false;
    VBVAEXHOSTCTL *pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
    Assert(!pCtl || fHostCtl);
    return pCtl;
}

/**
 * Worker for vboxVBVAExHPCheckProcessCtlInternal() and
 * vboxVDMACrGuestCtlProcess() / VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED.
 */
static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("Invalid state\n"));
        return VERR_INVALID_STATE;
    }

    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
    return VINF_SUCCESS;
}

/**
 * Works the VBVA state in response to VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME.
 */
static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("Invalid state\n"));
        return VERR_INVALID_STATE;
    }

    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
    return VINF_SUCCESS;
}

/**
 * Worker for vboxVBVAExHPDataGetInner that processes PAUSE and RESUME requests.
 *
 * Unclear why these cannot be handled the normal way.
 *
 * @returns true if handled, false if not.
 * @param   pCmdVbva    The VBVA context.
 * @param   pCtl        The host control command.
 */
static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
{
    switch (pCtl->enmType)
    {
        case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
            VBoxVBVAExHPPause(pCmdVbva);
            VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
            return true;

        case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
            VBoxVBVAExHPResume(pCmdVbva);
            VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
            return true;

        default:
            return false;
    }
}

/**
 * Works the VBVA state: returns the processor role, switching the state back
 * from PROCESSING to LISTENING.
 */
static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
}

/**
 * Works the VBVA state: sets the VBVA_F_STATE_PROCESSING host flag so the
 * guest can see that we are processing.
 */
static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
}

/**
 * Works the VBVA state: clears the VBVA_F_STATE_PROCESSING host flag again.
 */
static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
}

/**
 * Worker for vboxVBVAExHPDataGetInner.
 *
 * @retval VINF_SUCCESS
 * @retval VINF_EOF
 * @retval VINF_TRY_AGAIN
 * @retval VERR_INVALID_STATE
 *
 * @thread VDMA
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA; /* This is shared with the guest, so careful! */

    /*
     * Inspect records.
     */
    uint32_t idxRecordFirst = ASMAtomicUoReadU32(&pVBVA->indexRecordFirst);
    uint32_t idxRecordFree  = ASMAtomicReadU32(&pVBVA->indexRecordFree);
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    Log(("first = %d, free = %d\n", idxRecordFirst, idxRecordFree));
    if (idxRecordFirst == idxRecordFree)
        return VINF_EOF; /* No records to process. Return without assigning output variables. */
    AssertReturn(idxRecordFirst < VBVA_MAX_RECORDS, VERR_INVALID_STATE);
    RT_UNTRUSTED_VALIDATED_FENCE();

    /*
     * Read the record size and check that it has been completely recorded.
     */
    uint32_t const cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[idxRecordFirst].cbRecord);
    uint32_t const cbRecord        = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    if (   (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
        || !cbRecord)
        return VINF_TRY_AGAIN; /* The record is still being recorded, try again. */
    Assert(cbRecord);

    /*
     * Get and validate the data area.
     */
    uint32_t const offData   = ASMAtomicReadU32(&pVBVA->off32Data);
    uint32_t       cbMaxData = ASMAtomicReadU32(&pVBVA->cbData);
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    AssertLogRelMsgStmt(cbMaxData <= pCmdVbva->cbMaxData, ("%#x vs %#x\n", cbMaxData, pCmdVbva->cbMaxData),
                        cbMaxData = pCmdVbva->cbMaxData);
    AssertLogRelMsgReturn(   cbRecord <= cbMaxData
                          && offData  <= cbMaxData - cbRecord,
                          ("offData=%#x cbRecord=%#x cbMaxData=%#x\n", offData, cbRecord, cbMaxData),
                          VERR_INVALID_STATE);
    RT_UNTRUSTED_VALIDATED_FENCE();

    /*
     * Just set the return values and we're done.
     */
    *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)&pVBVA->au8Data[offData];
    *pcbCmd = cbRecord;
    return VINF_SUCCESS;
}

/**
 * Completion routine advancing our end of the ring and data buffers forward.
 *
 * @param   pCmdVbva    The VBVA context.
 * @param   cbCmd       The size of the data.
 */
static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
{
    VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA;
    if (pVBVA)
    {
        /* Move data head. */
        uint32_t const cbData  = pVBVA->cbData;
        uint32_t const offData = pVBVA->off32Data;
        RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
        if (cbData > 0)
            ASMAtomicWriteU32(&pVBVA->off32Data, (offData + cbCmd) % cbData);
        else
            ASMAtomicWriteU32(&pVBVA->off32Data, 0);

        /* Increment record pointer. */
        uint32_t const idxRecFirst = pVBVA->indexRecordFirst;
        RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
        ASMAtomicWriteU32(&pVBVA->indexRecordFirst, (idxRecFirst + 1) % RT_ELEMENTS(pVBVA->aRecords));
    }
}
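
/* Illustrative example, not part of the device code: with cbData = 100,
 * off32Data = 90 and a completed command of cbCmd = 20, the data head wraps
 * around: (90 + 20) % 100 = 10.  The record index advances in the same
 * modular way over RT_ELEMENTS(pVBVA->aRecords). */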

/**
 * Control command completion routine used by many.
 */
static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
{
    if (pCtl->pfnComplete)
        pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
    else
        VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
}


/**
 * Worker for VBoxVBVAExHPDataGet.
 * @thread VDMA
 */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGetInner(struct VBVAEXHOSTCONTEXT *pCmdVbva,
                                                     uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL *pCtl;
    bool fHostCtl;

    for (;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, false);
        if (pCtl)
        {
            if (fHostCtl)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCtl; /* Note! pCtl is host data, so trusted */
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                continue; /* Processed by vboxVBVAExHPCheckProcessCtlInternal, get next. */
            }
            *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCtl; /* Note! pCtl is host data, so trusted */
            *pcbCmd = sizeof (*pCtl);
            return VBVAEXHOST_DATA_TYPE_GUESTCTL;
        }

        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppbCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                RTThreadSleep(1);
                continue;
            default:
                /* This is something really unexpected, i.e. most likely the guest has written something incorrect to the VBVA buffer. */
                WARN(("Warning: vboxVBVAExHPCmdGet returned unexpected status %Rrc\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }
    /* not reached */
}

/**
 * Called by vboxVDMAWorkerThread to get the next command to process.
 * @thread VDMA
 */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva,
                                                uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGetInner(pCmdVbva, ppbCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);

        /*
         * We need to prevent racing between us clearing the flag and the command check/submission thread, i.e.
         *  1. we check the queue -> and it is empty
         *  2. the submitter adds a command to the queue
         *  3. the submitter checks the "processing" state -> and it is true, thus it does not submit a notification
         *  4. we clear the "processing" state
         *  5. -> here we need to re-check the queue state to ensure we do not lose the notification for the above command
         *  6. if the queue appears to be not-empty, set the "processing" state back to "true"
         */
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGetInner(pCmdVbva, ppbCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
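
/* Illustrative sketch, not part of the device code: the re-check above is the
 * classic lost-wakeup avoidance pattern, distilled with hypothetical helper
 * names for clarity:
 *
 * @code
 *     releaseProcessor();                      // step 4: clear "processing"
 *     if (RT_SUCCESS(tryAcquireProcessor()))   // step 5: re-check the queue
 *     {
 *         if (queueIsEmpty())
 *             releaseProcessor();              // really nothing to do
 *         else
 *             setProcessingFlag();             // step 6: keep the role
 *     }
 * @endcode
 */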

/**
 * Checks for pending VBVA command or (internal) control command.
 */
DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA;
    if (pVBVA)
    {
        uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
        uint32_t indexRecordFree  = pVBVA->indexRecordFree;
        RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();

        if (indexRecordFirst != indexRecordFree)
            return true;
    }

    return ASMAtomicReadU32(&pCmdVbva->u32cCtls) > 0;
}

/** Checks whether new commands are ready for processing.
 * @returns
 *    VINF_SUCCESS - there are commands in the queue, and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 *    VINF_EOF - no commands in the queue
 *    VINF_ALREADY_INITIALIZED - another thread is already processing the commands
 *    VERR_INVALID_STATE - the VBVA is paused or pausing */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        if (vboxVBVAExHSHasCommands(pCmdVbva))
        {
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    return VERR_INVALID_STATE;
}
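
/* Illustrative sketch, not part of the device code: how a submitter reacts to
 * VBoxVBVAExHSCheckCommands(); this is essentially what vdmaVBVACtlSubmit()
 * further down does.
 *
 * @code
 *     int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
 *     if (rc == VINF_SUCCESS)
 *         VBoxVDMAThreadEventNotify(&pVdma->Thread); // wake the worker thread
 *     else
 *         Assert(rc == VINF_EOF || rc == VINF_ALREADY_INITIALIZED);
 * @endcode
 */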

/**
 * Worker for vboxVDMAConstruct() that initializes the given VBVA host context.
 */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    RT_ZERO(*pCmdVbva);
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
# ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                              0,          /* size_t cbAlignment */
                              UINT32_MAX, /* uint32_t cMaxObjects */
                              NULL,       /* PFNMEMCACHECTOR pfnCtor*/
                              NULL,       /* PFNMEMCACHEDTOR pfnDtor*/
                              NULL,       /* void *pvUser*/
                              0           /* uint32_t fFlags*/
                              );
        if (RT_SUCCESS(rc))
# endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State       = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
# ifndef VBOXVDBG_MEMCACHE_DISABLE
        WARN(("RTMemCacheCreate failed %Rrc\n", rc));
# endif
    }
    else
        WARN(("RTCritSectInit failed %Rrc\n", rc));

    return rc;
}

/**
 * Checks if VBVA state is some form of enabled.
 */
DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED;
}

/**
 * Checks if VBVA state is disabled.
 */
DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
}

/**
 * Worker for vdmaVBVAEnableProcess().
 *
 * @thread VDMA
 */
static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA,
                              uint8_t *pbVRam, uint32_t cbVRam)
{
    if (VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        WARN(("VBVAEx is enabled already\n"));
        return VERR_INVALID_STATE;
    }

    uintptr_t offVRam = (uintptr_t)pVBVA - (uintptr_t)pbVRam;
    AssertLogRelMsgReturn(offVRam < cbVRam - sizeof(*pVBVA), ("%#p cbVRam=%#x\n", offVRam, cbVRam), VERR_OUT_OF_RANGE);
    RT_UNTRUSTED_VALIDATED_FENCE();

    pCmdVbva->pVBVA     = pVBVA;
    pCmdVbva->cbMaxData = cbVRam - offVRam - RT_UOFFSETOF(VBVABUFFER, au8Data);
    pVBVA->hostFlags.u32HostEvents = 0;
    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
    return VINF_SUCCESS;
}
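
/* Illustrative example, not part of the device code: with a 16 MiB VRAM
 * (cbVRam = 0x1000000) and the guest placing the VBVABUFFER at offset
 * 0xff0000, cbMaxData comes out as
 * 0x1000000 - 0xff0000 - RT_UOFFSETOF(VBVABUFFER, au8Data), i.e. exactly the
 * au8Data[] bytes that fit between the ring header and the end of VRAM. */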

/**
 * Works the enable state.
 * @thread VDMA, CR, EMT, ...
 */
static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    if (VBoxVBVAExHSIsDisabled(pCmdVbva))
        return VINF_SUCCESS;

    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
    return VINF_SUCCESS;
}

/**
 * Worker for vboxVDMADestruct() and vboxVDMAConstruct().
 */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

# ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
# endif

    RT_ZERO(*pCmdVbva);
}


/**
 * Worker for vboxVBVAExHSSaveStateLocked().
 * @thread VDMA
 */
static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    RT_NOREF(pCmdVbva);
    int rc = SSMR3PutU32(pSSM, pCtl->enmType);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, (uint32_t)((uintptr_t)pCtl->u.cmd.pvCmd - (uintptr_t)pu8VramBase));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}

/**
 * Worker for VBoxVBVAExHSSaveState().
 * @thread VDMA
 */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    int rc;
    VBVAEXHOSTCTL* pCtl;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}

/**
 * Handles VBVAEXHOSTCTL_TYPE_HH_SAVESTATE for vboxVDMACrHostCtlProcess, saving
 * state on the VDMA thread.
 *
 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on save state fail
 * @thread VDMA
 */
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    AssertRCReturn(rc, rc);

    rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
    if (RT_FAILURE(rc))
        WARN(("vboxVBVAExHSSaveStateLocked failed %Rrc\n", rc));

    RTCritSectLeave(&pCmdVbva->CltCritSect);
    return rc;
}


/**
 * Worker for vboxVBVAExHSLoadStateLocked.
 * @retval VINF_EOF when there is nothing more to load.
 * @thread VDMA
 */
static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    RT_NOREF(u32Version);
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    if (!u32)
        return VINF_EOF;

    VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);
    pHCtl->u.cmd.cbCmd = u32;

    rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);
    pHCtl->u.cmd.pvCmd = pu8VramBase + u32;

    RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
    ++pCmdVbva->u32cCtls;

    return VINF_SUCCESS;
}

/**
 * Worker for VBoxVBVAExHSLoadState.
 * @thread VDMA
 */
static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not stopped\n"));
        return VERR_INVALID_STATE;
    }

    int rc;
    do
    {
        rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
        AssertLogRelRCReturn(rc, rc);
    } while (rc != VINF_EOF);

    return VINF_SUCCESS;
}

/**
 * Handles VBVAEXHOSTCTL_TYPE_HH_LOADSTATE for vboxVDMACrHostCtlProcess(),
 * loading state on the VDMA thread.
 *
 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
 * @thread VDMA
 */
static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    AssertRCReturn(rc, rc);

    rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
    if (RT_FAILURE(rc))
        WARN(("vboxVBVAExHSLoadStateLocked failed %Rrc\n", rc));

    RTCritSectLeave(&pCmdVbva->CltCritSect);
    return rc;
}


/**
 * Queues a control command to the VDMA worker thread.
 *
 * The @a enmSource argument decides which list (guest/host) it's queued on.
 */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
                                PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    int rc;
    if (VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        pCtl->pfnComplete = pfnComplete;
        pCtl->pvComplete  = pvComplete;

        rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
        if (RT_SUCCESS(rc))
        {
            /* Recheck that we're enabled after we've got the lock. */
            if (VBoxVBVAExHSIsEnabled(pCmdVbva))
            {
                /* Queue it. */
                if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
                    RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
                else
                    RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
                ASMAtomicIncU32(&pCmdVbva->u32cCtls);

                RTCritSectLeave(&pCmdVbva->CltCritSect);

                /* Work the state or something. */
                rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
            }
            else
            {
                RTCritSectLeave(&pCmdVbva->CltCritSect);
                Log(("cmd vbva not enabled (race)\n"));
                rc = VERR_INVALID_STATE;
            }
        }
        else
            AssertRC(rc);
    }
    else
    {
        Log(("cmd vbva not enabled\n"));
        rc = VERR_INVALID_STATE;
    }
    return rc;
}

/**
 * Submits the control command and notifies the VDMA thread.
 */
static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
                             PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
    if (RT_SUCCESS(rc))
    {
        if (rc == VINF_SUCCESS)
            return VBoxVDMAThreadEventNotify(&pVdma->Thread);
        Assert(rc == VINF_ALREADY_INITIALIZED);
    }
    else
        Log(("VBoxVBVAExHCtlSubmit failed %Rrc\n", rc));

    return rc;
}


/**
 * Call VDMA thread creation notification callback.
 */
void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    pThread->pfnChanged = NULL;
    pThread->pvChanged  = NULL;

    ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}

/**
 * Call VDMA thread termination notification callback.
 */
void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    pThread->pfnChanged = NULL;
    pThread->pvChanged  = NULL;

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}

/**
 * Check if VDMA thread is terminating.
 */
DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
{
    return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
}

/**
 * Init VDMA thread.
 */
void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
{
    RT_ZERO(*pThread);
    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
}

/**
 * Clean up VDMA thread.
 */
int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
{
    uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
    switch (u32State)
    {
        case VBOXVDMATHREAD_STATE_TERMINATED:
            return VINF_SUCCESS;

        case VBOXVDMATHREAD_STATE_TERMINATING:
        {
            int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
            if (RT_SUCCESS(rc))
            {
                RTSemEventDestroy(pThread->hEvent);
                pThread->hEvent        = NIL_RTSEMEVENT;
                pThread->hWorkerThread = NIL_RTTHREAD;
                ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
            }
            else
                WARN(("RTThreadWait failed %Rrc\n", rc));
            return rc;
        }

        default:
            WARN(("invalid state\n"));
            return VERR_INVALID_STATE;
    }
}

/**
 * Start VDMA thread.
 */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread,
                         PFNVBOXVDMATHREAD_CHANGED pfnCreated, void *pvCreated)
{
    int rc = VBoxVDMAThreadCleanup(pThread);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventCreate(&pThread->hEvent);
        if (RT_FAILURE(rc))
        {
            WARN(("RTSemEventCreate failed %Rrc\n", rc));
            return rc;
        }
        pThread->u32State   = VBOXVDMATHREAD_STATE_CREATING;
        pThread->pfnChanged = pfnCreated;
        pThread->pvChanged  = pvCreated;
        rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;

        WARN(("RTThreadCreate failed %Rrc\n", rc));
        RTSemEventDestroy(pThread->hEvent);
        pThread->hEvent        = NIL_RTSEMEVENT;
        pThread->hWorkerThread = NIL_RTTHREAD;
        pThread->u32State      = VBOXVDMATHREAD_STATE_TERMINATED;
    }
    else
        WARN(("VBoxVDMAThreadCleanup failed %Rrc\n", rc));
    return rc;
}
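
/* Illustrative sketch, not part of the device code: the expected lifecycle of
 * a VBOXVDMATHREAD, assuming a worker of type FNRTTHREAD (named myWorkerThread
 * here for illustration) that calls VBoxVDMAThreadNotifyConstructSucceeded()
 * once it is up:
 *
 * @code
 *     VBOXVDMATHREAD Thread;
 *     VBoxVDMAThreadInit(&Thread);                       // TERMINATED
 *     VBoxVDMAThreadCreate(&Thread, myWorkerThread, pvUser,
 *                          NULL, NULL);                  // CREATING -> CREATED
 *     // ... worker runs ...
 *     VBoxVDMAThreadTerm(&Thread, NULL, NULL, true);     // fNotify=true, TERMINATING
 *     VBoxVDMAThreadCleanup(&Thread);                    // TERMINATED again
 * @endcode
 */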

/**
 * Notifies the VDMA thread.
 * @thread !VDMA
 */
static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
{
    int rc = RTSemEventSignal(pThread->hEvent);
    AssertRC(rc);
    return rc;
}

/**
 * State worker for VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD &
 * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrHostCtlProcess(), and
 * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrGuestCtlProcess().
 *
 * @thread VDMA
 */
static int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void *pvTerminated, bool fNotify)
{
    for (;;)
    {
        uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
        switch (u32State)
        {
            case VBOXVDMATHREAD_STATE_CREATED:
                pThread->pfnChanged = pfnTerminated;
                pThread->pvChanged  = pvTerminated;
                ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
                if (fNotify)
                {
                    int rc = VBoxVDMAThreadEventNotify(pThread);
                    AssertRC(rc);
                }
                return VINF_SUCCESS;

            case VBOXVDMATHREAD_STATE_TERMINATING:
            case VBOXVDMATHREAD_STATE_TERMINATED:
                WARN(("thread is marked for termination or is terminated\n"));
                return VERR_INVALID_STATE;

            case VBOXVDMATHREAD_STATE_CREATING:
                /* wait till the thread creation is completed */
                WARN(("concurrent thread create/destroy\n"));
                RTThreadYield();
                continue;

            default:
                WARN(("invalid state\n"));
                return VERR_INVALID_STATE;
        }
    }
}


/*
 *
 *
 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
 *
 *
 */

/** Completion callback for vboxVDMACrCtlPostAsync(). */
typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
/** Pointer to a vboxVDMACrCtlPostAsync completion callback. */
typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;

/**
 * Private wrapper around VBOXVDMACMD_CHROMIUM_CTL.
 */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t uMagic; /**< VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC */
    uint32_t cRefs;
    int32_t volatile rc;
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
    void *pvCompletion;
    RTSEMEVENT hEvtDone;
    VBOXVDMACMD_CHROMIUM_CTL Cmd;
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
/** Magic number for VBOXVDMACMD_CHROMIUM_CTL_PRIVATE (Michael Wolff). */
# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC UINT32_C(0x19530827)

/** Converts from a VBOXVDMACMD_CHROMIUM_CTL::Cmd pointer to a pointer to the
 * containing structure. */
# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) RT_FROM_MEMBER(_p, VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)

/**
 * Creates a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
 */
static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr;
    pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
    if (pHdr)
    {
        pHdr->uMagic      = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
        pHdr->cRefs       = 1;
        pHdr->rc          = VERR_NOT_IMPLEMENTED;
        pHdr->hEvtDone    = NIL_RTSEMEVENT;
        pHdr->Cmd.enmType = enmCmd;
        pHdr->Cmd.cbCmd   = cbCmd;
        return &pHdr->Cmd;
    }
    return NULL;
}

/**
 * Releases a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
 */
DECLINLINE(void) vboxVDMACrCtlRelease(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);

    uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
    if (!cRefs)
    {
        pHdr->uMagic = ~VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
        if (pHdr->hEvtDone != NIL_RTSEMEVENT)
        {
            RTSemEventDestroy(pHdr->hEvtDone);
            pHdr->hEvtDone = NIL_RTSEMEVENT;
        }
        RTMemFree(pHdr);
    }
}

/**
 * Retains a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
 */
DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);

    uint32_t cRefs = ASMAtomicIncU32(&pHdr->cRefs);
    Assert(cRefs > 1);
    Assert(cRefs < _1K);
    RT_NOREF_PV(cRefs);
}

/**
 * Gets the result from our private chromium control command.
 *
 * @returns status code.
 * @param   pCmd    The command.
 */
DECLINLINE(int) vboxVDMACrCtlGetRc(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
    return pHdr->rc;
}

/**
 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync}
 *
 * @note Some indirect completion magic, you gotta love this code!
 */
DECLCALLBACK(int) vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);

    pHdr->rc = rc;
    if (pHdr->pfnCompletion)
        pHdr->pfnCompletion(pVGAState, pCmd, pHdr->pvCompletion);
    return VINF_SUCCESS;
}

/**
 * @callback_method_impl{FNCRCTLCOMPLETION,
 *      Completion callback for vboxVDMACrCtlPost.}
 */
static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void *pvContext)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)pvContext;
    Assert(pHdr == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd));
    Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
    RT_NOREF(pVGAState, pCmd);

    int rc = RTSemEventSignal(pHdr->hEvtDone);
    AssertRC(rc);

    vboxVDMACrCtlRelease(&pHdr->Cmd);
}

/**
 * Worker for vboxVDMACrCtlPost().
 */
static int vboxVDMACrCtlPostAsync(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd,
                                  PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
{
    if (   pVGAState->pDrv
        && pVGAState->pDrv->pfnCrHgsmiControlProcess)
    {
        PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
        pHdr->pfnCompletion = pfnCompletion;
        pHdr->pvCompletion  = pvCompletion;
        pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
        return VINF_SUCCESS;
    }
    return VERR_NOT_SUPPORTED;
}

/**
 * Posts a command and waits for its completion.
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);

    /* Allocate the semaphore. */
    Assert(pHdr->hEvtDone == NIL_RTSEMEVENT);
    int rc = RTSemEventCreate(&pHdr->hEvtDone);
    AssertRCReturn(rc, rc);

    /* Grab a reference for the completion routine. */
    vboxVDMACrCtlRetain(&pHdr->Cmd);

    /* Submit and wait for it. */
    rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, pHdr);
    if (RT_SUCCESS(rc))
        rc = RTSemEventWaitNoResume(pHdr->hEvtDone, RT_INDEFINITE_WAIT);
    else
    {
        if (rc != VERR_NOT_SUPPORTED)
            AssertRC(rc);
        vboxVDMACrCtlRelease(pCmd);
    }
    return rc;
}
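
/* Illustrative sketch, not part of the device code: a caller pairs the
 * create/post/get-rc/release helpers above like this (enmType and cbCmd stand
 * for whatever the particular control command needs):
 *
 * @code
 *     PVBOXVDMACMD_CHROMIUM_CTL pCmd = vboxVDMACrCtlCreate(enmType, cbCmd);
 *     if (pCmd)
 *     {
 *         int rc = vboxVDMACrCtlPost(pVGAState, pCmd, cbCmd);
 *         if (RT_SUCCESS(rc))
 *             rc = vboxVDMACrCtlGetRc(pCmd);
 *         vboxVDMACrCtlRelease(pCmd);
 *     }
 * @endcode
 */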


/**
 * Structure for passing data between vboxVDMACrHgcmSubmitSync() and the
 * completion routine vboxVDMACrHgcmSubmitSyncCompletion().
 */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int volatile rc;
    RTSEMEVENT hEvent;
} VDMA_VBVA_CTL_CYNC_COMPLETION;

/**
 * @callback_method_impl{FNCRCTLCOMPLETION,
 *      Completion callback for vboxVDMACrHgcmSubmitSync() that signals the
 *      waiting thread.}
 */
static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
    pData->rc = rc;
    rc = RTSemEventSignal(pData->hEvent);
    AssertLogRelRC(rc);

    RT_NOREF(pCmd, cbCmd);
}

/**
 * Worker for vboxVDMACrHgcmHandleEnable() and vdmaVBVAEnableProcess() that
 * works pVGAState->pDrv->pfnCrHgcmCtlSubmit.
 *
 * @thread VDMA
 */
static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (RT_FAILURE(rc))
    {
        WARN(("RTSemEventCreate failed %Rrc\n", rc));
        return rc;
    }

    pCtl->CalloutList.List.pNext = NULL;

    PVGASTATE pVGAState = pVdma->pVGAState;
    rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            rc = Data.rc;
            if (RT_FAILURE(rc))
                WARN(("pfnCrHgcmCtlSubmit command failed %Rrc\n", rc));
        }
        else
            WARN(("RTSemEventWait failed %Rrc\n", rc));
    }
    else
        WARN(("pfnCrHgcmCtlSubmit failed %Rrc\n", rc));

    RTSemEventDestroy(Data.hEvent);

    return rc;
}


/**
 * Worker for vboxVDMAReset().
 */
static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL HCtl;
    RT_ZERO(HCtl);
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
    int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
    if (RT_SUCCESS(rc))
        vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
    else
        Log(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));
    return rc;
}


/**
 * Used by vboxVDMACrHgcmNotifyTerminatingCb() and called by
 * crVBoxServerCrCmdDisablePostProcess() during crServerTearDown() to drain
 * command queues or something.
 */
static DECLCALLBACK(uint8_t *)
vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;

    if (!pVdma->pCurRemainingHostCtl)
        VBoxVBVAExHSDisable(&pVdma->CmdVbva); /* disable VBVA, all subsequent host commands will go the HGCM way */
    else
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);

    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return (uint8_t *)pVdma->pCurRemainingHostCtl->u.cmd.pvCmd;
    }

    *pcbCtl = 0;
    return NULL;
}

/**
 * Called by crServerTearDown().
 */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
# ifdef VBOX_STRICT
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
# else
    RT_NOREF(hClient);
# endif
}

/**
 * Called by crServerTearDown().
 */
static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient,
                                                           VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
{
    struct VBOXVDMAHOST *pVdma = hClient;

    VBVAEXHOSTCTL HCtl;
    RT_ZERO(HCtl);
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
    int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);

    pHgcmEnableData->hRHCmd   = pVdma;
    pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;

    if (rc == VERR_INVALID_STATE)
        rc = VINF_SUCCESS;
    else if (RT_FAILURE(rc))
        WARN(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));

    return rc;
}

/**
 * Worker for vdmaVBVAEnableProcess() and vdmaVBVADisableProcess().
 *
 * @thread VDMA
 */
static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
{
    VBOXCRCMDCTL_ENABLE Enable;
    RT_ZERO(Enable);
    Enable.Hdr.enmType   = VBOXCRCMDCTL_TYPE_ENABLE;
    Enable.Data.hRHCmd   = pVdma;
    Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;

    int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
    Assert(!pVdma->pCurRemainingHostCtl);
    if (RT_SUCCESS(rc))
    {
        Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
        return VINF_SUCCESS;
    }

    Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
    WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));
    return rc;
}

/**
 * Handles VBVAEXHOSTCTL_TYPE_GHH_ENABLE and VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED
 * for vboxVDMACrGuestCtlProcess().
 *
 * @thread VDMA
 */
static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
{
    if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        WARN(("vdma VBVA is already enabled\n"));
        return VERR_INVALID_STATE;
    }

    VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA
        = (VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
    if (!pVBVA)
    {
        WARN(("invalid offset %d (%#x)\n", u32Offset, u32Offset));
        return VERR_INVALID_PARAMETER;
    }

    int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA, pVdma->pVGAState->vram_ptrR3, pVdma->pVGAState->vram_size);
    if (RT_SUCCESS(rc))
    {
        if (!pVdma->CrSrvInfo.pfnEnable)
        {
            /* "HGCM-less" mode. All initialized. */
            return VINF_SUCCESS;
        }

        VBOXCRCMDCTL_DISABLE Disable;
        Disable.Hdr.enmType            = VBOXCRCMDCTL_TYPE_DISABLE;
        Disable.Data.hNotifyTerm       = pVdma;
        Disable.Data.pfnNotifyTerm     = vboxVDMACrHgcmNotifyTerminatingCb;
        Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
        rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
        if (RT_SUCCESS(rc))
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr                = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin   = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd     = pVGAState->pDrv->pfnVBVAUpdateEnd;
            rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;

            WARN(("pfnEnable failed %Rrc\n", rc));
            vboxVDMACrHgcmHandleEnable(pVdma);
        }
        else
            WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));

        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
        WARN(("VBoxVBVAExHSEnable failed %Rrc\n", rc));

    return rc;
}

/**
 * Worker for several vboxVDMACrHostCtlProcess() commands.
 *
 * @returns IPRT status code.
 * @param   pVdma           The VDMA channel.
 * @param   fDoHgcmEnable   ???
 * @thread  VDMA
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    if (!pVdma->CrSrvInfo.pfnDisable)
    {
        /* "HGCM-less" mode. Just undo what vdmaVBVAEnableProcess did. */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
        return VINF_SUCCESS;
    }

    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* Disabling is a bit tricky: we need to ensure the host ctl commands
             * do not come out of order and do not come over the HGCM channel
             * until after it is enabled. */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr                = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin   = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd     = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info); /** @todo ignoring return code */
        }
    }
    else
        WARN(("pfnDisable failed %Rrc\n", rc));

    return rc;
}

/**
 * Handles VBVAEXHOST_DATA_TYPE_HOSTCTL for vboxVDMAWorkerThread.
 *
 * @returns VBox status code.
 * @param   pVdma       The VDMA channel.
 * @param   pCmd        The control command to process.  Should be
 *                      safe, i.e. not shared with guest.
 * @param   pfContinue  Where to return whether to continue or not.
 * @thread  VDMA
 */
static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
{
    *pfContinue = true;

    int rc;
    switch (pCmd->enmType)
    {
        /*
         * See vdmaVBVACtlOpaqueHostSubmit() and its callers.
         */
        case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
            {
                if (pVdma->CrSrvInfo.pfnHostCtl)
                    return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, (uint8_t *)pCmd->u.cmd.pvCmd, pCmd->u.cmd.cbCmd);
                WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for HGCM-less mode\n"));
            }
            else
                WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
            return VERR_INVALID_STATE;

        /*
         * See vdmaVBVACtlDisableSync().
         */
        case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
            rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
            if (RT_SUCCESS(rc))
                rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */);
            else
                WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
            return rc;

        /*
         * See vboxVDMACrHgcmNotifyTerminatingCb().
         */
        case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
            rc = vdmaVBVADisableProcess(pVdma, false /* fDoHgcmEnable */);
            if (RT_SUCCESS(rc))
            {
                rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true /* fNotify */);
                if (RT_SUCCESS(rc))
                    *pfContinue = false;
                else
                    WARN(("VBoxVDMAThreadTerm failed %Rrc\n", rc));
            }
            else
                WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
            return rc;

        /*
         * See vboxVDMASaveStateExecPerform().
         */
        case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
            rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM);
            if (RT_SUCCESS(rc))
            {
                VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
                if (pVdma->CrSrvInfo.pfnSaveState)
                    rc = pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
            }
            else
                WARN(("VBoxVBVAExHSSaveState failed %Rrc\n", rc));
            return rc;

        /*
         * See vboxVDMASaveLoadExecPerform().
         */
        case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
            rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
            if (RT_SUCCESS(rc))
            {
                VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
                if (pVdma->CrSrvInfo.pfnLoadState)
                {
                    rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
                    if (RT_FAILURE(rc))
                        WARN(("pfnLoadState failed %Rrc\n", rc));
                }
            }
            else
                WARN(("VBoxVBVAExHSLoadState failed %Rrc\n", rc));
            return rc;

        /*
         * See vboxVDMASaveLoadDone().
         */
        case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
            {
                VBVAINFOSCREEN CurScreen;
                VBVAINFOVIEW   CurView;
                rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
                AssertLogRelMsgRCReturn(rc, ("VBVAGetInfoViewAndScreen [screen #%u] -> %#x\n", i, rc), rc);

                rc = VBVAInfoScreen(pVGAState, &CurScreen);
                AssertLogRelMsgRCReturn(rc, ("VBVAInfoScreen [screen #%u] -> %#x\n", i, rc), rc);
            }

            return VINF_SUCCESS;
        }

        default:
            WARN(("unexpected host ctl type %d\n", pCmd->enmType));
            return VERR_INVALID_PARAMETER;
    }
}
1759
1760/**
1761 * Worker for vboxVDMACrGuestCtlResizeEntryProcess.
1762 *
1763 * @returns VINF_SUCCESS or VERR_INVALID_PARAMETER.
1764 * @param pVGAState The VGA device state.
1765 * @param pScreen The screen info (safe copy).
1766 */
1767static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
1768{
1769 const uint32_t idxView = pScreen->u32ViewIndex;
1770 const uint16_t fFlags = pScreen->u16Flags;
1771
1772 if (fFlags & VBVA_SCREEN_F_DISABLED)
1773 {
1774 if ( idxView < pVGAState->cMonitors
1775 || idxView == UINT32_C(0xFFFFFFFF))
1776 {
1777 RT_UNTRUSTED_VALIDATED_FENCE();
1778
1779 RT_ZERO(*pScreen);
1780 pScreen->u32ViewIndex = idxView;
1781 pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
1782 return VINF_SUCCESS;
1783 }
1784 }
1785 else
1786 {
1787 if (fFlags & VBVA_SCREEN_F_BLANK2)
1788 {
1789 if ( idxView >= pVGAState->cMonitors
1790 && idxView != UINT32_C(0xFFFFFFFF))
1791 return VERR_INVALID_PARAMETER;
1792 RT_UNTRUSTED_VALIDATED_FENCE();
1793
1794 /* Special case for blanking using current video mode.
1795 * Only the 'u16Flags' and 'u32ViewIndex' fields are relevant.
1796 */
1797 RT_ZERO(*pScreen);
1798 pScreen->u32ViewIndex = idxView;
1799 pScreen->u16Flags = fFlags;
1800 return VINF_SUCCESS;
1801 }
1802
1803 if ( idxView < pVGAState->cMonitors
1804 && pScreen->u16BitsPerPixel <= 32
1805 && pScreen->u32Width <= UINT16_MAX
1806 && pScreen->u32Height <= UINT16_MAX
1807 && pScreen->u32LineSize <= UINT16_MAX * 4)
1808 {
1809 const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
1810 if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
1811 {
1812 const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
1813 if ( pScreen->u32StartOffset <= pVGAState->vram_size
1814 && u64ScreenSize <= pVGAState->vram_size
1815 && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
1816 return VINF_SUCCESS;
1817 }
1818 }
1819 }
1820
1821 LogFunc(("Failed\n"));
1822 return VERR_INVALID_PARAMETER;
1823}
1824
1825/**
1826 * Handles one entry in a VBVAEXHOSTCTL_TYPE_GHH_RESIZE command.
1827 *
1828 * @returns IPRT status code.
1829 * @param pVdma The VDMA channel
1830 * @param pEntry The entry to handle. Considered volatile.
1831 *
1832 * @thread VDMA
1833 */
1834static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma,
1835 VBOXCMDVBVA_RESIZE_ENTRY RT_UNTRUSTED_VOLATILE_GUEST *pEntry)
1836{
1837 PVGASTATE pVGAState = pVdma->pVGAState;
1838
1839 VBVAINFOSCREEN Screen;
1840 RT_COPY_VOLATILE(Screen, pEntry->Screen);
1841 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1842
1843 /* Verify and cleanup local copy of the input data. */
1844 int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
1845 if (RT_FAILURE(rc))
1846 {
1847 WARN(("invalid screen data\n"));
1848 return rc;
1849 }
1850 RT_UNTRUSTED_VALIDATED_FENCE();
1851
1852 VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
1853 RT_BCOPY_VOLATILE(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
1854 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1855
1856 ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);
1857
1858 if (pVdma->CrSrvInfo.pfnResize)
1859 {
1860 /* Also inform the HGCM service, if it is there. */
1861 rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
1862 if (RT_FAILURE(rc))
1863 {
1864 WARN(("pfnResize failed %Rrc\n", rc));
1865 return rc;
1866 }
1867 }
1868
1869 /* A fake view which contains the current screen for the 2D VBVAInfoView. */
1870 VBVAINFOVIEW View;
1871 View.u32ViewOffset = 0;
1872 View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
1873 View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;
1874
1875 const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);
1876
1877 for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
1878 i >= 0;
1879 i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
1880 {
1881 Screen.u32ViewIndex = i;
1882
1883 VBVAINFOSCREEN CurScreen;
1884 VBVAINFOVIEW CurView;
1885
1886 rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1887 AssertRC(rc);
1888
1889 if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
1890 continue;
1891
1892 /* The view does not change if _BLANK2 is set. */
1893 if ( (!fDisable || !CurView.u32ViewSize)
1894 && !RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_BLANK2))
1895 {
1896 View.u32ViewIndex = Screen.u32ViewIndex;
1897
1898 rc = VBVAInfoView(pVGAState, &View);
1899 if (RT_FAILURE(rc))
1900 {
1901 WARN(("VBVAInfoView failed %Rrc\n", rc));
1902 break;
1903 }
1904 }
1905
1906 rc = VBVAInfoScreen(pVGAState, &Screen);
1907 if (RT_FAILURE(rc))
1908 {
1909 WARN(("VBVAInfoScreen failed %Rrc\n", rc));
1910 break;
1911 }
1912 }
1913
1914 return rc;
1915}
1916
1917
1918/**
1919 * Processes VBVAEXHOST_DATA_TYPE_GUESTCTL for vboxVDMAWorkerThread and
1920 * vdmaVBVACtlThreadCreatedEnable.
1921 *
1922 * @returns VBox status code.
1923 * @param pVdma The VDMA channel.
1924 * @param pCmd The command to process. May be a safe copy (not
1925 * shared with the guest).
1926 *
1927 * @thread VDMA
1928 */
1929static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1930{
1931 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1932 switch (enmType)
1933 {
1934 /*
1935 * See handling of VBOXCMDVBVACTL_TYPE_3DCTL in vboxCmdVBVACmdCtl().
1936 */
1937 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1938 ASSERT_GUEST_LOGREL_RETURN(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva), VERR_INVALID_STATE);
1939 ASSERT_GUEST_LOGREL_RETURN(pVdma->CrSrvInfo.pfnGuestCtl, VERR_INVALID_STATE);
1940 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr,
1941 (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd,
1942 pCmd->u.cmd.cbCmd);
1943
1944 /*
1945 * See handling of VBOXCMDVBVACTL_TYPE_RESIZE in vboxCmdVBVACmdCtl().
1946 */
1947 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1948 {
1949 ASSERT_GUEST_RETURN(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva), VERR_INVALID_STATE);
1950 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1951 ASSERT_GUEST_LOGREL_MSG_RETURN( !(cbCmd % sizeof(VBOXCMDVBVA_RESIZE_ENTRY))
1952 && cbCmd > 0,
1953 ("cbCmd=%#x\n", cbCmd), VERR_INVALID_PARAMETER);
1954
1955 uint32_t const cElements = cbCmd / sizeof(VBOXCMDVBVA_RESIZE_ENTRY);
1956 VBOXCMDVBVA_RESIZE RT_UNTRUSTED_VOLATILE_GUEST *pResize
1957 = (VBOXCMDVBVA_RESIZE RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd;
1958 for (uint32_t i = 0; i < cElements; ++i)
1959 {
1960 VBOXCMDVBVA_RESIZE_ENTRY RT_UNTRUSTED_VOLATILE_GUEST *pEntry = &pResize->aEntries[i];
1961 int rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1962 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("vboxVDMACrGuestCtlResizeEntryProcess failed for #%u: %Rrc\n", i, rc), rc);
1963 }
1964 return VINF_SUCCESS;
1965 }
1966
1967 /*
1968 * See vdmaVBVACtlEnableSubmitInternal().
1969 */
1970 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1971 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1972 {
1973 ASSERT_GUEST(pCmd->u.cmd.cbCmd == sizeof(VBVAENABLE));
1974
1975 VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable = (VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd;
1976 uint32_t const u32Offset = pEnable->u32Offset;
1977 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1978
1979 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1980 ASSERT_GUEST_MSG_RC_RETURN(rc, ("vdmaVBVAEnableProcess -> %Rrc\n", rc), rc);
1981
1982 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1983 {
1984 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1985 ASSERT_GUEST_MSG_RC_RETURN(rc, ("VBoxVBVAExHPPause -> %Rrc\n", rc), rc);
1986 }
1987 return VINF_SUCCESS;
1988 }
1989
1990 /*
1991 * See vdmaVBVACtlDisableSubmitInternal().
1992 */
1993 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1994 {
1995 int rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
1996 ASSERT_GUEST_MSG_RC_RETURN(rc, ("vdmaVBVADisableProcess -> %Rrc\n", rc), rc);
1997
1998 /* do vgaUpdateDisplayAll right away */
1999 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
2000 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
2001
2002 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */);
2003 }
2004
2005 default:
2006 ASSERT_GUEST_LOGREL_MSG_FAILED(("unexpected ctl type %d\n", enmType));
2007 return VERR_INVALID_PARAMETER;
2008 }
2009}
2010
2011
2012/**
2013 * Copies one page in a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
2014 *
2015 * @param pDevIns Device instance data.
2016 * @param uPageNo Page frame number.
2017 * @param pbVram Pointer to the VRAM.
2018 * @param fIn Flag whether this is a page in or out op.
2019 * @thread VDMA
2020 *
2021 * The direction is VRAM-related: fIn == true means a transfer to VRAM, false a transfer from VRAM.
2022 */
2023static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX uPageNo, uint8_t *pbVram, bool fIn)
2024{
2025 RTGCPHYS GCPhysPage = (RTGCPHYS)uPageNo << X86_PAGE_SHIFT;
2026 PGMPAGEMAPLOCK Lock;
2027
2028 if (fIn)
2029 {
2030 const void *pvPage;
2031 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
2032 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("PDMDevHlpPhysGCPhys2CCPtrReadOnly %RGp -> %Rrc\n", GCPhysPage, rc), rc);
2033
2034 memcpy(pbVram, pvPage, PAGE_SIZE);
2035 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2036 }
2037 else
2038 {
2039 void *pvPage;
2040 int rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
2041 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("PDMDevHlpPhysGCPhys2CCPtr %RGp -> %Rrc\n", GCPhysPage, rc), rc);
2042
2043 memcpy(pvPage, pbVram, PAGE_SIZE);
2044 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2045 }
2046
2047 return VINF_SUCCESS;
2048}
2049
2050/**
2051 * Handles a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
2052 *
2053 * @return 0 on success, -1 on failure.
2054 *
2055 * @thread VDMA
2056 */
2057static int8_t vboxVDMACrCmdVbvaPageTransfer(PVGASTATE pVGAState, VBOXCMDVBVA_HDR const RT_UNTRUSTED_VOLATILE_GUEST *pHdr,
2058 uint32_t cbCmd, const VBOXCMDVBVA_PAGING_TRANSFER_DATA RT_UNTRUSTED_VOLATILE_GUEST *pData)
2059{
2060 /*
2061 * Extract and validate information.
2062 */
2063 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_PAGING_TRANSFER), ("%#x\n", cbCmd), -1);
2064
2065 bool const fIn = RT_BOOL(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
2066 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2067
2068 uint32_t cbPageNumbers = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
2069 ASSERT_GUEST_MSG_RETURN(!(cbPageNumbers % sizeof(VBOXCMDVBVAPAGEIDX)), ("%#x\n", cbPageNumbers), -1);
2070 VBOXCMDVBVAPAGEIDX const cPages = cbPageNumbers / sizeof(VBOXCMDVBVAPAGEIDX);
2071
2072 VBOXCMDVBVAOFFSET offVRam = pData->Alloc.u.offVRAM;
2073 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2074 ASSERT_GUEST_MSG_RETURN(!(offVRam & X86_PAGE_OFFSET_MASK), ("%#x\n", offVRam), -1);
2075 ASSERT_GUEST_MSG_RETURN(offVRam < pVGAState->vram_size, ("%#x vs %#x\n", offVRam, pVGAState->vram_size), -1);
2076 uint32_t cVRamPages = (pVGAState->vram_size - offVRam) >> X86_PAGE_SHIFT;
2077 ASSERT_GUEST_MSG_RETURN(cPages <= cVRamPages, ("cPages=%#x vs cVRamPages=%#x @ offVRam=%#x\n", cPages, cVRamPages, offVRam), -1);
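/* Note: offVRam is page aligned and below vram_size at this point, so
   cVRamPages is exactly the number of whole pages left in VRAM; bounding
   cPages by it keeps the copy loop below inside the VRAM mapping without
   any 32-bit overflow in the offset arithmetic. */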
2078
2079 RT_UNTRUSTED_VALIDATED_FENCE();
2080
2081 /*
2082 * Execute the command.
2083 */
2084 uint8_t *pbVRam = (uint8_t *)pVGAState->vram_ptrR3 + offVRam;
2085 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbVRam += X86_PAGE_SIZE)
2086 {
2087 uint32_t uPageNo = pData->aPageNumbers[iPage];
2088 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2089 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pVGAState->pDevInsR3, uPageNo, pbVRam, fIn);
2090 ASSERT_GUEST_MSG_RETURN(RT_SUCCESS(rc), ("#%#x: uPageNo=%#x rc=%Rrc\n", iPage, uPageNo, rc), -1);
2091 }
2092 return 0;
2093}
2094
2095
2096/**
2097 * Handles VBOXCMDVBVA_OPTYPE_PAGING_FILL.
2098 *
2099 * @returns 0 on success, -1 on failure.
2100 * @param pVGAState The VGA state.
2101 * @param pFill The fill command (volatile).
2102 *
2103 * @thread VDMA
2104 */
2105static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL RT_UNTRUSTED_VOLATILE_GUEST *pFill)
2106{
2107 /*
2108 * Copy and validate input.
2109 */
2110 VBOXCMDVBVA_PAGING_FILL FillSafe;
2111 RT_COPY_VOLATILE(FillSafe, *pFill);
2112 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2113
2114 VBOXCMDVBVAOFFSET offVRAM = FillSafe.offVRAM;
2115 ASSERT_GUEST_MSG_RETURN(!(offVRAM & X86_PAGE_OFFSET_MASK), ("offVRAM=%#x\n", offVRAM), -1);
2116 ASSERT_GUEST_MSG_RETURN(offVRAM <= pVGAState->vram_size, ("offVRAM=%#x\n", offVRAM), -1);
2117
2118 uint32_t cbFill = FillSafe.u32CbFill;
2119 ASSERT_GUEST_STMT(!(cbFill & 3), cbFill &= ~(uint32_t)3);
2120 ASSERT_GUEST_MSG_RETURN( cbFill < pVGAState->vram_size
2121 && offVRAM <= pVGAState->vram_size - cbFill,
2122 ("offVRAM=%#x cbFill=%#x\n", offVRAM, cbFill), -1);
2123
2124 RT_UNTRUSTED_VALIDATED_FENCE();
2125
2126 /*
2127 * Execute.
2128 */
2129 uint32_t *pu32Vram = (uint32_t *)((uint8_t *)pVGAState->vram_ptrR3 + offVRAM);
2130 uint32_t const u32Color = FillSafe.u32Pattern;
2131
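/* Fill backwards, one 32-bit word per iteration; cbFill was rounded down
   to a multiple of 4 above, so cLoops covers the validated range exactly. */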
2132 uint32_t cLoops = cbFill / 4;
2133 while (cLoops-- > 0)
2134 pu32Vram[cLoops] = u32Color;
2135
2136 return 0;
2137}
2138
2139/**
2140 * Process command data.
2141 *
2142 * @returns zero or positive is success, negative failure.
2143 * @param pVdma The VDMA channel.
2144 * @param pCmd The command data to process. Assume volatile.
2145 * @param cbCmd The amount of command data.
2146 *
2147 * @thread VDMA
2148 */
2149static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma,
2150 const VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
2151{
2152 uint8_t bOpCode = pCmd->u8OpCode;
2153 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2154 switch (bOpCode)
2155 {
2156 case VBOXCMDVBVA_OPTYPE_NOPCMD:
2157 return 0;
2158
2159 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
2160 return vboxVDMACrCmdVbvaPageTransfer(pVdma->pVGAState, pCmd, cbCmd,
2161 &((VBOXCMDVBVA_PAGING_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *)pCmd)->Data);
2162
2163 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
2164 ASSERT_GUEST_RETURN(cbCmd == sizeof(VBOXCMDVBVA_PAGING_FILL), -1);
2165 return vboxVDMACrCmdVbvaPagingFill(pVdma->pVGAState, (VBOXCMDVBVA_PAGING_FILL RT_UNTRUSTED_VOLATILE_GUEST *)pCmd);
2166
2167 default:
2168 ASSERT_GUEST_RETURN(pVdma->CrSrvInfo.pfnCmd != NULL, -1);
2169 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
2170 }
2171}
2172
2173# if 0
2174typedef struct VBOXCMDVBVA_PAGING_TRANSFER
2175{
2176 VBOXCMDVBVA_HDR Hdr;
2177 /* for now can only contain offVRAM.
2178 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
2179 VBOXCMDVBVA_ALLOCINFO Alloc;
2180 uint32_t u32Reserved;
2181 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
2182} VBOXCMDVBVA_PAGING_TRANSFER;
2183# endif
2184
2185AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
2186AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
2187AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
2188AssertCompile(!(X86_PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
2189
2190# define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (X86_PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
2191
2192/**
2193 * Worker for vboxVDMACrCmdProcess.
2194 *
2195 * @returns 8-bit result.
2196 * @param pVdma The VDMA channel.
2197 * @param pCmd The command. Consider volatile!
2198 * @param cbCmd The size of what @a pCmd points to. At least
2199 * sizeof(VBOXCMDVBVA_HDR).
2200 * @param fRecursion Set if recursive call, false if not.
2201 *
2202 * @thread VDMA
2203 */
2204static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd,
2205 uint32_t cbCmd, bool fRecursion)
2206{
2207 int8_t i8Result = 0;
2208 uint8_t const bOpCode = pCmd->u8OpCode;
2209 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2210 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: ENTER, bOpCode=%u\n", bOpCode));
2211 switch (bOpCode)
2212 {
2213 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
2214 {
2215 /*
2216 * Extract the command physical address and size.
2217 */
2218 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_SYSMEMCMD), ("%#x\n", cbCmd), -1);
2219 RTGCPHYS GCPhysCmd = ((VBOXCMDVBVA_SYSMEMCMD RT_UNTRUSTED_VOLATILE_GUEST *)pCmd)->phCmd;
2220 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2221 uint32_t cbCmdPart = X86_PAGE_SIZE - (uint32_t)(GCPhysCmd & X86_PAGE_OFFSET_MASK);
2222
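/* The size of the real command is carried in the wrapping header:
   low byte in u8Flags, high byte in u.u8PrimaryID. */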
2223 uint32_t cbRealCmd = pCmd->u8Flags;
2224 cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
2225 ASSERT_GUEST_MSG_RETURN(cbRealCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbRealCmd), -1);
2226 ASSERT_GUEST_MSG_RETURN(cbRealCmd <= _1M, ("%#x\n", cbRealCmd), -1);
2227
2228 /*
2229 * Lock down the first page of the memory specified by the command.
2230 */
2231 PGMPAGEMAPLOCK Lock;
2232 PVGASTATE pVGAState = pVdma->pVGAState;
2233 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
2234 VBOXCMDVBVA_HDR const *pRealCmdHdr = NULL;
2235 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysCmd, 0, (const void **)&pRealCmdHdr, &Lock);
2236 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("VDMA: %RGp -> %Rrc\n", GCPhysCmd, rc), -1);
2237 Assert((GCPhysCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pRealCmdHdr) & PAGE_OFFSET_MASK));
2238
2239 /*
2240 * All fits within one page? We can handle that pretty efficiently.
2241 */
2242 if (cbRealCmd <= cbCmdPart)
2243 {
2244 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
2245 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2246 }
2247 else
2248 {
2249 /*
2250 * To keep things damn simple, just double buffer cross page or
2251 * multipage requests.
2252 */
2253 uint8_t *pbCmdBuf = (uint8_t *)RTMemTmpAllocZ(RT_ALIGN_Z(cbRealCmd, 16));
2254 if (pbCmdBuf)
2255 {
2256 memcpy(pbCmdBuf, pRealCmdHdr, cbCmdPart);
2257 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2258 pRealCmdHdr = NULL;
2259
2260 rc = PDMDevHlpPhysRead(pDevIns, GCPhysCmd + cbCmdPart, &pbCmdBuf[cbCmdPart], cbRealCmd - cbCmdPart);
2261 if (RT_SUCCESS(rc))
2262 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, (VBOXCMDVBVA_HDR const *)pbCmdBuf, cbRealCmd);
2263 else
2264 LogRelMax(200, ("VDMA: Error reading %#x bytes of guest memory %#RGp!\n", cbRealCmd, GCPhysCmd));
2265 RTMemTmpFree(pbCmdBuf);
2266 }
2267 else
2268 {
2269 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2270 LogRelMax(200, ("VDMA: Out of temporary memory! %#x\n", cbRealCmd));
2271 i8Result = -1;
2272 }
2273 }
2274 return i8Result;
2275 }
2276
2277 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
2278 {
2279 Assert(cbCmd >= sizeof(VBOXCMDVBVA_HDR)); /* caller already checked this */
2280 ASSERT_GUEST_RETURN(!fRecursion, -1);
2281
2282 /* Skip current command. */
2283 cbCmd -= sizeof(*pCmd);
2284 pCmd++;
2285
2286 /* Process subcommands. */
2287 while (cbCmd > 0)
2288 {
2289 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbCmd), -1);
2290
2291 uint16_t cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
2292 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2293 ASSERT_GUEST_MSG_RETURN(cbCurCmd <= cbCmd, ("cbCurCmd=%#x, cbCmd=%#x\n", cbCurCmd, cbCmd), -1);
2294
2295 i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd, true /*fRecursion*/);
2296 ASSERT_GUEST_MSG_RETURN(i8Result >= 0, ("vboxVDMACrCmdVbvaProcess -> %d\n", i8Result), i8Result);
2297
2298 /* Advance to the next command. */
2299 pCmd = (VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *)((uintptr_t)pCmd + cbCurCmd);
2300 cbCmd -= cbCurCmd;
2301 }
2302 return 0;
2303 }
2304
2305 default:
2306 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
2307 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: LEAVE, opCode(%i)\n", pCmd->u8OpCode));
2308 return i8Result;
2309 }
2310}
2311
2312/**
2313 * Worker for vboxVDMAWorkerThread handling VBVAEXHOST_DATA_TYPE_CMD.
2314 *
2315 * @thread VDMA
2316 */
2317static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd, uint32_t cbCmd)
2318{
2319 if ( cbCmd > 0
2320 && *pbCmd == VBOXCMDVBVA_OPTYPE_NOP)
2321 { /* nop */ }
2322 else
2323 {
2324 ASSERT_GUEST_RETURN_VOID(cbCmd >= sizeof(VBOXCMDVBVA_HDR));
2325 VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd = (VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *)pbCmd;
2326
2327 /* check if the command is cancelled */
2328 if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2329 {
2330 /* Process it. */
2331 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd, false /*fRecursion*/);
2332 }
2333 else
2334 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2335 }
2336
2337}
2338
2339/**
2340 * Worker for vboxVDMAConstruct().
2341 */
2342static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2343{
2344 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd;
2345 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof(*pCmd));
2346 int rc;
2347 if (pCmd)
2348 {
2349 PVGASTATE pVGAState = pVdma->pVGAState;
2350 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2351 pCmd->cbVRam = pVGAState->vram_size;
2352 pCmd->pLed = &pVGAState->Led3D;
2353 pCmd->CrClientInfo.hClient = pVdma;
2354 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2355 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2356 if (RT_SUCCESS(rc))
2357 {
2358 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2359 if (RT_SUCCESS(rc))
2360 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2361 else if (rc != VERR_NOT_SUPPORTED)
2362 WARN(("vboxVDMACrCtlGetRc returned %Rrc\n", rc));
2363 }
2364 else
2365 WARN(("vboxVDMACrCtlPost failed %Rrc\n", rc));
2366
2367 vboxVDMACrCtlRelease(&pCmd->Hdr);
2368 }
2369 else
2370 rc = VERR_NO_MEMORY;
2371
2372 if (!RT_SUCCESS(rc))
2373 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2374
2375 return rc;
2376}
2377
2378/**
2379 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync}
2380 *
2381 * @note Some indirect completion magic, you gotta love this code!
2382 */
2383DECLCALLBACK(int) vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2384{
2385 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2386 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2387 VBOXVDMACMD RT_UNTRUSTED_VOLATILE_GUEST *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2388 VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2389
2390 AssertRC(rc);
2391 pDr->rc = rc;
2392
2393 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2394 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2395 AssertRC(rc);
2396
2397 return rc;
2398}
2399
2400/**
2401 * Worker for vboxVDMACmdExecBlt().
2402 */
2403static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, const VBOXVIDEOOFFSET offDst, const VBOXVIDEOOFFSET offSrc,
2404 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2405 const VBOXVDMA_RECTL *pDstRectl, const VBOXVDMA_RECTL *pSrcRectl)
2406{
2407 /*
2408 * We do not support color conversion.
2409 */
2410 AssertReturn(pDstDesc->format == pSrcDesc->format, VERR_INVALID_FUNCTION);
2411
2412 /* we do not support stretching (checked by caller) */
2413 Assert(pDstRectl->height == pSrcRectl->height);
2414 Assert(pDstRectl->width == pSrcRectl->width);
2415
2416 uint8_t *pbRam = pVdma->pVGAState->vram_ptrR3;
2417 AssertCompileSize(pVdma->pVGAState->vram_size, sizeof(uint32_t));
2418 uint32_t cbVRamSize = pVdma->pVGAState->vram_size;
2419 uint8_t *pbDstSurf = pbRam + offDst;
2420 uint8_t *pbSrcSurf = pbRam + offSrc;
2421
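/* Fast path: same pitch on both surfaces and full-width rows mean the
   rectangle is one contiguous block, so a single memcpy suffices. */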
2422 if ( pDstDesc->width == pDstRectl->width
2423 && pSrcDesc->width == pSrcRectl->width
2424 && pSrcDesc->width == pDstDesc->width
2425 && pSrcDesc->pitch == pDstDesc->pitch)
2426 {
2427 Assert(!pDstRectl->left);
2428 Assert(!pSrcRectl->left);
2429 uint32_t offBoth = pDstDesc->pitch * pDstRectl->top;
2430 uint32_t cbToCopy = pDstDesc->pitch * pDstRectl->height;
2431
2432 if ( cbToCopy <= cbVRamSize
2433 && (uintptr_t)(pbDstSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy
2434 && (uintptr_t)(pbSrcSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy)
2435 {
2436 RT_UNTRUSTED_VALIDATED_FENCE();
2437 memcpy(pbDstSurf + offBoth, pbSrcSurf + offBoth, cbToCopy);
2438 }
2439 else
2440 return VERR_INVALID_PARAMETER;
2441 }
2442 else
2443 {
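/* General path: compute the byte offsets of the sub-rectangle in both
   surfaces and copy it line by line, re-checking each line against the
   VRAM bounds. */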
2444 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2445 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2446 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2447 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2448 Assert(cbDstLine <= pDstDesc->pitch);
2449 uint32_t cbDstSkip = pDstDesc->pitch;
2450 uint8_t *pbDstStart = pbDstSurf + offDstStart;
2451
2452 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2453# ifdef VBOX_STRICT
2454 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2455 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2456# endif
2457 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2458 Assert(cbSrcLine <= pSrcDesc->pitch);
2459 uint32_t cbSrcSkip = pSrcDesc->pitch;
2460 const uint8_t *pbSrcStart = pbSrcSurf + offSrcStart;
2461
2462 Assert(cbDstLine == cbSrcLine);
2463
2464 for (uint32_t i = 0; ; ++i)
2465 {
2466 if ( cbDstLine <= cbVRamSize
2467 && (uintptr_t)pbDstStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine
2468 && (uintptr_t)pbSrcStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine)
2469 {
2470 RT_UNTRUSTED_VALIDATED_FENCE(); /** @todo this could potentially be buzzkiller. */
2471 memcpy(pbDstStart, pbSrcStart, cbDstLine);
2472 }
2473 else
2474 return VERR_INVALID_PARAMETER;
2475 if (i == pDstRectl->height)
2476 break;
2477 pbDstStart += cbDstSkip;
2478 pbSrcStart += cbSrcSkip;
2479 }
2480 }
2481 return VINF_SUCCESS;
2482}
2483
2484#if 0 /* unused */
2485static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2486{
2487 if (!pRectl1->width)
2488 *pRectl1 = *pRectl2;
2489 else
2490 {
2491 int16_t x21 = pRectl1->left + pRectl1->width;
2492 int16_t x22 = pRectl2->left + pRectl2->width;
2493 if (pRectl1->left > pRectl2->left)
2494 {
2495 pRectl1->left = pRectl2->left;
2496 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2497 }
2498 else if (x21 < x22)
2499 pRectl1->width = x22 - pRectl1->left;
2500
2501 x21 = pRectl1->top + pRectl1->height;
2502 x22 = pRectl2->top + pRectl2->height;
2503 if (pRectl1->top > pRectl2->top)
2504 {
2505 pRectl1->top = pRectl2->top;
2506 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2507 }
2508 else if (x21 < x22)
2509 pRectl1->height = x22 - pRectl1->top;
2510 }
2511}
2512#endif /* unused */
2513
2514/**
2515 * Handles VBOXVDMACMD_TYPE_DMA_PRESENT_BLT for vboxVDMACmdExec().
2516 *
2517 * @returns number of bytes (positive) of the full command on success,
2518 * otherwise a negative error status (VERR_XXX).
2519 *
2520 * @param pVdma The VDMA channel.
2521 * @param pBlt Blit command buffer. This is to be considered
2522 * volatile!
2523 * @param cbBuffer Number of bytes accessible at @a pBtl.
2524 */
2525static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const VBOXVDMACMD_DMA_PRESENT_BLT RT_UNTRUSTED_VOLATILE_GUEST *pBlt,
2526 uint32_t cbBuffer)
2527{
2528 /*
2529 * Validate and make a local copy of the blt command up to the rectangle array.
2530 */
2531 AssertReturn(cbBuffer >= RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects), VERR_INVALID_PARAMETER);
2532 VBOXVDMACMD_DMA_PRESENT_BLT BltSafe;
2533 RT_BCOPY_VOLATILE(&BltSafe, (void const *)pBlt, RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects));
2534 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2535
2536 AssertReturn(BltSafe.cDstSubRects < _8M, VERR_INVALID_PARAMETER);
2537 uint32_t const cbBlt = RT_UOFFSETOF_DYN(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[BltSafe.cDstSubRects]);
2538 AssertReturn(cbBuffer >= cbBlt, VERR_INVALID_PARAMETER);
2539
2540 /*
2541 * We do not support stretching.
2542 */
2543 AssertReturn(BltSafe.srcRectl.width == BltSafe.dstRectl.width, VERR_INVALID_FUNCTION);
2544 AssertReturn(BltSafe.srcRectl.height == BltSafe.dstRectl.height, VERR_INVALID_FUNCTION);
2545
2546 Assert(BltSafe.cDstSubRects);
2547
2548 RT_UNTRUSTED_VALIDATED_FENCE();
2549
2550 /*
2551 * Do the work.
2552 */
2553 //VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0}; - pointless
2554 if (BltSafe.cDstSubRects)
2555 {
2556 for (uint32_t i = 0; i < BltSafe.cDstSubRects; ++i)
2557 {
2558 VBOXVDMA_RECTL dstSubRectl;
2559 dstSubRectl.left = pBlt->aDstSubRects[i].left;
2560 dstSubRectl.top = pBlt->aDstSubRects[i].top;
2561 dstSubRectl.width = pBlt->aDstSubRects[i].width;
2562 dstSubRectl.height = pBlt->aDstSubRects[i].height;
2563 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2564
2565 VBOXVDMA_RECTL srcSubRectl = dstSubRectl;
2566
2567 dstSubRectl.left += BltSafe.dstRectl.left;
2568 dstSubRectl.top += BltSafe.dstRectl.top;
2569
2570 srcSubRectl.left += BltSafe.srcRectl.left;
2571 srcSubRectl.top += BltSafe.srcRectl.top;
2572
2573 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
2574 &dstSubRectl, &srcSubRectl);
2575 AssertRCReturn(rc, rc);
2576
2577 //vboxVDMARectlUnite(&updateRectl, &dstSubRectl); - pointless
2578 }
2579 }
2580 else
2581 {
2582 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
2583 &BltSafe.dstRectl, &BltSafe.srcRectl);
2584 AssertRCReturn(rc, rc);
2585
2586 //vboxVDMARectlUnite(&updateRectl, &BltSafe.dstRectl); - pointless
2587 }
2588
2589 return cbBlt;
2590}
2591
2592
2593/**
2594 * Handles VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER for vboxVDMACmdCheckCrCmd() and
2595 * vboxVDMACmdExec().
2596 *
2597 * @returns number of bytes (positive) of the full command on success,
2598 * otherwise a negative error status (VERR_XXX).
2599 *
2600 * @param pVdma The VDMA channel.
2601 * @param pTransfer Transfer command buffer. This is to be considered
2602 * volatile!
2603 * @param cbBuffer Number of bytes accessible at @a pTransfer.
2604 */
2605static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer,
2606 uint32_t cbBuffer)
2607{
2608 /*
2609 * Make a copy of the command (it's volatile).
2610 */
2611 AssertReturn(cbBuffer >= sizeof(*pTransfer), VERR_INVALID_PARAMETER);
2612 VBOXVDMACMD_DMA_BPB_TRANSFER TransferSafeCopy;
2613 RT_COPY_VOLATILE(TransferSafeCopy, *pTransfer);
2614 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2615
2616 PVGASTATE pVGAState = pVdma->pVGAState;
2617 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
2618 uint8_t *pbRam = pVGAState->vram_ptrR3;
2619 uint32_t cbTransfer = TransferSafeCopy.cbTransferSize;
2620
2621 /*
2622 * Validate VRAM offset.
2623 */
2624 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2625 AssertReturn( cbTransfer <= pVGAState->vram_size
2626 && TransferSafeCopy.Src.offVramBuf <= pVGAState->vram_size - cbTransfer,
2627 VERR_INVALID_PARAMETER);
2628
2629 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2630 AssertReturn( cbTransfer <= pVGAState->vram_size
2631 && TransferSafeCopy.Dst.offVramBuf <= pVGAState->vram_size - cbTransfer,
2632 VERR_INVALID_PARAMETER);
2633 RT_UNTRUSTED_VALIDATED_FENCE();
2634
2635 /*
2636 * Transfer loop.
2637 */
2638 uint32_t cbTransfered = 0;
2639 int rc = VINF_SUCCESS;
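/* Copy in chunks: a VRAM side is contiguous, while a guest-physical side
   is mapped one page at a time, so each iteration is clipped to the
   smaller of the two remaining page fragments. */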
2640 do
2641 {
2642 uint32_t cbSubTransfer = cbTransfer;
2643
2644 const void *pvSrc;
2645 bool fSrcLocked = false;
2646 PGMPAGEMAPLOCK SrcLock;
2647 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2648 pvSrc = pbRam + TransferSafeCopy.Src.offVramBuf + cbTransfered;
2649 else
2650 {
2651 RTGCPHYS GCPhysSrcPage = TransferSafeCopy.Src.phBuf + cbTransfered;
2652 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysSrcPage, 0, &pvSrc, &SrcLock);
2653 AssertRC(rc);
2654 if (RT_SUCCESS(rc))
2655 {
2656 fSrcLocked = true;
2657 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysSrcPage & X86_PAGE_OFFSET_MASK));
2658 }
2659 else
2660 break;
2661 }
2662
2663 void *pvDst;
2664 PGMPAGEMAPLOCK DstLock;
2665 bool fDstLocked = false;
2666 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2667 pvDst = pbRam + TransferSafeCopy.Dst.offVramBuf + cbTransfered;
2668 else
2669 {
2670 RTGCPHYS GCPhysDstPage = TransferSafeCopy.Dst.phBuf + cbTransfered;
2671 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysDstPage, 0, &pvDst, &DstLock);
2672 AssertRC(rc);
2673 if (RT_SUCCESS(rc))
2674 {
2675 fDstLocked = true;
2676 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysDstPage & X86_PAGE_OFFSET_MASK));
2677 }
2678 }
2679
2680 if (RT_SUCCESS(rc))
2681 {
2682 memcpy(pvDst, pvSrc, cbSubTransfer);
2683 cbTransfered += cbSubTransfer;
2684 cbTransfer -= cbSubTransfer;
2685 }
2686 else
2687 cbTransfer = 0; /* force break below */
2688
2689 if (fSrcLocked)
2690 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2691 if (fDstLocked)
2692 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2693 } while (cbTransfer);
2694
2695 if (RT_SUCCESS(rc))
2696 return sizeof(TransferSafeCopy);
2697 return rc;
2698}
2699
2700/**
2701 * Worker for vboxVDMACommandProcess().
2702 *
2703 * @param pVdma The VDMA channel.
2704 * @param pbBuffer Command buffer, considered volatile.
2705 * @param cbBuffer The number of bytes at @a pbBuffer.
2706 * @param pCmdDr The command. For setting the async flag on chromium
2707 * requests.
2708 * @param pfAsyncCmd Flag to set if async command completion on chromium
2709 * requests. The input state is false, so it only ever needs to
2710 * be set to true.
2711 */
2712static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, uint8_t const RT_UNTRUSTED_VOLATILE_GUEST *pbBuffer, uint32_t cbBuffer,
2713 VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmdDr, bool *pfAsyncCmd)
2714{
2715 AssertReturn(pbBuffer, VERR_INVALID_POINTER);
2716
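/* The buffer may hold a chain of commands; each iteration consumes one
   header plus body and advances, until a command either fails, exhausts
   the buffer, or is handed off for asynchronous completion. */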
2717 for (;;)
2718 {
2719 AssertReturn(cbBuffer >= VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER);
2720
2721 VBOXVDMACMD const RT_UNTRUSTED_VOLATILE_GUEST *pCmd = (VBOXVDMACMD const RT_UNTRUSTED_VOLATILE_GUEST *)pbBuffer;
2722 VBOXVDMACMD_TYPE enmCmdType = pCmd->enmType;
2723 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2724
2725 ASSERT_GUEST_MSG_RETURN( enmCmdType == VBOXVDMACMD_TYPE_CHROMIUM_CMD
2726 || enmCmdType == VBOXVDMACMD_TYPE_DMA_PRESENT_BLT
2727 || enmCmdType == VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER
2728 || enmCmdType == VBOXVDMACMD_TYPE_DMA_NOP
2729 || enmCmdType == VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ,
2730 ("enmCmdType=%d\n", enmCmdType),
2731 VERR_INVALID_FUNCTION);
2732 RT_UNTRUSTED_VALIDATED_FENCE();
2733
2734 int cbProcessed;
2735 switch (enmCmdType)
2736 {
2737 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2738 {
2739 VBOXVDMACMD_CHROMIUM_CMD RT_UNTRUSTED_VOLATILE_GUEST *pCrCmd = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_CHROMIUM_CMD);
2740 uint32_t const cbBody = VBOXVDMACMD_BODY_SIZE(cbBuffer);
2741 AssertReturn(cbBody >= sizeof(*pCrCmd), VERR_INVALID_PARAMETER);
2742
2743 PVGASTATE pVGAState = pVdma->pVGAState;
2744 AssertReturn(pVGAState->pDrv->pfnCrHgsmiCommandProcess, VERR_NOT_SUPPORTED);
2745
2746 VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
2747 pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
2748 *pfAsyncCmd = true;
2749 return VINF_SUCCESS;
2750 }
2751
2752 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2753 {
2754 VBOXVDMACMD_DMA_PRESENT_BLT RT_UNTRUSTED_VOLATILE_GUEST *pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2755 cbProcessed = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
2756 Assert(cbProcessed >= 0);
2757 break;
2758 }
2759
2760 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2761 {
2762 VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer
2763 = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2764 cbProcessed = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
2765 Assert(cbProcessed >= 0);
2766 break;
2767 }
2768
2769 case VBOXVDMACMD_TYPE_DMA_NOP:
2770 return VINF_SUCCESS;
2771
2772 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2773 return VINF_SUCCESS;
2774
2775 default:
2776 AssertFailedReturn(VERR_INVALID_FUNCTION);
2777 }
2778
2779 /* Advance buffer or return. */
2780 if (cbProcessed >= 0)
2781 {
2782 Assert(cbProcessed > 0);
2783 cbProcessed += VBOXVDMACMD_HEADER_SIZE();
2784 if ((uint32_t)cbProcessed >= cbBuffer)
2785 {
2786 Assert((uint32_t)cbProcessed == cbBuffer);
2787 return VINF_SUCCESS;
2788 }
2789
2790 cbBuffer -= cbProcessed;
2791 pbBuffer += cbProcessed;
2792 }
2793 else
2794 {
2795 RT_UNTRUSTED_VALIDATED_FENCE();
2796 return cbProcessed; /* error status */
2797 }
2798 }
2799}
2800
2801/**
2802 * VDMA worker thread procedure, see vdmaVBVACtlEnableSubmitInternal().
2803 *
2804 * @thread VDMA
2805 */
2806static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
2807{
2808 RT_NOREF(hThreadSelf);
2809 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2810 PVGASTATE pVGAState = pVdma->pVGAState;
2811 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2812 int rc;
2813
2814 VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
2815
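/* Main loop: fetch the next item from the command VBVA, dispatch it by
   type, and block on the thread event when there is nothing to do. */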
2816 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2817 {
2818 uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd = NULL;
2819 uint32_t cbCmd = 0;
2820 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pbCmd, &cbCmd);
2821 switch (enmType)
2822 {
2823 case VBVAEXHOST_DATA_TYPE_CMD:
2824 vboxVDMACrCmdProcess(pVdma, pbCmd, cbCmd);
2825 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2826 VBVARaiseIrq(pVGAState, 0);
2827 break;
2828
2829 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2830 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd);
2831 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2832 break;
2833
2834 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2835 {
2836 bool fContinue = true;
2837 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd, &fContinue);
2838 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2839 if (fContinue)
2840 break;
2841 }
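/* A host control clearing fContinue asked the thread to stop; fall
   through to the event wait, after which the outer loop re-checks the
   termination state. */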
2842 RT_FALL_THRU();
2843
2844 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2845 rc = RTSemEventWaitNoResume(pVdma->Thread.hEvent, RT_INDEFINITE_WAIT);
2846 AssertMsg(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc));
2847 break;
2848
2849 default:
2850 WARN(("unexpected type %d\n", enmType));
2851 break;
2852 }
2853 }
2854
2855 VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
2856
2857 return VINF_SUCCESS;
2858}
2859
2860/**
2861 * Worker for vboxVDMACommand.
2862 *
2863 * @returns VBox status code of the operation.
2864 * @param pVdma VDMA instance data.
2865 * @param pCmd The command to process. Consider content volatile.
2866 * @param cbCmd Number of valid bytes at @a pCmd. This is at least
2867 * sizeof(VBOXVDMACBUF_DR).
2868 * @param pfAsyncCmd Flag to set if async command completion on chromium
2869 * requests. The input state is false, so it only ever needs to
2870 * be set to true.
2871 * @thread EMT
2872 */
2873static int vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmd,
2874 uint32_t cbCmd, bool *pfAsyncCmd)
2875{
2876 /*
2877 * Get the command buffer (volatile).
2878 */
2879 uint16_t const cbCmdBuf = pCmd->cbBuf;
2880 uint16_t const fCmdFlags = pCmd->fFlags;
2881 uint64_t const offVramBuf_or_GCPhysBuf = pCmd->Location.offVramBuf;
2882 AssertCompile(sizeof(pCmd->Location.offVramBuf) == sizeof(pCmd->Location.phBuf));
2883 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2884
2885 const uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmdBuf;
2886 PGMPAGEMAPLOCK Lock;
2887 bool fReleaseLocked = false;
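/* The command buffer is located in one of three ways: immediately
   following the descriptor, at an offset into VRAM, or at a
   guest-physical address (in which case it must fit within one page). */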
2888 if (fCmdFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
2889 {
2890 pbCmdBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
2891 AssertReturn((uintptr_t)&pbCmdBuf[cbCmdBuf] <= (uintptr_t)&((uint8_t *)pCmd)[cbCmd],
2892 VERR_INVALID_PARAMETER);
2893 RT_UNTRUSTED_VALIDATED_FENCE();
2894 }
2895 else if (fCmdFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
2896 {
2897 AssertReturn( offVramBuf_or_GCPhysBuf <= pVdma->pVGAState->vram_size
2898 && offVramBuf_or_GCPhysBuf + cbCmdBuf <= pVdma->pVGAState->vram_size,
2899 VERR_INVALID_PARAMETER);
2900 RT_UNTRUSTED_VALIDATED_FENCE();
2901
2902 pbCmdBuf = (uint8_t const RT_UNTRUSTED_VOLATILE_GUEST *)pVdma->pVGAState->vram_ptrR3 + offVramBuf_or_GCPhysBuf;
2903 }
2904 else
2905 {
2906 /* Make sure it doesn't cross a page. */
2907 AssertReturn((uint32_t)(offVramBuf_or_GCPhysBuf & X86_PAGE_OFFSET_MASK) + cbCmdBuf <= (uint32_t)X86_PAGE_SIZE,
2908 VERR_INVALID_PARAMETER);
2909 RT_UNTRUSTED_VALIDATED_FENCE();
2910
2911 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pVdma->pVGAState->pDevInsR3, offVramBuf_or_GCPhysBuf, 0 /*fFlags*/,
2912 (const void **)&pbCmdBuf, &Lock);
2913 AssertRCReturn(rc, rc); /* if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
2914 fReleaseLocked = true;
2915 }
2916
2917 /*
2918 * Process the command.
2919 */
2920 int rc = vboxVDMACmdExec(pVdma, pbCmdBuf, cbCmdBuf, pCmd, pfAsyncCmd);
2921 AssertRC(rc);
2922
2923 /* Clean up the command buffer. */
2924 if (fReleaseLocked)
2925 PDMDevHlpPhysReleasePageMappingLock(pVdma->pVGAState->pDevInsR3, &Lock);
2926 return rc;
2927}
2928
2929# if 0 /** @todo vboxVDMAControlProcess is unused */
2930static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2931{
2932 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2933 pCmd->i32Result = VINF_SUCCESS;
2934 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2935 AssertRC(rc);
2936}
2937# endif
2938
2939/**
2940 * Called by vgaR3Construct() to initialize the state.
2941 *
2942 * @returns VBox status code.
2943 */
2944int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
2945{
2946 RT_NOREF(cPipeElements);
2947 int rc;
2948 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
2949 Assert(pVdma);
2950 if (pVdma)
2951 {
2952 pVdma->pHgsmi = pVGAState->pHGSMI;
2953 pVdma->pVGAState = pVGAState;
2954
2955 rc = VINF_SUCCESS;
2956 if (RT_SUCCESS(rc))
2957 {
2958 VBoxVDMAThreadInit(&pVdma->Thread);
2959
2960 rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
2961 if (RT_SUCCESS(rc))
2962 {
2963 rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
2964 if (RT_SUCCESS(rc))
2965 {
2966 rc = RTCritSectInit(&pVdma->CalloutCritSect);
2967 if (RT_SUCCESS(rc))
2968 {
2969 pVGAState->pVdma = pVdma;
2970
2971 /* No HGCM service if VMSVGA is enabled. */
2972 if (!pVGAState->fVMSVGAEnabled)
2973 {
2974 int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
2975 }
2976 return VINF_SUCCESS;
2977 }
2978
2979 WARN(("RTCritSectInit failed %Rrc\n", rc));
2980 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
2981 }
2982 else
2983 WARN(("VBoxVBVAExHSInit failed %Rrc\n", rc));
2984 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
2985 }
2986 else
2987 WARN(("RTSemEventMultiCreate failed %Rrc\n", rc));
2988
2989 /* the timer is cleaned up automatically */
2990 }
2991 RTMemFree(pVdma);
2992 }
2993 else
2994 rc = VERR_OUT_OF_RESOURCES;
2995 return rc;
2996}
2997
2998/**
2999 * Called by vgaR3Reset() to do reset.
3000 */
3001void vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
3002{
3003 vdmaVBVACtlDisableSync(pVdma);
3004}
3005
3006/**
3007 * Called by vgaR3Destruct() to do cleanup.
3008 */
3009void vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
3010{
3011 if (!pVdma)
3012 return;
3013
3014 if (pVdma->pVGAState->fVMSVGAEnabled)
3015 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
3016 else
3017 {
3018 /** @todo Remove. It does nothing because pVdma->CmdVbva is already disabled at this point
3019 * as the result of the SharedOpenGL HGCM service unloading.
3020 */
3021 vdmaVBVACtlDisableSync(pVdma);
3022 }
3023 VBoxVDMAThreadCleanup(&pVdma->Thread);
3024 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
3025 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
3026 RTCritSectDelete(&pVdma->CalloutCritSect);
3027 RTMemFree(pVdma);
3028}
3029
3030/**
3031 * Handle VBVA_VDMA_CTL, see vbvaChannelHandler().
3032 *
3033 * @param pVdma The VDMA channel.
3034 * @param pCmd The control command to handle. Considered volatile.
3035 * @param cbCmd The size of the command. At least sizeof(VBOXVDMA_CTL).
3036 */
3037void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, VBOXVDMA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
3038{
3039 RT_NOREF(cbCmd);
3040 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
3041
3042 VBOXVDMA_CTL_TYPE enmCtl = pCmd->enmCtl;
3043 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
3044
3045 int rc;
3046 if (enmCtl < VBOXVDMA_CTL_TYPE_END)
3047 {
3048 RT_UNTRUSTED_VALIDATED_FENCE();
3049
3050 switch (enmCtl)
3051 {
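/* Enable, disable and flush require no extra processing here; they are
   simply acknowledged with the status set below. */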
3052 case VBOXVDMA_CTL_TYPE_ENABLE:
3053 rc = VINF_SUCCESS;
3054 break;
3055 case VBOXVDMA_CTL_TYPE_DISABLE:
3056 rc = VINF_SUCCESS;
3057 break;
3058 case VBOXVDMA_CTL_TYPE_FLUSH:
3059 rc = VINF_SUCCESS;
3060 break;
3061 case VBOXVDMA_CTL_TYPE_WATCHDOG:
3062 rc = VERR_NOT_SUPPORTED;
3063 break;
3064 default:
3065 AssertFailedBreakStmt(rc = VERR_IPE_NOT_REACHED_DEFAULT_CASE);
3066 }
3067 }
3068 else
3069 {
3070 RT_UNTRUSTED_VALIDATED_FENCE();
3071 ASSERT_GUEST_FAILED();
3072 rc = VERR_NOT_SUPPORTED;
3073 }
3074
3075 pCmd->i32Result = rc;
3076 rc = VBoxSHGSMICommandComplete(pIns, pCmd);
3077 AssertRC(rc);
3078}
3079
3080/**
3081 * Handle VBVA_VDMA_CMD, see vbvaChannelHandler().
3082 *
3083 * @param pVdma The VDMA channel.
3084 * @param pCmd The command to handle. Considered volatile.
3085 * @param cbCmd The size of the command. At least sizeof(VBOXVDMACBUF_DR).
3086 * @thread EMT
3087 */
3088void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
3089{
3090 /*
3091 * Process the command.
3092 */
3093 bool fAsyncCmd = false;
3094 int rc = vboxVDMACommandProcess(pVdma, pCmd, cbCmd, &fAsyncCmd);
3095
3096 /*
3097 * Complete the command unless it's asynchronous (e.g. chromium).
3098 */
3099 if (!fAsyncCmd)
3100 {
3101 pCmd->rc = rc;
3102 int rc2 = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmd);
3103 AssertRC(rc2);
3104 }
3105}
3106
3107
3108/**
3109 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
3110 * Used by vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlEnableDisableSubmit() }
3111 */
3112static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3113 int rc, void *pvContext)
3114{
3115 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
3116 VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pGCtl
3117 = (VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *)((uintptr_t)pCtl->u.cmd.pvCmd - sizeof(VBOXCMDVBVA_CTL));
3118 AssertRC(rc);
3119 pGCtl->i32Result = rc;
3120
3121 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
3122 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
3123 AssertRC(rc);
3124
3125 VBoxVBVAExHCtlFree(pVbva, pCtl);
3126}
3127
3128/**
3129 * Worker for vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlOpaqueHostSubmit().
3130 */
3131static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType,
3132 uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd, uint32_t cbCmd,
3133 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3134{
3135 int rc;
3136 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
3137 if (pHCtl)
3138 {
3139 pHCtl->u.cmd.pvCmd = pbCmd;
3140 pHCtl->u.cmd.cbCmd = cbCmd;
3141 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
3142 if (RT_SUCCESS(rc))
3143 return VINF_SUCCESS;
3144
3145 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3146 Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3147 }
3148 else
3149 {
3150 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3151 rc = VERR_NO_MEMORY;
3152 }
3153 return rc;
3154}
3155
3156/**
3157 * Handler for vboxCmdVBVACmdCtl()/VBOXCMDVBVACTL_TYPE_3DCTL.
3158 */
3159static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType,
3160 VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl)
3161{
3162 Assert(cbCtl >= sizeof(VBOXCMDVBVA_CTL)); /* Checked by the caller's caller, vbvaChannelHandler(). */
3163
3164 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
3165 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType,
3166 (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)(pCtl + 1),
3167 cbCtl - sizeof(VBOXCMDVBVA_CTL),
3168 vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3169 if (RT_SUCCESS(rc))
3170 return VINF_SUCCESS;
3171
3172 WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc));
3173 pCtl->i32Result = rc;
3174 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3175 AssertRC(rc);
3176 return VINF_SUCCESS;
3177}
3178
3179/**
3180 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE, Used by vdmaVBVACtlOpaqueHostSubmit()}
3181 */
3182static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3183 int rc, void *pvCompletion)
3184{
3185 VBOXCRCMDCTL *pVboxCtl = (VBOXCRCMDCTL *)pCtl->u.cmd.pvCmd;
3186 if (pVboxCtl->u.pfnInternal)
3187 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
3188 VBoxVBVAExHCtlFree(pVbva, pCtl);
3189}
3190
3191/**
3192 * Worker for vboxCmdVBVACmdHostCtl() and vboxCmdVBVACmdHostCtlSync().
3193 */
3194static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3195 PFNCRCTLCOMPLETION pfnCompletion, void *pvCompletion)
3196{
3197 pCmd->u.pfnInternal = (PFNRT)pfnCompletion;
3198 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
3199 (uint8_t *)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
3200 if (RT_FAILURE(rc))
3201 {
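/* VERR_INVALID_STATE here means the command VBVA is not active (see the
   GHH_BE_OPAQUE handling above), so hand the control directly to the
   HGCM service instead. */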
3202 if (rc == VERR_INVALID_STATE)
3203 {
3204 pCmd->u.pfnInternal = NULL;
3205 PVGASTATE pVGAState = pVdma->pVGAState;
3206 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
3207 if (!RT_SUCCESS(rc))
3208 WARN(("pfnCrHgcmCtlSubmit failed %Rrc\n", rc));
3209
3210 return rc;
3211 }
3212 WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc));
3213 return rc;
3214 }
3215
3216 return VINF_SUCCESS;
3217}
3218
3219/**
3220 * Called from vdmaVBVACtlThreadCreatedEnable().
3221 */
3222static int vdmaVBVANotifyEnable(PVGASTATE pVGAState)
3223{
3224 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3225 {
3226 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
3227 if (!RT_SUCCESS(rc))
3228 {
3229 WARN(("pfnVBVAEnable failed %Rrc\n", rc));
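/* Roll back: disable the monitors that were enabled so far. */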
3230 for (uint32_t j = 0; j < i; j++)
3231 {
3232 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
3233 }
3234
3235 return rc;
3236 }
3237 }
3238 return VINF_SUCCESS;
3239}
3240
3241/**
3242 * Called from vdmaVBVACtlThreadCreatedEnable() and vdmaVBVADisableProcess().
3243 */
3244static int vdmaVBVANotifyDisable(PVGASTATE pVGAState)
3245{
3246 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3247 pVGAState->pDrv->pfnVBVADisable(pVGAState->pDrv, i);
3248 return VINF_SUCCESS;
3249}
3250
3251/**
3252 * Hook that is called by vboxVDMAWorkerThread when it starts.
3253 *
3254 * @thread VDMA
3255 */
3256static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
3257 void *pvThreadContext, void *pvContext)
3258{
3259 RT_NOREF(pThread);
3260 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
3261 VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;
3262
3263 if (RT_SUCCESS(rc))
3264 {
3265 rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
3266 /* rc == VINF_SUCCESS would mean the actual state change has occurred */
3267 if (rc == VINF_SUCCESS)
3268 {
3269 /* We need to inform Main about the VBVA enable/disable.
3270 * Main expects notifications to be done from the main thread,
3271 * so submit it there. */
3272 PVGASTATE pVGAState = pVdma->pVGAState;
3273
3274 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3275 vdmaVBVANotifyEnable(pVGAState);
3276 else
3277 vdmaVBVANotifyDisable(pVGAState);
3278 }
3279 else if (RT_FAILURE(rc))
3280 WARN(("vboxVDMACrGuestCtlProcess failed %Rrc\n", rc));
3281 }
3282 else
3283 WARN(("vdmaVBVACtlThreadCreatedEnable is passed %Rrc\n", rc));
3284
3285 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
3286}
3287
3288/**
3289 * Worker for vdmaVBVACtlEnableDisableSubmitInternal() and vdmaVBVACtlEnableSubmitSync().
3290 */
3291static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable, bool fPaused,
3292 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3293{
3294 int rc;
3295 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva,
3296 fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3297 if (pHCtl)
3298 {
3299 pHCtl->u.cmd.pvCmd = pEnable;
3300 pHCtl->u.cmd.cbCmd = sizeof(*pEnable);
3301 pHCtl->pfnComplete = pfnComplete;
3302 pHCtl->pvComplete = pvComplete;
3303
3304 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3305 if (RT_SUCCESS(rc))
3306 return VINF_SUCCESS;
3307
3308 WARN(("VBoxVDMAThreadCreate failed %Rrc\n", rc));
3309 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3310 }
3311 else
3312 {
3313 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3314 rc = VERR_NO_MEMORY;
3315 }
3316
3317 return rc;
3318}
3319
3320/**
3321 * Worker for vboxVDMASaveLoadExecPerform().
3322 */
3323static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3324{
3325 VBVAENABLE Enable = {0};
3326 Enable.u32Flags = VBVA_F_ENABLE;
3327 Enable.u32Offset = offVram;
3328
3329 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3330 Data.rc = VERR_NOT_IMPLEMENTED;
3331 int rc = RTSemEventCreate(&Data.hEvent);
3332 if (!RT_SUCCESS(rc))
3333 {
3334 WARN(("RTSemEventCreate failed %Rrc\n", rc));
3335 return rc;
3336 }
3337
3338 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3339 if (RT_SUCCESS(rc))
3340 {
3341 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3342 if (RT_SUCCESS(rc))
3343 {
3344 rc = Data.rc;
3345 if (!RT_SUCCESS(rc))
3346 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
3347 }
3348 else
3349 WARN(("RTSemEventWait failed %Rrc\n", rc));
3350 }
3351 else
3352 WARN(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3353
3354 RTSemEventDestroy(Data.hEvent);
3355
3356 return rc;
3357}
3358
3359/**
3360 * Worker for vdmaVBVACtlEnableDisableSubmitInternal().
3361 */
3362static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable,
3363 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3364{
3365 int rc;
3366 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3367 {
3368 WARN(("VBoxVBVAExHSIsDisabled: disabled\n"));
3369 return VINF_SUCCESS;
3370 }
3371
3372 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3373 if (!pHCtl)
3374 {
3375 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3376 return VERR_NO_MEMORY;
3377 }
3378
3379 pHCtl->u.cmd.pvCmd = pEnable;
3380 pHCtl->u.cmd.cbCmd = sizeof(*pEnable);
3381 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3382 if (RT_SUCCESS(rc))
3383 return VINF_SUCCESS;
3384
3385 WARN(("vdmaVBVACtlSubmit failed rc %Rrc\n", rc));
3386 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3387 return rc;
3388}
3389
3390/**
3391 * Worker for vdmaVBVACtlEnableDisableSubmit().
3392 */
3393static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable,
3394 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3395{
3396 bool fEnable = (pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE;
3397 if (fEnable)
3398 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3399 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3400}
3401
3402/**
3403 * Handler for vboxCmdVBVACmdCtl/VBOXCMDVBVACTL_TYPE_ENABLE.
3404 */
3405static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable)
3406{
3407 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3408 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3409 if (RT_SUCCESS(rc))
3410 return VINF_SUCCESS;
3411
3412 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %Rrc\n", rc));
3413 pEnable->Hdr.i32Result = rc;
3414 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3415 AssertRC(rc);
3416 return VINF_SUCCESS;
3417}
3418
3419/**
3420 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
3421 * Used by vdmaVBVACtlSubmitSync() and vdmaVBVACtlEnableSubmitSync().}
3422 */
3423static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3424 int rc, void *pvContext)
3425{
3426 RT_NOREF(pVbva, pCtl);
3427 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION *)pvContext;
3428 pData->rc = rc;
3429 rc = RTSemEventSignal(pData->hEvent);
3430 if (!RT_SUCCESS(rc))
3431 WARN(("RTSemEventSignal failed %Rrc\n", rc));
3432}
3433
3434
3435 /**
3436 * Submits a control command and synchronously waits for its completion.
3437 */
3438static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3439{
3440 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3441 Data.rc = VERR_NOT_IMPLEMENTED;
3442 Data.hEvent = NIL_RTSEMEVENT;
3443 int rc = RTSemEventCreate(&Data.hEvent);
3444 if (RT_SUCCESS(rc))
3445 {
3446 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3447 if (RT_SUCCESS(rc))
3448 {
3449 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3450 if (RT_SUCCESS(rc))
3451 {
3452 rc = Data.rc;
3453 if (RT_FAILURE(rc))
3454 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
3455 }
3456 else
3457 WARN(("RTSemEventWait failed %Rrc\n", rc));
3458 }
3459 else
3460 Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3461
3462 RTSemEventDestroy(Data.hEvent);
3463 }
3464 else
3465 WARN(("RTSemEventCreate failed %Rrc\n", rc));
3466 return rc;
3467}
3468
3469/**
3470 * Worker for vboxVDMASaveStateExecPrep().
3471 */
3472static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3473{
3474 VBVAEXHOSTCTL Ctl;
3475 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3476 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3477}
3478
3479/**
3480 * Worker for vboxVDMASaveLoadExecPerform() and vboxVDMASaveStateExecDone().
3481 */
3482static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3483{
3484 VBVAEXHOSTCTL Ctl;
3485 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3486 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3487}
3488
3489/**
3490 * Worker for vboxCmdVBVACmdSubmit(), vboxCmdVBVACmdFlush() and vboxCmdVBVATimerRefresh().
3491 */
3492static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3493{
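    /* VINF_SUCCESS means commands are pending, so poke the worker thread; the
       other informational statuses are treated as nothing-to-do. */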
3494 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3495 switch (rc)
3496 {
3497 case VINF_SUCCESS:
3498 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3499 case VINF_ALREADY_INITIALIZED:
3500 case VINF_EOF:
3501 case VERR_INVALID_STATE:
3502 return VINF_SUCCESS;
3503 default:
3504 Assert(RT_SUCCESS(rc));
3505 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3506 }
3507}
3508
3509
3510/**
3511 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmit}
3512 */
3513int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3514 struct VBOXCRCMDCTL *pCmd,
3515 uint32_t cbCmd,
3516 PFNCRCTLCOMPLETION pfnCompletion,
3517 void *pvCompletion)
3518{
3519 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3520 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3521 if (pVdma == NULL)
3522 return VERR_INVALID_STATE;
3523 pCmd->CalloutList.List.pNext = NULL;
3524 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3525}
3526
3527/**
3528 * Argument package from vboxCmdVBVACmdHostCtlSync to vboxCmdVBVACmdHostCtlSyncCb.
3529 */
3530typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
3531{
3532 struct VBOXVDMAHOST *pVdma;
3533 uint32_t fProcessing;
3534 int rc;
3535} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3536
3537/**
3538 * @callback_method_impl{FNCRCTLCOMPLETION, Used by vboxCmdVBVACmdHostCtlSync.}
3539 */
3540static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
3541{
3542 RT_NOREF(pCmd, cbCmd);
3543 VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC *)pvCompletion;
3544
3545 pData->rc = rc;
3546
3547 struct VBOXVDMAHOST *pVdma = pData->pVdma;
3548
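    /* Count this completion so vboxCmdVBVACmdHostCtlSync() can tell whether it
       is safe to reset the multi event semaphore once it is done waiting. */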
3549 ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);
3550
3551 pData->fProcessing = 0;
3552
3553 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3554}
3555
3556/**
3557 * Worker for vboxVDMACrCtlHgsmiSetup.
3558 *
3559 * @note r=bird: not to be confused with the callout function below. sigh.
3560 */
3561static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd,
3562 VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3563{
3564 pEntry->pfnCb = pfnCb;
3565 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3566 if (RT_SUCCESS(rc))
3567 {
3568 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3569 RTCritSectLeave(&pVdma->CalloutCritSect);
3570
3571 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3572 }
3573 else
3574 WARN(("RTCritSectEnter failed %Rrc\n", rc));
3575
3576 return rc;
3577}
3578
3579
3580/**
3581 * Worker for vboxCmdVBVACmdHostCtlSync.
3582 */
3583static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3584{
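    /* Drain the callout list: pop entries one at a time under the critical
       section and invoke each callback outside the lock. */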
3585 int rc = VINF_SUCCESS;
3586 for (;;)
3587 {
3588 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3589 if (RT_SUCCESS(rc))
3590 {
3591 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3592 if (pEntry)
3593 RTListNodeRemove(&pEntry->Node);
3594 RTCritSectLeave(&pVdma->CalloutCritSect);
3595
3596 if (!pEntry)
3597 break;
3598
3599 pEntry->pfnCb(pEntry);
3600 }
3601 else
3602 {
3603 WARN(("RTCritSectEnter failed %Rrc\n", rc));
3604 break;
3605 }
3606 }
3607
3608 return rc;
3609}
3610
3611/**
3612 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmitSync}
3613 */
3614DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface, struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd)
3615{
3616 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3617 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3618 if (pVdma == NULL)
3619 return VERR_INVALID_STATE;
3620
3621 VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
3622 Data.pVdma = pVdma;
3623 Data.fProcessing = 1;
3624 Data.rc = VERR_INTERNAL_ERROR;
3625 RTListInit(&pCmd->CalloutList.List);
3626 int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
3627 if (RT_FAILURE(rc))
3628 {
3629 WARN(("vdmaVBVACtlOpaqueHostSubmit failed %Rrc\n", rc));
3630 return rc;
3631 }
3632
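    /* Wait for the completion callback to clear fProcessing.  The same multi
       event is signalled both for completions and for callout requests queued
       via vboxCmdVBVACmdCallout(), hence the timeout-based wait and the callout
       processing on every iteration. */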
3633 while (Data.fProcessing)
3634 {
3635 /* Poll infrequently to make sure no completed message has been missed. */
3636 RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);
3637
3638 vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);
3639
3640 if (Data.fProcessing)
3641 RTThreadYield();
3642 }
3643
3644 /* One final pass over the callout list in case an entry was queued after the last wait. */
3645 vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);
3646
3647 /* 'Our' message has been processed, so we should reset the semaphore.
3648 * It is still possible that another message has been processed
3649 * and the semaphore has been signalled again.
3650 * Reset it only if no other messages have completed.
3651 */
3652 int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
3653 Assert(c >= 0);
3654 if (!c)
3655 RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);
3656
3657 rc = Data.rc;
3658 if (RT_FAILURE(rc))
3659 WARN(("host call failed %Rrc\n", rc));
3660
3661 return rc;
3662}
3663
3664/**
3665 * Handler for VBVA_CMDVBVA_CTL, see vbvaChannelHandler().
3666 *
3667 * @returns VBox status code
3668 * @param pVGAState The VGA state.
3669 * @param pCtl The control command.
3670 * @param cbCtl The size of it. This is at least
3671 * sizeof(VBOXCMDVBVA_CTL).
3672 * @thread EMT
3673 */
3674int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl)
3675{
3676 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3677 uint32_t uType = pCtl->u32Type;
3678 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
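    /* u32Type is a local copy of guest-controlled data; the fence above ensures
       the value validated below cannot be re-read from guest memory afterwards. */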
3679
3680 if ( uType == VBOXCMDVBVACTL_TYPE_3DCTL
3681 || uType == VBOXCMDVBVACTL_TYPE_RESIZE
3682 || uType == VBOXCMDVBVACTL_TYPE_ENABLE)
3683 {
3684 RT_UNTRUSTED_VALIDATED_FENCE();
3685
3686 switch (uType)
3687 {
3688 case VBOXCMDVBVACTL_TYPE_3DCTL:
3689 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3690
3691 case VBOXCMDVBVACTL_TYPE_RESIZE:
3692 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3693
3694 case VBOXCMDVBVACTL_TYPE_ENABLE:
3695 ASSERT_GUEST_BREAK(cbCtl == sizeof(VBOXCMDVBVA_CTL_ENABLE));
3696 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_GUEST *)pCtl);
3697
3698 default:
3699 AssertFailed();
3700 }
3701 }
3702
3703 pCtl->i32Result = VERR_INVALID_PARAMETER;
3704 int rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3705 AssertRC(rc);
3706 return VINF_SUCCESS;
3707}
3708
3709/**
3710 * Handler for VBVA_CMDVBVA_SUBMIT, see vbvaChannelHandler().
3711 *
3712 * @thread EMT
3713 */
3714int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3715{
3716 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3717 {
3718 WARN(("vdma VBVA is disabled\n"));
3719 return VERR_INVALID_STATE;
3720 }
3721
3722 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3723}
3724
3725/**
3726 * Handler for VBVA_CMDVBVA_FLUSH, see vbvaChannelHandler().
3727 *
3728 * @thread EMT
3729 */
3730int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3731{
3732 WARN(("flush\n"));
3733 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3734 {
3735 WARN(("vdma VBVA is disabled\n"));
3736 return VERR_INVALID_STATE;
3737 }
3738 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3739}
3740
3741/**
3742 * Called from vgaTimerRefresh().
3743 */
3744void vboxCmdVBVATimerRefresh(PVGASTATE pVGAState)
3745{
3746 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3747 return;
3748 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3749}
3750
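/**
 * Checks whether command VBVA is enabled.
 */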
3751bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3752{
3753 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3754}
3755
3756
3757
3758/*
3759 *
3760 *
3761 * Saved state.
3762 * Saved state.
3763 * Saved state.
3764 *
3765 *
3766 */
3767
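/**
 * Pauses command VBVA in preparation for saving the device state, falling back
 * to notifying the 3D backend directly when command VBVA is not active.
 */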
3768int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
3769{
3770 int rc = vdmaVBVAPause(pVdma);
3771 if (RT_SUCCESS(rc))
3772 return VINF_SUCCESS;
3773
3774 if (rc != VERR_INVALID_STATE)
3775 {
3776 WARN(("vdmaVBVAPause failed %Rrc\n", rc));
3777 return rc;
3778 }
3779
3780# ifdef DEBUG_misha
3781 WARN(("debug prep"));
3782# endif
3783
3784 PVGASTATE pVGAState = pVdma->pVGAState;
3785 PVBOXVDMACMD_CHROMIUM_CTL pCmd;
3786 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof(*pCmd));
3787 if (pCmd)
3788 {
3789 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3790 AssertRC(rc);
3791 if (RT_SUCCESS(rc))
3792 rc = vboxVDMACrCtlGetRc(pCmd);
3793 vboxVDMACrCtlRelease(pCmd);
3794 return rc;
3795 }
3796 return VERR_NO_MEMORY;
3797}
3798
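/**
 * Resumes command VBVA after the device state has been saved, with the same
 * 3D backend fallback as vboxVDMASaveStateExecPrep().
 */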
3799int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
3800{
3801 int rc = vdmaVBVAResume(pVdma);
3802 if (RT_SUCCESS(rc))
3803 return VINF_SUCCESS;
3804
3805 if (rc != VERR_INVALID_STATE)
3806 {
3807 WARN(("vdmaVBVAResume failed %Rrc\n", rc));
3808 return rc;
3809 }
3810
3811# ifdef DEBUG_misha
3812 WARN(("debug done"));
3813# endif
3814
3815 PVGASTATE pVGAState = pVdma->pVGAState;
3816 PVBOXVDMACMD_CHROMIUM_CTL pCmd;
3817 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof(*pCmd));
3818 Assert(pCmd);
3819 if (pCmd)
3820 {
3821 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3822 AssertRC(rc);
3823 if (RT_SUCCESS(rc))
3824 rc = vboxVDMACrCtlGetRc(pCmd);
3825 vboxVDMACrCtlRelease(pCmd);
3826 return rc;
3827 }
3828 return VERR_NO_MEMORY;
3829}
3830
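/**
 * Saves the command VBVA state to @a pSSM.
 */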
3831int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3832{
3833 int rc;
3834
3835 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3836 {
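        /* UINT32_MAX in the saved state marks command VBVA as not enabled. */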
3837 rc = SSMR3PutU32(pSSM, UINT32_MAX);
3838 AssertRCReturn(rc, rc);
3839 return VINF_SUCCESS;
3840 }
3841
3842 PVGASTATE pVGAState = pVdma->pVGAState;
3843 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
3844
3845 rc = SSMR3PutU32(pSSM, (uint32_t)((uintptr_t)pVdma->CmdVbva.pVBVA - (uintptr_t)pu8VramBase));
3846 AssertRCReturn(rc, rc);
3847
3848 VBVAEXHOSTCTL HCtl;
3849 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
3850 HCtl.u.state.pSSM = pSSM;
3851 HCtl.u.state.u32Version = 0;
3852 return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3853}
3854
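/**
 * Restores the command VBVA state from @a pSSM.
 */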
3855int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
3856{
3857 uint32_t u32;
3858 int rc = SSMR3GetU32(pSSM, &u32);
3859 AssertLogRelRCReturn(rc, rc);
3860
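    /* UINT32_MAX means command VBVA was not enabled when the state was saved,
       in which case there is nothing further to load. */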
3861 if (u32 != UINT32_MAX)
3862 {
3863 rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
3864 AssertLogRelRCReturn(rc, rc);
3865
3866 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
3867
3868 VBVAEXHOSTCTL HCtl;
3869 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
3870 HCtl.u.state.pSSM = pSSM;
3871 HCtl.u.state.u32Version = u32Version;
3872 rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3873 AssertLogRelRCReturn(rc, rc);
3874
3875 rc = vdmaVBVAResume(pVdma);
3876 AssertLogRelRCReturn(rc, rc);
3877
3878 return VINF_SUCCESS;
3879 }
3880
3881 return VINF_SUCCESS;
3882}
3883
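/**
 * Called when saved state loading has completed.
 */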
3884int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
3885{
3886 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3887 return VINF_SUCCESS;
3888
3889/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
3890 * the purpose of this code is. */
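    /* Tell the command VBVA backend that saved state loading is done by
       submitting a fire-and-forget VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE control. */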
3891 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
3892 if (!pHCtl)
3893 {
3894 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3895 return VERR_NO_MEMORY;
3896 }
3897
3898 /* sanity */
3899 pHCtl->u.cmd.pvCmd = NULL;
3900 pHCtl->u.cmd.cbCmd = 0;
3901
3902 /* NULL completion will just free the ctl up */
3903 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
3904 if (RT_FAILURE(rc))
3905 {
3906 Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3907 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3908 return rc;
3909 }
3910
3911 return VINF_SUCCESS;
3912}
3913