VirtualBox

source: vbox/trunk/src/VBox/Devices/PC/DevDMA.cpp@ 32204

Last change on this file since 32204 was 28800, checked in by vboxsync, 14 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 25.4 KB
Line 
1/* $Id: DevDMA.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * DevDMA - DMA Controller Device.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 * --------------------------------------------------------------------
17 *
18 * This code is based on:
19 *
20 * QEMU DMA emulation
21 *
22 * Copyright (c) 2003 Vassili Karpov (malc)
23 *
24 * Permission is hereby granted, free of charge, to any person obtaining a copy
25 * of this software and associated documentation files (the "Software"), to deal
26 * in the Software without restriction, including without limitation the rights
27 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
28 * copies of the Software, and to permit persons to whom the Software is
29 * furnished to do so, subject to the following conditions:
30 *
31 * The above copyright notice and this permission notice shall be included in
32 * all copies or substantial portions of the Software.
33 *
34 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
37 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
38 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
39 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
40 * THE SOFTWARE.
41 */
42
43#ifdef VBOX
44
45/*******************************************************************************
46* Header Files *
47*******************************************************************************/
48#include <VBox/pdmdev.h>
49#include <VBox/err.h>
50
51#define LOG_GROUP LOG_GROUP_DEFAULT ///@todo LOG_GROUP_DEV_DMA
52#include <VBox/log.h>
53#include <iprt/assert.h>
54#include <iprt/string.h>
55
56#include <stdio.h>
57#include <stdlib.h>
58
59#include "../Builtins.h"
60#include "../vl_vbox.h"
61typedef PFNDMATRANSFERHANDLER DMA_transfer_handler;
62
63#else /* !VBOX */
64#include "vl.h"
65#endif
66
67/* #define DEBUG_DMA */
68
69#ifndef VBOX
70#ifndef __WIN32__
71#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
72#ifdef DEBUG_DMA
73#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
74#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
75#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
76#else
77#define lwarn(...)
78#define linfo(...)
79#define ldebug(...)
80#endif
81#else
82#define dolog()
83#define lwarn()
84#define linfo()
85#define ldebug()
86#endif
87#else /* VBOX */
88
# ifdef LOG_ENABLED
#  define DEBUG_DMA
/** Debug printf helper: forwards a printf-style format to the VBox release
 *  logger when logging is enabled; compiles to an empty inline otherwise. */
static void DMA_DPRINTF (const char *fmt, ...)
{
    if (LogIsEnabled ()) {
        va_list args;
        va_start (args, fmt);
        RTLogLogger (NULL, NULL, "dma: %N", fmt, &args); /* %N - nested va_list * type formatting call. */
        va_end (args);
    }
}
# else
DECLINLINE(void) DMA_DPRINTF(const char *pszFmt, ...) {}
# endif
103
104# define dolog DMA_DPRINTF
105# define lwarn DMA_DPRINTF
106# define linfo DMA_DPRINTF
107# define ldebug DMA_DPRINTF
108
109#endif /* VBOX */
110
111#define LENOFA(a) ((int) (sizeof(a)/sizeof(a[0])))
112
/** Per-channel state of one 8237A DMA channel. */
struct dma_regs {
    unsigned int now[2];        /**< Current address (ADDR) / transfer position (COUNT). */
    uint16_t base[2];           /**< Programmed base address (ADDR) and base count (COUNT). */
    uint8_t mode;               /**< Mode register (transfer type, auto-init, direction, op mode). */
    uint8_t page;               /**< Low page register, address bits 16-23. */
    uint8_t pageh;              /**< High page register, address bits 24-30 (EISA extension). */
    uint8_t dack;               /**< DACK state (not actively used here). */
    uint8_t eop;                /**< EOP state (not actively used here). */
    DMA_transfer_handler transfer_handler;  /**< Device callback performing the actual transfer. */
    void *opaque;               /**< User argument for transfer_handler. */
};
124
125#define ADDR 0
126#define COUNT 1
127
/** State of one 8237A controller (4 channels). */
struct dma_cont {
    uint8_t status;             /**< Status register: TC bits 0-3, request bits 4-7. */
    uint8_t command;            /**< Command register (only 0 is really supported). */
    uint8_t mask;               /**< Channel mask bits; 1 = channel masked (disabled). */
    uint8_t flip_flop;          /**< Byte pointer flip-flop for 16-bit register access. */
    unsigned int dshift;        /**< Port address shift: 0 for the 8-bit, 1 for the 16-bit controller. */
    struct dma_regs regs[4];    /**< The four channels. */
};
136
/** Full DMA device instance data: two cascaded 8237A controllers. */
typedef struct {
    PPDMDEVINS pDevIns;                     /**< Owning device instance. */
    PCPDMDMACHLP pHlp;                      /**< PDM DMAC helper callbacks. */
    struct dma_cont dma_controllers[2];     /**< [0] = 8-bit ch 0-3, [1] = 16-bit ch 4-7. */
} DMAState;
142
/** Bits of the 8237A command register; everything except plain operation
 *  (value 0) is rejected by write_cont as unsupported. */
enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK

};
157
158static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
159
160static void write_page (void *opaque, uint32_t nport, uint32_t data)
161{
162 struct dma_cont *d = (struct dma_cont*)opaque;
163 int ichan;
164
165 ichan = channels[nport & 7];
166 if (-1 == ichan) {
167 dolog ("invalid channel %#x %#x\n", nport, data);
168 return;
169 }
170 d->regs[ichan].page = data;
171}
172
173static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
174{
175 struct dma_cont *d = (struct dma_cont*)opaque;
176 int ichan;
177
178 ichan = channels[nport & 7];
179 if (-1 == ichan) {
180 dolog ("invalid channel %#x %#x\n", nport, data);
181 return;
182 }
183 d->regs[ichan].pageh = data;
184}
185
186static uint32_t read_page (void *opaque, uint32_t nport)
187{
188 struct dma_cont *d = (struct dma_cont*)opaque;
189 int ichan;
190
191 ichan = channels[nport & 7];
192 if (-1 == ichan) {
193 dolog ("invalid channel read %#x\n", nport);
194 return 0;
195 }
196 return d->regs[ichan].page;
197}
198
199static uint32_t read_pageh (void *opaque, uint32_t nport)
200{
201 struct dma_cont *d = (struct dma_cont*)opaque;
202 int ichan;
203
204 ichan = channels[nport & 7];
205 if (-1 == ichan) {
206 dolog ("invalid channel read %#x\n", nport);
207 return 0;
208 }
209 return d->regs[ichan].pageh;
210}
211
212static inline void init_chan (struct dma_cont *d, int ichan)
213{
214 struct dma_regs *r;
215
216 r = d->regs + ichan;
217 r->now[ADDR] = r->base[ADDR] << d->dshift;
218 r->now[COUNT] = 0;
219}
220
221static inline int getff (struct dma_cont *d)
222{
223 int ff;
224
225 ff = d->flip_flop;
226 d->flip_flop = !ff;
227 return ff;
228}
229
230static uint32_t read_chan (void *opaque, uint32_t nport)
231{
232 struct dma_cont *d = (struct dma_cont*)opaque;
233 int ichan, nreg, iport, ff, val, dir;
234 struct dma_regs *r;
235
236 iport = (nport >> d->dshift) & 0x0f;
237 ichan = iport >> 1;
238 nreg = iport & 1;
239 r = d->regs + ichan;
240
241 dir = ((r->mode >> 5) & 1) ? -1 : 1;
242 ff = getff (d);
243 if (nreg)
244 val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
245 else
246 val = r->now[ADDR] + r->now[COUNT] * dir;
247
248 ldebug ("read_chan %#x -> %d\n", iport, val);
249 return (val >> (d->dshift + (ff << 3))) & 0xff;
250}
251
252static void write_chan (void *opaque, uint32_t nport, uint32_t data)
253{
254 struct dma_cont *d = (struct dma_cont*)opaque;
255 int iport, ichan, nreg;
256 struct dma_regs *r;
257
258 iport = (nport >> d->dshift) & 0x0f;
259 ichan = iport >> 1;
260 nreg = iport & 1;
261 r = d->regs + ichan;
262 if (getff (d)) {
263 r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
264 init_chan (d, ichan);
265 } else {
266 r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
267 }
268}
269
/**
 * Write to one of the controller registers (command, request, mask, mode,
 * flip-flop clear, master clear, mask clear, mask write).
 * @param opaque    Pointer to the struct dma_cont being programmed.
 * @param nport     The I/O port written (already controller-relative).
 * @param data      The byte written by the guest.
 */
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = (struct dma_cont*)opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* command */
        /* Only the trivial command (0) is implemented; anything selecting
           memory-to-memory, compressed timing etc. is refused. */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:                  /* request register */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);      /* set software DMA request */
        }
        else {
            d->status &= ~(1 << (ichan + 4));   /* clear software DMA request */
        }
        d->status &= ~(1 << ichan);             /* clear the channel's TC bit */
        break;

    case 0x0a:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        break;

    case 0x0b:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d:                  /* reset (master clear) */
        d->flip_flop = 0;
        d->mask = ~0;           /* all channels masked after reset */
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e:                  /* clear mask for all channels */
        d->mask = 0;
        break;

    case 0x0f:                  /* write mask for all channels */
        d->mask = data;
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
353
354static uint32_t read_cont (void *opaque, uint32_t nport)
355{
356 struct dma_cont *d = (struct dma_cont*)opaque;
357 int iport, val;
358
359 iport = (nport >> d->dshift) & 0x0f;
360 switch (iport) {
361 case 0x08: /* status */
362 val = d->status;
363 d->status &= 0xf0;
364 break;
365 case 0x0f: /* mask */
366 val = d->mask;
367 break;
368 default:
369 val = 0;
370 break;
371 }
372
373 ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
374 return val;
375}
376
377static uint8_t DMA_get_channel_mode (DMAState *s, int nchan)
378{
379 return s->dma_controllers[nchan > 3].regs[nchan & 3].mode;
380}
381
382static void DMA_hold_DREQ (DMAState *s, int nchan)
383{
384 int ncont, ichan;
385
386 ncont = nchan > 3;
387 ichan = nchan & 3;
388 linfo ("held cont=%d chan=%d\n", ncont, ichan);
389 s->dma_controllers[ncont].status |= 1 << (ichan + 4);
390}
391
392static void DMA_release_DREQ (DMAState *s, int nchan)
393{
394 int ncont, ichan;
395
396 ncont = nchan > 3;
397 ichan = nchan & 3;
398 linfo ("released cont=%d chan=%d\n", ncont, ichan);
399 s->dma_controllers[ncont].status &= ~(1 << (ichan + 4));
400}
401
/**
 * Run one pending channel: invoke the device's registered transfer handler
 * and record how far the transfer got.
 * @param s      The DMA state.
 * @param ncont  Controller index (0 or 1).
 * @param ichan  Channel index within the controller (0-3).
 */
static void channel_run (DMAState *s, int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &s->dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    r = s->dma_controllers[ncont].regs + ichan;
    /* Handler gets the global channel number (0-7), current position and the
       total transfer size in bytes (count+1 units, doubled on controller 1);
       it returns the new position. */
    n = r->transfer_handler (s->pDevIns, r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    r->now[COUNT] = n;
    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}
426
427static void DMA_run (DMAState *s)
428{
429 struct dma_cont *d;
430 int icont, ichan;
431
432 d = s->dma_controllers;
433
434 for (icont = 0; icont < 2; icont++, d++) {
435 for (ichan = 0; ichan < 4; ichan++) {
436 int mask;
437
438 mask = 1 << ichan;
439
440 if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4))))
441 channel_run (s, icont, ichan);
442 }
443 }
444}
445
446static void DMA_register_channel (DMAState *s, unsigned nchan,
447 DMA_transfer_handler transfer_handler,
448 void *opaque)
449{
450 struct dma_regs *r;
451 int ichan, ncont;
452 LogFlow (("DMA_register_channel: s=%p nchan=%d transfer_handler=%p opaque=%p\n",
453 s, nchan, transfer_handler, opaque));
454
455 ncont = nchan > 3;
456 ichan = nchan & 3;
457
458 r = s->dma_controllers[ncont].regs + ichan;
459 r->transfer_handler = transfer_handler;
460 r->opaque = opaque;
461}
462
463static uint32_t DMA_read_memory (DMAState *s,
464 unsigned nchan,
465 void *buf,
466 uint32_t pos,
467 uint32_t len)
468{
469 struct dma_regs *r = &s->dma_controllers[nchan > 3].regs[nchan & 3];
470 uint32_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
471
472 if (r->mode & 0x20) {
473 unsigned i;
474 uint8_t *p = (uint8_t*)buf;
475
476#ifdef VBOX
477 PDMDevHlpPhysRead (s->pDevIns, addr - pos - len, buf, len);
478#else
479 cpu_physical_memory_read (addr - pos - len, buf, len);
480#endif
481 /* What about 16bit transfers? */
482 for (i = 0; i < len >> 1; i++) {
483 uint8_t b = p[len - i - 1];
484 p[i] = b;
485 }
486 }
487 else
488#ifdef VBOX
489 PDMDevHlpPhysRead (s->pDevIns, addr + pos, buf, len);
490#else
491 cpu_physical_memory_read (addr + pos, buf, len);
492#endif
493 return len;
494}
495
496static uint32_t DMA_write_memory (DMAState *s,
497 unsigned nchan,
498 const void *buf,
499 uint32_t pos,
500 uint32_t len)
501{
502 struct dma_regs *r = &s->dma_controllers[nchan > 3].regs[nchan & 3];
503 uint32_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
504
505 if (r->mode & 0x20) {
506 unsigned i;
507 uint8_t *p = (uint8_t *) buf;
508
509#ifdef VBOX
510 PDMDevHlpPhysWrite (s->pDevIns, addr - pos - len, buf, len);
511#else
512 cpu_physical_memory_write (addr - pos - len, buf, len);
513#endif
514 /* What about 16bit transfers? */
515 for (i = 0; i < len; i++) {
516 uint8_t b = p[len - i - 1];
517 p[i] = b;
518 }
519 }
520 else
521#ifdef VBOX
522 PDMDevHlpPhysWrite (s->pDevIns, addr + pos, buf, len);
523#else
524 cpu_physical_memory_write (addr + pos, buf, len);
525#endif
526
527 return len;
528}
529
530
#ifndef VBOX
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    /* Kick the CPU out of its execution loop so the main loop can service
       the pending DMA transfer promptly. */
    cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
}
#endif
538
539static void dma_reset(void *opaque)
540{
541 struct dma_cont *d = (struct dma_cont*)opaque;
542 write_cont (d, (0x0d << d->dshift), 0);
543}
544
545#ifdef VBOX
546#define IO_READ_PROTO(n) \
547static DECLCALLBACK(int) io_read_##n (PPDMDEVINS pDevIns, \
548 void *pvUser, \
549 RTIOPORT Port, \
550 uint32_t *pu32, \
551 unsigned cb)
552
553
554#define IO_WRITE_PROTO(n) \
555static DECLCALLBACK(int) io_write_##n (PPDMDEVINS pDevIns, \
556 void *pvUser, \
557 RTIOPORT Port, \
558 uint32_t u32, \
559 unsigned cb)
560
561IO_WRITE_PROTO (chan)
562{
563 if (cb == 1) {
564 write_chan (pvUser, Port, u32);
565 }
566#ifdef PARANOID
567 else {
568 Log (("Unknown write to %#x of size %d, value %#x\n",
569 Port, cb, u32));
570 }
571#endif
572 return VINF_SUCCESS;
573}
574
575IO_WRITE_PROTO (page)
576{
577 if (cb == 1) {
578 write_page (pvUser, Port, u32);
579 }
580#ifdef PARANOID
581 else {
582 Log (("Unknown write to %#x of size %d, value %#x\n",
583 Port, cb, u32));
584 }
585#endif
586 return VINF_SUCCESS;
587}
588
589IO_WRITE_PROTO (pageh)
590{
591 if (cb == 1) {
592 write_pageh (pvUser, Port, u32);
593 }
594#ifdef PARANOID
595 else {
596 Log (("Unknown write to %#x of size %d, value %#x\n",
597 Port, cb, u32));
598 }
599#endif
600 return VINF_SUCCESS;
601}
602
603IO_WRITE_PROTO (cont)
604{
605 if (cb == 1) {
606 write_cont (pvUser, Port, u32);
607 }
608#ifdef PARANOID
609 else {
610 Log (("Unknown write to %#x of size %d, value %#x\n",
611 Port, cb, u32));
612 }
613#endif
614 return VINF_SUCCESS;
615}
616
617IO_READ_PROTO (chan)
618{
619 if (cb == 1) {
620 *pu32 = read_chan (pvUser, Port);
621 return VINF_SUCCESS;
622 }
623 else {
624 return VERR_IOM_IOPORT_UNUSED;
625 }
626}
627
628IO_READ_PROTO (page)
629{
630 if (cb == 1) {
631 *pu32 = read_page (pvUser, Port);
632 return VINF_SUCCESS;
633 }
634 else {
635 return VERR_IOM_IOPORT_UNUSED;
636 }
637}
638
639IO_READ_PROTO (pageh)
640{
641 if (cb == 1) {
642 *pu32 = read_pageh (pvUser, Port);
643 return VINF_SUCCESS;
644 }
645 else {
646 return VERR_IOM_IOPORT_UNUSED;
647 }
648}
649
650IO_READ_PROTO (cont)
651{
652 if (cb == 1) {
653 *pu32 = read_cont (pvUser, Port);
654 return VINF_SUCCESS;
655 }
656 else {
657 return VERR_IOM_IOPORT_UNUSED;
658 }
659}
660#endif
661
/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
/**
 * Initialize one controller and register its I/O ports.
 * @param s           The DMA device state (for the device instance handle).
 * @param d           The controller to initialize.
 * @param base        Base I/O port of the channel registers (0x00 or 0xc0).
 * @param dshift      Port address shift: 0 = 8-bit, 1 = 16-bit controller.
 * @param page_base   Base I/O port of the page registers (0x80 or 0x88).
 * @param pageh_base  Base I/O port of the high page registers, or -1 to
 *                    skip registering them.
 */
static void dma_init2(DMAState *s, struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base)
{
    /* Page register port offsets for channels 1, 2, 3 and 0 respectively
       (matches the channels[] reverse map). */
    const static int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    /* Channel address/count registers: 8 consecutive ports (stride 1 << dshift). */
    for (i = 0; i < 8; i++) {
#ifdef VBOX
        PDMDevHlpIOPortRegister (s->pDevIns, base + (i << dshift), 1, d,
                                 io_write_chan, io_read_chan, NULL, NULL, "DMA");
#else
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
#endif
    }
    /* Page (and optionally high page) registers. */
    for (i = 0; i < LENOFA (page_port_list); i++) {
#ifdef VBOX
        PDMDevHlpIOPortRegister (s->pDevIns, page_base + page_port_list[i], 1, d,
                                 io_write_page, io_read_page, NULL, NULL, "DMA Page");
#else
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
#endif
        if (pageh_base >= 0) {
#ifdef VBOX
            PDMDevHlpIOPortRegister (s->pDevIns, pageh_base + page_port_list[i], 1, d,
                                     io_write_pageh, io_read_pageh, NULL, NULL, "DMA Page High");
#else
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
#endif
        }
    }
    /* Controller registers occupy the 8 ports above the channel registers. */
    for (i = 0; i < 8; i++) {
#ifdef VBOX
        PDMDevHlpIOPortRegister (s->pDevIns, base + ((i + 8) << dshift), 1, d,
                                 io_write_cont, io_read_cont, NULL, NULL, "DMA cont");
#else
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
#endif
    }
#ifndef VBOX
    qemu_register_reset(dma_reset, d);
#endif
    dma_reset(d);
}
717
/**
 * Save one controller's state to the saved-state stream.
 * Field order must match dma_load exactly.
 */
static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = (struct dma_cont*)opaque;
    int i;

    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32s (f, &d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_put_be32s (f, &r->now[0]);
        qemu_put_be32s (f, &r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}
742
/**
 * Load one controller's state from the saved-state stream.
 * Field order must match dma_save exactly.
 * @returns 0 on success, an error status on version mismatch.
 */
static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = (struct dma_cont*)opaque;
    int i;

    if (version_id != 1)
#ifdef VBOX
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
#else
        return -EINVAL;
#endif

    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    qemu_get_be32s (f, &d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_get_be32s (f, &r->now[0]);
        qemu_get_be32s (f, &r->now[1]);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }
    return 0;
}
775
#ifndef VBOX
/**
 * QEMU entry point: set up both controllers (8-bit at 0x00/0x80, 16-bit at
 * 0xc0/0x88) and register their savevm handlers.  High page registers at
 * 0x480/0x488 are optional.
 */
void DMA_init (int high_page_enable)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1);
    register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
    register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);
}
#endif
787
788#ifdef VBOX
789static bool run_wrapper (PPDMDEVINS pDevIns)
790{
791 DMA_run (PDMINS_2_DATA (pDevIns, DMAState *));
792 return 0;
793}
794
795static void register_channel_wrapper (PPDMDEVINS pDevIns,
796 unsigned nchan,
797 PFNDMATRANSFERHANDLER f,
798 void *opaque)
799{
800 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *);
801 DMA_register_channel (s, nchan, f, opaque);
802}
803
804static uint32_t rd_mem_wrapper (PPDMDEVINS pDevIns,
805 unsigned nchan,
806 void *buf,
807 uint32_t pos,
808 uint32_t len)
809{
810 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *);
811 return DMA_read_memory (s, nchan, buf, pos, len);
812}
813
814static uint32_t wr_mem_wrapper (PPDMDEVINS pDevIns,
815 unsigned nchan,
816 const void *buf,
817 uint32_t pos,
818 uint32_t len)
819{
820 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *);
821 return DMA_write_memory (s, nchan, buf, pos, len);
822}
823
824static void set_DREQ_wrapper (PPDMDEVINS pDevIns,
825 unsigned nchan,
826 unsigned level)
827{
828 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *);
829 if (level) {
830 DMA_hold_DREQ (s, nchan);
831 }
832 else {
833 DMA_release_DREQ (s, nchan);
834 }
835}
836
837static uint8_t get_mode_wrapper (PPDMDEVINS pDevIns, unsigned nchan)
838{
839 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *);
840 return DMA_get_channel_mode (s, nchan);
841}
842
843static void dmaReset (PPDMDEVINS pDevIns)
844{
845 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *);
846 dma_reset (&s->dma_controllers[0]);
847 dma_reset (&s->dma_controllers[1]);
848}
849
850static DECLCALLBACK(int) dmaSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSMHandle)
851{
852 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *);
853 dma_save (pSSMHandle, &s->dma_controllers[0]);
854 dma_save (pSSMHandle, &s->dma_controllers[1]);
855 return VINF_SUCCESS;
856}
857
858static DECLCALLBACK(int) dmaLoadExec (PPDMDEVINS pDevIns,
859 PSSMHANDLE pSSMHandle,
860 uint32_t uVersion,
861 uint32_t uPass)
862{
863 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *);
864
865 AssertMsgReturn (uVersion == 1, ("%d\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
866 Assert (uPass == SSM_PASS_FINAL); NOREF(uPass);
867
868 dma_load (pSSMHandle, &s->dma_controllers[0], uVersion);
869 return dma_load (pSSMHandle, &s->dma_controllers[1], uVersion);
870}
871
/**
 * @interface_method_impl{PDMDEVREG,pfnConstruct}
 * Sets up both 8237A controllers, registers their I/O ports, and hooks the
 * device up with PDM as the system DMA controller plus saved-state handlers.
 */
static DECLCALLBACK(int) dmaConstruct(PPDMDEVINS pDevIns,
                                      int iInstance,
                                      PCFGMNODE pCfg)
{
    DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *);
    bool high_page_enable = 0;  /* high page registers (EISA) currently always off */
    PDMDMACREG reg;
    int rc;

    s->pDevIns = pDevIns;

    /*
     * Validate configuration.
     */
    if (!CFGMR3AreValuesValid(pCfg, "\0")) /* "HighPageEnable\0")) */
        return VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES;

#if 0
    rc = CFGMR3QueryBool (pCfg, "HighPageEnable", &high_page_enable);
    if (RT_FAILURE (rc)) {
        return rc;
    }
#endif

    /* Controller 0: 8-bit channels 0-3 at ports 0x00/0x80;
       controller 1: 16-bit channels 4-7 at ports 0xc0/0x88. */
    dma_init2(s, &s->dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1);
    dma_init2(s, &s->dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1);

    /* Register the DMAC callbacks with PDM so devices can use DMA. */
    reg.u32Version = PDM_DMACREG_VERSION;
    reg.pfnRun = run_wrapper;
    reg.pfnRegister = register_channel_wrapper;
    reg.pfnReadMemory = rd_mem_wrapper;
    reg.pfnWriteMemory = wr_mem_wrapper;
    reg.pfnSetDREQ = set_DREQ_wrapper;
    reg.pfnGetChannelMode = get_mode_wrapper;

    rc = PDMDevHlpDMACRegister (pDevIns, &reg, &s->pHlp);
    if (RT_FAILURE (rc)) {
        return rc;
    }

    rc = PDMDevHlpSSMRegister (pDevIns, 1 /*uVersion*/, sizeof (*s), dmaSaveExec, dmaLoadExec);
    if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
923
/**
 * The device registration structure for the 8237A DMA controller
 * ("8237A", class DMA, single instance, ring-3 only).
 */
const PDMDEVREG g_DeviceDMA =
{
    /* u32Version */
    PDM_DEVREG_VERSION,
    /* szName */
    "8237A",
    /* szRCMod */
    "",
    /* szR0Mod */
    "",
    /* pszDescription */
    "DMA Controller Device",
    /* fFlags */
    PDM_DEVREG_FLAGS_DEFAULT_BITS,
    /* fClass */
    PDM_DEVREG_CLASS_DMA,
    /* cMaxInstances */
    1,
    /* cbInstance */
    sizeof(DMAState),
    /* pfnConstruct */
    dmaConstruct,
    /* pfnDestruct */
    NULL,
    /* pfnRelocate */
    NULL,
    /* pfnIOCtl */
    NULL,
    /* pfnPowerOn */
    NULL,
    /* pfnReset */
    dmaReset,
    /* pfnSuspend */
    NULL,
    /* pfnResume */
    NULL,
    /* pfnAttach */
    NULL,
    /* pfnDetach */
    NULL,
    /* pfnQueryInterface. */
    NULL,
    /* pfnInitComplete */
    NULL,
    /* pfnPowerOff */
    NULL,
    /* pfnSoftReset */
    NULL,
    /* u32VersionEnd */
    PDM_DEVREG_VERSION
};
978#endif /* VBOX */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette