VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/module/vboxmod.c@ 4485

Last change on this file since 4485 was 4299, checked in by vboxsync, 17 years ago

Linux guest kernel module adaptation to 2.6.22

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 26.4 KB
/** @file
 *
 * vboxadd -- VirtualBox Guest Additions for Linux
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#include "the-linux-kernel.h"
#include "version-generated.h"

/* #define IRQ_DEBUG */

#include "vboxmod.h"
#include "waitcompat.h"
#include <VBox/log.h>

#define VERSION "0.5"

MODULE_DESCRIPTION("VirtualBox Guest Additions for Linux Module");
MODULE_AUTHOR("innotek GmbH");
MODULE_LICENSE("GPL");
#ifdef MODULE_VERSION
MODULE_VERSION(VBOX_VERSION_STRING);
#endif

/* Runtime assert implementation for Linux ring 0 */
RTDECL(void) AssertMsg1(const char *pszExpr, unsigned uLine,
                        const char *pszFile, const char *pszFunction)
{
    elog("!!Assertion Failed!!\n"
         "Expression: %s\n"
         "Location : %s(%d) %s\n",
         pszExpr, pszFile, uLine, pszFunction);
    Log(("!!Assertion Failed!!\n"
         "Expression: %s\n"
         "Location : %s(%d) %s\n",
         pszExpr, pszFile, uLine, pszFunction));
}

/* Runtime assert implementation for Linux ring 0 */
RTDECL(void) AssertMsg2(const char *pszFormat, ...)
{
    va_list ap;
    char msg[256];

    va_start(ap, pszFormat);
    vsnprintf(msg, sizeof(msg) - 1, pszFormat, ap);
    msg[sizeof(msg) - 1] = '\0';
    elog("%s", msg);
    Log(("%s", msg));
    va_end(ap);
}

#if 0 /* We now have real backdoor logging */
/* Backdoor logging function, needed by the runtime */
RTDECL(size_t) RTLogBackdoorPrintf (const char *pszFormat, ...)
{
    va_list ap;
    char msg[256];
    size_t n;

    va_start(ap, pszFormat);
    n = vsnprintf(msg, sizeof(msg) - 1, pszFormat, ap);
    msg[sizeof(msg) - 1] = '\0';
    printk ("%s", msg);
    va_end(ap);
    return n;
}
#endif

/** device extension structure (we only support one device instance) */
static VBoxDevice *vboxDev = NULL;
/** our file node major id (set dynamically) */
#ifdef CONFIG_VBOXADD_MAJOR
static unsigned int vbox_major = CONFIG_VBOXADD_MAJOR;
#else
static unsigned int vbox_major = 0;
#endif

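/*
 * Minimal open/close hooks around the single device extension. They are
 * exported below (EXPORT_SYMBOL) so that other VirtualBox guest kernel
 * modules can obtain the vboxadd device instance.
 */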
DECLVBGL (void *) vboxadd_cmc_open (void)
{
    return vboxDev;
}

DECLVBGL (void) vboxadd_cmc_close (void *opaque)
{
    (void) opaque;
}

EXPORT_SYMBOL (vboxadd_cmc_open);
EXPORT_SYMBOL (vboxadd_cmc_close);

/**
 * File open handler
 *
 */
static int vboxadd_open(struct inode *inode, struct file *filp)
{
    /* no checks required */
    return 0;
}

/**
 * File close handler
 *
 */
static int vboxadd_release(struct inode *inode, struct file * filp)
{
    /* no action required */
    return 0;
}

/**
 * Wait for event
 *
 */
static void
vboxadd_wait_for_event_helper (VBoxDevice *dev, long timeout,
                               uint32_t in_mask, uint32_t * out_mask)
{
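    /* no wait implementation here; this path is treated as a fatal error */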
    BUG ();
}

static void
vboxadd_wait_for_event (VBoxGuestWaitEventInfo * info)
{
    long timeout;

    timeout = msecs_to_jiffies (info->u32TimeoutIn);
    vboxadd_wait_for_event_helper (vboxDev, timeout,
                                   info->u32EventMaskIn,
                                   &info->u32EventFlagsOut);
}


/**
 * IOCTL handler
 *
 */
static int vboxadd_ioctl(struct inode *inode, struct file *filp,
                         unsigned int cmd, unsigned long arg)
{
    switch (cmd)
    {
        case IOCTL_VBOXGUEST_WAITEVENT:
        {
            VBoxGuestWaitEventInfo info;
            char *ptr = (void *) arg;

            if (copy_from_user (&info, ptr, sizeof (info)))
            {
                printk (KERN_ERR "vboxadd_ioctl: can not get event info\n");
                return -EFAULT;
            }

            vboxadd_wait_for_event (&info);

            ptr += offsetof (VBoxGuestWaitEventInfo, u32EventFlagsOut);
            if (put_user (info.u32EventFlagsOut, ptr))
            {
                printk (KERN_ERR "vboxadd_ioctl: can not put out_mask\n");
                return -EFAULT;
            }
            return 0;
        }

        case IOCTL_VBOXGUEST_VMMREQUEST:
        {
            VMMDevRequestHeader reqHeader;
            VMMDevRequestHeader *reqFull = NULL;
            size_t cbRequestSize;
            size_t cbVanillaRequestSize;
            int rc;

            if (_IOC_SIZE(cmd) != sizeof(VMMDevRequestHeader))
            {
                printk(KERN_ERR "vboxadd_ioctl: invalid VMM request structure size: %d\n",
                       _IOC_SIZE(cmd));
                return -EINVAL;
            }
            if (copy_from_user(&reqHeader, (void*)arg, _IOC_SIZE(cmd)))
            {
                printk(KERN_ERR "vboxadd_ioctl: copy_from_user failed for vmm request!\n");
                return -EFAULT;
            }
            /* get the request size */
            cbVanillaRequestSize = vmmdevGetRequestSize(reqHeader.requestType);
            if (!cbVanillaRequestSize)
            {
                printk(KERN_ERR "vboxadd_ioctl: invalid request type: %d\n",
                       reqHeader.requestType);
                return -EINVAL;
            }

            cbRequestSize = reqHeader.size;
            if (cbRequestSize < cbVanillaRequestSize)
            {
                printk(KERN_ERR
                       "vboxadd_ioctl: invalid request size: %d min: %d type: %d\n",
                       cbRequestSize,
                       cbVanillaRequestSize,
                       reqHeader.requestType);
                return -EINVAL;
            }
            /* request storage for the full request */
            rc = VbglGRAlloc(&reqFull, cbRequestSize, reqHeader.requestType);
            if (VBOX_FAILURE(rc))
            {
                printk(KERN_ERR
                       "vboxadd_ioctl: could not allocate request structure! rc = %d\n", rc);
                return -EFAULT;
            }
            /* now get the full request */
            if (copy_from_user(reqFull, (void*)arg, cbRequestSize))
            {
                printk(KERN_ERR
                       "vboxadd_ioctl: failed to fetch full request from user space!\n");
                VbglGRFree(reqFull);
                return -EFAULT;
            }

            /* now issue the request */
            rc = VbglGRPerform(reqFull);

            /* asynchronous processing? */
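            /* VINF_HGCM_ASYNC_EXECUTE means the host completes the request later;
               the VMMDev interrupt handler wakes eventq once the request is marked done */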
            if (rc == VINF_HGCM_ASYNC_EXECUTE)
            {
                VMMDevHGCMRequestHeader *reqHGCM = (VMMDevHGCMRequestHeader*)reqFull;
                wait_event (vboxDev->eventq, reqHGCM->fu32Flags & VBOX_HGCM_REQ_DONE);
                rc = reqFull->rc;
            }

            /* failed? */
            if (VBOX_FAILURE(rc) || VBOX_FAILURE(reqFull->rc))
            {
                printk(KERN_ERR "vboxadd_ioctl: request execution failed!\n");
                VbglGRFree(reqFull);
                return -EFAULT;
            }
            else
            {
                /* success, copy the result data to user space */
                if (copy_to_user((void*)arg, (void*)reqFull, cbRequestSize))
                {
                    printk(KERN_ERR
                           "vboxadd_ioctl: error copying request result to user space!\n");
                    VbglGRFree(reqFull);
                    return -EFAULT;
                }
            }
            VbglGRFree(reqFull);
            break;
        }

        case IOCTL_VBOXGUEST_HGCM_CALL:
        {
            /* This IOCTL allows the guest to make an HGCM call from user space. The
               OS-independent part of the Guest Additions already contains code for making
               an HGCM call from the guest, but that code assumes the call is made from the
               kernel's address space. So before calling it, we have to copy all parameters
               of the HGCM call from user space to kernel space and reconstruct the
               structures passed to the call (which include pointers to other memory)
               inside the kernel's address space. */
            return vbox_ioctl_hgcm_call(arg, vboxDev);
        }

        case IOCTL_VBOXGUEST_CLIPBOARD_CONNECT:
        {
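            /* client ID of the last clipboard connection, kept across calls so that a
               new connect first tears down any previous session */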
            static uint32_t u32ClientID = 0;
            VMMDevHGCMDisconnect *reqDisconnect = NULL;
            VMMDevHGCMConnect *reqConnect = NULL;
            size_t cbRequestSize;
            int rc;

            /* First, disconnect any old client. */
            if (u32ClientID != 0)
            {
                /* get the request size */
                cbRequestSize = vmmdevGetRequestSize(VMMDevReq_HGCMDisconnect);
                /* request storage for the request */
                rc = VbglGRAlloc((VMMDevRequestHeader **) &reqDisconnect, cbRequestSize,
                                 VMMDevReq_HGCMDisconnect);
                if (VBOX_FAILURE(rc))
                {
                    printk(KERN_ERR
                           "vboxadd_ioctl: could not allocate request structure! rc = %d\n", rc);
                    return -EFAULT;
                }
                /* now get the full request */
                vmmdevInitRequest(&reqDisconnect->header.header, VMMDevReq_HGCMDisconnect);
                reqDisconnect->u32ClientID = u32ClientID;

                /* now issue the request */
                rc = VbglGRPerform(&reqDisconnect->header.header);

                /* asynchronous processing? */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    VMMDevHGCMRequestHeader *reqHGCM = &reqDisconnect->header;
                    wait_event (vboxDev->eventq, reqHGCM->fu32Flags & VBOX_HGCM_REQ_DONE);
                    rc = reqHGCM->header.rc;
                }

                /* failed? */
                if (VBOX_FAILURE(rc) || VBOX_FAILURE(reqDisconnect->header.header.rc))
                {
                    printk(KERN_ERR "vboxadd_ioctl: request execution failed!\n");
                    VbglGRFree(&reqDisconnect->header.header);
                    return -EFAULT;
                }
                VbglGRFree(&reqDisconnect->header.header);
            }

            /* And connect... */
            /* get the request size */
            cbRequestSize = vmmdevGetRequestSize(VMMDevReq_HGCMConnect);
            /* request storage for the request */
            rc = VbglGRAlloc((VMMDevRequestHeader **) &reqConnect, cbRequestSize, VMMDevReq_HGCMConnect);
            if (VBOX_FAILURE(rc))
            {
                printk(KERN_ERR
                       "vboxadd_ioctl: could not allocate request structure! rc = %d\n", rc);
                return -EFAULT;
            }
            /* now get the full request */
            vmmdevInitRequest((VMMDevRequestHeader*)reqConnect, VMMDevReq_HGCMConnect);
            reqConnect->loc.type = VMMDevHGCMLoc_LocalHost_Existing;
            strcpy (reqConnect->loc.u.host.achName, "VBoxSharedClipboard");

            /* now issue the request */
            rc = VbglGRPerform(&reqConnect->header.header);

            /* asynchronous processing? */
            if (rc == VINF_HGCM_ASYNC_EXECUTE)
            {
                VMMDevHGCMRequestHeader *reqHGCM = &reqConnect->header;
                wait_event (vboxDev->eventq, reqHGCM->fu32Flags & VBOX_HGCM_REQ_DONE);
                rc = reqHGCM->header.rc;
            }

            /* failed? */
            if (VBOX_FAILURE(rc) || VBOX_FAILURE(reqConnect->header.header.rc))
            {
                printk(KERN_ERR "vboxadd_ioctl: request execution failed!\n");
                VbglGRFree(&reqConnect->header.header);
                return -EFAULT;
            }
            else
            {
                /* success, copy the result data to user space */
                u32ClientID = reqConnect->u32ClientID;
                if (copy_to_user((void*)arg, (void*)&(reqConnect->u32ClientID), sizeof(uint32_t)))
                {
                    printk(KERN_ERR
                           "vboxadd_ioctl: error copying request result to user space!\n");
                    VbglGRFree(&reqConnect->header.header);
                    return -EFAULT;
                }
            }
            VbglGRFree(&reqConnect->header.header);
            break;
        }

        default:
        {
            elog("vboxadd_ioctl: unknown command: %x, IOCTL_VBOXGUEST_HGCM_CALL is %x\n", cmd,
                 IOCTL_VBOXGUEST_HGCM_CALL);
            Log(("vboxadd_ioctl: unknown command: %x, IOCTL_VBOXGUEST_HGCM_CALL is %x\n", cmd,
                 IOCTL_VBOXGUEST_HGCM_CALL));
            return -EINVAL;
        }
    }
    return 0;
}

#ifdef DEBUG
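/** Debug-only read handler: returns 8 bytes containing the VMMDev "have events"
 *  flag followed by the currently pending event mask. */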
static ssize_t
vboxadd_read (struct file *file, char *buf, size_t count, loff_t *loff)
{
    if (count != 8 || *loff != 0)
    {
        return -EINVAL;
    }
    *(uint32_t *) buf = vboxDev->pVMMDevMemory->V.V1_04.fHaveEvents;
    *(uint32_t *) (buf + 4) = vboxDev->u32Events;
    *loff += 8;
    return 8;
}
#endif

/** strategy handlers (file operations) */
static struct file_operations vbox_fops =
{
    .owner   = THIS_MODULE,
    .open    = vboxadd_open,
    .release = vboxadd_release,
    .ioctl   = vboxadd_ioctl,
#ifdef DEBUG
    .read    = vboxadd_read,
#endif
    .llseek  = no_llseek
};

#ifndef IRQ_RETVAL
/* interrupt handlers in 2.4 kernels don't return anything */
# define irqreturn_t void
# define IRQ_RETVAL(n)
#endif

/**
 * vboxadd_irq_handler
 *
 * Interrupt handler
 *
 * @returns IRQ_RETVAL(1) if the interrupt was ours, IRQ_RETVAL(0) otherwise
 * @param irq Irq number
 * @param dev_id Irq handler parameter
 * @param regs Regs
 *
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
static irqreturn_t vboxadd_irq_handler(int irq, void *dev_id)
#else
static irqreturn_t vboxadd_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
#endif
{
    int fIRQTaken = 0;
    int rcVBox;

#ifdef IRQ_DEBUG
    printk ("%s: vboxDev->pVMMDevMemory=%p vboxDev->pVMMDevMemory->fHaveEvents=%d\n",
            __func__, vboxDev->pVMMDevMemory, vboxDev->pVMMDevMemory->fHaveEvents);
#endif

    /* check if IRQ was asserted by VBox */
    if (vboxDev->pVMMDevMemory->V.V1_04.fHaveEvents != 0)
    {
#ifdef IRQ_DEBUG
        printk(KERN_INFO "vboxadd: got IRQ with event mask 0x%x\n",
               vboxDev->pVMMDevMemory->u32HostEvents);
#endif

        /* make a copy of the event mask */
        rcVBox = VbglGRPerform (&vboxDev->irqAckRequest->header);
        if (VBOX_SUCCESS(rcVBox) && VBOX_SUCCESS(vboxDev->irqAckRequest->header.rc))
        {
            if (RT_LIKELY (vboxDev->irqAckRequest->events))
            {
                vboxDev->u32Events |= vboxDev->irqAckRequest->events;
                wake_up (&vboxDev->eventq);
            }
        }
        else
        {
            /* impossible... */
            printk(KERN_ERR
                   "vboxadd: failed acknowledging IRQ! rc = %x, header.rc = %d\n",
                   rcVBox, vboxDev->irqAckRequest->header.rc);
            BUG ();
        }

        /* it was ours! */
        fIRQTaken = 1;
    }
#ifdef IRQ_DEBUG
    else
    {
        printk ("vboxadd: stale IRQ mem=%p events=%d devevents=%#x\n",
                vboxDev->pVMMDevMemory,
                vboxDev->pVMMDevMemory->fHaveEvents,
                vboxDev->u32Events);
    }
#endif
    /* it was ours */
    return IRQ_RETVAL(fIRQTaken);
}

/**
 * Helper function to reserve a fixed kernel address space window
 * and tell the VMM that it can safely put its hypervisor there.
 * This function might fail which is not a critical error.
 */
static int vboxadd_reserve_hypervisor(void)
{
    VMMDevReqHypervisorInfo *req = NULL;
    int rcVBox;

    /* allocate request structure */
    rcVBox = VbglGRAlloc(
        (VMMDevRequestHeader**)&req,
        sizeof(VMMDevReqHypervisorInfo),
        VMMDevReq_GetHypervisorInfo
        );
    if (VBOX_FAILURE(rcVBox))
    {
        printk(KERN_ERR "vboxadd: failed to allocate hypervisor info structure! rc = %d\n",
               rcVBox);
        goto bail_out;
    }
    /* query the hypervisor information */
    rcVBox = VbglGRPerform(&req->header);
    if (VBOX_SUCCESS(rcVBox) && VBOX_SUCCESS(req->header.rc))
    {
        /* are we supposed to make a reservation? */
        if (req->hypervisorSize)
        {
            /** @todo repeat this several times until we get an address the host likes */

            void *hypervisorArea;
            /* reserve another 4MB because the start needs to be 4MB aligned */
            uint32_t hypervisorSize = req->hypervisorSize + 0x400000;
            /* perform a fictive IO space mapping */
            hypervisorArea = ioremap(HYPERVISOR_PHYSICAL_START, hypervisorSize);
            if (hypervisorArea)
            {
                /* communicate result to VMM, align at 4MB */
                req->hypervisorStart = (vmmDevHypPtr)ALIGNP(hypervisorArea, 0x400000);
                req->header.requestType = VMMDevReq_SetHypervisorInfo;
                req->header.rc = VERR_GENERAL_FAILURE;
                rcVBox = VbglGRPerform(&req->header);
                if (VBOX_SUCCESS(rcVBox) && VBOX_SUCCESS(req->header.rc))
                {
                    /* store mapping for future unmapping */
                    vboxDev->hypervisorStart = hypervisorArea;
                    vboxDev->hypervisorSize = hypervisorSize;
                }
                else
                {
                    printk(KERN_ERR "vboxadd: failed to set hypervisor region! "
                           "rc = %d, header.rc = %d\n",
                           rcVBox, req->header.rc);
                    goto bail_out;
                }
            }
            else
            {
                printk(KERN_ERR "vboxadd: failed to allocate 0x%x bytes of IO space\n",
                       hypervisorSize);
                goto bail_out;
            }
        }
    }
    else
    {
        printk(KERN_ERR "vboxadd: failed to query hypervisor info! rc = %d, header.rc = %d\n",
               rcVBox, req->header.rc);
        goto bail_out;
    }
    /* successful return */
    VbglGRFree(&req->header);
    return 0;
bail_out:
    /* error return */
    if (req)
        VbglGRFree(&req->header);
    return 1;
}

/**
 * Helper function to free the hypervisor address window
 *
 */
static int vboxadd_free_hypervisor(void)
{
    VMMDevReqHypervisorInfo *req = NULL;
    int rcVBox;

    /* allocate request structure */
    rcVBox = VbglGRAlloc(
        (VMMDevRequestHeader**)&req,
        sizeof(VMMDevReqHypervisorInfo),
        VMMDevReq_SetHypervisorInfo
        );
    if (VBOX_FAILURE(rcVBox))
    {
        printk(KERN_ERR
               "vboxadd: failed to allocate hypervisor info structure! rc = %d\n", rcVBox);
        goto bail_out;
    }
    /* reset the hypervisor information */
    req->hypervisorStart = 0;
    req->hypervisorSize = 0;
    rcVBox = VbglGRPerform(&req->header);
    if (VBOX_SUCCESS(rcVBox) && VBOX_SUCCESS(req->header.rc))
    {
        /* now we can free the associated IO space mapping */
        iounmap(vboxDev->hypervisorStart);
        vboxDev->hypervisorStart = 0;
    }
    else
    {
        printk(KERN_ERR "vboxadd: failed to reset hypervisor info! rc = %d, header.rc = %d\n",
               rcVBox, req->header.rc);
        goto bail_out;
    }
    return 0;

 bail_out:
    if (req)
        VbglGRFree(&req->header);
    return 1;
}

/**
 * Helper to free resources
 *
 */
static void free_resources(void)
{
    if (vboxDev)
    {
        if (vboxDev->hypervisorStart)
        {
            vboxadd_free_hypervisor();
        }
        if (vboxDev->irqAckRequest)
        {
            VbglGRFree(&vboxDev->irqAckRequest->header);
            VbglTerminate();
        }
        if (vboxDev->pVMMDevMemory)
            iounmap(vboxDev->pVMMDevMemory);
        if (vboxDev->vmmdevmem)
            release_mem_region(vboxDev->vmmdevmem, vboxDev->vmmdevmem_size);
        if (vboxDev->irq)
            free_irq(vboxDev->irq, vboxDev);
        kfree(vboxDev);
        vboxDev = NULL;
    }
}

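/*
 * PCI device lookup wrappers: on 2.6.20+ kernels pci_get_device() is used, which
 * returns a reference-counted pci_dev that must be released with pci_dev_put();
 * the older pci_find_device() takes no reference, hence the empty PCI_DEV_PUT().
 */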
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define PCI_DEV_GET(v,d,p) pci_get_device(v,d,p)
#define PCI_DEV_PUT(x) pci_dev_put(x)
#else
#define PCI_DEV_GET(v,d,p) pci_find_device(v,d,p)
#define PCI_DEV_PUT(x)
#endif

/**
 * Module initialization
 *
 */
static __init int init(void)
{
    int err;
    int rcVBox;
    struct pci_dev *pcidev = NULL;
    VMMDevReportGuestInfo *infoReq = NULL;

    printk(KERN_INFO "vboxadd: initializing version %s\n", VERSION);

    if (vboxadd_cmc_init ())
    {
        printk (KERN_ERR "vboxadd: could not init cmc.\n");
        return -ENODEV;
    }

    /*
     * Detect PCI device
     */
    pcidev = PCI_DEV_GET(VMMDEV_VENDORID, VMMDEV_DEVICEID, pcidev);
    if (!pcidev)
    {
        printk(KERN_ERR "vboxadd: VirtualBox PCI device not found.\n");
        return -ENODEV;
    }

    err = pci_enable_device (pcidev);
    if (err)
    {
        printk (KERN_ERR "vboxadd: could not enable device: %d\n", err);
        PCI_DEV_PUT(pcidev);
        return -ENODEV;
    }

    /* register a character device */
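    /* register_chrdev() returns 0 when a fixed major was requested and the newly
       allocated major when vbox_major is 0, so both cases are checked below */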
    err = register_chrdev(vbox_major, "vboxadd", &vbox_fops);
    if (err < 0 || ((vbox_major & err) || (!vbox_major && !err)))
    {
        printk(KERN_ERR "vboxadd: register_chrdev failed: vbox_major: %d, err = %d\n",
               vbox_major, err);
        PCI_DEV_PUT(pcidev);
        return -ENODEV;
    }
    /* if no major code was set, take the return value */
    if (!vbox_major)
        vbox_major = err;

    /* allocate and initialize device extension */
    vboxDev = kmalloc(sizeof(*vboxDev), GFP_KERNEL);
    if (!vboxDev)
    {
        printk(KERN_ERR "vboxadd: cannot allocate device!\n");
        err = -ENOMEM;
        goto fail;
    }
    memset(vboxDev, 0, sizeof(*vboxDev));
    snprintf(vboxDev->name, sizeof(vboxDev->name), "vboxadd");

    /* get the IO port region */
    vboxDev->io_port = pci_resource_start(pcidev, 0);

    /* get the memory region */
    vboxDev->vmmdevmem = pci_resource_start(pcidev, 1);
    vboxDev->vmmdevmem_size = pci_resource_len(pcidev, 1);

    /* all resources found? */
    if (!vboxDev->io_port || !vboxDev->vmmdevmem || !vboxDev->vmmdevmem_size)
    {
        printk(KERN_ERR "vboxadd: did not find expected hardware resources!\n");
        goto fail;
    }

    /* request ownership of adapter memory */
    if (request_mem_region(vboxDev->vmmdevmem, vboxDev->vmmdevmem_size, "vboxadd") == 0)
    {
        printk(KERN_ERR "vboxadd: failed to request adapter memory!\n");
        goto fail;
    }

    /* map adapter memory into kernel address space and check version */
    vboxDev->pVMMDevMemory = (VMMDevMemory *) ioremap(vboxDev->vmmdevmem,
                                                      vboxDev->vmmdevmem_size);
    if (!vboxDev->pVMMDevMemory)
    {
        printk (KERN_ERR "vboxadd: ioremap failed\n");
        goto fail;
    }

    if (vboxDev->pVMMDevMemory->u32Version != VMMDEV_MEMORY_VERSION)
    {
        printk(KERN_ERR
               "vboxadd: invalid VMM device memory version! (got 0x%x, expected 0x%x)\n",
               vboxDev->pVMMDevMemory->u32Version, VMMDEV_MEMORY_VERSION);
        goto fail;
    }

    /* initialize VBGL subsystem */
    rcVBox = VbglInit(vboxDev->io_port, vboxDev->pVMMDevMemory);
    if (VBOX_FAILURE(rcVBox))
    {
        printk(KERN_ERR "vboxadd: could not initialize VBGL subsystem! rc = %d\n", rcVBox);
        goto fail;
    }

    /* report guest information to host, this must be done as the very first request */
    rcVBox = VbglGRAlloc((VMMDevRequestHeader**)&infoReq,
                         sizeof(VMMDevReportGuestInfo), VMMDevReq_ReportGuestInfo);
    if (VBOX_FAILURE(rcVBox))
    {
        printk(KERN_ERR "vboxadd: could not allocate request structure! rc = %d\n", rcVBox);
        goto fail;
    }

    /* report guest version to host, the VMMDev requires that to be done first */
    infoReq->guestInfo.additionsVersion = VMMDEV_VERSION;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 0)
    infoReq->guestInfo.osType = OSTypeLinux26;
#else
    infoReq->guestInfo.osType = OSTypeLinux24;
#endif
    rcVBox = VbglGRPerform(&infoReq->header);
    if (VBOX_FAILURE(rcVBox) || VBOX_FAILURE(infoReq->header.rc))
    {
        printk(KERN_ERR
               "vboxadd: error reporting guest info to host! rc = %d, header.rc = %d\n",
               rcVBox, infoReq->header.rc);
        VbglGRFree(&infoReq->header);
        goto fail;
    }
    VbglGRFree(&infoReq->header);

    /* perform hypervisor address space reservation */
    if (vboxadd_reserve_hypervisor())
    {
        /* we just ignore the error, no address window reservation, non-fatal */
    }

    /* allocate a VMM request structure for use in the ISR */
    rcVBox = VbglGRAlloc((VMMDevRequestHeader**)&vboxDev->irqAckRequest,
                         sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
    if (VBOX_FAILURE(rcVBox))
    {
        printk(KERN_ERR "vboxadd: could not allocate request structure! rc = %d\n", rcVBox);
        goto fail;
    }

    /* get ISR */
    err = request_irq(pcidev->irq, vboxadd_irq_handler,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
                      IRQF_SHARED,
#else
                      SA_SHIRQ,
#endif
                      "vboxadd", vboxDev);
    if (err)
    {
        printk(KERN_ERR "vboxadd: Could not request IRQ %d, err: %d\n", pcidev->irq, err);
        goto fail;
    }
    vboxDev->irq = pcidev->irq;

    init_waitqueue_head (&vboxDev->eventq);

    /* some useful information for the user */
    printk(KERN_INFO
           "vboxadd: major code: %d, using irq %d, "
           "io port 0x%x, memory at 0x%x (size %d bytes), "
           "hypervisor window at 0x%p (size 0x%x bytes)\n",
           vbox_major, vboxDev->irq, vboxDev->io_port,
           vboxDev->vmmdevmem, vboxDev->vmmdevmem_size,
           vboxDev->hypervisorStart, vboxDev->hypervisorSize);

    /* successful return */
    PCI_DEV_PUT(pcidev);
    return 0;

fail:
    PCI_DEV_PUT(pcidev);
    free_resources();
    unregister_chrdev(vbox_major, "vboxadd");
    return err;
}

/**
 * Module termination
 *
 */
static __exit void fini(void)
{
    printk(KERN_DEBUG "vboxadd: unloading...\n");

    unregister_chrdev(vbox_major, "vboxadd");
    free_resources();
    vboxadd_cmc_fini ();
    printk(KERN_DEBUG "vboxadd: unloaded\n");
}

module_init(init);
module_exit(fini);

/* PCI hotplug structure */
static const struct pci_device_id __devinitdata vmmdev_pci_id[] =
{
    {
        .vendor = VMMDEV_VENDORID,
        .device = VMMDEV_DEVICEID
    },
    {
        /* empty entry */
    }
};
MODULE_DEVICE_TABLE(pci, vmmdev_pci_id);

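/* Dummy definition, presumably to satisfy linker references to the C++ personality
   routine from objects built with a C++ compiler; no exception handling is used. */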
int __gxx_personality_v0 = 0xdeadbeef;

/*
 * Local Variables:
 * c-mode: bsd
 * indent-tabs-mode: nil
 * c-plusplus: evil
 * End:
 */