VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DrvNAT.cpp@91881

Last change on this file since 91881 was 91872, checked in by vboxsync, 3 years ago

Devices/Network: Change the network drivers to access the CFGM API through the driver helper callback table only, bugref:10074

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 66.3 KB
1/* $Id: DrvNAT.cpp 91872 2021-10-20 09:07:44Z vboxsync $ */
2/** @file
3 * DrvNAT - NAT network transport driver.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DRV_NAT
23#define __STDC_LIMIT_MACROS
24#define __STDC_CONSTANT_MACROS
25#include "slirp/libslirp.h"
26extern "C" {
27#include "slirp/slirp_dns.h"
28}
29#include "slirp/ctl.h"
30
31#include <VBox/vmm/dbgf.h>
32#include <VBox/vmm/pdmdrv.h>
33#include <VBox/vmm/pdmnetifs.h>
34#include <VBox/vmm/pdmnetinline.h>
35
36#include <iprt/assert.h>
37#include <iprt/critsect.h>
38#include <iprt/cidr.h>
39#include <iprt/file.h>
40#include <iprt/mem.h>
41#include <iprt/pipe.h>
42#include <iprt/string.h>
43#include <iprt/stream.h>
44#include <iprt/uuid.h>
45
46#include "VBoxDD.h"
47
48#ifndef RT_OS_WINDOWS
49# include <unistd.h>
50# include <fcntl.h>
51# include <poll.h>
52# include <errno.h>
53#endif
54#ifdef RT_OS_FREEBSD
55# include <netinet/in.h>
56#endif
57#include <iprt/semaphore.h>
58#include <iprt/req.h>
59#ifdef RT_OS_DARWIN
60# include <SystemConfiguration/SystemConfiguration.h>
61# include <CoreFoundation/CoreFoundation.h>
62#endif
63
64#define COUNTERS_INIT
65#include "counters.h"
66
67
68/*********************************************************************************************************************************
69* Defined Constants And Macros *
70*********************************************************************************************************************************/
71
72#define DRVNAT_MAXFRAMESIZE (16 * 1024)
73
74/**
75 * @todo: This is a bad hack to prevent freezing the guest during high network
76 * activity. Windows host only. This needs to be fixed properly.
77 */
78#define VBOX_NAT_DELAY_HACK
79
80#define GET_EXTRADATA(pdrvins, node, name, rc, type, type_name, var) \
81do { \
82 (rc) = (pdrvins)->pHlpR3->pfnCFGMQuery ## type((node), name, &(var)); \
83 if (RT_FAILURE((rc)) && (rc) != VERR_CFGM_VALUE_NOT_FOUND) \
84 return PDMDrvHlpVMSetError((pdrvins), (rc), RT_SRC_POS, N_("NAT#%d: configuration query for \"" name "\" " #type_name " failed"), \
85 (pdrvins)->iInstance); \
86} while (0)
87
88#define GET_ED_STRICT(pdrvins, node, name, rc, type, type_name, var) \
89do { \
90 (rc) = (pdrvins)->pHlpR3->pfnCFGMQuery ## type((node), name, &(var)); \
91 if (RT_FAILURE((rc))) \
92 return PDMDrvHlpVMSetError((pdrvins), (rc), RT_SRC_POS, N_("NAT#%d: configuration query for \"" name "\" " #type_name " failed"), \
93 (pdrvins)->iInstance); \
94} while (0)
95
96#define GET_EXTRADATA_N(pdrvins, node, name, rc, type, type_name, var, var_size) \
97do { \
98 (rc) = (pdrvins)->pHlpR3->pfnCFGMQuery ## type((node), name, &(var), var_size); \
99 if (RT_FAILURE((rc)) && (rc) != VERR_CFGM_VALUE_NOT_FOUND) \
100 return PDMDrvHlpVMSetError((pdrvins), (rc), RT_SRC_POS, N_("NAT#%d: configuration query for \"" name "\" " #type_name " failed"), \
101 (pdrvins)->iInstance); \
102} while (0)
103
104#define GET_BOOL(rc, pdrvins, node, name, var) \
105 GET_EXTRADATA(pdrvins, node, name, (rc), Bool, boolean, (var))
106#define GET_STRING(rc, pdrvins, node, name, var, var_size) \
107 GET_EXTRADATA_N(pdrvins, node, name, (rc), String, string, (var), (var_size))
108#define GET_STRING_ALLOC(rc, pdrvins, node, name, var) \
109 GET_EXTRADATA(pdrvins, node, name, (rc), StringAlloc, string, (var))
110#define GET_S32(rc, pdrvins, node, name, var) \
111 GET_EXTRADATA(pdrvins, node, name, (rc), S32, int, (var))
112#define GET_S32_STRICT(rc, pdrvins, node, name, var) \
113 GET_ED_STRICT(pdrvins, node, name, (rc), S32, int, (var))
114
115
116
117#define DO_GET_IP(rc, node, instance, status, x) \
118do { \
119 char sz##x[32]; \
120 GET_STRING((rc), (node), (instance), #x, sz ## x[0], sizeof(sz ## x)); \
121 if (rc != VERR_CFGM_VALUE_NOT_FOUND) \
122 (status) = inet_aton(sz ## x, &x); \
123} while (0)
124
125#define GETIP_DEF(rc, node, instance, x, def) \
126do \
127{ \
128 int status = 0; \
129 DO_GET_IP((rc), (node), (instance), status, x); \
130 if (status == 0 || rc == VERR_CFGM_VALUE_NOT_FOUND) \
131 x.s_addr = def; \
132} while (0)
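/*
 * A minimal usage sketch for the query macros above; the calls mirror how
 * drvNATConstruct() and drvNATConstructRedir() below use them, so all names
 * come from this file and only serve as illustration here:
 *
 *   int  rc;
 *   bool fPassDomain = true;
 *   GET_BOOL(rc, pDrvIns, pCfg, "PassDomain", fPassDomain);
 *
 *   int i32SoMaxConn = 10;
 *   GET_S32(rc, pDrvIns, pCfg, "SoMaxConnection", i32SoMaxConn);
 *
 *   struct in_addr BindIP;
 *   RT_ZERO(BindIP);
 *   GETIP_DEF(rc, pDrvIns, pNode, BindIP, INADDR_ANY);
 *
 * A missing key (VERR_CFGM_VALUE_NOT_FOUND) leaves the default value untouched;
 * any other failure returns from the constructor via PDMDrvHlpVMSetError.
 */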
133
134
135/*********************************************************************************************************************************
136* Structures and Typedefs *
137*********************************************************************************************************************************/
138/**
139 * NAT network transport driver instance data.
140 *
141 * @implements PDMINETWORKUP
142 */
143typedef struct DRVNAT
144{
145 /** The network interface. */
146 PDMINETWORKUP INetworkUp;
 147 /** The network NAT Engine configuration. */
148 PDMINETWORKNATCONFIG INetworkNATCfg;
149 /** The port we're attached to. */
150 PPDMINETWORKDOWN pIAboveNet;
151 /** The network config of the port we're attached to. */
152 PPDMINETWORKCONFIG pIAboveConfig;
153 /** Pointer to the driver instance. */
154 PPDMDRVINS pDrvIns;
155 /** Link state */
156 PDMNETWORKLINKSTATE enmLinkState;
157 /** NAT state for this instance. */
158 PNATState pNATState;
159 /** TFTP directory prefix. */
160 char *pszTFTPPrefix;
161 /** Boot file name to provide in the DHCP server response. */
162 char *pszBootFile;
163 /** tftp server name to provide in the DHCP server response. */
164 char *pszNextServer;
165 /** Polling thread. */
166 PPDMTHREAD pSlirpThread;
167 /** Queue for NAT-thread-external events. */
168 RTREQQUEUE hSlirpReqQueue;
169 /** The guest IP for port-forwarding. */
170 uint32_t GuestIP;
171 /** Link state set when the VM is suspended. */
172 PDMNETWORKLINKSTATE enmLinkStateWant;
173
174#ifndef RT_OS_WINDOWS
175 /** The write end of the control pipe. */
176 RTPIPE hPipeWrite;
177 /** The read end of the control pipe. */
178 RTPIPE hPipeRead;
179# if HC_ARCH_BITS == 32
180 uint32_t u32Padding;
181# endif
182#else
183 /** for external notification */
184 HANDLE hWakeupEvent;
185#endif
186
187#define DRV_PROFILE_COUNTER(name, dsc) STAMPROFILE Stat ## name
188#define DRV_COUNTING_COUNTER(name, dsc) STAMCOUNTER Stat ## name
189#include "counters.h"
190 /** thread delivering packets for receiving by the guest */
191 PPDMTHREAD pRecvThread;
192 /** thread delivering urg packets for receiving by the guest */
193 PPDMTHREAD pUrgRecvThread;
194 /** event to wakeup the guest receive thread */
195 RTSEMEVENT EventRecv;
196 /** event to wakeup the guest urgent receive thread */
197 RTSEMEVENT EventUrgRecv;
198 /** Receive Req queue (deliver packets to the guest) */
199 RTREQQUEUE hRecvReqQueue;
200 /** Receive Urgent Req queue (deliver packets to the guest). */
201 RTREQQUEUE hUrgRecvReqQueue;
202
 203 /** Makes access to the device's RecvAvail and Recv functions atomic. */
204 RTCRITSECT DevAccessLock;
205 /** Number of in-flight urgent packets. */
206 volatile uint32_t cUrgPkts;
207 /** Number of in-flight regular packets. */
208 volatile uint32_t cPkts;
209
210 /** Transmit lock taken by BeginXmit and released by EndXmit. */
211 RTCRITSECT XmitLock;
212
213 /** Request queue for the async host resolver. */
214 RTREQQUEUE hHostResQueue;
215 /** Async host resolver thread. */
216 PPDMTHREAD pHostResThread;
217
218#ifdef RT_OS_DARWIN
219 /* Handle of the DNS watcher runloop source. */
220 CFRunLoopSourceRef hRunLoopSrcDnsWatcher;
221#endif
222} DRVNAT;
223AssertCompileMemberAlignment(DRVNAT, StatNATRecvWakeups, 8);
224/** Pointer to the NAT driver instance data. */
225typedef DRVNAT *PDRVNAT;
226
227
228/*********************************************************************************************************************************
229* Internal Functions *
230*********************************************************************************************************************************/
231static void drvNATNotifyNATThread(PDRVNAT pThis, const char *pszWho);
232DECLINLINE(void) drvNATUpdateDNS(PDRVNAT pThis, bool fFlapLink);
233static DECLCALLBACK(int) drvNATReinitializeHostNameResolving(PDRVNAT pThis);
234
235
236/**
237 * @callback_method_impl{FNPDMTHREADDRV}
238 */
239static DECLCALLBACK(int) drvNATRecv(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
240{
241 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
242
243 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
244 return VINF_SUCCESS;
245
246 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
247 {
248 RTReqQueueProcess(pThis->hRecvReqQueue, 0);
249 if (ASMAtomicReadU32(&pThis->cPkts) == 0)
250 RTSemEventWait(pThis->EventRecv, RT_INDEFINITE_WAIT);
251 }
252 return VINF_SUCCESS;
253}
254
255
256/**
257 * @callback_method_impl{FNPDMTHREADWAKEUPDRV}
258 */
259static DECLCALLBACK(int) drvNATRecvWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
260{
261 RT_NOREF(pThread);
262 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
263 int rc;
264 rc = RTSemEventSignal(pThis->EventRecv);
265
266 STAM_COUNTER_INC(&pThis->StatNATRecvWakeups);
267 return VINF_SUCCESS;
268}
269
270
271/**
272 * @callback_method_impl{FNPDMTHREADDRV}
273 */
274static DECLCALLBACK(int) drvNATUrgRecv(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
275{
276 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
277
278 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
279 return VINF_SUCCESS;
280
281 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
282 {
283 RTReqQueueProcess(pThis->hUrgRecvReqQueue, 0);
284 if (ASMAtomicReadU32(&pThis->cUrgPkts) == 0)
285 {
286 int rc = RTSemEventWait(pThis->EventUrgRecv, RT_INDEFINITE_WAIT);
287 AssertRC(rc);
288 }
289 }
290 return VINF_SUCCESS;
291}
292
293
294/**
295 * @callback_method_impl{FNPDMTHREADWAKEUPDRV}
296 */
297static DECLCALLBACK(int) drvNATUrgRecvWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
298{
299 RT_NOREF(pThread);
300 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
301 int rc = RTSemEventSignal(pThis->EventUrgRecv);
302 AssertRC(rc);
303
304 return VINF_SUCCESS;
305}
306
307
308static DECLCALLBACK(void) drvNATUrgRecvWorker(PDRVNAT pThis, uint8_t *pu8Buf, int cb, struct mbuf *m)
309{
310 int rc = RTCritSectEnter(&pThis->DevAccessLock);
311 AssertRC(rc);
312 rc = pThis->pIAboveNet->pfnWaitReceiveAvail(pThis->pIAboveNet, RT_INDEFINITE_WAIT);
313 if (RT_SUCCESS(rc))
314 {
315 rc = pThis->pIAboveNet->pfnReceive(pThis->pIAboveNet, pu8Buf, cb);
316 AssertRC(rc);
317 }
318 else if ( rc != VERR_TIMEOUT
319 && rc != VERR_INTERRUPTED)
320 {
321 AssertRC(rc);
322 }
323
324 rc = RTCritSectLeave(&pThis->DevAccessLock);
325 AssertRC(rc);
326
327 slirp_ext_m_free(pThis->pNATState, m, pu8Buf);
328 if (ASMAtomicDecU32(&pThis->cUrgPkts) == 0)
329 {
330 drvNATRecvWakeup(pThis->pDrvIns, pThis->pRecvThread);
331 drvNATNotifyNATThread(pThis, "drvNATUrgRecvWorker");
332 }
333}
334
335
336static DECLCALLBACK(void) drvNATRecvWorker(PDRVNAT pThis, uint8_t *pu8Buf, int cb, struct mbuf *m)
337{
338 int rc;
339 STAM_PROFILE_START(&pThis->StatNATRecv, a);
340
341
342 while (ASMAtomicReadU32(&pThis->cUrgPkts) != 0)
343 {
344 rc = RTSemEventWait(pThis->EventRecv, RT_INDEFINITE_WAIT);
345 if ( RT_FAILURE(rc)
346 && ( rc == VERR_TIMEOUT
347 || rc == VERR_INTERRUPTED))
348 goto done_unlocked;
349 }
350
351 rc = RTCritSectEnter(&pThis->DevAccessLock);
352 AssertRC(rc);
353
354 STAM_PROFILE_START(&pThis->StatNATRecvWait, b);
355 rc = pThis->pIAboveNet->pfnWaitReceiveAvail(pThis->pIAboveNet, RT_INDEFINITE_WAIT);
356 STAM_PROFILE_STOP(&pThis->StatNATRecvWait, b);
357
358 if (RT_SUCCESS(rc))
359 {
360 rc = pThis->pIAboveNet->pfnReceive(pThis->pIAboveNet, pu8Buf, cb);
361 AssertRC(rc);
362 }
363 else if ( rc != VERR_TIMEOUT
364 && rc != VERR_INTERRUPTED)
365 {
366 AssertRC(rc);
367 }
368
369 rc = RTCritSectLeave(&pThis->DevAccessLock);
370 AssertRC(rc);
371
372done_unlocked:
373 slirp_ext_m_free(pThis->pNATState, m, pu8Buf);
374 ASMAtomicDecU32(&pThis->cPkts);
375
376 drvNATNotifyNATThread(pThis, "drvNATRecvWorker");
377
378 STAM_PROFILE_STOP(&pThis->StatNATRecv, a);
379}
380
381/**
382 * Frees an S/G buffer allocated by drvNATNetworkUp_AllocBuf.
383 *
384 * @param pThis Pointer to the NAT instance.
385 * @param pSgBuf The S/G buffer to free.
386 */
387static void drvNATFreeSgBuf(PDRVNAT pThis, PPDMSCATTERGATHER pSgBuf)
388{
389 Assert((pSgBuf->fFlags & PDMSCATTERGATHER_FLAGS_MAGIC_MASK) == PDMSCATTERGATHER_FLAGS_MAGIC);
390 pSgBuf->fFlags = 0;
391 if (pSgBuf->pvAllocator)
392 {
393 Assert(!pSgBuf->pvUser);
394 slirp_ext_m_free(pThis->pNATState, (struct mbuf *)pSgBuf->pvAllocator, NULL);
395 pSgBuf->pvAllocator = NULL;
396 }
397 else if (pSgBuf->pvUser)
398 {
399 RTMemFree(pSgBuf->aSegs[0].pvSeg);
400 pSgBuf->aSegs[0].pvSeg = NULL;
401 RTMemFree(pSgBuf->pvUser);
402 pSgBuf->pvUser = NULL;
403 }
404 RTMemFree(pSgBuf);
405}
406
407/**
408 * Worker function for drvNATSend().
409 *
410 * @param pThis Pointer to the NAT instance.
411 * @param pSgBuf The scatter/gather buffer.
412 * @thread NAT
413 */
414static DECLCALLBACK(void) drvNATSendWorker(PDRVNAT pThis, PPDMSCATTERGATHER pSgBuf)
415{
416#if 0 /* Assertion happens often to me after resuming a VM -- no time to investigate this now. */
417 Assert(pThis->enmLinkState == PDMNETWORKLINKSTATE_UP);
418#endif
419 if (pThis->enmLinkState == PDMNETWORKLINKSTATE_UP)
420 {
421 struct mbuf *m = (struct mbuf *)pSgBuf->pvAllocator;
422 if (m)
423 {
424 /*
425 * A normal frame.
426 */
427 pSgBuf->pvAllocator = NULL;
428 slirp_input(pThis->pNATState, m, pSgBuf->cbUsed);
429 }
430 else
431 {
432 /*
433 * GSO frame, need to segment it.
434 */
435 /** @todo Make the NAT engine grok large frames? Could be more efficient... */
436#if 0 /* this is for testing PDMNetGsoCarveSegmentQD. */
437 uint8_t abHdrScratch[256];
438#endif
439 uint8_t const *pbFrame = (uint8_t const *)pSgBuf->aSegs[0].pvSeg;
440 PCPDMNETWORKGSO pGso = (PCPDMNETWORKGSO)pSgBuf->pvUser;
441 /* Do not attempt to segment frames with invalid GSO parameters. */
442 if (PDMNetGsoIsValid(pGso, sizeof(*pGso), pSgBuf->cbUsed))
443 {
444 uint32_t const cSegs = PDMNetGsoCalcSegmentCount(pGso, pSgBuf->cbUsed); Assert(cSegs > 1);
445 for (uint32_t iSeg = 0; iSeg < cSegs; iSeg++)
446 {
447 size_t cbSeg;
448 void *pvSeg;
449 m = slirp_ext_m_get(pThis->pNATState, pGso->cbHdrsTotal + pGso->cbMaxSeg, &pvSeg, &cbSeg);
450 if (!m)
451 break;
452
453#if 1
454 uint32_t cbPayload, cbHdrs;
455 uint32_t offPayload = PDMNetGsoCarveSegment(pGso, pbFrame, pSgBuf->cbUsed,
456 iSeg, cSegs, (uint8_t *)pvSeg, &cbHdrs, &cbPayload);
457 memcpy((uint8_t *)pvSeg + cbHdrs, pbFrame + offPayload, cbPayload);
458
459 slirp_input(pThis->pNATState, m, cbPayload + cbHdrs);
460#else
461 uint32_t cbSegFrame;
462 void *pvSegFrame = PDMNetGsoCarveSegmentQD(pGso, (uint8_t *)pbFrame, pSgBuf->cbUsed, abHdrScratch,
463 iSeg, cSegs, &cbSegFrame);
464 memcpy((uint8_t *)pvSeg, pvSegFrame, cbSegFrame);
465
466 slirp_input(pThis->pNATState, m, cbSegFrame);
467#endif
468 }
469 }
470 }
471 }
472 drvNATFreeSgBuf(pThis, pSgBuf);
473
474 /** @todo Implement the VERR_TRY_AGAIN drvNATNetworkUp_AllocBuf semantics. */
475}
476
477/**
478 * @interface_method_impl{PDMINETWORKUP,pfnBeginXmit}
479 */
480static DECLCALLBACK(int) drvNATNetworkUp_BeginXmit(PPDMINETWORKUP pInterface, bool fOnWorkerThread)
481{
482 RT_NOREF(fOnWorkerThread);
483 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
484 int rc = RTCritSectTryEnter(&pThis->XmitLock);
485 if (RT_FAILURE(rc))
486 {
487 /** @todo Kick the worker thread when we have one... */
488 rc = VERR_TRY_AGAIN;
489 }
490 return rc;
491}
492
493/**
494 * @interface_method_impl{PDMINETWORKUP,pfnAllocBuf}
495 */
496static DECLCALLBACK(int) drvNATNetworkUp_AllocBuf(PPDMINETWORKUP pInterface, size_t cbMin,
497 PCPDMNETWORKGSO pGso, PPPDMSCATTERGATHER ppSgBuf)
498{
499 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
500 Assert(RTCritSectIsOwner(&pThis->XmitLock));
501
502 /*
503 * Drop the incoming frame if the NAT thread isn't running.
504 */
505 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
506 {
507 Log(("drvNATNetworkUp_AllocBuf: returns VERR_NET_NO_NETWORK\n"));
508 return VERR_NET_NO_NETWORK;
509 }
510
511 /*
512 * Allocate a scatter/gather buffer and an mbuf.
513 */
514 PPDMSCATTERGATHER pSgBuf = (PPDMSCATTERGATHER)RTMemAlloc(sizeof(*pSgBuf));
515 if (!pSgBuf)
516 return VERR_NO_MEMORY;
517 if (!pGso)
518 {
519 /*
520 * Drop the frame if it is too big.
521 */
522 if (cbMin >= DRVNAT_MAXFRAMESIZE)
523 {
524 Log(("drvNATNetworkUp_AllocBuf: drops over-sized frame (%u bytes), returns VERR_INVALID_PARAMETER\n",
525 cbMin));
526 RTMemFree(pSgBuf);
527 return VERR_INVALID_PARAMETER;
528 }
529
530 pSgBuf->pvUser = NULL;
531 pSgBuf->pvAllocator = slirp_ext_m_get(pThis->pNATState, cbMin,
532 &pSgBuf->aSegs[0].pvSeg, &pSgBuf->aSegs[0].cbSeg);
533 if (!pSgBuf->pvAllocator)
534 {
535 RTMemFree(pSgBuf);
536 return VERR_TRY_AGAIN;
537 }
538 }
539 else
540 {
541 /*
542 * Drop the frame if its segment is too big.
543 */
544 if (pGso->cbHdrsTotal + pGso->cbMaxSeg >= DRVNAT_MAXFRAMESIZE)
545 {
546 Log(("drvNATNetworkUp_AllocBuf: drops over-sized frame (%u bytes), returns VERR_INVALID_PARAMETER\n",
547 pGso->cbHdrsTotal + pGso->cbMaxSeg));
548 RTMemFree(pSgBuf);
549 return VERR_INVALID_PARAMETER;
550 }
551
552 pSgBuf->pvUser = RTMemDup(pGso, sizeof(*pGso));
553 pSgBuf->pvAllocator = NULL;
554 pSgBuf->aSegs[0].cbSeg = RT_ALIGN_Z(cbMin, 16);
555 pSgBuf->aSegs[0].pvSeg = RTMemAlloc(pSgBuf->aSegs[0].cbSeg);
556 if (!pSgBuf->pvUser || !pSgBuf->aSegs[0].pvSeg)
557 {
558 RTMemFree(pSgBuf->aSegs[0].pvSeg);
559 RTMemFree(pSgBuf->pvUser);
560 RTMemFree(pSgBuf);
561 return VERR_TRY_AGAIN;
562 }
563 }
564
565 /*
566 * Initialize the S/G buffer and return.
567 */
568 pSgBuf->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_1;
569 pSgBuf->cbUsed = 0;
570 pSgBuf->cbAvailable = pSgBuf->aSegs[0].cbSeg;
571 pSgBuf->cSegs = 1;
572
573#if 0 /* poison */
574 memset(pSgBuf->aSegs[0].pvSeg, 'F', pSgBuf->aSegs[0].cbSeg);
575#endif
576 *ppSgBuf = pSgBuf;
577 return VINF_SUCCESS;
578}
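/*
 * A note on the two allocation paths above: for a plain frame the mbuf handed
 * out by slirp_ext_m_get() is stored in pSgBuf->pvAllocator and is either
 * consumed by slirp_input() in drvNATSendWorker() or returned to slirp via
 * slirp_ext_m_free() in drvNATFreeSgBuf(). For a GSO frame the GSO context is
 * duplicated into pSgBuf->pvUser and the segment buffer is a plain RTMemAlloc()
 * allocation; drvNATFreeSgBuf() frees both.
 */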
579
580/**
581 * @interface_method_impl{PDMINETWORKUP,pfnFreeBuf}
582 */
583static DECLCALLBACK(int) drvNATNetworkUp_FreeBuf(PPDMINETWORKUP pInterface, PPDMSCATTERGATHER pSgBuf)
584{
585 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
586 Assert(RTCritSectIsOwner(&pThis->XmitLock));
587 drvNATFreeSgBuf(pThis, pSgBuf);
588 return VINF_SUCCESS;
589}
590
591/**
592 * @interface_method_impl{PDMINETWORKUP,pfnSendBuf}
593 */
594static DECLCALLBACK(int) drvNATNetworkUp_SendBuf(PPDMINETWORKUP pInterface, PPDMSCATTERGATHER pSgBuf, bool fOnWorkerThread)
595{
596 RT_NOREF(fOnWorkerThread);
597 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
598 Assert((pSgBuf->fFlags & PDMSCATTERGATHER_FLAGS_OWNER_MASK) == PDMSCATTERGATHER_FLAGS_OWNER_1);
599 Assert(RTCritSectIsOwner(&pThis->XmitLock));
600
601 int rc;
602 if (pThis->pSlirpThread->enmState == PDMTHREADSTATE_RUNNING)
603 {
604 rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, NULL /*ppReq*/, 0 /*cMillies*/,
605 RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
606 (PFNRT)drvNATSendWorker, 2, pThis, pSgBuf);
607 if (RT_SUCCESS(rc))
608 {
609 drvNATNotifyNATThread(pThis, "drvNATNetworkUp_SendBuf");
610 return VINF_SUCCESS;
611 }
612
613 rc = VERR_NET_NO_BUFFER_SPACE;
614 }
615 else
616 rc = VERR_NET_DOWN;
617 drvNATFreeSgBuf(pThis, pSgBuf);
618 return rc;
619}
620
621/**
622 * @interface_method_impl{PDMINETWORKUP,pfnEndXmit}
623 */
624static DECLCALLBACK(void) drvNATNetworkUp_EndXmit(PPDMINETWORKUP pInterface)
625{
626 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
627 RTCritSectLeave(&pThis->XmitLock);
628}
629
630/**
631 * Get the NAT thread out of poll/WSAWaitForMultipleEvents
632 */
633static void drvNATNotifyNATThread(PDRVNAT pThis, const char *pszWho)
634{
635 RT_NOREF(pszWho);
636 int rc;
637#ifndef RT_OS_WINDOWS
638 /* kick poll() */
639 size_t cbIgnored;
640 rc = RTPipeWrite(pThis->hPipeWrite, "", 1, &cbIgnored);
641#else
642 /* kick WSAWaitForMultipleEvents */
643 rc = WSASetEvent(pThis->hWakeupEvent);
644#endif
645 AssertRC(rc);
646}
647
648/**
649 * @interface_method_impl{PDMINETWORKUP,pfnSetPromiscuousMode}
650 */
651static DECLCALLBACK(void) drvNATNetworkUp_SetPromiscuousMode(PPDMINETWORKUP pInterface, bool fPromiscuous)
652{
653 RT_NOREF(pInterface, fPromiscuous);
654 LogFlow(("drvNATNetworkUp_SetPromiscuousMode: fPromiscuous=%d\n", fPromiscuous));
655 /* nothing to do */
656}
657
658/**
659 * Worker function for drvNATNetworkUp_NotifyLinkChanged().
660 * @thread "NAT" thread.
661 */
662static DECLCALLBACK(void) drvNATNotifyLinkChangedWorker(PDRVNAT pThis, PDMNETWORKLINKSTATE enmLinkState)
663{
664 pThis->enmLinkState = pThis->enmLinkStateWant = enmLinkState;
665 switch (enmLinkState)
666 {
667 case PDMNETWORKLINKSTATE_UP:
668 LogRel(("NAT: Link up\n"));
669 slirp_link_up(pThis->pNATState);
670 break;
671
672 case PDMNETWORKLINKSTATE_DOWN:
673 case PDMNETWORKLINKSTATE_DOWN_RESUME:
674 LogRel(("NAT: Link down\n"));
675 slirp_link_down(pThis->pNATState);
676 break;
677
678 default:
679 AssertMsgFailed(("drvNATNetworkUp_NotifyLinkChanged: unexpected link state %d\n", enmLinkState));
680 }
681}
682
683/**
684 * Notification on link status changes.
685 *
686 * @param pInterface Pointer to the interface structure containing the called function pointer.
687 * @param enmLinkState The new link state.
688 * @thread EMT
689 */
690static DECLCALLBACK(void) drvNATNetworkUp_NotifyLinkChanged(PPDMINETWORKUP pInterface, PDMNETWORKLINKSTATE enmLinkState)
691{
692 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
693
694 LogFlow(("drvNATNetworkUp_NotifyLinkChanged: enmLinkState=%d\n", enmLinkState));
695
696 /* Don't queue new requests if the NAT thread is not running (e.g. paused,
697 * stopping), otherwise we would deadlock. Memorize the change. */
698 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
699 {
700 pThis->enmLinkStateWant = enmLinkState;
701 return;
702 }
703
704 PRTREQ pReq;
705 int rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, &pReq, 0 /*cMillies*/, RTREQFLAGS_VOID,
706 (PFNRT)drvNATNotifyLinkChangedWorker, 2, pThis, enmLinkState);
707 if (rc == VERR_TIMEOUT)
708 {
709 drvNATNotifyNATThread(pThis, "drvNATNetworkUp_NotifyLinkChanged");
710 rc = RTReqWait(pReq, RT_INDEFINITE_WAIT);
711 AssertRC(rc);
712 }
713 else
714 AssertRC(rc);
715 RTReqRelease(pReq);
716}
717
718static DECLCALLBACK(void) drvNATNotifyApplyPortForwardCommand(PDRVNAT pThis, bool fRemove,
719 bool fUdp, const char *pHostIp,
720 uint16_t u16HostPort, const char *pGuestIp, uint16_t u16GuestPort)
721{
722 struct in_addr guestIp, hostIp;
723
724 if ( pHostIp == NULL
725 || inet_aton(pHostIp, &hostIp) == 0)
726 hostIp.s_addr = INADDR_ANY;
727
728 if ( pGuestIp == NULL
729 || inet_aton(pGuestIp, &guestIp) == 0)
730 guestIp.s_addr = pThis->GuestIP;
731
732 if (fRemove)
733 slirp_remove_redirect(pThis->pNATState, fUdp, hostIp, u16HostPort, guestIp, u16GuestPort);
734 else
735 slirp_add_redirect(pThis->pNATState, fUdp, hostIp, u16HostPort, guestIp, u16GuestPort);
736}
737
738static DECLCALLBACK(int) drvNATNetworkNatConfigRedirect(PPDMINETWORKNATCONFIG pInterface, bool fRemove,
739 bool fUdp, const char *pHostIp, uint16_t u16HostPort,
740 const char *pGuestIp, uint16_t u16GuestPort)
741{
742 LogFlowFunc(("fRemove=%d, fUdp=%d, pHostIp=%s, u16HostPort=%u, pGuestIp=%s, u16GuestPort=%u\n",
743 RT_BOOL(fRemove), RT_BOOL(fUdp), pHostIp, u16HostPort, pGuestIp, u16GuestPort));
744 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkNATCfg);
745 /* Execute the command directly if the VM is not running. */
746 int rc;
747 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
748 {
749 drvNATNotifyApplyPortForwardCommand(pThis, fRemove, fUdp, pHostIp,
750 u16HostPort, pGuestIp, u16GuestPort);
751 rc = VINF_SUCCESS;
752 }
753 else
754 {
755 PRTREQ pReq;
756 rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, &pReq, 0 /*cMillies*/, RTREQFLAGS_VOID,
757 (PFNRT)drvNATNotifyApplyPortForwardCommand, 7, pThis, fRemove,
758 fUdp, pHostIp, u16HostPort, pGuestIp, u16GuestPort);
759 if (rc == VERR_TIMEOUT)
760 {
761 drvNATNotifyNATThread(pThis, "drvNATNetworkNatConfigRedirect");
762 rc = RTReqWait(pReq, RT_INDEFINITE_WAIT);
763 AssertRC(rc);
764 }
765 else
766 AssertRC(rc);
767
768 RTReqRelease(pReq);
769 }
770 return rc;
771}
772
773/**
774 * NAT thread handling the slirp stuff.
775 *
776 * The slirp implementation is single-threaded so we execute this engine in a
777 * dedicated thread. We take care that this thread does not become the
778 * bottleneck: If the guest wants to send, a request is enqueued into the
779 * hSlirpReqQueue and handled asynchronously by this thread. If this thread
780 * wants to deliver packets to the guest, it enqueues a request into
781 * hRecvReqQueue which is later handled by the Recv thread.
782 */
783static DECLCALLBACK(int) drvNATAsyncIoThread(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
784{
785 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
786 int nFDs = -1;
787#ifdef RT_OS_WINDOWS
788 HANDLE *phEvents = slirp_get_events(pThis->pNATState);
789 unsigned int cBreak = 0;
790#else /* RT_OS_WINDOWS */
791 unsigned int cPollNegRet = 0;
792#endif /* !RT_OS_WINDOWS */
793
794 LogFlow(("drvNATAsyncIoThread: pThis=%p\n", pThis));
795
796 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
797 return VINF_SUCCESS;
798
799 if (pThis->enmLinkStateWant != pThis->enmLinkState)
800 drvNATNotifyLinkChangedWorker(pThis, pThis->enmLinkStateWant);
801
802 /*
803 * Polling loop.
804 */
805 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
806 {
807 /*
808 * To prevent concurrent execution of sending/receiving threads
809 */
810#ifndef RT_OS_WINDOWS
811 nFDs = slirp_get_nsock(pThis->pNATState);
812 /* allocation for all sockets + Management pipe */
813 struct pollfd *polls = (struct pollfd *)RTMemAlloc((1 + nFDs) * sizeof(struct pollfd) + sizeof(uint32_t));
814 if (polls == NULL)
815 return VERR_NO_MEMORY;
816
817 /* don't pass the management pipe */
818 slirp_select_fill(pThis->pNATState, &nFDs, &polls[1]);
819
820 polls[0].fd = RTPipeToNative(pThis->hPipeRead);
821 /* POLLRDBAND is usually not used on Linux but seems to be used on Solaris */
822 polls[0].events = POLLRDNORM | POLLPRI | POLLRDBAND;
823 polls[0].revents = 0;
824
825 int cChangedFDs = poll(polls, nFDs + 1, slirp_get_timeout_ms(pThis->pNATState));
826 if (cChangedFDs < 0)
827 {
828 if (errno == EINTR)
829 {
830 Log2(("NAT: signal was caught while sleeping in poll\n"));
831 /* No error, just process all outstanding requests but don't wait */
832 cChangedFDs = 0;
833 }
834 else if (cPollNegRet++ > 128)
835 {
836 LogRel(("NAT: Poll returns (%s) suppressed %d\n", strerror(errno), cPollNegRet));
837 cPollNegRet = 0;
838 }
839 }
840
841 if (cChangedFDs >= 0)
842 {
843 slirp_select_poll(pThis->pNATState, &polls[1], nFDs);
844 if (polls[0].revents & (POLLRDNORM|POLLPRI|POLLRDBAND))
845 {
846 /* Drain the pipe.
847 *
848 * Note! drvNATSend is decoupled, so we don't know how many times the
849 * device's thread has written to the pipe before we entered the
850 * multiplexer; to avoid false wakeups, drain the pipe here to the very end.
851 *
852 * @todo Probably we should count in drvNATSend how deep the pipe
853 * has been filled before draining it.
854 *
855 */
856 /** @todo XXX: Make this read exactly as many bytes as are needed
857 * to drain the pipe. */
858 char ch;
859 size_t cbRead;
860 RTPipeRead(pThis->hPipeRead, &ch, 1, &cbRead);
861 }
862 }
863 /* process _all_ outstanding requests but don't wait */
864 RTReqQueueProcess(pThis->hSlirpReqQueue, 0);
865 RTMemFree(polls);
866
867#else /* RT_OS_WINDOWS */
868 nFDs = -1;
869 slirp_select_fill(pThis->pNATState, &nFDs);
870 DWORD dwEvent = WSAWaitForMultipleEvents(nFDs, phEvents, FALSE,
871 slirp_get_timeout_ms(pThis->pNATState),
872 /* :fAlertable */ TRUE);
873 AssertCompile(WSA_WAIT_EVENT_0 == 0);
874 if ( (/*dwEvent < WSA_WAIT_EVENT_0 ||*/ dwEvent > WSA_WAIT_EVENT_0 + nFDs - 1)
875 && dwEvent != WSA_WAIT_TIMEOUT && dwEvent != WSA_WAIT_IO_COMPLETION)
876 {
877 int error = WSAGetLastError();
878 LogRel(("NAT: WSAWaitForMultipleEvents returned %d (error %d)\n", dwEvent, error));
879 RTAssertPanic();
880 }
881
882 if (dwEvent == WSA_WAIT_TIMEOUT)
883 {
884 /* only check for slow/fast timers */
885 slirp_select_poll(pThis->pNATState, /* fTimeout=*/true);
886 continue;
887 }
888 /* poll the sockets in any case */
889 Log2(("%s: poll\n", __FUNCTION__));
890 slirp_select_poll(pThis->pNATState, /* fTimeout=*/false);
891 /* process _all_ outstanding requests but don't wait */
892 RTReqQueueProcess(pThis->hSlirpReqQueue, 0);
893# ifdef VBOX_NAT_DELAY_HACK
894 if (cBreak++ > 128)
895 {
896 cBreak = 0;
897 RTThreadSleep(2);
898 }
899# endif
900#endif /* RT_OS_WINDOWS */
901 }
902
903 return VINF_SUCCESS;
904}
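/*
 * A rough sketch of the request flow this thread participates in, as wired up
 * elsewhere in this file: the transmit path (drvNATNetworkUp_SendBuf) queues
 * drvNATSendWorker() onto hSlirpReqQueue and kicks this thread via
 * drvNATNotifyNATThread() (control pipe, or the wakeup event on Windows); the
 * queue is processed here between poll iterations. In the receive direction
 * slirp_output() and slirp_urg_output() queue drvNATRecvWorker() /
 * drvNATUrgRecvWorker() onto hRecvReqQueue / hUrgRecvReqQueue, which the NATRX
 * and NATURGRX threads drain and hand to the device above.
 */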
905
906
907/**
 908 * Unblock the NAT thread so it can respond to a state change.
 909 *
 910 * @returns VBox status code.
 911 * @param pDrvIns The driver instance.
 912 * @param pThread The NAT thread.
913 */
914static DECLCALLBACK(int) drvNATAsyncIoWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
915{
916 RT_NOREF(pThread);
917 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
918
919 drvNATNotifyNATThread(pThis, "drvNATAsyncIoWakeup");
920 return VINF_SUCCESS;
921}
922
923
924static DECLCALLBACK(int) drvNATHostResThread(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
925{
926 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
927
928 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
929 return VINF_SUCCESS;
930
931 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
932 {
933 RTReqQueueProcess(pThis->hHostResQueue, RT_INDEFINITE_WAIT);
934 }
935
936 return VINF_SUCCESS;
937}
938
939
940static DECLCALLBACK(int) drvNATReqQueueInterrupt()
941{
942 /*
943 * RTReqQueueProcess loops until request returns a warning or info
944 * status code (other than VINF_SUCCESS).
945 */
946 return VINF_INTERRUPTED;
947}
948
949
950static DECLCALLBACK(int) drvNATHostResWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
951{
952 RT_NOREF(pThread);
953 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
954 Assert(pThis != NULL);
955
956 int rc;
957 rc = RTReqQueueCallEx(pThis->hHostResQueue, NULL /*ppReq*/, 0 /*cMillies*/,
958 RTREQFLAGS_IPRT_STATUS | RTREQFLAGS_NO_WAIT,
959 (PFNRT)drvNATReqQueueInterrupt, 0);
960 return rc;
961}
962
963
964/**
965 * Function called by slirp to check if it's possible to feed incoming data to the network port.
966 * @returns 1 if possible.
967 * @returns 0 if not possible.
968 */
969int slirp_can_output(void *pvUser)
970{
971 RT_NOREF(pvUser);
972 return 1;
973}
974
975void slirp_push_recv_thread(void *pvUser)
976{
977 PDRVNAT pThis = (PDRVNAT)pvUser;
978 Assert(pThis);
979 drvNATUrgRecvWakeup(pThis->pDrvIns, pThis->pUrgRecvThread);
980}
981
982void slirp_urg_output(void *pvUser, struct mbuf *m, const uint8_t *pu8Buf, int cb)
983{
984 PDRVNAT pThis = (PDRVNAT)pvUser;
985 Assert(pThis);
986
987 /* don't queue new requests when the NAT thread is about to stop */
988 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
989 return;
990
991 ASMAtomicIncU32(&pThis->cUrgPkts);
992 int rc = RTReqQueueCallEx(pThis->hUrgRecvReqQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
993 (PFNRT)drvNATUrgRecvWorker, 4, pThis, pu8Buf, cb, m);
994 AssertRC(rc);
995 drvNATUrgRecvWakeup(pThis->pDrvIns, pThis->pUrgRecvThread);
996}
997
998/**
999 * Function called by slirp to wake up device after VERR_TRY_AGAIN
1000 */
1001void slirp_output_pending(void *pvUser)
1002{
1003 PDRVNAT pThis = (PDRVNAT)pvUser;
1004 Assert(pThis);
1005 LogFlowFuncEnter();
1006 pThis->pIAboveNet->pfnXmitPending(pThis->pIAboveNet);
1007 LogFlowFuncLeave();
1008}
1009
1010/**
1011 * Function called by slirp to feed incoming data to the NIC.
1012 */
1013void slirp_output(void *pvUser, struct mbuf *m, const uint8_t *pu8Buf, int cb)
1014{
1015 PDRVNAT pThis = (PDRVNAT)pvUser;
1016 Assert(pThis);
1017
1018 LogFlow(("slirp_output BEGIN %p %d\n", pu8Buf, cb));
1019 Log6(("slirp_output: pu8Buf=%p cb=%#x (pThis=%p)\n%.*Rhxd\n", pu8Buf, cb, pThis, cb, pu8Buf));
1020
1021 /* don't queue new requests when the NAT thread is about to stop */
1022 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
1023 return;
1024
1025 ASMAtomicIncU32(&pThis->cPkts);
1026 int rc = RTReqQueueCallEx(pThis->hRecvReqQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
1027 (PFNRT)drvNATRecvWorker, 4, pThis, pu8Buf, cb, m);
1028 AssertRC(rc);
1029 drvNATRecvWakeup(pThis->pDrvIns, pThis->pRecvThread);
1030 STAM_COUNTER_INC(&pThis->StatQueuePktSent);
1031 LogFlowFuncLeave();
1032}
1033
1034
1035/*
1036 * Call a function on the slirp thread.
1037 */
1038int slirp_call(void *pvUser, PRTREQ *ppReq, RTMSINTERVAL cMillies,
1039 unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
1040{
1041 PDRVNAT pThis = (PDRVNAT)pvUser;
1042 Assert(pThis);
1043
1044 int rc;
1045
1046 va_list va;
1047 va_start(va, cArgs);
1048
1049 rc = RTReqQueueCallV(pThis->hSlirpReqQueue, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
1050
1051 va_end(va);
1052
1053 if (RT_SUCCESS(rc))
1054 drvNATNotifyNATThread(pThis, "slirp_vcall");
1055
1056 return rc;
1057}
1058
1059
1060/*
1061 * Call a function on the host resolver thread.
1062 */
1063int slirp_call_hostres(void *pvUser, PRTREQ *ppReq, RTMSINTERVAL cMillies,
1064 unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
1065{
1066 PDRVNAT pThis = (PDRVNAT)pvUser;
1067 Assert(pThis);
1068
1069 int rc;
1070
1071 AssertReturn((pThis->hHostResQueue != NIL_RTREQQUEUE), VERR_INVALID_STATE);
1072 AssertReturn((pThis->pHostResThread != NULL), VERR_INVALID_STATE);
1073
1074 va_list va;
1075 va_start(va, cArgs);
1076
1077 rc = RTReqQueueCallV(pThis->hHostResQueue, ppReq, cMillies, fFlags,
1078 pfnFunction, cArgs, va);
1079
1080 va_end(va);
1081 return rc;
1082}
1083
1084
1085#if HAVE_NOTIFICATION_FOR_DNS_UPDATE && !defined(RT_OS_DARWIN)
1086/**
1087 * @interface_method_impl{PDMINETWORKNATCONFIG,pfnNotifyDnsChanged}
1088 *
1089 * We are notified that the host's resolver configuration has changed. In
1090 * the current setup we don't get any details and just reread that
1091 * information ourselves.
1092 */
1093static DECLCALLBACK(void) drvNATNotifyDnsChanged(PPDMINETWORKNATCONFIG pInterface)
1094{
1095 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkNATCfg);
1096 drvNATUpdateDNS(pThis, /* fFlapLink */ true);
1097}
1098#endif
1099
1100#ifdef RT_OS_DARWIN
1101/**
1102 * Callback for the SystemConfiguration framework to notify us whenever the DNS
1103 * server changes.
1104 *
1105 * @returns nothing.
1106 * @param hDynStor The DynamicStore handle.
1107 * @param hChangedKeys Array of changed keys we watch for.
1108 * @param pvUser Opaque user data (NAT driver instance).
1109 */
1110static DECLCALLBACK(void) drvNatDnsChanged(SCDynamicStoreRef hDynStor, CFArrayRef hChangedKeys, void *pvUser)
1111{
1112 PDRVNAT pThis = (PDRVNAT)pvUser;
1113
1114 Log2(("NAT: System configuration has changed\n"));
1115
1116 /* Check if any of the parameters we are interested in were actually changed. If the size
1117 * of hChangedKeys is 0, it means that SCDynamicStore has been restarted. */
1118 if (hChangedKeys && CFArrayGetCount(hChangedKeys) > 0)
1119 {
1120 /* Look at the updated parameters in particular. */
1121 CFStringRef pDNSKey = CFSTR("State:/Network/Global/DNS");
1122
1123 if (CFArrayContainsValue(hChangedKeys, CFRangeMake(0, CFArrayGetCount(hChangedKeys)), pDNSKey))
1124 {
1125 LogRel(("NAT: DNS servers changed, triggering reconnect\n"));
1126#if 0
1127 CFDictionaryRef hDnsDict = (CFDictionaryRef)SCDynamicStoreCopyValue(hDynStor, pDNSKey);
1128 if (hDnsDict)
1129 {
1130 CFArrayRef hArrAddresses = (CFArrayRef)CFDictionaryGetValue(hDnsDict, kSCPropNetDNSServerAddresses);
1131 if (hArrAddresses && CFArrayGetCount(hArrAddresses) > 0)
1132 {
1133 /* Dump DNS servers list. */
1134 for (int i = 0; i < CFArrayGetCount(hArrAddresses); i++)
1135 {
1136 CFStringRef pDNSAddrStr = (CFStringRef)CFArrayGetValueAtIndex(hArrAddresses, i);
1137 const char *pszDNSAddr = pDNSAddrStr ? CFStringGetCStringPtr(pDNSAddrStr, CFStringGetSystemEncoding()) : NULL;
1138 LogRel(("NAT: New DNS server#%d: %s\n", i, pszDNSAddr ? pszDNSAddr : "None"));
1139 }
1140 }
1141 else
1142 LogRel(("NAT: DNS server list is empty (1)\n"));
1143
1144 CFRelease(hDnsDict);
1145 }
1146 else
1147 LogRel(("NAT: DNS server list is empty (2)\n"));
1148#else
1149 RT_NOREF(hDynStor);
1150#endif
1151 drvNATUpdateDNS(pThis, /* fFlapLink */ true);
1152 }
1153 else
1154 Log2(("NAT: No DNS changes detected\n"));
1155 }
1156 else
1157 Log2(("NAT: SCDynamicStore has been restarted\n"));
1158}
1159#endif
1160
1161/**
1162 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
1163 */
1164static DECLCALLBACK(void *) drvNATQueryInterface(PPDMIBASE pInterface, const char *pszIID)
1165{
1166 PPDMDRVINS pDrvIns = PDMIBASE_2_PDMDRV(pInterface);
1167 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1168
1169 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pDrvIns->IBase);
1170 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKUP, &pThis->INetworkUp);
1171 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKNATCONFIG, &pThis->INetworkNATCfg);
1172 return NULL;
1173}
1174
1175
1176/**
1177 * Get the MAC address into the slirp stack.
1178 *
1179 * Called by drvNATLoadDone and drvNATPowerOn.
1180 */
1181static void drvNATSetMac(PDRVNAT pThis)
1182{
1183#if 0 /* XXX: do we still need this for anything? */
1184 if (pThis->pIAboveConfig)
1185 {
1186 RTMAC Mac;
1187 pThis->pIAboveConfig->pfnGetMac(pThis->pIAboveConfig, &Mac);
1188 }
1189#else
1190 RT_NOREF(pThis);
1191#endif
1192}
1193
1194
1195/**
1196 * After loading we have to pass the MAC address of the ethernet device to the slirp stack.
1197 * Otherwise the guest is not reachable until it performs a DHCP request or an ARP request
1198 * (usually done during guest boot).
1199 */
1200static DECLCALLBACK(int) drvNATLoadDone(PPDMDRVINS pDrvIns, PSSMHANDLE pSSM)
1201{
1202 RT_NOREF(pSSM);
1203 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1204 drvNATSetMac(pThis);
1205 return VINF_SUCCESS;
1206}
1207
1208
1209/**
1210 * Some guests might not use DHCP to retrieve an IP but use a static IP.
1211 */
1212static DECLCALLBACK(void) drvNATPowerOn(PPDMDRVINS pDrvIns)
1213{
1214 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1215 drvNATSetMac(pThis);
1216}
1217
1218
1219/**
1220 * @interface_method_impl{PDMDRVREG,pfnResume}
1221 */
1222static DECLCALLBACK(void) drvNATResume(PPDMDRVINS pDrvIns)
1223{
1224 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1225 VMRESUMEREASON enmReason = PDMDrvHlpVMGetResumeReason(pDrvIns);
1226
1227 switch (enmReason)
1228 {
1229 case VMRESUMEREASON_HOST_RESUME:
1230 bool fFlapLink;
1231#if HAVE_NOTIFICATION_FOR_DNS_UPDATE
1232 /* let event handler do it if necessary */
1233 fFlapLink = false;
1234#else
1235 /* XXX: when in doubt, use brute force */
1236 fFlapLink = true;
1237#endif
1238 drvNATUpdateDNS(pThis, fFlapLink);
1239 return;
1240 default: /* Ignore every other resume reason. */
1241 /* do nothing */
1242 return;
1243 }
1244}
1245
1246
1247static DECLCALLBACK(int) drvNATReinitializeHostNameResolving(PDRVNAT pThis)
1248{
1249 slirpReleaseDnsSettings(pThis->pNATState);
1250 slirpInitializeDnsSettings(pThis->pNATState);
1251 return VINF_SUCCESS;
1252}
1253
1254/**
1255 * At this stage this function can be called from two places, both on non-NAT threads:
1256 * - drvNATResume (EMT?)
1257 * - drvNatDnsChanged (darwin, GUI or main) "listener"
1258 * Once Main's IHost interface supports a host network configuration change event on every host,
1259 * we will no longer call it from drvNATResume but from a listener of that Main event, similar to
1260 * what is done for port-forwarding; it will then run only on the EMT thread, not on the GUI/main thread.
1261 *
1262 * The calling thread matters, because we need to change the DNS server list and domain name (and
1263 * perhaps the search string) at runtime (VBOX_NAT_ENFORCE_INTERNAL_DNS_UPDATE). We can do that safely
1264 * on the NAT thread, so moving the place where we handle the update won't change the main update
1265 * mechanism; the only change will be dropping the fFlapLink parameter.
1266 */
1267DECLINLINE(void) drvNATUpdateDNS(PDRVNAT pThis, bool fFlapLink)
1268{
1269 int strategy = slirp_host_network_configuration_change_strategy_selector(pThis->pNATState);
1270 switch (strategy)
1271 {
1272 case VBOX_NAT_DNS_DNSPROXY:
1273 {
1274 /**
1275 * XXX: Here or in the _strategy_selector we should deal with the network change;
1276 * in the "network change" scenario a domain name change means we have to update
1277 * the guest's lease forcibly.
1278 * Note that the built-in DHCP server also updates DNS information on the NAT thread.
1279 */
1280 /**
1281 * It's unsafe to do this directly on a non-NAT thread,
1282 * so we schedule the worker and kick the NAT thread.
1283 */
1284 int rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, NULL /*ppReq*/, 0 /*cMillies*/,
1285 RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
1286 (PFNRT)drvNATReinitializeHostNameResolving, 1, pThis);
1287 if (RT_SUCCESS(rc))
1288 drvNATNotifyNATThread(pThis, "drvNATUpdateDNS");
1289
1290 return;
1291 }
1292
1293 case VBOX_NAT_DNS_EXTERNAL:
1294 /*
1295 * Host resumed from a suspend and the network might have changed.
1296 * Disconnect the guest from the network temporarily to let it pick up the changes.
1297 */
1298 if (fFlapLink)
1299 pThis->pIAboveConfig->pfnSetLinkState(pThis->pIAboveConfig,
1300 PDMNETWORKLINKSTATE_DOWN_RESUME);
1301 return;
1302
1303 case VBOX_NAT_DNS_HOSTRESOLVER:
1304 default:
1305 return;
1306 }
1307}
1308
1309
1310/**
1311 * Info handler.
1312 */
1313static DECLCALLBACK(void) drvNATInfo(PPDMDRVINS pDrvIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
1314{
1315 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1316 slirp_info(pThis->pNATState, pHlp, pszArgs);
1317}
1318
1319#ifdef VBOX_WITH_DNSMAPPING_IN_HOSTRESOLVER
1320static int drvNATConstructDNSMappings(unsigned iInstance, PDRVNAT pThis, PCFGMNODE pMappingsCfg)
1321{
1322 PPDMDRVINS pDrvIns = pThis->pDrvIns;
1323 PCPDMDRVHLPR3 pHlp = pDrvIns->pHlpR3;
1324
1325 RT_NOREF(iInstance);
1326 int rc = VINF_SUCCESS;
1327 LogFlowFunc(("ENTER: iInstance:%d\n", iInstance));
1328 for (PCFGMNODE pNode = pHlp->pfnCFGMGetFirstChild(pMappingsCfg); pNode; pNode = pHlp->pfnCFGMGetNextChild(pNode))
1329 {
1330 if (!pHlp->pfnCFGMAreValuesValid(pNode, "HostName\0HostNamePattern\0HostIP\0"))
1331 return PDMDRV_SET_ERROR(pThis->pDrvIns, VERR_PDM_DRVINS_UNKNOWN_CFG_VALUES,
1332 N_("Unknown configuration in dns mapping"));
1333 char szHostNameOrPattern[255];
1334 bool fPattern = false;
1335 RT_ZERO(szHostNameOrPattern);
1336 GET_STRING(rc, pDrvIns, pNode, "HostName", szHostNameOrPattern[0], sizeof(szHostNameOrPattern));
1337 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1338 {
1339 GET_STRING(rc, pDrvIns, pNode, "HostNamePattern", szHostNameOrPattern[0], sizeof(szHostNameOrPattern));
1340 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1341 {
1342 char szNodeName[225];
1343 RT_ZERO(szNodeName);
1344 pHlp->pfnCFGMGetName(pNode, szNodeName, sizeof(szNodeName));
1345 LogRel(("NAT: Neither 'HostName' nor 'HostNamePattern' is specified for mapping %s\n", szNodeName));
1346 continue;
1347 }
1348 fPattern = true;
1349 }
1350 struct in_addr HostIP;
1351 RT_ZERO(HostIP);
1352 GETIP_DEF(rc, pDrvIns, pNode, HostIP, INADDR_ANY);
1353 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1354 {
1355 LogRel(("NAT: DNS mapping %s is ignored (no address specified)\n", szHostNameOrPattern));
1356 continue;
1357 }
1358 slirp_add_host_resolver_mapping(pThis->pNATState, szHostNameOrPattern, fPattern, HostIP.s_addr);
1359 }
1360 LogFlowFunc(("LEAVE: %Rrc\n", rc));
1361 return rc;
1362}
1363#endif /* VBOX_WITH_DNSMAPPING_IN_HOSTRESOLVER */
1364
1365
1366/**
1367 * Sets up the redirectors.
1368 *
1369 * @returns VBox status code.
1370 * @param pCfg The configuration handle.
1371 */
1372static int drvNATConstructRedir(unsigned iInstance, PDRVNAT pThis, PCFGMNODE pCfg, PRTNETADDRIPV4 pNetwork)
1373{
1374 PPDMDRVINS pDrvIns = pThis->pDrvIns;
1375 PCPDMDRVHLPR3 pHlp = pDrvIns->pHlpR3;
1376
1377 RT_NOREF(pNetwork); /** @todo figure out why pNetwork isn't used */
1378
1379 PCFGMNODE pPFTree = CFGMR3GetChild(pCfg, "PortForwarding");
1380 if (pPFTree == NULL)
1381 return VINF_SUCCESS;
1382
1383 /*
1384 * Enumerate redirections.
1385 */
1386 for (PCFGMNODE pNode = pHlp->pfnCFGMGetFirstChild(pPFTree); pNode; pNode = pHlp->pfnCFGMGetNextChild(pNode))
1387 {
1388 /*
1389 * Validate the port forwarding config.
1390 */
1391 if (!pHlp->pfnCFGMAreValuesValid(pNode, "Name\0Protocol\0UDP\0HostPort\0GuestPort\0GuestIP\0BindIP\0"))
1392 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_DRVINS_UNKNOWN_CFG_VALUES,
1393 N_("Unknown configuration in port forwarding"));
1394
1395 /* protocol type */
1396 bool fUDP;
1397 char szProtocol[32];
1398 int rc;
1399 GET_STRING(rc, pDrvIns, pNode, "Protocol", szProtocol[0], sizeof(szProtocol));
1400 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1401 {
1402 fUDP = false;
1403 GET_BOOL(rc, pDrvIns, pNode, "UDP", fUDP);
1404 }
1405 else if (RT_SUCCESS(rc))
1406 {
1407 if (!RTStrICmp(szProtocol, "TCP"))
1408 fUDP = false;
1409 else if (!RTStrICmp(szProtocol, "UDP"))
1410 fUDP = true;
1411 else
1412 return PDMDrvHlpVMSetError(pDrvIns, VERR_INVALID_PARAMETER, RT_SRC_POS,
1413 N_("NAT#%d: Invalid configuration value for \"Protocol\": \"%s\""),
1414 iInstance, szProtocol);
1415 }
1416 else
1417 return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS,
1418 N_("NAT#%d: configuration query for \"Protocol\" failed"),
1419 iInstance);
1420 /* host port */
1421 int32_t iHostPort;
1422 GET_S32_STRICT(rc, pDrvIns, pNode, "HostPort", iHostPort);
1423
1424 /* guest port */
1425 int32_t iGuestPort;
1426 GET_S32_STRICT(rc, pDrvIns, pNode, "GuestPort", iGuestPort);
1427
1428 /* host address ("BindIP" name is rather unfortunate given "HostPort" to go with it) */
1429 struct in_addr BindIP;
1430 RT_ZERO(BindIP);
1431 GETIP_DEF(rc, pDrvIns, pNode, BindIP, INADDR_ANY);
1432
1433 /* guest address */
1434 struct in_addr GuestIP;
1435 RT_ZERO(GuestIP);
1436 GETIP_DEF(rc, pDrvIns, pNode, GuestIP, INADDR_ANY);
1437
1438 /*
1439 * Call slirp about it.
1440 */
1441 if (slirp_add_redirect(pThis->pNATState, fUDP, BindIP, iHostPort, GuestIP, iGuestPort) < 0)
1442 return PDMDrvHlpVMSetError(pThis->pDrvIns, VERR_NAT_REDIR_SETUP, RT_SRC_POS,
1443 N_("NAT#%d: configuration error: failed to set up "
1444 "redirection of %d to %d. Probably a conflict with "
1445 "existing services or other rules"), iInstance, iHostPort,
1446 iGuestPort);
1447 } /* for each redir rule */
1448
1449 return VINF_SUCCESS;
1450}
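/*
 * An illustrative PortForwarding subtree as consumed above; the key names are
 * the ones validated via pfnCFGMAreValuesValid, the values are made-up examples:
 *
 *   PortForwarding/
 *     0/
 *       Protocol  = "TCP"        (alternatively UDP = true instead of Protocol)
 *       HostPort  = 2222
 *       GuestPort = 22
 *       GuestIP   = "10.0.2.15"  (optional, defaults to INADDR_ANY)
 *       BindIP    = "127.0.0.1"  (optional host address to bind to)
 */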
1451
1452
1453/**
1454 * Destruct a driver instance.
1455 *
1456 * Most VM resources are freed by the VM. This callback is provided so that any non-VM
1457 * resources can be freed correctly.
1458 *
1459 * @param pDrvIns The driver instance data.
1460 */
1461static DECLCALLBACK(void) drvNATDestruct(PPDMDRVINS pDrvIns)
1462{
1463 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1464 LogFlow(("drvNATDestruct:\n"));
1465 PDMDRV_CHECK_VERSIONS_RETURN_VOID(pDrvIns);
1466
1467 if (pThis->pNATState)
1468 {
1469 slirp_term(pThis->pNATState);
1470 slirp_deregister_statistics(pThis->pNATState, pDrvIns);
1471#ifdef VBOX_WITH_STATISTICS
1472# define DRV_PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pThis)
1473# define DRV_COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pThis)
1474# include "counters.h"
1475#endif
1476 pThis->pNATState = NULL;
1477 }
1478
1479 RTReqQueueDestroy(pThis->hHostResQueue);
1480 pThis->hHostResQueue = NIL_RTREQQUEUE;
1481
1482 RTReqQueueDestroy(pThis->hSlirpReqQueue);
1483 pThis->hSlirpReqQueue = NIL_RTREQQUEUE;
1484
1485 RTReqQueueDestroy(pThis->hUrgRecvReqQueue);
1486 pThis->hUrgRecvReqQueue = NIL_RTREQQUEUE;
1487
1488 RTReqQueueDestroy(pThis->hRecvReqQueue);
1489 pThis->hRecvReqQueue = NIL_RTREQQUEUE;
1490
1491 RTSemEventDestroy(pThis->EventRecv);
1492 pThis->EventRecv = NIL_RTSEMEVENT;
1493
1494 RTSemEventDestroy(pThis->EventUrgRecv);
1495 pThis->EventUrgRecv = NIL_RTSEMEVENT;
1496
1497 if (RTCritSectIsInitialized(&pThis->DevAccessLock))
1498 RTCritSectDelete(&pThis->DevAccessLock);
1499
1500 if (RTCritSectIsInitialized(&pThis->XmitLock))
1501 RTCritSectDelete(&pThis->XmitLock);
1502
1503#ifndef RT_OS_WINDOWS
1504 RTPipeClose(pThis->hPipeRead);
1505 RTPipeClose(pThis->hPipeWrite);
1506#endif
1507
1508#ifdef RT_OS_DARWIN
1509 /* Cleanup the DNS watcher. */
1510 if (pThis->hRunLoopSrcDnsWatcher != NULL)
1511 {
1512 CFRunLoopRef hRunLoopMain = CFRunLoopGetMain();
1513 CFRetain(hRunLoopMain);
1514 CFRunLoopRemoveSource(hRunLoopMain, pThis->hRunLoopSrcDnsWatcher, kCFRunLoopCommonModes);
1515 CFRelease(hRunLoopMain);
1516 CFRelease(pThis->hRunLoopSrcDnsWatcher);
1517 pThis->hRunLoopSrcDnsWatcher = NULL;
1518 }
1519#endif
1520}
1521
1522
1523/**
1524 * Construct a NAT network transport driver instance.
1525 *
1526 * @copydoc FNPDMDRVCONSTRUCT
1527 */
1528static DECLCALLBACK(int) drvNATConstruct(PPDMDRVINS pDrvIns, PCFGMNODE pCfg, uint32_t fFlags)
1529{
1530 RT_NOREF(fFlags);
1531 PDMDRV_CHECK_VERSIONS_RETURN(pDrvIns);
1532 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1533
1534 LogFlow(("drvNATConstruct:\n"));
1535
1536 /*
1537 * Init the static parts.
1538 */
1539 pThis->pDrvIns = pDrvIns;
1540 pThis->pNATState = NULL;
1541 pThis->pszTFTPPrefix = NULL;
1542 pThis->pszBootFile = NULL;
1543 pThis->pszNextServer = NULL;
1544 pThis->hSlirpReqQueue = NIL_RTREQQUEUE;
1545 pThis->hUrgRecvReqQueue = NIL_RTREQQUEUE;
1546 pThis->hHostResQueue = NIL_RTREQQUEUE;
1547 pThis->EventRecv = NIL_RTSEMEVENT;
1548 pThis->EventUrgRecv = NIL_RTSEMEVENT;
1549#ifdef RT_OS_DARWIN
1550 pThis->hRunLoopSrcDnsWatcher = NULL;
1551#endif
1552
1553 /* IBase */
1554 pDrvIns->IBase.pfnQueryInterface = drvNATQueryInterface;
1555
1556 /* INetwork */
1557 pThis->INetworkUp.pfnBeginXmit = drvNATNetworkUp_BeginXmit;
1558 pThis->INetworkUp.pfnAllocBuf = drvNATNetworkUp_AllocBuf;
1559 pThis->INetworkUp.pfnFreeBuf = drvNATNetworkUp_FreeBuf;
1560 pThis->INetworkUp.pfnSendBuf = drvNATNetworkUp_SendBuf;
1561 pThis->INetworkUp.pfnEndXmit = drvNATNetworkUp_EndXmit;
1562 pThis->INetworkUp.pfnSetPromiscuousMode = drvNATNetworkUp_SetPromiscuousMode;
1563 pThis->INetworkUp.pfnNotifyLinkChanged = drvNATNetworkUp_NotifyLinkChanged;
1564
1565 /* NAT engine configuration */
1566 pThis->INetworkNATCfg.pfnRedirectRuleCommand = drvNATNetworkNatConfigRedirect;
1567#if HAVE_NOTIFICATION_FOR_DNS_UPDATE && !defined(RT_OS_DARWIN)
1568 /*
1569 * On OS X we stick to the old OS X specific notifications for
1570 * now. Elsewhere use IHostNameResolutionConfigurationChangeEvent
1571 * by enabling HAVE_NOTIFICATION_FOR_DNS_UPDATE in libslirp.h.
1572 * This code is still in a bit of flux and is implemented and
1573 * enabled in steps to simplify more conservative backporting.
1574 */
1575 pThis->INetworkNATCfg.pfnNotifyDnsChanged = drvNATNotifyDnsChanged;
1576#else
1577 pThis->INetworkNATCfg.pfnNotifyDnsChanged = NULL;
1578#endif
1579
1580 /*
1581 * Validate the config.
1582 */
1583 PDMDRV_VALIDATE_CONFIG_RETURN(pDrvIns,
1584 "PassDomain"
1585 "|TFTPPrefix"
1586 "|BootFile"
1587 "|Network"
1588 "|NextServer"
1589 "|DNSProxy"
1590 "|BindIP"
1591 "|UseHostResolver"
1592 "|SlirpMTU"
1593 "|AliasMode"
1594 "|SockRcv"
1595 "|SockSnd"
1596 "|TcpRcv"
1597 "|TcpSnd"
1598 "|ICMPCacheLimit"
1599 "|SoMaxConnection"
1600//#ifdef VBOX_WITH_DNSMAPPING_IN_HOSTRESOLVER
1601 "|HostResolverMappings"
1602//#endif
1603 , "");
1604
1605 /*
1606 * Get the configuration settings.
1607 */
1608 int rc;
1609 bool fPassDomain = true;
1610 GET_BOOL(rc, pDrvIns, pCfg, "PassDomain", fPassDomain);
1611
1612 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "TFTPPrefix", pThis->pszTFTPPrefix);
1613 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "BootFile", pThis->pszBootFile);
1614 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "NextServer", pThis->pszNextServer);
1615
1616 int fDNSProxy = 0;
1617 GET_S32(rc, pDrvIns, pCfg, "DNSProxy", fDNSProxy);
1618 int fUseHostResolver = 0;
1619 GET_S32(rc, pDrvIns, pCfg, "UseHostResolver", fUseHostResolver);
1620 int MTU = 1500;
1621 GET_S32(rc, pDrvIns, pCfg, "SlirpMTU", MTU);
1622 int i32AliasMode = 0;
1623 int i32MainAliasMode = 0;
1624 GET_S32(rc, pDrvIns, pCfg, "AliasMode", i32MainAliasMode);
1625 int iIcmpCacheLimit = 100;
1626 GET_S32(rc, pDrvIns, pCfg, "ICMPCacheLimit", iIcmpCacheLimit);
1627
1628 i32AliasMode |= (i32MainAliasMode & 0x1 ? 0x1 : 0);
1629 i32AliasMode |= (i32MainAliasMode & 0x2 ? 0x40 : 0);
1630 i32AliasMode |= (i32MainAliasMode & 0x4 ? 0x4 : 0);
1631 int i32SoMaxConn = 10;
1632 GET_S32(rc, pDrvIns, pCfg, "SoMaxConnection", i32SoMaxConn);
1633 /*
1634 * Query the network port interface.
1635 */
1636 pThis->pIAboveNet = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMINETWORKDOWN);
1637 if (!pThis->pIAboveNet)
1638 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_MISSING_INTERFACE_ABOVE,
1639 N_("Configuration error: the above device/driver didn't "
1640 "export the network port interface"));
1641 pThis->pIAboveConfig = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMINETWORKCONFIG);
1642 if (!pThis->pIAboveConfig)
1643 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_MISSING_INTERFACE_ABOVE,
1644 N_("Configuration error: the above device/driver didn't "
1645 "export the network config interface"));
1646
1647 /* Generate a network address for this network card. */
1648 char szNetwork[32]; /* xxx.xxx.xxx.xxx/yy */
1649 GET_STRING(rc, pDrvIns, pCfg, "Network", szNetwork[0], sizeof(szNetwork));
1650 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1651 return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS, N_("NAT%d: Configuration error: missing network"),
1652 pDrvIns->iInstance);
1653
1654 RTNETADDRIPV4 Network, Netmask;
1655
1656 rc = RTCidrStrToIPv4(szNetwork, &Network, &Netmask);
1657 if (RT_FAILURE(rc))
1658 return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS,
1659 N_("NAT#%d: Configuration error: network '%s' is not a valid IPv4 network"),
1660 pDrvIns->iInstance, szNetwork);
1661
1662 /*
1663 * Initialize slirp.
1664 */
1665 rc = slirp_init(&pThis->pNATState, RT_H2N_U32(Network.u), Netmask.u,
1666 fPassDomain, !!fUseHostResolver, i32AliasMode,
1667 iIcmpCacheLimit, pThis);
1668 if (RT_SUCCESS(rc))
1669 {
1670 slirp_set_dhcp_TFTP_prefix(pThis->pNATState, pThis->pszTFTPPrefix);
1671 slirp_set_dhcp_TFTP_bootfile(pThis->pNATState, pThis->pszBootFile);
1672 slirp_set_dhcp_next_server(pThis->pNATState, pThis->pszNextServer);
1673 slirp_set_dhcp_dns_proxy(pThis->pNATState, !!fDNSProxy);
1674 slirp_set_mtu(pThis->pNATState, MTU);
1675 slirp_set_somaxconn(pThis->pNATState, i32SoMaxConn);
1676
1677 char *pszBindIP = NULL;
1678 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "BindIP", pszBindIP);
1679 slirp_set_binding_address(pThis->pNATState, pszBindIP);
1680 if (pszBindIP != NULL)
1681 MMR3HeapFree(pszBindIP);
1682
1683#define SLIRP_SET_TUNING_VALUE(name, setter) \
1684 do \
1685 { \
1686 int len = 0; \
1687 rc = CFGMR3QueryS32(pCfg, name, &len); \
1688 if (RT_SUCCESS(rc)) \
1689 setter(pThis->pNATState, len); \
1690 } while(0)
1691
1692 SLIRP_SET_TUNING_VALUE("SockRcv", slirp_set_rcvbuf);
1693 SLIRP_SET_TUNING_VALUE("SockSnd", slirp_set_sndbuf);
1694 SLIRP_SET_TUNING_VALUE("TcpRcv", slirp_set_tcp_rcvspace);
1695 SLIRP_SET_TUNING_VALUE("TcpSnd", slirp_set_tcp_sndspace);
1696
1697 slirp_register_statistics(pThis->pNATState, pDrvIns);
1698#ifdef VBOX_WITH_STATISTICS
1699# define DRV_PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pThis, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
1700# define DRV_COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pThis, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
1701# include "counters.h"
1702#endif
1703
1704#ifdef VBOX_WITH_DNSMAPPING_IN_HOSTRESOLVER
1705 PCFGMNODE pMappingsCfg = CFGMR3GetChild(pCfg, "HostResolverMappings");
1706
1707 if (pMappingsCfg)
1708 {
1709 rc = drvNATConstructDNSMappings(pDrvIns->iInstance, pThis, pMappingsCfg);
1710 AssertRC(rc);
1711 }
1712#endif
1713 rc = drvNATConstructRedir(pDrvIns->iInstance, pThis, pCfg, &Network);
1714 if (RT_SUCCESS(rc))
1715 {
1716 /*
1717 * Register a load done notification to get the MAC address into the slirp
1718 * engine after we loaded a guest state.
1719 */
1720 rc = PDMDrvHlpSSMRegisterLoadDone(pDrvIns, drvNATLoadDone);
1721 AssertLogRelRCReturn(rc, rc);
1722
1723 rc = RTReqQueueCreate(&pThis->hSlirpReqQueue);
1724 AssertLogRelRCReturn(rc, rc);
1725
1726 rc = RTReqQueueCreate(&pThis->hRecvReqQueue);
1727 AssertLogRelRCReturn(rc, rc);
1728
1729 rc = RTReqQueueCreate(&pThis->hUrgRecvReqQueue);
1730 AssertLogRelRCReturn(rc, rc);
1731
1732 rc = PDMDrvHlpThreadCreate(pDrvIns, &pThis->pRecvThread, pThis, drvNATRecv,
1733 drvNATRecvWakeup, 128 * _1K, RTTHREADTYPE_IO, "NATRX");
1734 AssertRCReturn(rc, rc);
1735
1736 rc = RTSemEventCreate(&pThis->EventRecv);
1737 AssertRCReturn(rc, rc);
1738
1739 rc = RTSemEventCreate(&pThis->EventUrgRecv);
1740 AssertRCReturn(rc, rc);
1741
1742 rc = PDMDrvHlpThreadCreate(pDrvIns, &pThis->pUrgRecvThread, pThis, drvNATUrgRecv,
1743 drvNATUrgRecvWakeup, 128 * _1K, RTTHREADTYPE_IO, "NATURGRX");
1744 AssertRCReturn(rc, rc);
1745
1746 rc = RTReqQueueCreate(&pThis->hHostResQueue);
1747 AssertRCReturn(rc, rc);
1748
1749 rc = PDMDrvHlpThreadCreate(pThis->pDrvIns, &pThis->pHostResThread,
1750 pThis, drvNATHostResThread, drvNATHostResWakeup,
1751 64 * _1K, RTTHREADTYPE_IO, "HOSTRES");
1752 AssertRCReturn(rc, rc);
1753
1754 rc = RTCritSectInit(&pThis->DevAccessLock);
1755 AssertRCReturn(rc, rc);
1756
1757 rc = RTCritSectInit(&pThis->XmitLock);
1758 AssertRCReturn(rc, rc);
1759
1760 char szTmp[128];
1761 RTStrPrintf(szTmp, sizeof(szTmp), "nat%d", pDrvIns->iInstance);
1762 PDMDrvHlpDBGFInfoRegister(pDrvIns, szTmp, "NAT info.", drvNATInfo);
1763
1764#ifndef RT_OS_WINDOWS
1765 /*
1766 * Create the control pipe.
1767 */
1768 rc = RTPipeCreate(&pThis->hPipeRead, &pThis->hPipeWrite, 0 /*fFlags*/);
1769 AssertRCReturn(rc, rc);
1770#else
1771 pThis->hWakeupEvent = CreateEvent(NULL, FALSE, FALSE, NULL); /* auto-reset event */
1772 slirp_register_external_event(pThis->pNATState, pThis->hWakeupEvent,
1773 VBOX_WAKEUP_EVENT_INDEX);
1774#endif
1775
1776 rc = PDMDrvHlpThreadCreate(pDrvIns, &pThis->pSlirpThread, pThis, drvNATAsyncIoThread,
1777 drvNATAsyncIoWakeup, 128 * _1K, RTTHREADTYPE_IO, "NAT");
1778 AssertRCReturn(rc, rc);
1779
1780 pThis->enmLinkState = pThis->enmLinkStateWant = PDMNETWORKLINKSTATE_UP;
1781
1782#ifdef RT_OS_DARWIN
1783 /* Set up a watcher which notifies us every time the DNS server changes. */
1784 int rc2 = VINF_SUCCESS;
1785 SCDynamicStoreContext SCDynStorCtx;
1786
1787 SCDynStorCtx.version = 0;
1788 SCDynStorCtx.info = pThis;
1789 SCDynStorCtx.retain = NULL;
1790 SCDynStorCtx.release = NULL;
1791 SCDynStorCtx.copyDescription = NULL;
1792
1793 SCDynamicStoreRef hDynStor = SCDynamicStoreCreate(NULL, CFSTR("org.virtualbox.drvnat"), drvNatDnsChanged, &SCDynStorCtx);
1794 if (hDynStor)
1795 {
1796 CFRunLoopSourceRef hRunLoopSrc = SCDynamicStoreCreateRunLoopSource(NULL, hDynStor, 0);
1797 if (hRunLoopSrc)
1798 {
1799 CFStringRef aWatchKeys[] =
1800 {
1801 CFSTR("State:/Network/Global/DNS")
1802 };
1803 CFArrayRef hArray = CFArrayCreate(NULL, (const void **)aWatchKeys, 1, &kCFTypeArrayCallBacks);
1804
1805 if (hArray)
1806 {
1807 if (SCDynamicStoreSetNotificationKeys(hDynStor, hArray, NULL))
1808 {
1809 CFRunLoopRef hRunLoopMain = CFRunLoopGetMain();
1810 CFRetain(hRunLoopMain);
1811 CFRunLoopAddSource(hRunLoopMain, hRunLoopSrc, kCFRunLoopCommonModes);
1812 CFRelease(hRunLoopMain);
1813 pThis->hRunLoopSrcDnsWatcher = hRunLoopSrc;
1814 }
1815 else
1816 rc2 = VERR_NO_MEMORY;
1817
1818 CFRelease(hArray);
1819 }
1820 else
1821 rc2 = VERR_NO_MEMORY;
1822
1823 if (RT_FAILURE(rc2)) /* Keep the runloop source referenced for destruction. */
1824 CFRelease(hRunLoopSrc);
1825 }
1826 CFRelease(hDynStor);
1827 }
1828 else
1829 rc2 = VERR_NO_MEMORY;
1830
1831 if (RT_FAILURE(rc2))
1832 LogRel(("NAT#%d: Failed to install DNS change notifier. The guest might lose DNS access when switching networks on the host\n",
1833 pDrvIns->iInstance));
1834#endif
1835 return rc;
1836 }
1837
1838 /* failure path */
1839 slirp_term(pThis->pNATState);
1840 pThis->pNATState = NULL;
1841 }
1842 else
1843 {
1844 PDMDRV_SET_ERROR(pDrvIns, rc, N_("Unknown error during NAT networking setup: "));
1845 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
1846 }
1847
1848 return rc;
1849}
1850
1851
1852/**
1853 * NAT network transport driver registration record.
1854 */
1855const PDMDRVREG g_DrvNAT =
1856{
1857 /* u32Version */
1858 PDM_DRVREG_VERSION,
1859 /* szName */
1860 "NAT",
1861 /* szRCMod */
1862 "",
1863 /* szR0Mod */
1864 "",
1865 /* pszDescription */
1866 "NAT Network Transport Driver",
1867 /* fFlags */
1868 PDM_DRVREG_FLAGS_HOST_BITS_DEFAULT,
1869 /* fClass. */
1870 PDM_DRVREG_CLASS_NETWORK,
1871 /* cMaxInstances */
1872 ~0U,
1873 /* cbInstance */
1874 sizeof(DRVNAT),
1875 /* pfnConstruct */
1876 drvNATConstruct,
1877 /* pfnDestruct */
1878 drvNATDestruct,
1879 /* pfnRelocate */
1880 NULL,
1881 /* pfnIOCtl */
1882 NULL,
1883 /* pfnPowerOn */
1884 drvNATPowerOn,
1885 /* pfnReset */
1886 NULL,
1887 /* pfnSuspend */
1888 NULL,
1889 /* pfnResume */
1890 drvNATResume,
1891 /* pfnAttach */
1892 NULL,
1893 /* pfnDetach */
1894 NULL,
1895 /* pfnPowerOff */
1896 NULL,
1897 /* pfnSoftReset */
1898 NULL,
1899 /* u32EndVersion */
1900 PDM_DRVREG_VERSION
1901};
1902