VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DrvNAT.cpp

Last change on this file was 105726, checked in by vboxsync, 3 weeks ago

Devices/Network: cleaned up header file, memory leak fixes, search domains pushed to guest on host network change only. bugref:10268

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 67.0 KB
[20555]1/* $Id: DrvNAT.cpp 105726 2024-08-19 14:05:15Z vboxsync $ */
[1]2/** @file
[20555]3 * DrvNAT - NAT network transport driver.
[1]4 */
5
6/*
[98103]7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
[1]8 *
[96407]9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
[1]26 */
27
28
[57358]29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
[1]32#define LOG_GROUP LOG_GROUP_DRV_NAT
[9330]33#define __STDC_LIMIT_MACROS
34#define __STDC_CONSTANT_MACROS
[20712]35#include "slirp/libslirp.h"
[50046]36extern "C" {
37#include "slirp/slirp_dns.h"
[50048]38}
[21112]39#include "slirp/ctl.h"
[48150]40
41#include <VBox/vmm/dbgf.h>
[35346]42#include <VBox/vmm/pdmdrv.h>
43#include <VBox/vmm/pdmnetifs.h>
44#include <VBox/vmm/pdmnetinline.h>
[37596]45
[1]46#include <iprt/assert.h>
[37596]47#include <iprt/critsect.h>
48#include <iprt/cidr.h>
[1]49#include <iprt/file.h>
[15765]50#include <iprt/mem.h>
[37596]51#include <iprt/pipe.h>
[1]52#include <iprt/string.h>
[13986]53#include <iprt/stream.h>
[25966]54#include <iprt/uuid.h>
[1]55
[35353]56#include "VBoxDD.h"
[1]57
[18902]58#ifndef RT_OS_WINDOWS
59# include <unistd.h>
60# include <fcntl.h>
61# include <poll.h>
[20716]62# include <errno.h>
[13604]63#endif
[21659]64#ifdef RT_OS_FREEBSD
65# include <netinet/in.h>
66#endif
[18902]67#include <iprt/semaphore.h>
68#include <iprt/req.h>
[48056]69#ifdef RT_OS_DARWIN
70# include <SystemConfiguration/SystemConfiguration.h>
71# include <CoreFoundation/CoreFoundation.h>
72#endif
[13237]73
[22449]74#define COUNTERS_INIT
75#include "counters.h"
[20555]76
[22458]77
[57358]78/*********************************************************************************************************************************
79* Defined Constants And Macros *
80*********************************************************************************************************************************/
[27827]81
[47506]82#define DRVNAT_MAXFRAMESIZE (16 * 1024)
83
[27827]84/**
85 * @todo: This is a bad hack to prevent freezing the guest during high network
86 * activity. Windows host only. This needs to be fixed properly.
87 */
88#define VBOX_NAT_DELAY_HACK
89
[91872]90#define GET_EXTRADATA(pdrvins, node, name, rc, type, type_name, var) \
[21009]91do { \
[91872]92 (rc) = (pdrvins)->pHlpR3->pfnCFGMQuery ## type((node), name, &(var)); \
[21009]93 if (RT_FAILURE((rc)) && (rc) != VERR_CFGM_VALUE_NOT_FOUND) \
[91872]94 return PDMDrvHlpVMSetError((pdrvins), (rc), RT_SRC_POS, N_("NAT#%d: configuration query for \"" name "\" " #type_name " failed"), \
95 (pdrvins)->iInstance); \
[22459]96} while (0)
[21009]97
[91872]98#define GET_ED_STRICT(pdrvins, node, name, rc, type, type_name, var) \
[21011]99do { \
[91872]100 (rc) = (pdrvins)->pHlpR3->pfnCFGMQuery ## type((node), name, &(var)); \
[21011]101 if (RT_FAILURE((rc))) \
[91872]102 return PDMDrvHlpVMSetError((pdrvins), (rc), RT_SRC_POS, N_("NAT#%d: configuration query for \"" name "\" " #type_name " failed"), \
103 (pdrvins)->iInstance); \
[22459]104} while (0)
[21011]105
[91872]106#define GET_EXTRADATA_N(pdrvins, node, name, rc, type, type_name, var, var_size) \
[21009]107do { \
[91872]108 (rc) = (pdrvins)->pHlpR3->pfnCFGMQuery ## type((node), name, &(var), var_size); \
[21009]109 if (RT_FAILURE((rc)) && (rc) != VERR_CFGM_VALUE_NOT_FOUND) \
[91872]110 return PDMDrvHlpVMSetError((pdrvins), (rc), RT_SRC_POS, N_("NAT#%d: configuration query for \"" name "\" " #type_name " failed"), \
111 (pdrvins)->iInstance); \
[22459]112} while (0)
[21009]113
[91872]114#define GET_BOOL(rc, pdrvins, node, name, var) \
 115 GET_EXTRADATA(pdrvins, node, name, (rc), Bool, boolean, (var))
116#define GET_STRING(rc, pdrvins, node, name, var, var_size) \
117 GET_EXTRADATA_N(pdrvins, node, name, (rc), String, string, (var), (var_size))
118#define GET_STRING_ALLOC(rc, pdrvins, node, name, var) \
119 GET_EXTRADATA(pdrvins, node, name, (rc), StringAlloc, string, (var))
120#define GET_S32(rc, pdrvins, node, name, var) \
121 GET_EXTRADATA(pdrvins, node, name, (rc), S32, int, (var))
122#define GET_S32_STRICT(rc, pdrvins, node, name, var) \
123 GET_ED_STRICT(pdrvins, node, name, (rc), S32, int, (var))
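/*
 * Typical usage of the wrappers above, as seen in drvNATConstruct() and
 * drvNATConstructRedir() further down: the plain getters tolerate
 * VERR_CFGM_VALUE_NOT_FOUND, while the _STRICT variants fail driver
 * construction on any error, e.g.
 *
 *     bool fPassDomain = true;
 *     GET_BOOL(rc, pDrvIns, pCfg, "PassDomain", fPassDomain);
 *
 *     int32_t iHostPort;
 *     GET_S32_STRICT(rc, pDrvIns, pNode, "HostPort", iHostPort);
 */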
[21009]124
[21018]125
126
[22459]127#define DO_GET_IP(rc, node, instance, status, x) \
128do { \
129 char sz##x[32]; \
130 GET_STRING((rc), (node), (instance), #x, sz ## x[0], sizeof(sz ## x)); \
131 if (rc != VERR_CFGM_VALUE_NOT_FOUND) \
132 (status) = inet_aton(sz ## x, &x); \
133} while (0)
[21018]134
135#define GETIP_DEF(rc, node, instance, x, def) \
136do \
137{ \
138 int status = 0; \
[22459]139 DO_GET_IP((rc), (node), (instance), status, x); \
[21018]140 if (status == 0 || rc == VERR_CFGM_VALUE_NOT_FOUND) \
141 x.s_addr = def; \
[22459]142} while (0)
[21018]143
[57358]144
145/*********************************************************************************************************************************
146* Structures and Typedefs *
147*********************************************************************************************************************************/
[1]148/**
[5832]149 * NAT network transport driver instance data.
[25966]150 *
[26305]151 * @implements PDMINETWORKUP
[1]152 */
153typedef struct DRVNAT
154{
155 /** The network interface. */
[26305]156 PDMINETWORKUP INetworkUp;
[33825]157 /** The network NAT engine configuration. */
158 PDMINETWORKNATCONFIG INetworkNATCfg;
[1]159 /** The port we're attached to. */
[26305]160 PPDMINETWORKDOWN pIAboveNet;
[8285]161 /** The network config of the port we're attached to. */
[26305]162 PPDMINETWORKCONFIG pIAboveConfig;
[1]163 /** Pointer to the driver instance. */
164 PPDMDRVINS pDrvIns;
[792]165 /** Link state */
166 PDMNETWORKLINKSTATE enmLinkState;
[1033]167 /** NAT state for this instance. */
168 PNATState pNATState;
[5332]169 /** TFTP directory prefix. */
[20712]170 char *pszTFTPPrefix;
[5332]171 /** Boot file name to provide in the DHCP server response. */
[20712]172 char *pszBootFile;
[17437]173 /** tftp server name to provide in the DHCP server response. */
[20712]174 char *pszNextServer;
[28258]175 /** Polling thread. */
[22360]176 PPDMTHREAD pSlirpThread;
[14204]177 /** Queue for NAT-thread-external events. */
[39498]178 RTREQQUEUE hSlirpReqQueue;
[25402]179 /** The guest IP for port-forwarding. */
180 uint32_t GuestIP;
[30349]181 /** Link state set when the VM is suspended. */
182 PDMNETWORKLINKSTATE enmLinkStateWant;
[22160]183
[18902]184#ifndef RT_OS_WINDOWS
[13951]185 /** The write end of the control pipe. */
[37596]186 RTPIPE hPipeWrite;
[13951]187 /** The read end of the control pipe. */
[37596]188 RTPIPE hPipeRead;
[54107]189# if HC_ARCH_BITS == 32
190 uint32_t u32Padding;
191# endif
[18902]192#else
[14204]193 /** for external notification */
194 HANDLE hWakeupEvent;
[13604]195#endif
[22458]196
[22406]197#define DRV_PROFILE_COUNTER(name, dsc) STAMPROFILE Stat ## name
198#define DRV_COUNTING_COUNTER(name, dsc) STAMCOUNTER Stat ## name
199#include "counters.h"
[22361]200 /** Thread delivering packets for reception by the guest. */
[22360]201 PPDMTHREAD pRecvThread;
[23462]202 /** Thread delivering urgent packets for reception by the guest. */
 203 PPDMTHREAD pUrgRecvThread;
[22361]204 /** Event to wake up the guest receive thread. */
[22360]205 RTSEMEVENT EventRecv;
[23462]206 /** Event to wake up the guest urgent receive thread. */
 207 RTSEMEVENT EventUrgRecv;
[22361]208 /** Receive Req queue (deliver packets to the guest) */
[39498]209 RTREQQUEUE hRecvReqQueue;
[28216]210 /** Receive Urgent Req queue (deliver packets to the guest). */
[39498]211 RTREQQUEUE hUrgRecvReqQueue;
[23462]212
[28216]213 /** Makes access to the device functions RecvAvail and Recv atomic. */
[28258]214 RTCRITSECT DevAccessLock;
215 /** Number of in-flight urgent packets. */
216 volatile uint32_t cUrgPkts;
217 /** Number of in-flight regular packets. */
218 volatile uint32_t cPkts;
219
220 /** Transmit lock taken by BeginXmit and released by EndXmit. */
221 RTCRITSECT XmitLock;
[48056]222
[60142]223 /** Request queue for the async host resolver. */
224 RTREQQUEUE hHostResQueue;
225 /** Async host resolver thread. */
226 PPDMTHREAD pHostResThread;
227
[48056]228#ifdef RT_OS_DARWIN
229 /* Handle of the DNS watcher runloop source. */
230 CFRunLoopSourceRef hRunLoopSrcDnsWatcher;
231#endif
[20712]232} DRVNAT;
[22925]233AssertCompileMemberAlignment(DRVNAT, StatNATRecvWakeups, 8);
[41805]234/** Pointer to the NAT driver instance data. */
[20712]235typedef DRVNAT *PDRVNAT;
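/*
 * Worker thread overview (summarizing the members above):
 *   pSlirpThread   - runs the single-threaded slirp engine (drvNATAsyncIoThread),
 *                    fed through hSlirpReqQueue and woken via the control pipe
 *                    (or hWakeupEvent on Windows).
 *   pRecvThread    - delivers regular packets to the guest from hRecvReqQueue.
 *   pUrgRecvThread - delivers urgent packets to the guest from hUrgRecvReqQueue.
 *   pHostResThread - performs asynchronous host name resolution from hHostResQueue.
 */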
[1]236
[22341]237
[57358]238/*********************************************************************************************************************************
239* Internal Functions *
240*********************************************************************************************************************************/
[28143]241static void drvNATNotifyNATThread(PDRVNAT pThis, const char *pszWho);
[50951]242DECLINLINE(void) drvNATUpdateDNS(PDRVNAT pThis, bool fFlapLink);
[50046]243static DECLCALLBACK(int) drvNATReinitializeHostNameResolving(PDRVNAT pThis);
[22341]244
[22360]245
[62980]246/**
247 * @callback_method_impl{FNPDMTHREADDRV}
248 */
[22360]249static DECLCALLBACK(int) drvNATRecv(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
[23462]250{
[22201]251 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
[22360]252
[22201]253 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
254 return VINF_SUCCESS;
[22360]255
[22201]256 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
[22219]257 {
[39498]258 RTReqQueueProcess(pThis->hRecvReqQueue, 0);
[28258]259 if (ASMAtomicReadU32(&pThis->cPkts) == 0)
[23462]260 RTSemEventWait(pThis->EventRecv, RT_INDEFINITE_WAIT);
[22219]261 }
[22201]262 return VINF_SUCCESS;
263}
264
265
[62980]266/**
267 * @callback_method_impl{FNPDMTHREADWAKEUPDRV}
268 */
[22360]269static DECLCALLBACK(int) drvNATRecvWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
[22201]270{
[62980]271 RT_NOREF(pThread);
[22201]272 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
[22361]273
[22360]274 STAM_COUNTER_INC(&pThis->StatNATRecvWakeups);
[104583]275 return RTSemEventSignal(pThis->EventRecv);
[22201]276}
277
[62980]278
279/**
280 * @callback_method_impl{FNPDMTHREADDRV}
281 */
[23462]282static DECLCALLBACK(int) drvNATUrgRecv(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
283{
284 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
[22458]285
[23462]286 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
287 return VINF_SUCCESS;
288
289 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
290 {
[39498]291 RTReqQueueProcess(pThis->hUrgRecvReqQueue, 0);
[28258]292 if (ASMAtomicReadU32(&pThis->cUrgPkts) == 0)
[25110]293 {
[25125]294 int rc = RTSemEventWait(pThis->EventUrgRecv, RT_INDEFINITE_WAIT);
[25111]295 AssertRC(rc);
[25110]296 }
[23462]297 }
298 return VINF_SUCCESS;
299}
[26574]300
[62980]301
302/**
303 * @callback_method_impl{FNPDMTHREADWAKEUPDRV}
304 */
[23462]305static DECLCALLBACK(int) drvNATUrgRecvWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
[22219]306{
[62980]307 RT_NOREF(pThread);
[23462]308 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
[25130]309 int rc = RTSemEventSignal(pThis->EventUrgRecv);
310 AssertRC(rc);
[23462]311
312 return VINF_SUCCESS;
313}
314
[62980]315
[26574]316static DECLCALLBACK(void) drvNATUrgRecvWorker(PDRVNAT pThis, uint8_t *pu8Buf, int cb, struct mbuf *m)
[23462]317{
[28258]318 int rc = RTCritSectEnter(&pThis->DevAccessLock);
[25111]319 AssertRC(rc);
[26305]320 rc = pThis->pIAboveNet->pfnWaitReceiveAvail(pThis->pIAboveNet, RT_INDEFINITE_WAIT);
[23462]321 if (RT_SUCCESS(rc))
322 {
[26305]323 rc = pThis->pIAboveNet->pfnReceive(pThis->pIAboveNet, pu8Buf, cb);
[25111]324 AssertRC(rc);
[23462]325 }
[28396]326 else if ( rc != VERR_TIMEOUT
327 && rc != VERR_INTERRUPTED)
[23462]328 {
[25111]329 AssertRC(rc);
[25893]330 }
[24058]331
[28258]332 rc = RTCritSectLeave(&pThis->DevAccessLock);
[25111]333 AssertRC(rc);
[24058]334
[30421]335 slirp_ext_m_free(pThis->pNATState, m, pu8Buf);
[28258]336 if (ASMAtomicDecU32(&pThis->cUrgPkts) == 0)
[23462]337 {
338 drvNATRecvWakeup(pThis->pDrvIns, pThis->pRecvThread);
[28143]339 drvNATNotifyNATThread(pThis, "drvNATUrgRecvWorker");
[23462]340 }
341}
342
343
[26574]344static DECLCALLBACK(void) drvNATRecvWorker(PDRVNAT pThis, uint8_t *pu8Buf, int cb, struct mbuf *m)
[23462]345{
[24058]346 int rc;
[22449]347 STAM_PROFILE_START(&pThis->StatNATRecv, a);
[22458]348
349
[28258]350 while (ASMAtomicReadU32(&pThis->cUrgPkts) != 0)
[24058]351 {
352 rc = RTSemEventWait(pThis->EventRecv, RT_INDEFINITE_WAIT);
[25893]353 if ( RT_FAILURE(rc)
[28143]354 && ( rc == VERR_TIMEOUT
355 || rc == VERR_INTERRUPTED))
[25893]356 goto done_unlocked;
[24058]357 }
[22458]358
[28258]359 rc = RTCritSectEnter(&pThis->DevAccessLock);
[26423]360 AssertRC(rc);
[24058]361
[38111]362 STAM_PROFILE_START(&pThis->StatNATRecvWait, b);
[26305]363 rc = pThis->pIAboveNet->pfnWaitReceiveAvail(pThis->pIAboveNet, RT_INDEFINITE_WAIT);
[38111]364 STAM_PROFILE_STOP(&pThis->StatNATRecvWait, b);
365
[23462]366 if (RT_SUCCESS(rc))
367 {
[26305]368 rc = pThis->pIAboveNet->pfnReceive(pThis->pIAboveNet, pu8Buf, cb);
[25111]369 AssertRC(rc);
[25893]370 }
[28396]371 else if ( rc != VERR_TIMEOUT
[28143]372 && rc != VERR_INTERRUPTED)
[23462]373 {
[25111]374 AssertRC(rc);
[23462]375 }
[24058]376
[28258]377 rc = RTCritSectLeave(&pThis->DevAccessLock);
[25111]378 AssertRC(rc);
[26404]379
[24058]380done_unlocked:
[30421]381 slirp_ext_m_free(pThis->pNATState, m, pu8Buf);
[28258]382 ASMAtomicDecU32(&pThis->cPkts);
[22458]383
[28143]384 drvNATNotifyNATThread(pThis, "drvNATRecvWorker");
[23462]385
[22449]386 STAM_PROFILE_STOP(&pThis->StatNATRecv, a);
[22341]387}
[22219]388
[14204]389/**
[26574]390 * Frees a S/G buffer allocated by drvNATNetworkUp_AllocBuf.
391 *
392 * @param pThis Pointer to the NAT instance.
393 * @param pSgBuf The S/G buffer to free.
394 */
395static void drvNATFreeSgBuf(PDRVNAT pThis, PPDMSCATTERGATHER pSgBuf)
396{
397 Assert((pSgBuf->fFlags & PDMSCATTERGATHER_FLAGS_MAGIC_MASK) == PDMSCATTERGATHER_FLAGS_MAGIC);
398 pSgBuf->fFlags = 0;
399 if (pSgBuf->pvAllocator)
400 {
[28054]401 Assert(!pSgBuf->pvUser);
[30421]402 slirp_ext_m_free(pThis->pNATState, (struct mbuf *)pSgBuf->pvAllocator, NULL);
[26574]403 pSgBuf->pvAllocator = NULL;
404 }
[28054]405 else if (pSgBuf->pvUser)
406 {
407 RTMemFree(pSgBuf->aSegs[0].pvSeg);
408 pSgBuf->aSegs[0].pvSeg = NULL;
409 RTMemFree(pSgBuf->pvUser);
410 pSgBuf->pvUser = NULL;
411 }
[26574]412 RTMemFree(pSgBuf);
413}
414
415/**
[14204]416 * Worker function for drvNATSend().
[26574]417 *
418 * @param pThis Pointer to the NAT instance.
419 * @param pSgBuf The scatter/gather buffer.
420 * @thread NAT
[14204]421 */
[85121]422static DECLCALLBACK(void) drvNATSendWorker(PDRVNAT pThis, PPDMSCATTERGATHER pSgBuf)
[14204]423{
[59312]424#if 0 /* Assertion happens often to me after resuming a VM -- no time to investigate this now. */
[14204]425 Assert(pThis->enmLinkState == PDMNETWORKLINKSTATE_UP);
[54291]426#endif
[14204]427 if (pThis->enmLinkState == PDMNETWORKLINKSTATE_UP)
[26574]428 {
429 struct mbuf *m = (struct mbuf *)pSgBuf->pvAllocator;
[28054]430 if (m)
431 {
432 /*
433 * A normal frame.
434 */
435 pSgBuf->pvAllocator = NULL;
436 slirp_input(pThis->pNATState, m, pSgBuf->cbUsed);
437 }
438 else
439 {
440 /*
441 * GSO frame, need to segment it.
442 */
443 /** @todo Make the NAT engine grok large frames? Could be more efficient... */
[28061]444#if 0 /* this is for testing PDMNetGsoCarveSegmentQD. */
445 uint8_t abHdrScratch[256];
446#endif
[28054]447 uint8_t const *pbFrame = (uint8_t const *)pSgBuf->aSegs[0].pvSeg;
448 PCPDMNETWORKGSO pGso = (PCPDMNETWORKGSO)pSgBuf->pvUser;
[88553]449 /* Do not attempt to segment frames with invalid GSO parameters. */
450 if (PDMNetGsoIsValid(pGso, sizeof(*pGso), pSgBuf->cbUsed))
[28054]451 {
[88553]452 uint32_t const cSegs = PDMNetGsoCalcSegmentCount(pGso, pSgBuf->cbUsed); Assert(cSegs > 1);
453 for (uint32_t iSeg = 0; iSeg < cSegs; iSeg++)
454 {
455 size_t cbSeg;
456 void *pvSeg;
457 m = slirp_ext_m_get(pThis->pNATState, pGso->cbHdrsTotal + pGso->cbMaxSeg, &pvSeg, &cbSeg);
458 if (!m)
459 break;
[28054]460
[28061]461#if 1
[88553]462 uint32_t cbPayload, cbHdrs;
463 uint32_t offPayload = PDMNetGsoCarveSegment(pGso, pbFrame, pSgBuf->cbUsed,
464 iSeg, cSegs, (uint8_t *)pvSeg, &cbHdrs, &cbPayload);
465 memcpy((uint8_t *)pvSeg + cbHdrs, pbFrame + offPayload, cbPayload);
[28054]466
[88553]467 slirp_input(pThis->pNATState, m, cbPayload + cbHdrs);
[28061]468#else
[88553]469 uint32_t cbSegFrame;
470 void *pvSegFrame = PDMNetGsoCarveSegmentQD(pGso, (uint8_t *)pbFrame, pSgBuf->cbUsed, abHdrScratch,
471 iSeg, cSegs, &cbSegFrame);
472 memcpy((uint8_t *)pvSeg, pvSegFrame, cbSegFrame);
[28061]473
[88553]474 slirp_input(pThis->pNATState, m, cbSegFrame);
[28061]475#endif
[88553]476 }
[28054]477 }
478 }
[26574]479 }
480 drvNATFreeSgBuf(pThis, pSgBuf);
481
[33540]482 /** @todo Implement the VERR_TRY_AGAIN drvNATNetworkUp_AllocBuf semantics. */
[14204]483}
[1]484
485/**
[28258]486 * @interface_method_impl{PDMINETWORKUP,pfnBeginXmit}
487 */
[28275]488static DECLCALLBACK(int) drvNATNetworkUp_BeginXmit(PPDMINETWORKUP pInterface, bool fOnWorkerThread)
[28258]489{
[62980]490 RT_NOREF(fOnWorkerThread);
[28258]491 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
492 int rc = RTCritSectTryEnter(&pThis->XmitLock);
493 if (RT_FAILURE(rc))
494 {
495 /** @todo Kick the worker thread when we have one... */
496 rc = VERR_TRY_AGAIN;
497 }
498 return rc;
499}
500
501/**
[26574]502 * @interface_method_impl{PDMINETWORKUP,pfnAllocBuf}
[1]503 */
[27973]504static DECLCALLBACK(int) drvNATNetworkUp_AllocBuf(PPDMINETWORKUP pInterface, size_t cbMin,
505 PCPDMNETWORKGSO pGso, PPPDMSCATTERGATHER ppSgBuf)
[1]506{
[26574]507 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
[28275]508 Assert(RTCritSectIsOwner(&pThis->XmitLock));
[698]509
[26574]510 /*
511 * Drop the incoming frame if the NAT thread isn't running.
512 */
513 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
514 {
515 Log(("drvNATNetowrkUp_AllocBuf: returns VERR_NET_NO_NETWORK\n"));
516 return VERR_NET_NO_NETWORK;
517 }
[698]518
[26574]519 /*
520 * Allocate a scatter/gather buffer and an mbuf.
521 */
522 PPDMSCATTERGATHER pSgBuf = (PPDMSCATTERGATHER)RTMemAlloc(sizeof(*pSgBuf));
523 if (!pSgBuf)
524 return VERR_NO_MEMORY;
[28054]525 if (!pGso)
[26574]526 {
[47506]527 /*
528 * Drop the frame if it is too big.
529 */
530 if (cbMin >= DRVNAT_MAXFRAMESIZE)
531 {
532 Log(("drvNATNetowrkUp_AllocBuf: drops over-sized frame (%u bytes), returns VERR_INVALID_PARAMETER\n",
533 cbMin));
[57850]534 RTMemFree(pSgBuf);
[47506]535 return VERR_INVALID_PARAMETER;
536 }
537
[28054]538 pSgBuf->pvUser = NULL;
539 pSgBuf->pvAllocator = slirp_ext_m_get(pThis->pNATState, cbMin,
540 &pSgBuf->aSegs[0].pvSeg, &pSgBuf->aSegs[0].cbSeg);
541 if (!pSgBuf->pvAllocator)
542 {
543 RTMemFree(pSgBuf);
[35922]544 return VERR_TRY_AGAIN;
[28054]545 }
[26574]546 }
[28054]547 else
548 {
[47506]549 /*
550 * Drop the frame if its segment is too big.
551 */
552 if (pGso->cbHdrsTotal + pGso->cbMaxSeg >= DRVNAT_MAXFRAMESIZE)
553 {
554 Log(("drvNATNetowrkUp_AllocBuf: drops over-sized frame (%u bytes), returns VERR_INVALID_PARAMETER\n",
555 pGso->cbHdrsTotal + pGso->cbMaxSeg));
[57850]556 RTMemFree(pSgBuf);
[47506]557 return VERR_INVALID_PARAMETER;
558 }
559
[28054]560 pSgBuf->pvUser = RTMemDup(pGso, sizeof(*pGso));
561 pSgBuf->pvAllocator = NULL;
562 pSgBuf->aSegs[0].cbSeg = RT_ALIGN_Z(cbMin, 16);
563 pSgBuf->aSegs[0].pvSeg = RTMemAlloc(pSgBuf->aSegs[0].cbSeg);
564 if (!pSgBuf->pvUser || !pSgBuf->aSegs[0].pvSeg)
565 {
566 RTMemFree(pSgBuf->aSegs[0].pvSeg);
567 RTMemFree(pSgBuf->pvUser);
568 RTMemFree(pSgBuf);
[35922]569 return VERR_TRY_AGAIN;
[28054]570 }
571 }
[22360]572
[26574]573 /*
574 * Initialize the S/G buffer and return.
575 */
576 pSgBuf->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_1;
577 pSgBuf->cbUsed = 0;
578 pSgBuf->cbAvailable = pSgBuf->aSegs[0].cbSeg;
579 pSgBuf->cSegs = 1;
[22360]580
[28143]581#if 0 /* poison */
[28054]582 memset(pSgBuf->aSegs[0].pvSeg, 'F', pSgBuf->aSegs[0].cbSeg);
583#endif
[26574]584 *ppSgBuf = pSgBuf;
585 return VINF_SUCCESS;
586}
[16540]587
[26574]588/**
[27842]589 * @interface_method_impl{PDMINETWORKUP,pfnFreeBuf}
590 */
591static DECLCALLBACK(int) drvNATNetworkUp_FreeBuf(PPDMINETWORKUP pInterface, PPDMSCATTERGATHER pSgBuf)
592{
593 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
[28275]594 Assert(RTCritSectIsOwner(&pThis->XmitLock));
[27842]595 drvNATFreeSgBuf(pThis, pSgBuf);
596 return VINF_SUCCESS;
597}
598
599/**
[26574]600 * @interface_method_impl{PDMINETWORKUP,pfnSendBuf}
601 */
602static DECLCALLBACK(int) drvNATNetworkUp_SendBuf(PPDMINETWORKUP pInterface, PPDMSCATTERGATHER pSgBuf, bool fOnWorkerThread)
603{
[62980]604 RT_NOREF(fOnWorkerThread);
[26574]605 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
606 Assert((pSgBuf->fFlags & PDMSCATTERGATHER_FLAGS_OWNER_MASK) == PDMSCATTERGATHER_FLAGS_OWNER_1);
[28275]607 Assert(RTCritSectIsOwner(&pThis->XmitLock));
[26574]608
609 int rc;
610 if (pThis->pSlirpThread->enmState == PDMTHREADSTATE_RUNNING)
611 {
[105353]612 rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
[39498]613 (PFNRT)drvNATSendWorker, 2, pThis, pSgBuf);
[26574]614 if (RT_SUCCESS(rc))
615 {
[28143]616 drvNATNotifyNATThread(pThis, "drvNATNetworkUp_SendBuf");
[26574]617 return VINF_SUCCESS;
618 }
[16540]619
[26574]620 rc = VERR_NET_NO_BUFFER_SPACE;
621 }
622 else
623 rc = VERR_NET_DOWN;
624 drvNATFreeSgBuf(pThis, pSgBuf);
625 return rc;
626}
[16443]627
[26574]628/**
[28258]629 * @interface_method_impl{PDMINETWORKUP,pfnEndXmit}
630 */
631static DECLCALLBACK(void) drvNATNetworkUp_EndXmit(PPDMINETWORKUP pInterface)
632{
633 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
634 RTCritSectLeave(&pThis->XmitLock);
635}
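/*
 * Transmit path summary (as implemented by the PDMINETWORKUP methods above):
 * the device above calls pfnBeginXmit (tries to take XmitLock), pfnAllocBuf
 * (an mbuf for plain frames, a temporary copy buffer for GSO frames), then
 * pfnSendBuf, which queues drvNATSendWorker on hSlirpReqQueue and kicks the
 * NAT thread, and finally pfnEndXmit, which releases XmitLock.
 */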
636
637/**
[22360]638 * Get the NAT thread out of poll/WSAWaitForMultipleEvents
639 */
[28143]640static void drvNATNotifyNATThread(PDRVNAT pThis, const char *pszWho)
[22341]641{
[62980]642 RT_NOREF(pszWho);
[22341]643 int rc;
[18902]644#ifndef RT_OS_WINDOWS
[28143]645 /* kick poll() */
[37596]646 size_t cbIgnored;
647 rc = RTPipeWrite(pThis->hPipeWrite, "", 1, &cbIgnored);
[18902]648#else
[16448]649 /* kick WSAWaitForMultipleEvents */
650 rc = WSASetEvent(pThis->hWakeupEvent);
[18902]651#endif
[25111]652 AssertRC(rc);
[1]653}
654
655/**
[26305]656 * @interface_method_impl{PDMINETWORKUP,pfnSetPromiscuousMode}
[1]657 */
[26574]658static DECLCALLBACK(void) drvNATNetworkUp_SetPromiscuousMode(PPDMINETWORKUP pInterface, bool fPromiscuous)
[1]659{
[62984]660 RT_NOREF(pInterface, fPromiscuous);
[26574]661 LogFlow(("drvNATNetworkUp_SetPromiscuousMode: fPromiscuous=%d\n", fPromiscuous));
[1]662 /* nothing to do */
663}
664
[14204]665/**
[26574]666 * Worker function for drvNATNetworkUp_NotifyLinkChanged().
[14204]667 * @thread "NAT" thread.
668 */
[85121]669static DECLCALLBACK(void) drvNATNotifyLinkChangedWorker(PDRVNAT pThis, PDMNETWORKLINKSTATE enmLinkState)
[14204]670{
[30349]671 pThis->enmLinkState = pThis->enmLinkStateWant = enmLinkState;
[14204]672 switch (enmLinkState)
673 {
674 case PDMNETWORKLINKSTATE_UP:
[55371]675 LogRel(("NAT: Link up\n"));
[14204]676 slirp_link_up(pThis->pNATState);
677 break;
678
679 case PDMNETWORKLINKSTATE_DOWN:
680 case PDMNETWORKLINKSTATE_DOWN_RESUME:
[55371]681 LogRel(("NAT: Link down\n"));
[14204]682 slirp_link_down(pThis->pNATState);
683 break;
684
685 default:
[26574]686 AssertMsgFailed(("drvNATNetworkUp_NotifyLinkChanged: unexpected link state %d\n", enmLinkState));
[14204]687 }
688}
689
[1]690/**
691 * Notification on link status changes.
692 *
693 * @param pInterface Pointer to the interface structure containing the called function pointer.
694 * @param enmLinkState The new link state.
695 * @thread EMT
696 */
[26574]697static DECLCALLBACK(void) drvNATNetworkUp_NotifyLinkChanged(PPDMINETWORKUP pInterface, PDMNETWORKLINKSTATE enmLinkState)
[1]698{
[26574]699 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
[698]700
[26574]701 LogFlow(("drvNATNetworkUp_NotifyLinkChanged: enmLinkState=%d\n", enmLinkState));
[698]702
[57600]703 /* Don't queue new requests if the NAT thread is not running (e.g. paused,
704 * stopping), otherwise we would deadlock. Memorize the change. */
[22360]705 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
[30349]706 {
707 pThis->enmLinkStateWant = enmLinkState;
[14204]708 return;
[30349]709 }
[22360]710
[26574]711 PRTREQ pReq;
[39498]712 int rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, &pReq, 0 /*cMillies*/, RTREQFLAGS_VOID,
713 (PFNRT)drvNATNotifyLinkChangedWorker, 2, pThis, enmLinkState);
[57600]714 if (rc == VERR_TIMEOUT)
[1]715 {
[28143]716 drvNATNotifyNATThread(pThis, "drvNATNetworkUp_NotifyLinkChanged");
[14204]717 rc = RTReqWait(pReq, RT_INDEFINITE_WAIT);
[25111]718 AssertRC(rc);
[14204]719 }
720 else
[25111]721 AssertRC(rc);
[39550]722 RTReqRelease(pReq);
[1]723}
724
[85121]725static DECLCALLBACK(void) drvNATNotifyApplyPortForwardCommand(PDRVNAT pThis, bool fRemove,
726 bool fUdp, const char *pHostIp,
727 uint16_t u16HostPort, const char *pGuestIp, uint16_t u16GuestPort)
[33825]728{
729 struct in_addr guestIp, hostIp;
730
[34014]731 if ( pHostIp == NULL
[33825]732 || inet_aton(pHostIp, &hostIp) == 0)
733 hostIp.s_addr = INADDR_ANY;
734
735 if ( pGuestIp == NULL
736 || inet_aton(pGuestIp, &guestIp) == 0)
737 guestIp.s_addr = pThis->GuestIP;
738
739 if (fRemove)
740 slirp_remove_redirect(pThis->pNATState, fUdp, hostIp, u16HostPort, guestIp, u16GuestPort);
741 else
[57784]742 slirp_add_redirect(pThis->pNATState, fUdp, hostIp, u16HostPort, guestIp, u16GuestPort);
[33825]743}
744
[57600]745static DECLCALLBACK(int) drvNATNetworkNatConfigRedirect(PPDMINETWORKNATCONFIG pInterface, bool fRemove,
746 bool fUdp, const char *pHostIp, uint16_t u16HostPort,
747 const char *pGuestIp, uint16_t u16GuestPort)
[33825]748{
[33865]749 LogFlowFunc(("fRemove=%d, fUdp=%d, pHostIp=%s, u16HostPort=%u, pGuestIp=%s, u16GuestPort=%u\n",
[57600]750 RT_BOOL(fRemove), RT_BOOL(fUdp), pHostIp, u16HostPort, pGuestIp, u16GuestPort));
[33825]751 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkNATCfg);
[57600]752 /* Execute the command directly if the VM is not running. */
753 int rc;
754 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
[33825]755 {
[57600]756 drvNATNotifyApplyPortForwardCommand(pThis, fRemove, fUdp, pHostIp,
757 u16HostPort, pGuestIp,u16GuestPort);
758 rc = VINF_SUCCESS;
[33825]759 }
760 else
[57600]761 {
762 PRTREQ pReq;
763 rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, &pReq, 0 /*cMillies*/, RTREQFLAGS_VOID,
764 (PFNRT)drvNATNotifyApplyPortForwardCommand, 7, pThis, fRemove,
765 fUdp, pHostIp, u16HostPort, pGuestIp, u16GuestPort);
766 if (rc == VERR_TIMEOUT)
767 {
768 drvNATNotifyNATThread(pThis, "drvNATNetworkNatConfigRedirect");
769 rc = RTReqWait(pReq, RT_INDEFINITE_WAIT);
770 AssertRC(rc);
771 }
772 else
773 AssertRC(rc);
[34014]774
[57600]775 RTReqRelease(pReq);
776 }
[33825]777 return rc;
778}
779
[22360]780/**
[26574]781 * NAT thread handling the slirp stuff.
782 *
 783 * The slirp implementation is single-threaded, so we execute this engine in a
784 * dedicated thread. We take care that this thread does not become the
785 * bottleneck: If the guest wants to send, a request is enqueued into the
[39498]786 * hSlirpReqQueue and handled asynchronously by this thread. If this thread
[26574]787 * wants to deliver packets to the guest, it enqueues a request into
[39498]788 * hRecvReqQueue which is later handled by the Recv thread.
[22360]789 */
[13604]790static DECLCALLBACK(int) drvNATAsyncIoThread(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
791{
792 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
[13987]793 int nFDs = -1;
[18902]794#ifdef RT_OS_WINDOWS
[28146]795 HANDLE *phEvents = slirp_get_events(pThis->pNATState);
[16086]796 unsigned int cBreak = 0;
[18902]797#else /* RT_OS_WINDOWS */
798 unsigned int cPollNegRet = 0;
799#endif /* !RT_OS_WINDOWS */
[13604]800
801 LogFlow(("drvNATAsyncIoThread: pThis=%p\n", pThis));
802
803 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
804 return VINF_SUCCESS;
[13986]805
[30349]806 if (pThis->enmLinkStateWant != pThis->enmLinkState)
807 drvNATNotifyLinkChangedWorker(pThis, pThis->enmLinkStateWant);
808
[13604]809 /*
810 * Polling loop.
811 */
812 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
813 {
[13984]814 /*
[33540]815 * To prevent concurrent execution of sending/receiving threads
[13984]816 */
[18902]817#ifndef RT_OS_WINDOWS
[16572]818 nFDs = slirp_get_nsock(pThis->pNATState);
[19049]819 /* allocation for all sockets + Management pipe */
[28146]820 struct pollfd *polls = (struct pollfd *)RTMemAlloc((1 + nFDs) * sizeof(struct pollfd) + sizeof(uint32_t));
[16572]821 if (polls == NULL)
[16653]822 return VERR_NO_MEMORY;
[16572]823
[33540]824 /* don't pass the management pipe */
[19049]825 slirp_select_fill(pThis->pNATState, &nFDs, &polls[1]);
[16572]826
[37596]827 polls[0].fd = RTPipeToNative(pThis->hPipeRead);
[19049]828 /* POLLRDBAND is not normally used on Linux but appears to be used on Solaris */
[37596]829 polls[0].events = POLLRDNORM | POLLPRI | POLLRDBAND;
[16653]830 polls[0].revents = 0;
831
[28146]832 int cChangedFDs = poll(polls, nFDs + 1, slirp_get_timeout_ms(pThis->pNATState));
[19049]833 if (cChangedFDs < 0)
[18858]834 {
[19049]835 if (errno == EINTR)
[18858]836 {
[19074]837 Log2(("NAT: signal was caught while sleep on poll\n"));
[19049]838 /* No error, just process all outstanding requests but don't wait */
839 cChangedFDs = 0;
840 }
[19047]841 else if (cPollNegRet++ > 128)
842 {
[55371]843 LogRel(("NAT: Poll returns (%s) suppressed %d\n", strerror(errno), cPollNegRet));
[18858]844 cPollNegRet = 0;
845 }
846 }
847
[13987]848 if (cChangedFDs >= 0)
[13986]849 {
[16653]850 slirp_select_poll(pThis->pNATState, &polls[1], nFDs);
[16572]851 if (polls[0].revents & (POLLRDNORM|POLLPRI|POLLRDBAND))
[13986]852 {
[37596]853 /* drain the pipe
854 *
 855 * Note! drvNATSend is decoupled, so we don't know how many times
[16726]856 * the device's thread has written before we entered the multiplexer;
 857 * to avoid false alarms we drain the pipe here to the very end.
 858 *
[18645]859 * @todo: We should probably add a counter to drvNATSend to track how
 860 * deeply the pipe has been filled before draining.
[16758]861 *
[16726]862 */
[37596]863 /** @todo XXX: Make it read exactly as much as is needed to drain
 864 * the pipe. */
865 char ch;
866 size_t cbRead;
867 RTPipeRead(pThis->hPipeRead, &ch, 1, &cbRead);
[13951]868 }
869 }
[19048]870 /* process _all_ outstanding requests but don't wait */
[39498]871 RTReqQueueProcess(pThis->hSlirpReqQueue, 0);
[16572]872 RTMemFree(polls);
[28146]873
[18902]874#else /* RT_OS_WINDOWS */
[28146]875 nFDs = -1;
[16572]876 slirp_select_fill(pThis->pNATState, &nFDs);
[28146]877 DWORD dwEvent = WSAWaitForMultipleEvents(nFDs, phEvents, FALSE,
878 slirp_get_timeout_ms(pThis->pNATState),
[53399]879 /* :fAlertable */ TRUE);
[62984]880 AssertCompile(WSA_WAIT_EVENT_0 == 0);
881 if ( (/*dwEvent < WSA_WAIT_EVENT_0 ||*/ dwEvent > WSA_WAIT_EVENT_0 + nFDs - 1)
[53399]882 && dwEvent != WSA_WAIT_TIMEOUT && dwEvent != WSA_WAIT_IO_COMPLETION)
[14189]883 {
884 int error = WSAGetLastError();
[28146]885 LogRel(("NAT: WSAWaitForMultipleEvents returned %d (error %d)\n", dwEvent, error));
[25111]886 RTAssertPanic();
[14189]887 }
[14028]888
[28146]889 if (dwEvent == WSA_WAIT_TIMEOUT)
[14189]890 {
[14204]891 /* only check for slow/fast timers */
[53399]892 slirp_select_poll(pThis->pNATState, /* fTimeout=*/true);
[14189]893 continue;
894 }
[14204]895 /* poll the sockets in any case */
[16443]896 Log2(("%s: poll\n", __FUNCTION__));
[53399]897 slirp_select_poll(pThis->pNATState, /* fTimeout=*/false);
[14204]898 /* process _all_ outstanding requests but don't wait */
[39498]899 RTReqQueueProcess(pThis->hSlirpReqQueue, 0);
[18902]900# ifdef VBOX_NAT_DELAY_HACK
[16086]901 if (cBreak++ > 128)
[16085]902 {
903 cBreak = 0;
904 RTThreadSleep(2);
905 }
[18902]906# endif
907#endif /* RT_OS_WINDOWS */
[13604]908 }
909
[13670]910 return VINF_SUCCESS;
[13604]911}
[13984]912
[20716]913
[22360]914/**
 915 * Unblock the NAT I/O thread so it can respond to a state change.
 916 *
 917 * @returns VBox status code.
 918 * @param pDrvIns The NAT driver instance.
 919 * @param pThread The NAT I/O thread.
920 */
921static DECLCALLBACK(int) drvNATAsyncIoWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
922{
[62985]923 RT_NOREF(pThread);
[22360]924 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
[13986]925
[28143]926 drvNATNotifyNATThread(pThis, "drvNATAsyncIoWakeup");
[22360]927 return VINF_SUCCESS;
928}
[14204]929
[60142]930
931static DECLCALLBACK(int) drvNATHostResThread(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
932{
933 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
934
935 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
936 return VINF_SUCCESS;
937
938 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
939 {
940 RTReqQueueProcess(pThis->hHostResQueue, RT_INDEFINITE_WAIT);
941 }
942
943 return VINF_SUCCESS;
944}
945
946
947static DECLCALLBACK(int) drvNATReqQueueInterrupt()
948{
949 /*
950 * RTReqQueueProcess loops until request returns a warning or info
951 * status code (other than VINF_SUCCESS).
952 */
953 return VINF_INTERRUPTED;
954}
955
956
957static DECLCALLBACK(int) drvNATHostResWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
958{
[62985]959 RT_NOREF(pThread);
[60142]960 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
961 Assert(pThis != NULL);
962
963 int rc;
[105353]964 rc = RTReqQueueCallEx(pThis->hHostResQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_IPRT_STATUS | RTREQFLAGS_NO_WAIT,
[60142]965 (PFNRT)drvNATReqQueueInterrupt, 0);
966 return rc;
967}
968
969
[99775]970#if 0 /* unused */
[28143]971/**
[1]972 * Function called by slirp to check if it's possible to feed incoming data to the network port.
973 * @returns 1 if possible.
974 * @returns 0 if not possible.
975 */
[1033]976int slirp_can_output(void *pvUser)
[1]977{
[62985]978 RT_NOREF(pvUser);
[22193]979 return 1;
[1]980}
981
[99775]982static void slirp_push_recv_thread(void *pvUser)
[23462]983{
984 PDRVNAT pThis = (PDRVNAT)pvUser;
985 Assert(pThis);
986 drvNATUrgRecvWakeup(pThis->pDrvIns, pThis->pUrgRecvThread);
987}
[99775]988#endif
[23462]989
[26574]990void slirp_urg_output(void *pvUser, struct mbuf *m, const uint8_t *pu8Buf, int cb)
[23462]991{
992 PDRVNAT pThis = (PDRVNAT)pvUser;
993 Assert(pThis);
994
995 /* don't queue new requests when the NAT thread is about to stop */
996 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
997 return;
998
[28258]999 ASMAtomicIncU32(&pThis->cUrgPkts);
[39498]1000 int rc = RTReqQueueCallEx(pThis->hUrgRecvReqQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
1001 (PFNRT)drvNATUrgRecvWorker, 4, pThis, pu8Buf, cb, m);
[25111]1002 AssertRC(rc);
[23462]1003 drvNATUrgRecvWakeup(pThis->pDrvIns, pThis->pUrgRecvThread);
1004}
1005
[1]1006/**
[35922]1007 * Function called by slirp to wake up device after VERR_TRY_AGAIN
1008 */
1009void slirp_output_pending(void *pvUser)
1010{
1011 PDRVNAT pThis = (PDRVNAT)pvUser;
1012 Assert(pThis);
[41453]1013 LogFlowFuncEnter();
[35922]1014 pThis->pIAboveNet->pfnXmitPending(pThis->pIAboveNet);
[41453]1015 LogFlowFuncLeave();
[35922]1016}
1017
1018/**
[26574]1019 * Function called by slirp to feed incoming data to the NIC.
[1]1020 */
[26574]1021void slirp_output(void *pvUser, struct mbuf *m, const uint8_t *pu8Buf, int cb)
[1]1022{
[11269]1023 PDRVNAT pThis = (PDRVNAT)pvUser;
[22360]1024 Assert(pThis);
[1033]1025
[52687]1026 LogFlow(("slirp_output BEGIN %p %d\n", pu8Buf, cb));
[52688]1027 Log6(("slirp_output: pu8Buf=%p cb=%#x (pThis=%p)\n%.*Rhxd\n", pu8Buf, cb, pThis, cb, pu8Buf));
[701]1028
[22341]1029 /* don't queue new requests when the NAT thread is about to stop */
[22360]1030 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
[22341]1031 return;
[22360]1032
[28258]1033 ASMAtomicIncU32(&pThis->cPkts);
[39498]1034 int rc = RTReqQueueCallEx(pThis->hRecvReqQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
1035 (PFNRT)drvNATRecvWorker, 4, pThis, pu8Buf, cb, m);
[25111]1036 AssertRC(rc);
[22360]1037 drvNATRecvWakeup(pThis->pDrvIns, pThis->pRecvThread);
[22341]1038 STAM_COUNTER_INC(&pThis->StatQueuePktSent);
[41453]1039 LogFlowFuncLeave();
[1]1040}
1041
[20716]1042
[60142]1043/*
1044 * Call a function on the slirp thread.
1045 */
[105353]1046int slirp_call(void *pvUser, PRTREQ *ppReq, RTMSINTERVAL cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
[60142]1047{
1048 PDRVNAT pThis = (PDRVNAT)pvUser;
1049 Assert(pThis);
1050
1051 int rc;
1052
1053 va_list va;
1054 va_start(va, cArgs);
1055
1056 rc = RTReqQueueCallV(pThis->hSlirpReqQueue, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
1057
1058 va_end(va);
1059
1060 if (RT_SUCCESS(rc))
1061 drvNATNotifyNATThread(pThis, "slirp_vcall");
1062
1063 return rc;
1064}
1065
1066
1067/*
1068 * Call a function on the host resolver thread.
1069 */
1070int slirp_call_hostres(void *pvUser, PRTREQ *ppReq, RTMSINTERVAL cMillies,
1071 unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
1072{
1073 PDRVNAT pThis = (PDRVNAT)pvUser;
1074 Assert(pThis);
1075
1076 int rc;
1077
1078 AssertReturn((pThis->hHostResQueue != NIL_RTREQQUEUE), VERR_INVALID_STATE);
1079 AssertReturn((pThis->pHostResThread != NULL), VERR_INVALID_STATE);
1080
1081 va_list va;
1082 va_start(va, cArgs);
1083
[105353]1084 rc = RTReqQueueCallV(pThis->hHostResQueue, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
[60142]1085
1086 va_end(va);
1087 return rc;
1088}
1089
1090
[63214]1091#if HAVE_NOTIFICATION_FOR_DNS_UPDATE && !defined(RT_OS_DARWIN)
[54109]1092/**
1093 * @interface_method_impl{PDMINETWORKNATCONFIG,pfnNotifyDnsChanged}
1094 *
 1095 * We are notified that the host's resolver configuration has changed. In
1096 * the current setup we don't get any details and just reread that
1097 * information ourselves.
1098 */
[105726]1099static DECLCALLBACK(void) drvNATNotifyDnsChanged(PPDMINETWORKNATCONFIG pInterface, PCPDMINETWORKNATDNSCONFIG pDnsConf)
[54109]1100{
[105726]1101 RT_NOREF(pDnsConf);
[54109]1102 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkNATCfg);
1103 drvNATUpdateDNS(pThis, /* fFlapLink */ true);
1104}
[63214]1105#endif
[54109]1106
[48056]1107#ifdef RT_OS_DARWIN
[1]1108/**
[48056]1109 * Callback for the SystemConfiguration framework to notify us whenever the DNS
1110 * server changes.
1111 *
1112 * @param hDynStor The DynamicStore handle.
1113 * @param hChangedKey Array of changed keys we watch for.
1114 * @param pvUser Opaque user data (NAT driver instance).
1115 */
1116static DECLCALLBACK(void) drvNatDnsChanged(SCDynamicStoreRef hDynStor, CFArrayRef hChangedKeys, void *pvUser)
1117{
1118 PDRVNAT pThis = (PDRVNAT)pvUser;
1119
[52976]1120 Log2(("NAT: System configuration has changed\n"));
[48526]1121
[52591]1122 /* Check if any of the parameters we are interested in actually changed. If the size
1123 * of hChangedKeys is 0, it means that SCDynamicStore has been restarted. */
1124 if (hChangedKeys && CFArrayGetCount(hChangedKeys) > 0)
[48526]1125 {
[52591]1126 /* Look to the updated parameters in particular. */
1127 CFStringRef pDNSKey = CFSTR("State:/Network/Global/DNS");
1128
1129 if (CFArrayContainsValue(hChangedKeys, CFRangeMake(0, CFArrayGetCount(hChangedKeys)), pDNSKey))
1130 {
1131 LogRel(("NAT: DNS servers changed, triggering reconnect\n"));
[52976]1132#if 0
[52591]1133 CFDictionaryRef hDnsDict = (CFDictionaryRef)SCDynamicStoreCopyValue(hDynStor, pDNSKey);
1134 if (hDnsDict)
1135 {
1136 CFArrayRef hArrAddresses = (CFArrayRef)CFDictionaryGetValue(hDnsDict, kSCPropNetDNSServerAddresses);
1137 if (hArrAddresses && CFArrayGetCount(hArrAddresses) > 0)
1138 {
1139 /* Dump DNS servers list. */
1140 for (int i = 0; i < CFArrayGetCount(hArrAddresses); i++)
1141 {
1142 CFStringRef pDNSAddrStr = (CFStringRef)CFArrayGetValueAtIndex(hArrAddresses, i);
1143 const char *pszDNSAddr = pDNSAddrStr ? CFStringGetCStringPtr(pDNSAddrStr, CFStringGetSystemEncoding()) : NULL;
1144 LogRel(("NAT: New DNS server#%d: %s\n", i, pszDNSAddr ? pszDNSAddr : "None"));
1145 }
1146 }
1147 else
1148 LogRel(("NAT: DNS server list is empty (1)\n"));
1149
1150 CFRelease(hDnsDict);
1151 }
1152 else
1153 LogRel(("NAT: DNS server list is empty (2)\n"));
[63478]1154#else
1155 RT_NOREF(hDynStor);
[52976]1156#endif
[50951]1157 drvNATUpdateDNS(pThis, /* fFlapLink */ true);
[52591]1158 }
1159 else
[52976]1160 Log2(("NAT: No DNS changes detected\n"));
[48526]1161 }
[52591]1162 else
[52976]1163 Log2(("NAT: SCDynamicStore has been restarted\n"));
[48056]1164}
1165#endif
1166
1167/**
[25966]1168 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
[1]1169 */
[25966]1170static DECLCALLBACK(void *) drvNATQueryInterface(PPDMIBASE pInterface, const char *pszIID)
[1]1171{
[25966]1172 PPDMDRVINS pDrvIns = PDMIBASE_2_PDMDRV(pInterface);
1173 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1174
[25985]1175 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pDrvIns->IBase);
[26305]1176 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKUP, &pThis->INetworkUp);
[33825]1177 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKNATCONFIG, &pThis->INetworkNATCfg);
[25966]1178 return NULL;
[1]1179}
1180
1181
1182/**
[20716]1183 * Get the MAC address into the slirp stack.
[1]1184 *
[20716]1185 * Called by drvNATLoadDone and drvNATPowerOn.
[1]1186 */
[20716]1187static void drvNATSetMac(PDRVNAT pThis)
[1]1188{
[57784]1189#if 0 /* XXX: do we still need this for anything? */
[26305]1190 if (pThis->pIAboveConfig)
[20716]1191 {
1192 RTMAC Mac;
[26305]1193 pThis->pIAboveConfig->pfnGetMac(pThis->pIAboveConfig, &Mac);
[20716]1194 }
[62985]1195#else
1196 RT_NOREF(pThis);
[57784]1197#endif
[20716]1198}
1199
1200
1201/**
1202 * After loading we have to pass the MAC address of the ethernet device to the slirp stack.
1203 * Otherwise the guest is not reachable until it performs a DHCP request or an ARP request
1204 * (usually done during guest boot).
1205 */
[62985]1206static DECLCALLBACK(int) drvNATLoadDone(PPDMDRVINS pDrvIns, PSSMHANDLE pSSM)
[20716]1207{
[62985]1208 RT_NOREF(pSSM);
[11269]1209 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
[20716]1210 drvNATSetMac(pThis);
1211 return VINF_SUCCESS;
1212}
[698]1213
1214
[20716]1215/**
1216 * Some guests might not use DHCP to retrieve an IP but use a static IP.
1217 */
1218static DECLCALLBACK(void) drvNATPowerOn(PPDMDRVINS pDrvIns)
1219{
1220 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1221 drvNATSetMac(pThis);
[1]1222}
1223
1224
1225/**
[81585]1226 * @interface_method_impl{PDMDRVREG,pfnResume}
[47499]1227 */
1228static DECLCALLBACK(void) drvNATResume(PPDMDRVINS pDrvIns)
1229{
1230 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1231 VMRESUMEREASON enmReason = PDMDrvHlpVMGetResumeReason(pDrvIns);
1232
1233 switch (enmReason)
1234 {
1235 case VMRESUMEREASON_HOST_RESUME:
[50950]1236 bool fFlapLink;
[54109]1237#if HAVE_NOTIFICATION_FOR_DNS_UPDATE
1238 /* let event handler do it if necessary */
[50950]1239 fFlapLink = false;
[50047]1240#else
[50950]1241 /* XXX: when in doubt, use brute force */
1242 fFlapLink = true;
[50047]1243#endif
[50951]1244 drvNATUpdateDNS(pThis, fFlapLink);
[50046]1245 return;
1246 default: /* Ignore every other resume reason. */
1247 /* do nothing */
1248 return;
1249 }
1250}
1251
1252
1253static DECLCALLBACK(int) drvNATReinitializeHostNameResolving(PDRVNAT pThis)
1254{
1255 slirpReleaseDnsSettings(pThis->pNATState);
1256 slirpInitializeDnsSettings(pThis->pNATState);
1257 return VINF_SUCCESS;
1258}
1259
1260/**
 1261 * At this stage this function can be called from two places, both from a non-NAT thread:
 1262 * - drvNATResume (EMT?)
 1263 * - drvNatDnsChanged (darwin, GUI or main) "listener"
 1264 * Once Main's IHost interface supports a host network configuration change event on every host,
[50619]1265 * we won't call it from drvNATResume, but from a listener of that Main event, similar to what is done
[50046]1266 * for port-forwarding, and it won't run on the GUI/main thread, but on the EMT thread only.
 1267 *
[50619]1268 * The calling thread matters because we need to change the DNS server list and domain name (and perhaps
[50046]1269 * the search string) at runtime (VBOX_NAT_ENFORCE_INTERNAL_DNS_UPDATE). We can do that safely on the NAT thread,
[50619]1270 * so even when other things change (i.e. the place where we handle the update), the main update mechanism
[50950]1271 * _won't_ change; the only change will be dropping the fFlapLink parameter.
[50046]1272 */
[50951]1273DECLINLINE(void) drvNATUpdateDNS(PDRVNAT pThis, bool fFlapLink)
[50046]1274{
1275 int strategy = slirp_host_network_configuration_change_strategy_selector(pThis->pNATState);
1276 switch (strategy)
1277 {
[50949]1278 case VBOX_NAT_DNS_DNSPROXY:
[57600]1279 {
1280 /**
 1281 * XXX: Here or in the _strategy_selector we should deal with a network change:
 1282 * in the "network change" scenario a domain name change means we have to update the
 1283 * guest lease forcibly.
 1284 * Note that the built-in DHCP server also updates DNS information on the NAT thread.
 1285 */
 1286 /**
 1287 * It's unsafe to do this directly on a non-NAT thread,
 1288 * so we schedule the worker and kick the NAT thread.
1289 */
[105353]1290 int rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
[57600]1291 (PFNRT)drvNATReinitializeHostNameResolving, 1, pThis);
1292 if (RT_SUCCESS(rc))
1293 drvNATNotifyNATThread(pThis, "drvNATUpdateDNS");
[50619]1294
[57600]1295 return;
1296 }
[50046]1297
[50949]1298 case VBOX_NAT_DNS_EXTERNAL:
[47499]1299 /*
1300 * Host resumed from a suspend and the network might have changed.
1301 * Disconnect the guest from the network temporarily to let it pick up the changes.
1302 */
[50950]1303 if (fFlapLink)
[50046]1304 pThis->pIAboveConfig->pfnSetLinkState(pThis->pIAboveConfig,
1305 PDMNETWORKLINKSTATE_DOWN_RESUME);
[47499]1306 return;
[50620]1307
[50949]1308 case VBOX_NAT_DNS_HOSTRESOLVER:
[50046]1309 default:
[47499]1310 return;
1311 }
1312}
1313
1314
1315/**
[34209]1316 * Info handler.
1317 */
1318static DECLCALLBACK(void) drvNATInfo(PPDMDRVINS pDrvIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
1319{
1320 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1321 slirp_info(pThis->pNATState, pHlp, pszArgs);
1322}
1323
[39766]1324#ifdef VBOX_WITH_DNSMAPPING_IN_HOSTRESOLVER
1325static int drvNATConstructDNSMappings(unsigned iInstance, PDRVNAT pThis, PCFGMNODE pMappingsCfg)
1326{
[91872]1327 PPDMDRVINS pDrvIns = pThis->pDrvIns;
1328 PCPDMDRVHLPR3 pHlp = pDrvIns->pHlpR3;
1329
[62985]1330 RT_NOREF(iInstance);
[39766]1331 int rc = VINF_SUCCESS;
1332 LogFlowFunc(("ENTER: iInstance:%d\n", iInstance));
[91872]1333 for (PCFGMNODE pNode = pHlp->pfnCFGMGetFirstChild(pMappingsCfg); pNode; pNode = pHlp->pfnCFGMGetNextChild(pNode))
[39766]1334 {
[91872]1335 if (!pHlp->pfnCFGMAreValuesValid(pNode, "HostName\0HostNamePattern\0HostIP\0"))
[39766]1336 return PDMDRV_SET_ERROR(pThis->pDrvIns, VERR_PDM_DRVINS_UNKNOWN_CFG_VALUES,
1337 N_("Unknown configuration in dns mapping"));
[39778]1338 char szHostNameOrPattern[255];
[59219]1339 bool fPattern = false;
[39778]1340 RT_ZERO(szHostNameOrPattern);
[91872]1341 GET_STRING(rc, pDrvIns, pNode, "HostName", szHostNameOrPattern[0], sizeof(szHostNameOrPattern));
[39778]1342 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1343 {
[91872]1344 GET_STRING(rc, pDrvIns, pNode, "HostNamePattern", szHostNameOrPattern[0], sizeof(szHostNameOrPattern));
[39778]1345 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1346 {
1347 char szNodeName[225];
1348 RT_ZERO(szNodeName);
[91872]1349 pHlp->pfnCFGMGetName(pNode, szNodeName, sizeof(szNodeName));
[39778]1350 LogRel(("NAT: Neither 'HostName' nor 'HostNamePattern' is specified for mapping %s\n", szNodeName));
1351 continue;
1352 }
[59219]1353 fPattern = true;
[39778]1354 }
[39766]1355 struct in_addr HostIP;
[62985]1356 RT_ZERO(HostIP);
[91872]1357 GETIP_DEF(rc, pDrvIns, pNode, HostIP, INADDR_ANY);
[39778]1358 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1359 {
1360 LogRel(("NAT: DNS mapping %s is ignored (address not pointed)\n", szHostNameOrPattern));
1361 continue;
1362 }
[59219]1363 slirp_add_host_resolver_mapping(pThis->pNATState, szHostNameOrPattern, fPattern, HostIP.s_addr);
[39766]1364 }
1365 LogFlowFunc(("LEAVE: %Rrc\n", rc));
1366 return rc;
1367}
1368#endif /* !VBOX_WITH_DNSMAPPING_IN_HOSTRESOLVER */
[34209]1369
[39766]1370
[34209]1371/**
[1]1372 * Sets up the redirectors.
1373 *
1374 * @returns VBox status code.
[26173]1375 * @param pCfg The configuration handle.
[1]1376 */
[45047]1377static int drvNATConstructRedir(unsigned iInstance, PDRVNAT pThis, PCFGMNODE pCfg, PRTNETADDRIPV4 pNetwork)
[1]1378{
[91872]1379 PPDMDRVINS pDrvIns = pThis->pDrvIns;
1380 PCPDMDRVHLPR3 pHlp = pDrvIns->pHlpR3;
1381
[62985]1382 RT_NOREF(pNetwork); /** @todo figure why pNetwork isn't used */
1383
[91883]1384 PCFGMNODE pPFTree = pHlp->pfnCFGMGetChild(pCfg, "PortForwarding");
[64166]1385 if (pPFTree == NULL)
1386 return VINF_SUCCESS;
1387
[1]1388 /*
1389 * Enumerate redirections.
1390 */
[91872]1391 for (PCFGMNODE pNode = pHlp->pfnCFGMGetFirstChild(pPFTree); pNode; pNode = pHlp->pfnCFGMGetNextChild(pNode))
[1]1392 {
[1698]1393 /*
1394 * Validate the port forwarding config.
1395 */
[91872]1396 if (!pHlp->pfnCFGMAreValuesValid(pNode, "Name\0Protocol\0UDP\0HostPort\0GuestPort\0GuestIP\0BindIP\0"))
1397 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_DRVINS_UNKNOWN_CFG_VALUES,
[34890]1398 N_("Unknown configuration in port forwarding"));
[1698]1399
[1]1400 /* protocol type */
1401 bool fUDP;
[1698]1402 char szProtocol[32];
[21009]1403 int rc;
[91872]1404 GET_STRING(rc, pDrvIns, pNode, "Protocol", szProtocol[0], sizeof(szProtocol));
[1]1405 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1406 {
[21009]1407 fUDP = false;
[91872]1408 GET_BOOL(rc, pDrvIns, pNode, "UDP", fUDP);
[1]1409 }
[11266]1410 else if (RT_SUCCESS(rc))
[1698]1411 {
[1704]1412 if (!RTStrICmp(szProtocol, "TCP"))
[1698]1413 fUDP = false;
[1704]1414 else if (!RTStrICmp(szProtocol, "UDP"))
[1698]1415 fUDP = true;
1416 else
[91872]1417 return PDMDrvHlpVMSetError(pDrvIns, VERR_INVALID_PARAMETER, RT_SRC_POS,
[21363]1418 N_("NAT#%d: Invalid configuration value for \"Protocol\": \"%s\""),
[21009]1419 iInstance, szProtocol);
[1698]1420 }
[34890]1421 else
[91872]1422 return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS,
[34890]1423 N_("NAT#%d: configuration query for \"Protocol\" failed"),
1424 iInstance);
[1]1425 /* host port */
1426 int32_t iHostPort;
[91872]1427 GET_S32_STRICT(rc, pDrvIns, pNode, "HostPort", iHostPort);
[1]1428
1429 /* guest port */
1430 int32_t iGuestPort;
[91872]1431 GET_S32_STRICT(rc, pDrvIns, pNode, "GuestPort", iGuestPort);
[1]1432
[57784]1433 /* host address ("BindIP" name is rather unfortunate given "HostPort" to go with it) */
1434 struct in_addr BindIP;
[62985]1435 RT_ZERO(BindIP);
[91872]1436 GETIP_DEF(rc, pDrvIns, pNode, BindIP, INADDR_ANY);
[57784]1437
[1]1438 /* guest address */
1439 struct in_addr GuestIP;
[62985]1440 RT_ZERO(GuestIP);
[91872]1441 GETIP_DEF(rc, pDrvIns, pNode, GuestIP, INADDR_ANY);
[1]1442
1443 /*
1444 * Call slirp about it.
1445 */
[57784]1446 if (slirp_add_redirect(pThis->pNATState, fUDP, BindIP, iHostPort, GuestIP, iGuestPort) < 0)
[13986]1447 return PDMDrvHlpVMSetError(pThis->pDrvIns, VERR_NAT_REDIR_SETUP, RT_SRC_POS,
[21011]1448 N_("NAT#%d: configuration error: failed to set up "
1449 "redirection of %d to %d. Probably a conflict with "
[21363]1450 "existing services or other rules"), iInstance, iHostPort,
[21011]1451 iGuestPort);
[1]1452 } /* for each redir rule */
1453
1454 return VINF_SUCCESS;
1455}
1456
[8340]1457
1458/**
[20716]1459 * Destruct a driver instance.
1460 *
1461 * Most VM resources are freed by the VM. This callback is provided so that any non-VM
1462 * resources can be freed correctly.
1463 *
1464 * @param pDrvIns The driver instance data.
[8340]1465 */
[20716]1466static DECLCALLBACK(void) drvNATDestruct(PPDMDRVINS pDrvIns)
[8285]1467{
[11269]1468 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
[20716]1469 LogFlow(("drvNATDestruct:\n"));
[26001]1470 PDMDRV_CHECK_VERSIONS_RETURN_VOID(pDrvIns);
[8285]1471
[27565]1472 if (pThis->pNATState)
1473 {
1474 slirp_term(pThis->pNATState);
1475 slirp_deregister_statistics(pThis->pNATState, pDrvIns);
[20716]1476#ifdef VBOX_WITH_STATISTICS
[22406]1477# define DRV_PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pThis)
1478# define DRV_COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pThis)
1479# include "counters.h"
[20716]1480#endif
[27565]1481 pThis->pNATState = NULL;
1482 }
[28258]1483
[60142]1484 RTReqQueueDestroy(pThis->hHostResQueue);
1485 pThis->hHostResQueue = NIL_RTREQQUEUE;
1486
[39498]1487 RTReqQueueDestroy(pThis->hSlirpReqQueue);
1488 pThis->hSlirpReqQueue = NIL_RTREQQUEUE;
[28258]1489
[39498]1490 RTReqQueueDestroy(pThis->hUrgRecvReqQueue);
1491 pThis->hUrgRecvReqQueue = NIL_RTREQQUEUE;
[28258]1492
[65700]1493 RTReqQueueDestroy(pThis->hRecvReqQueue);
1494 pThis->hRecvReqQueue = NIL_RTREQQUEUE;
1495
[28258]1496 RTSemEventDestroy(pThis->EventRecv);
1497 pThis->EventRecv = NIL_RTSEMEVENT;
1498
1499 RTSemEventDestroy(pThis->EventUrgRecv);
1500 pThis->EventUrgRecv = NIL_RTSEMEVENT;
1501
1502 if (RTCritSectIsInitialized(&pThis->DevAccessLock))
1503 RTCritSectDelete(&pThis->DevAccessLock);
1504
1505 if (RTCritSectIsInitialized(&pThis->XmitLock))
1506 RTCritSectDelete(&pThis->XmitLock);
[48056]1507
[65710]1508#ifndef RT_OS_WINDOWS
1509 RTPipeClose(pThis->hPipeRead);
1510 RTPipeClose(pThis->hPipeWrite);
1511#endif
1512
[48056]1513#ifdef RT_OS_DARWIN
1514 /* Cleanup the DNS watcher. */
[76017]1515 if (pThis->hRunLoopSrcDnsWatcher != NULL)
1516 {
1517 CFRunLoopRef hRunLoopMain = CFRunLoopGetMain();
1518 CFRetain(hRunLoopMain);
1519 CFRunLoopRemoveSource(hRunLoopMain, pThis->hRunLoopSrcDnsWatcher, kCFRunLoopCommonModes);
1520 CFRelease(hRunLoopMain);
1521 CFRelease(pThis->hRunLoopSrcDnsWatcher);
1522 pThis->hRunLoopSrcDnsWatcher = NULL;
1523 }
[48056]1524#endif
[8341]1525}
1526
1527
1528/**
[1]1529 * Construct a NAT network transport driver instance.
1530 *
[22277]1531 * @copydoc FNPDMDRVCONSTRUCT
[1]1532 */
[26173]1533static DECLCALLBACK(int) drvNATConstruct(PPDMDRVINS pDrvIns, PCFGMNODE pCfg, uint32_t fFlags)
[1]1534{
[62985]1535 RT_NOREF(fFlags);
1536 PDMDRV_CHECK_VERSIONS_RETURN(pDrvIns);
[91872]1537 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
[91883]1538 PCPDMDRVHLPR3 pHlp = pDrvIns->pHlpR3;
[91872]1539
[1]1540 LogFlow(("drvNATConstruct:\n"));
1541
1542 /*
1543 * Init the static parts.
1544 */
[11269]1545 pThis->pDrvIns = pDrvIns;
1546 pThis->pNATState = NULL;
1547 pThis->pszTFTPPrefix = NULL;
1548 pThis->pszBootFile = NULL;
[17437]1549 pThis->pszNextServer = NULL;
[39498]1550 pThis->hSlirpReqQueue = NIL_RTREQQUEUE;
1551 pThis->hUrgRecvReqQueue = NIL_RTREQQUEUE;
[60142]1552 pThis->hHostResQueue = NIL_RTREQQUEUE;
[28258]1553 pThis->EventRecv = NIL_RTSEMEVENT;
1554 pThis->EventUrgRecv = NIL_RTSEMEVENT;
[48056]1555#ifdef RT_OS_DARWIN
1556 pThis->hRunLoopSrcDnsWatcher = NULL;
1557#endif
[28258]1558
[1]1559 /* IBase */
1560 pDrvIns->IBase.pfnQueryInterface = drvNATQueryInterface;
[28258]1561
[1]1562 /* INetwork */
[28258]1563 pThis->INetworkUp.pfnBeginXmit = drvNATNetworkUp_BeginXmit;
[26574]1564 pThis->INetworkUp.pfnAllocBuf = drvNATNetworkUp_AllocBuf;
[27842]1565 pThis->INetworkUp.pfnFreeBuf = drvNATNetworkUp_FreeBuf;
[26574]1566 pThis->INetworkUp.pfnSendBuf = drvNATNetworkUp_SendBuf;
[28258]1567 pThis->INetworkUp.pfnEndXmit = drvNATNetworkUp_EndXmit;
[26574]1568 pThis->INetworkUp.pfnSetPromiscuousMode = drvNATNetworkUp_SetPromiscuousMode;
1569 pThis->INetworkUp.pfnNotifyLinkChanged = drvNATNetworkUp_NotifyLinkChanged;
[1039]1570
[33825]1571 /* NAT engine configuration */
[57600]1572 pThis->INetworkNATCfg.pfnRedirectRuleCommand = drvNATNetworkNatConfigRedirect;
[54109]1573#if HAVE_NOTIFICATION_FOR_DNS_UPDATE && !defined(RT_OS_DARWIN)
[54291]1574 /*
[54109]1575 * On OS X we stick to the old OS X specific notifications for
1576 * now. Elsewhere use IHostNameResolutionConfigurationChangeEvent
1577     * by enabling HAVE_NOTIFICATION_FOR_DNS_UPDATE in libslirp.h.
1578 * This code is still in a bit of flux and is implemented and
1579 * enabled in steps to simplify more conservative backporting.
1580 */
1581 pThis->INetworkNATCfg.pfnNotifyDnsChanged = drvNATNotifyDnsChanged;
1582#else
[54106]1583 pThis->INetworkNATCfg.pfnNotifyDnsChanged = NULL;
[54109]1584#endif
[33825]1585
[1]1586 /*
[45061]1587 * Validate the config.
1588 */
[91872]1589 PDMDRV_VALIDATE_CONFIG_RETURN(pDrvIns,
1590 "PassDomain"
1591 "|TFTPPrefix"
1592 "|BootFile"
1593 "|Network"
1594 "|NextServer"
1595 "|DNSProxy"
1596 "|BindIP"
1597 "|UseHostResolver"
1598 "|SlirpMTU"
1599 "|AliasMode"
1600 "|SockRcv"
1601 "|SockSnd"
1602 "|TcpRcv"
1603 "|TcpSnd"
1604 "|ICMPCacheLimit"
1605 "|SoMaxConnection"
[92093]1606 "|LocalhostReachable"
[91872]1607//#ifdef VBOX_WITH_DNSMAPPING_IN_HOSTRESOLVER
1608 "|HostResolverMappings"
1609//#endif
[91892]1610 , "PortForwarding");
[45061]1611
1612 /*
[5332]1613 * Get the configuration settings.
1614 */
[21009]1615 int rc;
[5332]1616 bool fPassDomain = true;
[91872]1617 GET_BOOL(rc, pDrvIns, pCfg, "PassDomain", fPassDomain);
[21363]1618
[91872]1619 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "TFTPPrefix", pThis->pszTFTPPrefix);
1620 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "BootFile", pThis->pszBootFile);
1621 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "NextServer", pThis->pszNextServer);
[5332]1622
[21010]1623 int fDNSProxy = 0;
[91872]1624 GET_S32(rc, pDrvIns, pCfg, "DNSProxy", fDNSProxy);
[23158]1625 int fUseHostResolver = 0;
[91872]1626 GET_S32(rc, pDrvIns, pCfg, "UseHostResolver", fUseHostResolver);
[23163]1627 int MTU = 1500;
[91872]1628 GET_S32(rc, pDrvIns, pCfg, "SlirpMTU", MTU);
[28787]1629 int i32AliasMode = 0;
1630 int i32MainAliasMode = 0;
[91872]1631 GET_S32(rc, pDrvIns, pCfg, "AliasMode", i32MainAliasMode);
[38971]1632 int iIcmpCacheLimit = 100;
[91872]1633 GET_S32(rc, pDrvIns, pCfg, "ICMPCacheLimit", iIcmpCacheLimit);
[92093]1634 bool fLocalhostReachable = false;
1635 GET_BOOL(rc, pDrvIns, pCfg, "LocalhostReachable", fLocalhostReachable);
[5332]1636
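    /*
     * Translate the generic AliasMode bits from the configuration into the
     * flag values the slirp/libalias code expects: bit 0x1 is passed through
     * (logging), 0x2 becomes 0x40 (assumed to be the proxy-only flag), and
     * 0x4 is passed through (assumed to be the same-ports flag).
     */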
[28787]1637 i32AliasMode |= (i32MainAliasMode & 0x1 ? 0x1 : 0);
1638 i32AliasMode |= (i32MainAliasMode & 0x2 ? 0x40 : 0);
1639 i32AliasMode |= (i32MainAliasMode & 0x4 ? 0x4 : 0);
[39465]1640 int i32SoMaxConn = 10;
[91872]1641 GET_S32(rc, pDrvIns, pCfg, "SoMaxConnection", i32SoMaxConn);
[5332]1642 /*
[1]1643 * Query the network port interface.
1644 */
[26305]1645 pThis->pIAboveNet = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMINETWORKDOWN);
1646 if (!pThis->pIAboveNet)
[1]1647 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_MISSING_INTERFACE_ABOVE,
[21009]1648 N_("Configuration error: the above device/driver didn't "
1649 "export the network port interface"));
[26305]1650 pThis->pIAboveConfig = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMINETWORKCONFIG);
1651 if (!pThis->pIAboveConfig)
[8285]1652 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_MISSING_INTERFACE_ABOVE,
[21009]1653 N_("Configuration error: the above device/driver didn't "
1654 "export the network config interface"));
[1]1655
[1033]1656 /* Generate a network address for this network card. */
[22277]1657 char szNetwork[32]; /* xxx.xxx.xxx.xxx/yy */
[91872]1658 GET_STRING(rc, pDrvIns, pCfg, "Network", szNetwork[0], sizeof(szNetwork));
[8287]1659 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
[57006]1660        return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS, N_("NAT#%d: Configuration error: missing network"),
1661 pDrvIns->iInstance);
[1033]1662
[45047]1663 RTNETADDRIPV4 Network, Netmask;
1664
[8287]1665 rc = RTCidrStrToIPv4(szNetwork, &Network, &Netmask);
1666 if (RT_FAILURE(rc))
[57006]1667 return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS,
1668                                   N_("NAT#%d: Configuration error: network '%s' does not describe a valid IPv4 network"),
[21010]1669 pDrvIns->iInstance, szNetwork);
[8287]1670
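    /*
     * Example (illustrative value only; the actual string is supplied via
     * CFGM): a "Network" setting of "10.0.2.0/24" parses into
     * Network = 10.0.2.0 and Netmask = 255.255.255.0, which are handed to
     * slirp_init() below.
     */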
[1]1671 /*
[13986]1672 * Initialize slirp.
1673 */
[45047]1674 rc = slirp_init(&pThis->pNATState, RT_H2N_U32(Network.u), Netmask.u,
[38971]1675 fPassDomain, !!fUseHostResolver, i32AliasMode,
[92093]1676 iIcmpCacheLimit, fLocalhostReachable, pThis);
[11266]1677 if (RT_SUCCESS(rc))
[1]1678 {
[17437]1679 slirp_set_dhcp_TFTP_prefix(pThis->pNATState, pThis->pszTFTPPrefix);
1680 slirp_set_dhcp_TFTP_bootfile(pThis->pNATState, pThis->pszBootFile);
1681 slirp_set_dhcp_next_server(pThis->pNATState, pThis->pszNextServer);
[20257]1682 slirp_set_dhcp_dns_proxy(pThis->pNATState, !!fDNSProxy);
[23163]1683 slirp_set_mtu(pThis->pNATState, MTU);
[38111]1684 slirp_set_somaxconn(pThis->pNATState, i32SoMaxConn);
[68444]1685
[21004]1686 char *pszBindIP = NULL;
[91872]1687 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "BindIP", pszBindIP);
[68444]1688 slirp_set_binding_address(pThis->pNATState, pszBindIP);
1689 if (pszBindIP != NULL)
[91897]1690 PDMDrvHlpMMHeapFree(pDrvIns, pszBindIP);
[21004]1691
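    /*
     * Optional socket and TCP buffer tuning: each key below is queried from
     * CFGM and, when present, forwarded to the matching slirp setter;
     * missing keys simply leave the slirp defaults in place.  For example,
     * SLIRP_SET_TUNING_VALUE("SockRcv", slirp_set_rcvbuf) expands roughly to:
     *     int len = 0;
     *     rc = pHlp->pfnCFGMQueryS32(pCfg, "SockRcv", &len);
     *     if (RT_SUCCESS(rc))
     *         slirp_set_rcvbuf(pThis->pNATState, len);
     */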
1692#define SLIRP_SET_TUNING_VALUE(name, setter) \
[20713]1693 do \
1694 { \
1695 int len = 0; \
[91883]1696 rc = pHlp->pfnCFGMQueryS32(pCfg, name, &len); \
[20713]1697 if (RT_SUCCESS(rc)) \
1698 setter(pThis->pNATState, len); \
1699 } while(0)
1700
[26404]1701 SLIRP_SET_TUNING_VALUE("SockRcv", slirp_set_rcvbuf);
1702 SLIRP_SET_TUNING_VALUE("SockSnd", slirp_set_sndbuf);
1703 SLIRP_SET_TUNING_VALUE("TcpRcv", slirp_set_tcp_rcvspace);
1704 SLIRP_SET_TUNING_VALUE("TcpSnd", slirp_set_tcp_sndspace);
[20555]1705
[20712]1706 slirp_register_statistics(pThis->pNATState, pDrvIns);
[20713]1707#ifdef VBOX_WITH_STATISTICS
[22406]1708# define DRV_PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pThis, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
1709# define DRV_COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pThis, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
[22458]1710# include "counters.h"
[20713]1711#endif
1712
[39766]1713#ifdef VBOX_WITH_DNSMAPPING_IN_HOSTRESOLVER
[91883]1714 PCFGMNODE pMappingsCfg = pHlp->pfnCFGMGetChild(pCfg, "HostResolverMappings");
[39766]1715
1716 if (pMappingsCfg)
1717 {
1718 rc = drvNATConstructDNSMappings(pDrvIns->iInstance, pThis, pMappingsCfg);
1719 AssertRC(rc);
1720 }
1721#endif
[45047]1722 rc = drvNATConstructRedir(pDrvIns->iInstance, pThis, pCfg, &Network);
[28258]1723 if (RT_SUCCESS(rc))
[1]1724 {
1725 /*
[13986]1726 * Register a load done notification to get the MAC address into the slirp
1727 * engine after we loaded a guest state.
[1]1728 */
[28258]1729 rc = PDMDrvHlpSSMRegisterLoadDone(pDrvIns, drvNATLoadDone);
[39498]1730 AssertLogRelRCReturn(rc, rc);
[28258]1731
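    /*
     * Create the request queues and the worker threads that service them:
     * NATRX drains the normal receive queue, NATURGRX the urgent receive
     * queue, and HOSTRES runs host name resolution requests for the slirp
     * engine so that blocking lookups stay off the packet I/O path.
     */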
[39498]1732 rc = RTReqQueueCreate(&pThis->hSlirpReqQueue);
1733 AssertLogRelRCReturn(rc, rc);
[13984]1734
[39498]1735 rc = RTReqQueueCreate(&pThis->hRecvReqQueue);
1736 AssertLogRelRCReturn(rc, rc);
[28258]1737
[39498]1738 rc = RTReqQueueCreate(&pThis->hUrgRecvReqQueue);
1739 AssertLogRelRCReturn(rc, rc);
[28258]1740
1741 rc = PDMDrvHlpThreadCreate(pDrvIns, &pThis->pRecvThread, pThis, drvNATRecv,
1742 drvNATRecvWakeup, 128 * _1K, RTTHREADTYPE_IO, "NATRX");
1743 AssertRCReturn(rc, rc);
1744
[22360]1745 rc = RTSemEventCreate(&pThis->EventRecv);
[28258]1746 AssertRCReturn(rc, rc);
[22160]1747
[65710]1748 rc = RTSemEventCreate(&pThis->EventUrgRecv);
1749 AssertRCReturn(rc, rc);
1750
[28258]1751 rc = PDMDrvHlpThreadCreate(pDrvIns, &pThis->pUrgRecvThread, pThis, drvNATUrgRecv,
1752 drvNATUrgRecvWakeup, 128 * _1K, RTTHREADTYPE_IO, "NATURGRX");
1753 AssertRCReturn(rc, rc);
1754
[60142]1755 rc = RTReqQueueCreate(&pThis->hHostResQueue);
1756 AssertRCReturn(rc, rc);
1757
[104388]1758#if defined(RT_OS_LINUX) && defined(RT_ARCH_ARM64)
1759        /* 64KiB stacks are not supported on at least linux.arm64 (thread creation fails). */
1760 size_t const cbStack = _128K;
1761#else
[104389]1762 size_t const cbStack = _64K;
[104388]1763#endif
[60142]1764 rc = PDMDrvHlpThreadCreate(pThis->pDrvIns, &pThis->pHostResThread,
1765 pThis, drvNATHostResThread, drvNATHostResWakeup,
[104388]1766 cbStack, RTTHREADTYPE_IO, "HOSTRES");
[60142]1767 AssertRCReturn(rc, rc);
1768
[28258]1769 rc = RTCritSectInit(&pThis->DevAccessLock);
1770 AssertRCReturn(rc, rc);
1771
1772 rc = RTCritSectInit(&pThis->XmitLock);
1773 AssertRCReturn(rc, rc);
1774
[34209]1775 char szTmp[128];
1776 RTStrPrintf(szTmp, sizeof(szTmp), "nat%d", pDrvIns->iInstance);
1777 PDMDrvHlpDBGFInfoRegister(pDrvIns, szTmp, "NAT info.", drvNATInfo);
1778
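    /*
     * Give the slirp I/O thread a wakeup channel so it can be interrupted
     * while waiting for socket activity: a control pipe on POSIX hosts, an
     * auto-reset event registered with slirp on Windows hosts.
     */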
[18902]1779#ifndef RT_OS_WINDOWS
[13986]1780 /*
1781 * Create the control pipe.
1782 */
[37596]1783 rc = RTPipeCreate(&pThis->hPipeRead, &pThis->hPipeWrite, 0 /*fFlags*/);
1784 AssertRCReturn(rc, rc);
[18902]1785#else
[14206]1786 pThis->hWakeupEvent = CreateEvent(NULL, FALSE, FALSE, NULL); /* auto-reset event */
[21363]1787 slirp_register_external_event(pThis->pNATState, pThis->hWakeupEvent,
[21011]1788 VBOX_WAKEUP_EVENT_INDEX);
[18902]1789#endif
[13951]1790
[28258]1791 rc = PDMDrvHlpThreadCreate(pDrvIns, &pThis->pSlirpThread, pThis, drvNATAsyncIoThread,
1792 drvNATAsyncIoWakeup, 128 * _1K, RTTHREADTYPE_IO, "NAT");
[37596]1793 AssertRCReturn(rc, rc);
[16291]1794
[30349]1795 pThis->enmLinkState = pThis->enmLinkStateWant = PDMNETWORKLINKSTATE_UP;
[13986]1796
[48056]1797#ifdef RT_OS_DARWIN
1798    /* Set up a watcher which notifies us every time the DNS server changes. */
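    /*
     * Rough flow: create an SCDynamicStore session, wrap it in a run loop
     * source, subscribe to the "State:/Network/Global/DNS" key and attach
     * the source to the main run loop so drvNatDnsChanged fires on changes.
     * Failures here are non-fatal and only logged below.
     */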
1799 int rc2 = VINF_SUCCESS;
1800 SCDynamicStoreContext SCDynStorCtx;
1801
1802 SCDynStorCtx.version = 0;
1803 SCDynStorCtx.info = pThis;
1804 SCDynStorCtx.retain = NULL;
1805 SCDynStorCtx.release = NULL;
1806 SCDynStorCtx.copyDescription = NULL;
1807
1808 SCDynamicStoreRef hDynStor = SCDynamicStoreCreate(NULL, CFSTR("org.virtualbox.drvnat"), drvNatDnsChanged, &SCDynStorCtx);
1809 if (hDynStor)
1810 {
1811 CFRunLoopSourceRef hRunLoopSrc = SCDynamicStoreCreateRunLoopSource(NULL, hDynStor, 0);
1812 if (hRunLoopSrc)
1813 {
1814 CFStringRef aWatchKeys[] =
1815 {
1816 CFSTR("State:/Network/Global/DNS")
1817 };
1818 CFArrayRef hArray = CFArrayCreate(NULL, (const void **)aWatchKeys, 1, &kCFTypeArrayCallBacks);
1819
1820 if (hArray)
1821 {
1822 if (SCDynamicStoreSetNotificationKeys(hDynStor, hArray, NULL))
1823 {
1824 CFRunLoopRef hRunLoopMain = CFRunLoopGetMain();
1825 CFRetain(hRunLoopMain);
1826 CFRunLoopAddSource(hRunLoopMain, hRunLoopSrc, kCFRunLoopCommonModes);
1827 CFRelease(hRunLoopMain);
1828 pThis->hRunLoopSrcDnsWatcher = hRunLoopSrc;
1829 }
1830 else
1831 rc2 = VERR_NO_MEMORY;
1832
1833 CFRelease(hArray);
1834 }
1835 else
1836 rc2 = VERR_NO_MEMORY;
1837
1838 if (RT_FAILURE(rc2)) /* Keep the runloop source referenced for destruction. */
1839 CFRelease(hRunLoopSrc);
1840 }
1841 CFRelease(hDynStor);
1842 }
1843 else
1844 rc2 = VERR_NO_MEMORY;
1845
1846 if (RT_FAILURE(rc2))
1847        LogRel(("NAT#%d: Failed to install DNS change notifier. The guest might lose DNS access when switching networks on the host\n",
1848 pDrvIns->iInstance));
1849#endif
[13986]1850 return rc;
[1]1851 }
[28258]1852
[13986]1853 /* failure path */
1854 slirp_term(pThis->pNATState);
1855 pThis->pNATState = NULL;
[1]1856 }
[13986]1857 else
1858 {
1859 PDMDRV_SET_ERROR(pDrvIns, rc, N_("Unknown error during NAT networking setup: "));
1860 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
1861 }
1862
[1]1863 return rc;
1864}
1865
1866
1867/**
1868 * NAT network transport driver registration record.
1869 */
1870const PDMDRVREG g_DrvNAT =
1871{
1872 /* u32Version */
1873 PDM_DRVREG_VERSION,
[26166]1874 /* szName */
[1]1875 "NAT",
[25893]1876 /* szRCMod */
1877 "",
1878 /* szR0Mod */
1879 "",
[1]1880 /* pszDescription */
1881 "NAT Network Transport Driver",
1882 /* fFlags */
1883 PDM_DRVREG_FLAGS_HOST_BITS_DEFAULT,
1884 /* fClass. */
1885 PDM_DRVREG_CLASS_NETWORK,
1886 /* cMaxInstances */
[40282]1887 ~0U,
[1]1888 /* cbInstance */
1889 sizeof(DRVNAT),
1890 /* pfnConstruct */
1891 drvNATConstruct,
1892 /* pfnDestruct */
1893 drvNATDestruct,
[25893]1894 /* pfnRelocate */
1895 NULL,
[1]1896 /* pfnIOCtl */
1897 NULL,
1898 /* pfnPowerOn */
[8341]1899 drvNATPowerOn,
[1]1900 /* pfnReset */
1901 NULL,
1902 /* pfnSuspend */
1903 NULL,
1904 /* pfnResume */
[47499]1905 drvNATResume,
[22277]1906 /* pfnAttach */
1907 NULL,
[1]1908 /* pfnDetach */
[22458]1909 NULL,
[22277]1910 /* pfnPowerOff */
[22458]1911 NULL,
[22277]1912 /* pfnSoftReset */
[1]1913 NULL,
[22277]1914 /* u32EndVersion */
1915 PDM_DRVREG_VERSION
[1]1916};