VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/misc.c@95288

Last change on this file since 95288 was 93115, checked in by vboxsync, 3 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.1 KB
/* $Id: misc.c 93115 2022-01-01 11:31:46Z vboxsync $ */
/** @file
 * NAT - helpers.
 */

/*
 * Copyright (C) 2006-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*
 * This code is based on:
 *
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#ifndef VBOX_NAT_TST_QUEUE
#include <slirp.h>
#include "zone.h"

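/*
 * Fallback inet_aton() for hosts without a native implementation (see the
 * HAVE_INET_ATON guard below).  It is built on inet_addr(), which returns
 * INADDR_NONE (0xffffffff) both on error and for the valid address
 * 255.255.255.255, so the fallback rejects the latter as well.
 */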
# ifndef HAVE_INET_ATON
int
inet_aton(const char *cp, struct in_addr *ia)
{
    u_int32_t addr = inet_addr(cp);
    if (addr == 0xffffffff)
        return 0;
    ia->s_addr = addr;
    return 1;
}
# endif

/*
 * Get our IP address and put it in our_addr
 */
void
getouraddr(PNATState pData)
{
    our_addr.s_addr = loopback_addr.s_addr;
}
#else /* VBOX_NAT_TST_QUEUE */
# include <iprt/cdefs.h>
# include <iprt/types.h>
# include "misc.h"
#endif
struct quehead
{
    struct quehead *qh_link;
    struct quehead *qh_rlink;
};

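/**
 * Inserts element @a a into a circular doubly linked list, directly after the
 * (pseudo-)head @a b.  Both pointers must refer to structures whose first two
 * members are a forward/back link pair laid out like struct quehead.
 */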
void
insque(PNATState pData, void *a, void *b)
{
    register struct quehead *element = (struct quehead *) a;
    register struct quehead *head = (struct quehead *) b;
    NOREF(pData);
    element->qh_link = head->qh_link;
    head->qh_link = (struct quehead *)element;
    element->qh_rlink = (struct quehead *)head;
    ((struct quehead *)(element->qh_link))->qh_rlink = (struct quehead *)element;
}

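/**
 * Removes element @a a from the doubly linked list it is currently on.  Only
 * the back link of the element is cleared afterwards (see the note in the
 * body about the forward link).
 */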
void
remque(PNATState pData, void *a)
{
    register struct quehead *element = (struct quehead *) a;
    NOREF(pData);
    ((struct quehead *)(element->qh_link))->qh_rlink = element->qh_rlink;
    ((struct quehead *)(element->qh_rlink))->qh_link = element->qh_link;
    element->qh_rlink = NULL;
    /* element->qh_link = NULL; TCP FIN1 crashes if you do this. Why ? */
}

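/*
 * Usage sketch (illustration only, not part of the original sources; assumes
 * a caller-defined struct my_entry): the helpers only ever touch the first
 * two pointers of what they are given, so callers embed the link pair at the
 * start of their own structures:
 *
 *     struct my_entry { struct my_entry *next, *prev; int payload; };
 *
 *     struct quehead head = { &head, &head };   // empty circular list
 *     struct my_entry e   = { NULL, NULL, 42 };
 *
 *     insque(pData, &e, &head);  // link e right after head
 *     remque(pData, &e);         // unlink it again
 */
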
#ifndef VBOX_NAT_TST_QUEUE

/*
 * Set fd non-blocking
 */
void
fd_nonblock(int fd)
{
# ifdef FIONBIO
#  ifdef RT_OS_WINDOWS
    u_long opt = 1;
#  else
    int opt = 1;
#  endif
    ioctlsocket(fd, FIONBIO, &opt);
# else /* !FIONBIO */
    int opt;

    opt = fcntl(fd, F_GETFL, 0);
    opt |= O_NONBLOCK;
    fcntl(fd, F_SETFL, opt);
# endif
}


# if defined(VBOX_NAT_MEM_DEBUG)
#  define NATMEM_LOG_FLOW_FUNC(a)        LogFlowFunc(a)
#  define NATMEM_LOG_FLOW_FUNC_ENTER()   LogFlowFuncEnter()
#  define NATMEM_LOG_FLOW_FUNC_LEAVE()   LogFlowFuncLeave()
#  define NATMEM_LOG_2(a)                Log2(a)
# else
#  define NATMEM_LOG_FLOW_FUNC(a)        do { } while (0)
#  define NATMEM_LOG_FLOW_FUNC_ENTER()   do { } while (0)
#  define NATMEM_LOG_FLOW_FUNC_LEAVE()   do { } while (0)
#  define NATMEM_LOG_2(a)                do { } while (0)
# endif


/**
 * Called when memory becomes available; triggers the pfnXmitPending callback.
 *
 * @note This will LEAVE the critical section of the zone and RE-ENTER it
 *       again. Changes to the zone data should be expected across calls to
 *       this function!
 *
 * @param zone The zone.
 */
DECLINLINE(void) slirp_zone_check_and_send_pending(uma_zone_t zone)
{
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone]\n", zone));
    if (   zone->fDoXmitPending
        && zone->master_zone == NULL)
    {
        int rc2;
        zone->fDoXmitPending = false;
        rc2 = RTCritSectLeave(&zone->csZone); AssertRC(rc2);

        slirp_output_pending(zone->pData->pvUser);

        rc2 = RTCritSectEnter(&zone->csZone); AssertRC(rc2);
    }
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

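/**
 * Zone allocator callback (uma_alloc_t).
 *
 * Takes the first item off the zone's free list, runs the optional pfInit
 * callback on its payload and moves it to the used list.  If a sub-zone runs
 * out of free items, one chunk is borrowed from the master zone, re-labelled
 * for this zone and the allocation is retried.  An exhausted master zone only
 * flags fDoXmitPending and the call fails.
 *
 * @note May temporarily leave the zone's critical section via
 *       slirp_zone_check_and_send_pending().
 *
 * @returns Pointer to the item payload (just after the struct item header),
 *          or NULL on failure.
 */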
static void *slirp_uma_alloc(uma_zone_t zone,
                             int size, uint8_t *pflags, int fWait)
{
    struct item *it;
    uint8_t *sub_area;
    void *ret = NULL;
    int rc;

    NATMEM_LOG_FLOW_FUNC(("ENTER: %R[mzone], size:%d, pflags:%p, %RTbool\n", zone, size, pflags, fWait)); RT_NOREF(size, pflags, fWait);
    RTCritSectEnter(&zone->csZone);
    for (;;)
    {
        if (!LIST_EMPTY(&zone->free_items))
        {
            it = LIST_FIRST(&zone->free_items);
            Assert(it->magic == ITEM_MAGIC);
            rc = 0;
            if (zone->pfInit)
                rc = zone->pfInit(zone->pData, (void *)&it[1], (int /*sigh*/)zone->size, M_DONTWAIT);
            if (rc == 0)
            {
                zone->cur_items++;
                LIST_REMOVE(it, list);
                LIST_INSERT_HEAD(&zone->used_items, it, list);
                slirp_zone_check_and_send_pending(zone); /* may exit+enter the cs! */
                ret = (void *)&it[1];
            }
            else
            {
                AssertMsgFailed(("NAT: item initialization failed for zone %s\n", zone->name));
                ret = NULL;
            }
            break;
        }

        if (!zone->master_zone)
        {
            /* We're on the master zone and we can't allocate more. */
            NATMEM_LOG_2(("NAT: no room on %s zone\n", zone->name));
            /* AssertMsgFailed(("NAT: OOM!")); */
            zone->fDoXmitPending = true;
            break;
        }

        /* We're on a sub-zone; we need to get a chunk from the master zone and
         * split it into sub-zone conforming chunks.
         */
        sub_area = slirp_uma_alloc(zone->master_zone, (int /*sigh*/)zone->master_zone->size, NULL, 0);
        if (!sub_area)
        {
            /* No room on master */
            NATMEM_LOG_2(("NAT: no room on %s zone for %s zone\n", zone->master_zone->name, zone->name));
            break;
        }
        zone->max_items++;
        it = &((struct item *)sub_area)[-1];
        /* It's the chunk descriptor of the master zone; we should remove it
         * from the master list first.
         */
        Assert((it->zone && it->zone->magic == ZONE_MAGIC));
        RTCritSectEnter(&it->zone->csZone);
        /** @todo should we adjust the master zone's counters here? */
        LIST_REMOVE(it, list);
        RTCritSectLeave(&it->zone->csZone);

        /** @todo '+ zone->size' should depend on the flag */
        memset(it, 0, sizeof(struct item));
        it->zone = zone;
        it->magic = ITEM_MAGIC;
        LIST_INSERT_HEAD(&zone->free_items, it, list);
        if (zone->cur_items >= zone->max_items)
            LogRel(("NAT: Zone(%s) has reached its maximum\n", zone->name));
    }
    RTCritSectLeave(&zone->csZone);
    NATMEM_LOG_FLOW_FUNC(("LEAVE: %p\n", ret));
    return ret;
}

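/**
 * Zone free callback (uma_free_t).
 *
 * Recovers the struct item header in front of the payload, checks the item
 * and border magics, runs the optional pfFini/pfDtor callbacks and returns
 * the item to its zone's free list.
 */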
static void slirp_uma_free(void *item, int size, uint8_t flags)
{
    struct item *it;
    uma_zone_t zone;

    Assert(item);
    it = &((struct item *)item)[-1];
    NATMEM_LOG_FLOW_FUNC(("ENTER: item:%p(%R[mzoneitem]), size:%d, flags:%RX8\n", item, it, size, flags)); RT_NOREF(size, flags);
    Assert(it->magic == ITEM_MAGIC);
    zone = it->zone;
    /* check border magic */
    Assert((*(uint32_t *)(((uint8_t *)&it[1]) + zone->size) == 0xabadbabe));

    RTCritSectEnter(&zone->csZone);
    Assert(zone->magic == ZONE_MAGIC);
    LIST_REMOVE(it, list);
    if (zone->pfFini)
    {
        zone->pfFini(zone->pData, item, (int /*sigh*/)zone->size);
    }
    if (zone->pfDtor)
    {
        zone->pfDtor(zone->pData, item, (int /*sigh*/)zone->size, NULL);
    }
    LIST_INSERT_HEAD(&zone->free_items, it, list);
    zone->cur_items--;
    slirp_zone_check_and_send_pending(zone); /* may exit+enter the cs! */
    RTCritSectLeave(&zone->csZone);
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

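/**
 * Creates a master zone for items of @a size bytes.
 *
 * The backing storage is not allocated here; uma_zone_set_max() reserves the
 * item area and populates the free list.
 */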
uma_zone_t uma_zcreate(PNATState pData, char *name, size_t size,
                       ctor_t ctor, dtor_t dtor, zinit_t init, zfini_t fini, int flags1, int flags2)
{
    uma_zone_t zone = NULL;
    NATMEM_LOG_FLOW_FUNC(("ENTER: name:%s size:%zu, ctor:%p, dtor:%p, init:%p, fini:%p, flags1:%RX32, flags2:%RX32\n",
                          name, size, ctor, dtor, init, fini, flags1, flags2)); RT_NOREF(flags1, flags2);
    zone = RTMemAllocZ(sizeof(struct uma_zone));
    if (zone == NULL)
        return NULL;
    Assert((pData));
    zone->magic = ZONE_MAGIC;
    zone->pData = pData;
    zone->name = name;
    zone->size = size;
    zone->pfCtor = ctor;
    zone->pfDtor = dtor;
    zone->pfInit = init;
    zone->pfFini = fini;
    zone->pfAlloc = slirp_uma_alloc;
    zone->pfFree = slirp_uma_free;
    RTCritSectInit(&zone->csZone);
    NATMEM_LOG_FLOW_FUNC(("LEAVE: %R[mzone]\n", zone));
    return zone;
}

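/**
 * Creates a sub-zone that has no storage of its own; its items are borrowed
 * from @a master on demand by slirp_uma_alloc().
 */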
uma_zone_t uma_zsecond_create(char *name, ctor_t ctor,
                              dtor_t dtor, zinit_t init, zfini_t fini, uma_zone_t master)
{
    uma_zone_t zone;
    Assert(master);
    NATMEM_LOG_FLOW_FUNC(("ENTER: name:%s ctor:%p, dtor:%p, init:%p, fini:%p, master:%R[mzone]\n",
                          name, ctor, dtor, init, fini, master));
    zone = RTMemAllocZ(sizeof(struct uma_zone));
    if (zone == NULL)
    {
        NATMEM_LOG_FLOW_FUNC(("LEAVE: %R[mzone]\n", NULL));
        return NULL;
    }

    Assert((master && master->pData));
    zone->magic = ZONE_MAGIC;
    zone->pData = master->pData;
    zone->name = name;
    zone->pfCtor = ctor;
    zone->pfDtor = dtor;
    zone->pfInit = init;
    zone->pfFini = fini;
    zone->pfAlloc = slirp_uma_alloc;
    zone->pfFree = slirp_uma_free;
    zone->size = master->size;
    zone->master_zone = master;
    RTCritSectInit(&zone->csZone);
    NATMEM_LOG_FLOW_FUNC(("LEAVE: %R[mzone]\n", zone));
    return zone;
}

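/**
 * Sets the item limit of a zone and pre-allocates its backing area, carving
 * it into @a max chunks (item header + payload + border magic) which all go
 * onto the free list.
 */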
void uma_zone_set_max(uma_zone_t zone, int max)
{
    int i = 0;
    struct item *it;
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], max:%d\n", zone, max));
    zone->max_items = max;
    zone->area = RTMemAllocZ(max * (sizeof(struct item) + zone->size + sizeof(uint32_t)));
    for (; i < max; ++i)
    {
        it = (struct item *)(((uint8_t *)zone->area) + i*(sizeof(struct item) + zone->size + sizeof(uint32_t)));
        it->magic = ITEM_MAGIC;
        it->zone = zone;
        *(uint32_t *)(((uint8_t *)&it[1]) + zone->size) = 0xabadbabe;
        LIST_INSERT_HEAD(&zone->free_items, it, list);
    }
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

void uma_zone_set_allocf(uma_zone_t zone, uma_alloc_t pfAlloc)
{
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], pfAlloc:%Rfn\n", zone, pfAlloc));
    zone->pfAlloc = pfAlloc;
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

void uma_zone_set_freef(uma_zone_t zone, uma_free_t pfFree)
{
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], pfFree:%Rfn\n", zone, pfFree));
    zone->pfFree = pfFree;
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

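/**
 * Returns a pointer to the reference counter kept in the item header right in
 * front of @a mem.
 */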
uint32_t *uma_find_refcnt(uma_zone_t zone, void *mem)
{
    /** @todo (vvl) this function is supposed to work with a special zone that
     *  stores reference counters */
    struct item *it = NULL;
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], mem:%p\n", zone, mem)); RT_NOREF(zone);
    it = (struct item *)mem; /* 1st element */
    Assert(mem != NULL);
    Assert(zone->magic == ZONE_MAGIC);
    /* to return a pointer to the counter we need element 0, i.e. the item header */
    Assert(it[-1].magic == ITEM_MAGIC);
    NATMEM_LOG_FLOW_FUNC(("LEAVE: %p\n", &it[-1].ref_count));
    return &it[-1].ref_count;
}

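/**
 * Allocates an item from the zone through its pfAlloc callback and runs the
 * optional pfCtor constructor on it.
 *
 * @returns The item, or NULL if the zone has no allocator or the allocation
 *          failed.
 */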
void *uma_zalloc_arg(uma_zone_t zone, void *args, int how)
{
    void *mem;
    Assert(zone->magic == ZONE_MAGIC);
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], args:%p, how:%RX32\n", zone, args, how)); RT_NOREF(how);
    if (zone->pfAlloc == NULL)
    {
        NATMEM_LOG_FLOW_FUNC(("LEAVE: NULL\n"));
        return NULL;
    }
    RTCritSectEnter(&zone->csZone);
    mem = zone->pfAlloc(zone, (int /*sigh*/)zone->size, NULL, 0);
    if (mem != NULL)
    {
        if (zone->pfCtor)
            zone->pfCtor(zone->pData, mem, (int /*sigh*/)zone->size, args, M_DONTWAIT);
    }
    RTCritSectLeave(&zone->csZone);
    NATMEM_LOG_FLOW_FUNC(("LEAVE: %p\n", mem));
    return mem;
}

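/**
 * Frees a zone item; convenience wrapper around uma_zfree_arg() without flags.
 */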
void uma_zfree(uma_zone_t zone, void *item)
{
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], item:%p\n", zone, item));
    uma_zfree_arg(zone, item, NULL);
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

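/**
 * Frees a zone item through the zone's pfFree callback after validating the
 * item header and the zone magic.
 */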
void uma_zfree_arg(uma_zone_t zone, void *mem, void *flags)
{
    struct item *it;
    Assert(zone->magic == ZONE_MAGIC);
    Assert((zone->pfFree));
    Assert((mem));
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], mem:%p, flags:%p\n", zone, mem, flags)); RT_NOREF(flags);

    RTCritSectEnter(&zone->csZone);
    it = &((struct item *)mem)[-1];
    Assert((it->magic == ITEM_MAGIC));
    Assert((zone->magic == ZONE_MAGIC && zone == it->zone));

    zone->pfFree(mem, 0, 0);
    RTCritSectLeave(&zone->csZone);

    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

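/**
 * Checks whether all items of the zone are currently in use.
 *
 * @note Despite the _nolock suffix, the check is performed while holding the
 *       zone's critical section.
 */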
int uma_zone_exhausted_nolock(uma_zone_t zone)
{
    int fExhausted;
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone]\n", zone));
    RTCritSectEnter(&zone->csZone);
    fExhausted = (zone->cur_items == zone->max_items);
    RTCritSectLeave(&zone->csZone);
    NATMEM_LOG_FLOW_FUNC(("LEAVE: %RTbool\n", fExhausted));
    return fExhausted;
}

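/**
 * Gives all free items of a sub-zone back to its master zone, shrinking the
 * sub-zone's item limit as it goes.
 */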
void zone_drain(uma_zone_t zone)
{
    struct item *it;
    uma_zone_t master_zone;

    /* vvl: Huh? What should we do with a zone that has no backing store? */
    Assert((zone->master_zone));
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone]\n", zone));
    master_zone = zone->master_zone;
    while (!LIST_EMPTY(&zone->free_items))
    {
        it = LIST_FIRST(&zone->free_items);
        Assert((it->magic == ITEM_MAGIC));

        RTCritSectEnter(&zone->csZone);
        LIST_REMOVE(it, list);
        zone->max_items--;
        RTCritSectLeave(&zone->csZone);

        it->zone = master_zone;

        RTCritSectEnter(&master_zone->csZone);
        LIST_INSERT_HEAD(&master_zone->free_items, it, list);
        master_zone->cur_items--;
        slirp_zone_check_and_send_pending(master_zone); /* may exit+enter the cs! */
        RTCritSectLeave(&master_zone->csZone);
    }
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

void slirp_null_arg_free(void *mem, void *arg)
{
    /** @todo (vvl) make it wiser */
    NATMEM_LOG_FLOW_FUNC(("ENTER: mem:%p, arg:%p\n", mem, arg));
    RT_NOREF(arg);
    Assert(mem);
    RTMemFree(mem);
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

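/**
 * Not implemented: only logs the request and always returns NULL.
 */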
void *uma_zalloc(uma_zone_t zone, int len)
{
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], len:%d\n", zone, len));
    RT_NOREF(zone, len);
    NATMEM_LOG_FLOW_FUNC(("LEAVE: NULL\n"));
    return NULL;
}

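/**
 * Allocates an mbuf with a cluster large enough to hold at least @a cbMin
 * bytes (MCLBYTES, MJUM9BYTES or MJUM16BYTES) and returns the buffer address
 * and size to the caller.
 *
 * @returns The mbuf, or NULL if @a cbMin is MJUM16BYTES or larger or no mbuf
 *          could be allocated.
 */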
struct mbuf *slirp_ext_m_get(PNATState pData, size_t cbMin, void **ppvBuf, size_t *pcbBuf)
{
    struct mbuf *m;
    int size = MCLBYTES;
    NATMEM_LOG_FLOW_FUNC(("ENTER: cbMin:%zu, ppvBuf:%p, pcbBuf:%p\n", cbMin, ppvBuf, pcbBuf));

    *ppvBuf = NULL;
    *pcbBuf = 0;

    if (cbMin < MCLBYTES)
        size = MCLBYTES;
    else if (cbMin < MJUM9BYTES)
        size = MJUM9BYTES;
    else if (cbMin < MJUM16BYTES)
        size = MJUM16BYTES;
    else
    {
        AssertMsgFailed(("Unsupported size %zu", cbMin));
        NATMEM_LOG_FLOW_FUNC(("LEAVE: NULL (bad size %zu)\n", cbMin));
        return NULL;
    }

    m = m_getjcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR, size);
    if (m == NULL)
    {
        NATMEM_LOG_FLOW_FUNC(("LEAVE: NULL\n"));
        return NULL;
    }
    m->m_len = size;
    *ppvBuf = mtod(m, void *);
    *pcbBuf = size;
    NATMEM_LOG_FLOW_FUNC(("LEAVE: %p\n", m));
    return m;
}

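/**
 * Frees an mbuf obtained from slirp_ext_m_get(), releasing @a pu8Buf as well
 * when it is a separate heap buffer rather than the mbuf's own data.
 */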
void slirp_ext_m_free(PNATState pData, struct mbuf *m, uint8_t *pu8Buf)
{
    NATMEM_LOG_FLOW_FUNC(("ENTER: m:%p, pu8Buf:%p\n", m, pu8Buf));
    if (   pu8Buf
        && pu8Buf != mtod(m, uint8_t *))
        RTMemFree(pu8Buf); /* This buffer was allocated on the heap. */
    m_freem(pData, m);
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

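/**
 * Destroys a zone: logs how many items are still in use, frees the
 * pre-allocated item area (if any), deletes the critical section and frees
 * the zone structure itself.
 */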
static void zone_destroy(uma_zone_t zone)
{
    RTCritSectEnter(&zone->csZone);
    NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone]\n", zone));
    LogRel(("NAT: Zone(nm:%s, used:%d)\n", zone->name, zone->cur_items));
    RTMemFree(zone->area);
    RTCritSectLeave(&zone->csZone);
    RTCritSectDelete(&zone->csZone);
    RTMemFree(zone);
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

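/**
 * Destroys all mbuf-related zones of the NAT instance.
 */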
void m_fini(PNATState pData)
{
    NATMEM_LOG_FLOW_FUNC_ENTER();
# define ZONE_DESTROY(zone) do { zone_destroy((zone)); (zone) = NULL;} while (0)
    ZONE_DESTROY(pData->zone_clust);
    ZONE_DESTROY(pData->zone_pack);
    ZONE_DESTROY(pData->zone_mbuf);
    ZONE_DESTROY(pData->zone_jumbop);
    ZONE_DESTROY(pData->zone_jumbo9);
    ZONE_DESTROY(pData->zone_jumbo16);
    ZONE_DESTROY(pData->zone_ext_refcnt);
# undef ZONE_DESTROY
    /** @todo do finalization here. */
    NATMEM_LOG_FLOW_FUNC_LEAVE();
}

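/**
 * Initializes the interface parameters: ethernet link header size,
 * compression setting and MTU/MRU.
 */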
void
if_init(PNATState pData)
{
    /* 14 for ethernet */
    if_maxlinkhdr = 14;
    if_comp = IF_AUTOCOMP;
    if_mtu = 1500;
    if_mru = 1500;
}

#endif /* VBOX_NAT_TST_QUEUE */
