VirtualBox

source: vbox/trunk/src/VBox/NetworkServices/NAT/proxy_pollmgr.c@104620

Last change on this file since 104620 was 104603, checked in by vboxsync, 4 months ago

NetworkServices/NAT: Some unused variable fixes, bugref:3409

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 22.4 KB
/* $Id: proxy_pollmgr.c 104603 2024-05-13 15:19:03Z vboxsync $ */
/** @file
 * NAT Network - poll manager.
 */

/*
 * Copyright (C) 2013-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#define LOG_GROUP LOG_GROUP_NAT_SERVICE

#include "winutils.h"

#include "proxy_pollmgr.h"
#include "proxy.h"

#ifndef RT_OS_WINDOWS
#include <sys/socket.h>
#include <netinet/in.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#else
#include <iprt/errcore.h>
#include <stdlib.h>
#include <string.h>
#include "winpoll.h"
#endif

#include <iprt/req.h>
#include <iprt/errcore.h>


#define POLLMGR_GARBAGE (-1)


enum {
    POLLMGR_QUEUE = 0,

    POLLMGR_SLOT_STATIC_COUNT,
    POLLMGR_SLOT_FIRST_DYNAMIC = POLLMGR_SLOT_STATIC_COUNT
};


struct pollmgr_chan {
    struct pollmgr_handler *handler;
    void *arg;
    bool arg_valid;
};

struct pollmgr {
    struct pollfd *fds;
    struct pollmgr_handler **handlers;
    nfds_t capacity;            /* allocated size of the arrays */
    nfds_t nfds;                /* part of the arrays in use */

    /* channels (socketpair) for static slots */
    SOCKET chan[POLLMGR_SLOT_STATIC_COUNT][2];
#define POLLMGR_CHFD_RD 0       /* - pollmgr side */
#define POLLMGR_CHFD_WR 1       /* - client side */


    /* emulate channels with request queue */
    RTREQQUEUE queue;
    struct pollmgr_handler queue_handler;
    struct pollmgr_chan chan_handlers[POLLMGR_CHAN_COUNT];
} pollmgr;
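
/*
 * pollmgr.fds[] and pollmgr.handlers[] are parallel arrays indexed
 * by slot: pollmgr.handlers[i] is the handler for the socket in
 * pollmgr.fds[i].  Slots below POLLMGR_SLOT_FIRST_DYNAMIC are the
 * static channel slots (currently just POLLMGR_QUEUE); dynamic
 * slots for proxied sockets are appended by pollmgr_add() and
 * compacted by the garbage collection pass in pollmgr_loop().
 */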


static int pollmgr_queue_callback(struct pollmgr_handler *, SOCKET, int);
static void pollmgr_chan_call_handler(int, void *);

static void pollmgr_loop(void);

static void pollmgr_add_at(int, struct pollmgr_handler *, SOCKET, int);
static void pollmgr_refptr_delete(struct pollmgr_refptr *);


/*
 * We cannot portably peek at the length of the incoming datagram and
 * pre-allocate a pbuf chain to recvmsg() directly into it.  On Linux
 * it's possible to recv with MSG_PEEK|MSG_TRUNC, but the extra
 * syscall is probably more expensive (we haven't measured) than an
 * extra copy of the data, since typical UDP datagrams are small
 * enough to avoid fragmentation.
 *
 * We can use a shared buffer here since we read from the sockets
 * sequentially in a loop over pollfd.
 */
u8_t pollmgr_udpbuf[64 * 1024];
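
/*
 * Note that pollmgr_queue_callback() below also reuses this buffer
 * to drain the single-byte notifications sent by pollmgr_chan_send().
 */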


int
pollmgr_init(void)
{
    struct pollfd *newfds;
    struct pollmgr_handler **newhdls;
    nfds_t newcap;
    int rc, status;
    nfds_t i;

    rc = RTReqQueueCreate(&pollmgr.queue);
    if (RT_FAILURE(rc))
        return -1;

    pollmgr.fds = NULL;
    pollmgr.handlers = NULL;
    pollmgr.capacity = 0;
    pollmgr.nfds = 0;

    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
        pollmgr.chan[i][POLLMGR_CHFD_RD] = INVALID_SOCKET;
        pollmgr.chan[i][POLLMGR_CHFD_WR] = INVALID_SOCKET;
    }

    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
#ifndef RT_OS_WINDOWS
        int j;

        status = socketpair(PF_LOCAL, SOCK_DGRAM, 0, pollmgr.chan[i]);
        if (status < 0) {
            DPRINTF(("socketpair: %R[sockerr]\n", SOCKERRNO()));
            goto cleanup_close;
        }

        /* now manually make them O_NONBLOCK */
        for (j = 0; j < 2; ++j) {
            int s = pollmgr.chan[i][j];
            int sflags;

            sflags = fcntl(s, F_GETFL, 0);
            if (sflags < 0) {
                DPRINTF0(("F_GETFL: %R[sockerr]\n", errno));
                goto cleanup_close;
            }

            status = fcntl(s, F_SETFL, sflags | O_NONBLOCK);
            if (status < 0) {
                DPRINTF0(("O_NONBLOCK: %R[sockerr]\n", errno));
                goto cleanup_close;
            }
        }
#else
        status = RTWinSocketPair(PF_INET, SOCK_DGRAM, 0, pollmgr.chan[i]);
        if (RT_FAILURE(status)) {
            goto cleanup_close;
        }
#endif
    }


    newcap = 16; /* XXX: magic */
    LWIP_ASSERT1(newcap >= POLLMGR_SLOT_STATIC_COUNT);

    newfds = (struct pollfd *)
        malloc(newcap * sizeof(*pollmgr.fds));
    if (newfds == NULL) {
        DPRINTF(("%s: Failed to allocate fds array\n", __func__));
        goto cleanup_close;
    }

    newhdls = (struct pollmgr_handler **)
        malloc(newcap * sizeof(*pollmgr.handlers));
    if (newhdls == NULL) {
        DPRINTF(("%s: Failed to allocate handlers array\n", __func__));
        free(newfds);
        goto cleanup_close;
    }

    pollmgr.capacity = newcap;
    pollmgr.fds = newfds;
    pollmgr.handlers = newhdls;

    pollmgr.nfds = POLLMGR_SLOT_STATIC_COUNT;

    for (i = 0; i < pollmgr.capacity; ++i) {
        pollmgr.fds[i].fd = INVALID_SOCKET;
        pollmgr.fds[i].events = 0;
        pollmgr.fds[i].revents = 0;
    }

    /* add request queue notification */
    pollmgr.queue_handler.callback = pollmgr_queue_callback;
    pollmgr.queue_handler.data = NULL;
    pollmgr.queue_handler.slot = -1;

    pollmgr_add_at(POLLMGR_QUEUE, &pollmgr.queue_handler,
                   pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_RD],
                   POLLIN);

    return 0;

  cleanup_close:
    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
        SOCKET *chan = pollmgr.chan[i];
        if (chan[POLLMGR_CHFD_RD] != INVALID_SOCKET) {
            closesocket(chan[POLLMGR_CHFD_RD]);
            closesocket(chan[POLLMGR_CHFD_WR]);
        }
    }

    return -1;
}
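
/*
 * A minimal startup sketch; the channel slot constant and callback
 * name here are hypothetical, not defined in this file:
 *
 *     static struct pollmgr_handler my_handler;
 *
 *     if (pollmgr_init() < 0)
 *         return -1;
 *     my_handler.callback = my_chan_callback;
 *     my_handler.data = NULL;
 *     SOCKET wr = pollmgr_add_chan(MY_CHAN_SLOT, &my_handler);
 *     // ... then run the loop on a dedicated thread via pollmgr_thread().
 */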


/*
 * Add a new channel.  We now implement channels with a request
 * queue, so all channels get the same socket that triggers queue
 * processing.
 *
 * Must be called before the pollmgr loop is started, so no locking.
 */
SOCKET
pollmgr_add_chan(int slot, struct pollmgr_handler *handler)
{
    AssertReturn(0 <= slot && slot < POLLMGR_CHAN_COUNT, INVALID_SOCKET);
    AssertReturn(handler != NULL && handler->callback != NULL, INVALID_SOCKET);

    handler->slot = slot;
    pollmgr.chan_handlers[slot].handler = handler;
    return pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_WR];
}


/*
 * This used to actually send data over the channel's socket.  Now we
 * queue a request and send a single-byte notification over the
 * shared POLLMGR_QUEUE socket.
 */
ssize_t
pollmgr_chan_send(int slot, void *buf, size_t nbytes)
{
    static const char notification = 0x5a;

    void *ptr;
    SOCKET fd;
    ssize_t nsent;

    AssertReturn(0 <= slot && slot < POLLMGR_CHAN_COUNT, -1);

    /*
     * XXX: Hack alert.  We only ever "sent" a single pointer, which
     * was simultaneously both the wakeup event for the poll and the
     * argument that the channel handler read from the channel.  So
     * now we pass this pointer to the request and arrange for the
     * handler to "read" it when it asks for it.
     */
    if (nbytes != sizeof(void *)) {
        return -1;
    }

    ptr = *(void **)buf;

    int rc = RTReqQueueCallEx(pollmgr.queue, NULL, 0,
                              RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
                              (PFNRT)pollmgr_chan_call_handler, 2,
                              slot, ptr);
    if (RT_FAILURE(rc))
    {
        DPRINTF(("Queuing pollmgr_chan_call_handler() on poll manager queue failed with %Rrc\n", rc));
        return -1;
    }

    fd = pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_WR];
    nsent = send(fd, &notification, 1, 0);
    if (nsent == SOCKET_ERROR) {
        DPRINTF(("send on chan %d: %R[sockerr]\n", slot, SOCKERRNO()));
        return -1;
    }
    else if ((size_t)nsent != 1) {
        DPRINTF(("send on chan %d: datagram truncated to %u bytes\n",
                 slot, (unsigned int)nsent));
        return -1;
    }

    /* caller thinks it's sending the pointer */
    return sizeof(void *);
}
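
/*
 * Producer-side sketch; the slot constant, payload type, and helper
 * are hypothetical.  Callers always "send" exactly one pointer, so
 * nbytes must be sizeof(void *):
 *
 *     struct my_msg *msg = make_msg();        // hypothetical
 *     if (pollmgr_chan_send(MY_CHAN_SLOT, &msg, sizeof(msg)) < 0) {
 *         // request not delivered; report or retry
 *     }
 */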


/*
 * pollmgr_chan_send() sent us a notification; process the queue.
 */
static int
pollmgr_queue_callback(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    RT_NOREF(handler, revents);
    Assert(pollmgr.queue != NIL_RTREQQUEUE);

    ssize_t nread = recv(fd, (char *)pollmgr_udpbuf, sizeof(pollmgr_udpbuf), 0);
    if (nread == SOCKET_ERROR) {
        DPRINTF0(("%s: recv: %R[sockerr]\n", __func__, SOCKERRNO()));
        return POLLIN;
    }

    DPRINTF2(("%s: read %zd\n", __func__, nread));
    if (nread == 0) {
        return POLLIN;
    }

    int rc = RTReqQueueProcess(pollmgr.queue, 0);
    if (RT_UNLIKELY(rc != VERR_TIMEOUT && RT_FAILURE_NP(rc))) {
        DPRINTF0(("%s: RTReqQueueProcess: %Rrc\n", __func__, rc));
    }

    return POLLIN;
}


/*
 * Queued requests use this function to emulate the call to the
 * handler's callback.
 */
static void
pollmgr_chan_call_handler(int slot, void *arg)
{
    struct pollmgr_handler *handler;
    int nevents;

    AssertReturnVoid(0 <= slot && slot < POLLMGR_CHAN_COUNT);

    handler = pollmgr.chan_handlers[slot].handler;
    AssertReturnVoid(handler != NULL && handler->callback != NULL);

    /* arrange for pollmgr_chan_recv_ptr() to "receive" the arg */
    pollmgr.chan_handlers[slot].arg = arg;
    pollmgr.chan_handlers[slot].arg_valid = true;

    nevents = handler->callback(handler, INVALID_SOCKET, POLLIN);
    if (nevents != POLLIN) {
        DPRINTF2(("%s: nevents=0x%x!\n", __func__, nevents));
    }
}


/*
 * "Receive" a pointer "sent" over a poll manager channel.
 */
void *
pollmgr_chan_recv_ptr(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    int slot;
    void *ptr;

    RT_NOREF(fd);

    slot = handler->slot;
    Assert(0 <= slot && slot < POLLMGR_CHAN_COUNT);

    if (revents & POLLNVAL) {
        errx(EXIT_FAILURE, "chan %d: fd invalid", (int)handler->slot);
        /* NOTREACHED */
    }

    if (revents & (POLLERR | POLLHUP)) {
        errx(EXIT_FAILURE, "chan %d: fd error", (int)handler->slot);
        /* NOTREACHED */
    }

    LWIP_ASSERT1(revents & POLLIN);

    if (!pollmgr.chan_handlers[slot].arg_valid) {
        err(EXIT_FAILURE, "chan %d: recv", (int)handler->slot);
        /* NOTREACHED */
    }

    ptr = pollmgr.chan_handlers[slot].arg;
    pollmgr.chan_handlers[slot].arg_valid = false;

    return ptr;
}
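
/*
 * Consumer-side sketch; the callback name and payload type are
 * hypothetical.  A channel callback registered with
 * pollmgr_add_chan() retrieves the queued pointer and should return
 * POLLIN (see pollmgr_chan_call_handler()):
 *
 *     static int
 *     my_chan_callback(struct pollmgr_handler *handler, SOCKET fd, int revents)
 *     {
 *         struct my_msg *msg =
 *             (struct my_msg *)pollmgr_chan_recv_ptr(handler, fd, revents);
 *         // ... process msg ...
 *         return POLLIN;
 *     }
 */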


/*
 * Must be called from the pollmgr loop (via callbacks), so no locking.
 */
int
pollmgr_add(struct pollmgr_handler *handler, SOCKET fd, int events)
{
    int slot;

    DPRINTF2(("%s: new fd %d\n", __func__, fd));

    if (pollmgr.nfds == pollmgr.capacity) {
        struct pollfd *newfds;
        struct pollmgr_handler **newhdls;
        nfds_t newcap;
        nfds_t i;

        newcap = pollmgr.capacity * 2;

        newfds = (struct pollfd *)
            realloc(pollmgr.fds, newcap * sizeof(*pollmgr.fds));
        if (newfds == NULL) {
            DPRINTF(("%s: Failed to reallocate fds array\n", __func__));
            handler->slot = -1;
            return -1;
        }

        pollmgr.fds = newfds; /* don't crash/leak if realloc(handlers) fails */
        /* but don't update capacity yet! */

        newhdls = (struct pollmgr_handler **)
            realloc(pollmgr.handlers, newcap * sizeof(*pollmgr.handlers));
        if (newhdls == NULL) {
            DPRINTF(("%s: Failed to reallocate handlers array\n", __func__));
            /* if the realloc failed here, then fds points to the new
             * array, but we pretend we still have the old capacity */
            handler->slot = -1;
            return -1;
        }

        pollmgr.handlers = newhdls;
        pollmgr.capacity = newcap;

        for (i = pollmgr.nfds; i < newcap; ++i) {
            newfds[i].fd = INVALID_SOCKET;
            newfds[i].events = 0;
            newfds[i].revents = 0;
            newhdls[i] = NULL;
        }
    }

    slot = pollmgr.nfds;
    ++pollmgr.nfds;

    pollmgr_add_at(slot, handler, fd, events);
    return slot;
}
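
/*
 * Registration sketch for a dynamic slot; the connection struct and
 * callback name are hypothetical.  Per the comment above,
 * pollmgr_add() may only run on the pollmgr thread, i.e. from
 * another callback:
 *
 *     conn->handler.callback = my_sock_callback;
 *     conn->handler.data = conn;
 *     int slot = pollmgr_add(&conn->handler, conn->sock, POLLIN);
 *     if (slot < 0) {
 *         // not registered; close conn->sock and clean up
 *     }
 */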


static void
pollmgr_add_at(int slot, struct pollmgr_handler *handler, SOCKET fd, int events)
{
    pollmgr.fds[slot].fd = fd;
    pollmgr.fds[slot].events = events;
    pollmgr.fds[slot].revents = 0;
    pollmgr.handlers[slot] = handler;

    handler->slot = slot;
}


void
pollmgr_update_events(int slot, int events)
{
    LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
    LWIP_ASSERT1((nfds_t)slot < pollmgr.nfds);

    pollmgr.fds[slot].events = events;
}


void
pollmgr_del_slot(int slot)
{
    LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);

    DPRINTF2(("%s(%d): fd %d ! DELETED\n",
              __func__, slot, pollmgr.fds[slot].fd));

    pollmgr.fds[slot].fd = INVALID_SOCKET; /* see poll loop */
}
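
/*
 * The slot is not reclaimed here: the poll loop sees the
 * INVALID_SOCKET fd, pretends the handler requested deletion, and
 * garbage-collects the slot on its next pass.
 */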


void
pollmgr_thread(void *ignored)
{
    LWIP_UNUSED_ARG(ignored);
    pollmgr_loop();
}


static void
pollmgr_loop(void)
{
    int nready;
    SOCKET delfirst;
    SOCKET *pdelprev;
    int i;

    for (;;) {
#ifndef RT_OS_WINDOWS
        nready = poll(pollmgr.fds, pollmgr.nfds, -1);
#else
        int rc = RTWinPoll(pollmgr.fds, pollmgr.nfds, RT_INDEFINITE_WAIT, &nready);
        if (RT_FAILURE(rc)) {
            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED */
        }
#endif

        DPRINTF2(("%s: ready %d fd%s\n",
                  __func__, nready, (nready == 1 ? "" : "s")));

        if (nready < 0) {
            if (errno == EINTR) {
                continue;
            }

            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED */
        }
        else if (nready == 0) { /* cannot happen, we wait forever (-1) */
            continue;           /* - but be defensive */
        }


        delfirst = INVALID_SOCKET;
        pdelprev = &delfirst;

        for (i = 0; (nfds_t)i < pollmgr.nfds && nready > 0; ++i) {
            struct pollmgr_handler *handler;
            SOCKET fd;
            int revents, nevents;

            fd = pollmgr.fds[i].fd;
            revents = pollmgr.fds[i].revents;

            /*
             * Channel handlers can request deletion of dynamic slots
             * by calling pollmgr_del_slot(), which clobbers the
             * slot's fd.
             */
            if (fd == INVALID_SOCKET && i >= POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* adjust count if events were pending for that slot */
                if (revents != 0) {
                    --nready;
                }

                /* pretend that slot handler requested deletion */
                nevents = -1;
                goto update_events;
            }

            if (revents == 0) {
                continue; /* next fd */
            }
            --nready;

            handler = pollmgr.handlers[i];

            if (handler != NULL && handler->callback != NULL) {
#ifdef LWIP_PROXY_DEBUG
# if LWIP_PROXY_DEBUG /* DEBUG */
                if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                    if (revents == POLLIN) {
                        DPRINTF2(("%s: ch %d\n", __func__, i));
                    }
                    else {
                        DPRINTF2(("%s: ch %d @ revents 0x%x!\n",
                                  __func__, i, revents));
                    }
                }
                else {
                    DPRINTF2(("%s: fd %d @ revents 0x%x\n",
                              __func__, fd, revents));
                }
# endif /* LWIP_PROXY_DEBUG / DEBUG */
#endif
                nevents = (*handler->callback)(handler, fd, revents);
            }
            else {
                DPRINTF0(("%s: invalid handler for fd %d: ", __func__, fd));
                if (handler == NULL) {
                    DPRINTF0(("NULL\n"));
                }
                else {
                    DPRINTF0(("%p (callback = NULL)\n", (void *)handler));
                }
                nevents = -1; /* delete it */
            }

          update_events:
            if (nevents >= 0) {
                if (nevents != pollmgr.fds[i].events) {
                    DPRINTF2(("%s: fd %d ! nevents 0x%x\n",
                              __func__, fd, nevents));
                }
                pollmgr.fds[i].events = nevents;
            }
            else if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* Don't garbage-collect channels. */
                DPRINTF2(("%s: fd %d ! DELETED (channel %d)\n",
                          __func__, fd, i));
                pollmgr.fds[i].fd = INVALID_SOCKET;
                pollmgr.fds[i].events = 0;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
            else {
                DPRINTF2(("%s: fd %d ! DELETED\n", __func__, fd));

                /* schedule for deletion (see g/c loop for details) */
                *pdelprev = i; /* make previous entry point to us */
                pdelprev = &pollmgr.fds[i].fd;

                pollmgr.fds[i].fd = INVALID_SOCKET; /* end of list (for now) */
                pollmgr.fds[i].events = POLLMGR_GARBAGE;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
        } /* processing loop */


        /*
         * Garbage collect and compact the array.
         *
         * We overload pollfd::fd of garbage entries to store the
         * index of the next garbage entry.  The garbage list is
         * co-directional with the fds array.  The index of the first
         * entry is in "delfirst"; the last entry "points to"
         * INVALID_SOCKET.
         *
         * See the update_events code for nevents < 0 at the end of
         * the processing loop above.
         */
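
        /*
         * For example: if the processing loop above deleted dynamic
         * slots 3 and 5, then delfirst == 3, fds[3].fd == 5 and
         * fds[5].fd == INVALID_SOCKET, with both entries' events set
         * to POLLMGR_GARBAGE.
         */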
        while (delfirst != INVALID_SOCKET) {
            const int last = pollmgr.nfds - 1;

            /*
             * We want a live entry in the last slot to swap into the
             * freed slot, so make sure we have one.
             */
            if (pollmgr.fds[last].events == POLLMGR_GARBAGE /* garbage */
                || pollmgr.fds[last].fd == INVALID_SOCKET)  /* or killed */
            {
                /* drop garbage entry at the end of the array */
                --pollmgr.nfds;

                if (delfirst == (SOCKET)last) {
                    /* congruent to delnext >= pollmgr.nfds test below */
                    delfirst = INVALID_SOCKET; /* done */
                }
            }
            else {
                const SOCKET delnext = pollmgr.fds[delfirst].fd;

                /* copy live entry at the end to the first slot being freed */
                pollmgr.fds[delfirst] = pollmgr.fds[last]; /* struct copy */
                pollmgr.handlers[delfirst] = pollmgr.handlers[last];
                pollmgr.handlers[delfirst]->slot = (int)delfirst;
                --pollmgr.nfds;

                if ((nfds_t)delnext >= pollmgr.nfds) {
                    delfirst = INVALID_SOCKET; /* done */
                }
                else {
                    delfirst = delnext;
                }
            }

            pollmgr.fds[last].fd = INVALID_SOCKET;
            pollmgr.fds[last].events = 0;
            pollmgr.fds[last].revents = 0;
            pollmgr.handlers[last] = NULL;
        }
    } /* poll loop */
}


/**
 * Create a strongly held refptr.
 */
struct pollmgr_refptr *
pollmgr_refptr_create(struct pollmgr_handler *ptr)
{
    struct pollmgr_refptr *rp;

    LWIP_ASSERT1(ptr != NULL);

    rp = (struct pollmgr_refptr *)malloc(sizeof (*rp));
    if (rp == NULL) {
        return NULL;
    }

    sys_mutex_new(&rp->lock);
    rp->ptr = ptr;
    rp->strong = 1;
    rp->weak = 0;

    return rp;
}
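
/*
 * Lifecycle sketch (the owner/consumer split is illustrative): the
 * owning side creates the refptr holding the single strong
 * reference; it calls pollmgr_refptr_weak_ref() before sending "rp"
 * over a channel; the receiving side resolves it with
 * pollmgr_refptr_get(); and the owner eventually drops the strong
 * reference with pollmgr_refptr_unref().  Whichever side drops the
 * last reference frees the refptr via pollmgr_refptr_delete().
 */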


static void
pollmgr_refptr_delete(struct pollmgr_refptr *rp)
{
    if (rp == NULL) {
        return;
    }

    LWIP_ASSERT1(rp->strong == 0);
    LWIP_ASSERT1(rp->weak == 0);

    sys_mutex_free(&rp->lock);
    free(rp);
}


/**
 * Add a weak reference before "rp" is sent over a poll manager channel.
 */
void
pollmgr_refptr_weak_ref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->ptr != NULL);
    LWIP_ASSERT1(rp->strong > 0);

    ++rp->weak;

    sys_mutex_unlock(&rp->lock);
}


/**
 * Try to get the pointer from the implicitly weak reference we've
 * got from a channel.
 *
 * If we detect that the object is still strongly referenced, but no
 * longer registered with the poll manager, we abort the
 * strengthening conversion here, because an lwip thread callback is
 * already scheduled to destroy the object.
 */
struct pollmgr_handler *
pollmgr_refptr_get(struct pollmgr_refptr *rp)
{
    struct pollmgr_handler *handler;
    size_t weak;

    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->weak > 0);
    weak = --rp->weak;

    handler = rp->ptr;
    if (handler == NULL) {
        LWIP_ASSERT1(rp->strong == 0);
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
        return NULL;
    }

    LWIP_ASSERT1(rp->strong == 1);

    /*
     * Here we would do:
     *
     *     ++rp->strong;
     *
     * and then, after the channel handler is done, we would
     * decrement it back.
     *
     * Instead we check that the object is still registered with the
     * poll manager.  If it is, there's no race with the lwip thread
     * trying to drop its strong reference, as the lwip thread
     * callback to destruct the object is always scheduled by its
     * poll manager callback.
     *
     * Conversely, if we detect that the object is no longer
     * registered with the poll manager, we abort immediately, since
     * the channel handler can't do anything useful anyway and would
     * have to return right away.
     *
     * Since the channel handler would always find rp->strong as it
     * had left it, we just elide the extra strong reference creation
     * to avoid the whole back-and-forth.
     */

    if (handler->slot < 0) { /* no longer polling */
        sys_mutex_unlock(&rp->lock);
        return NULL;
    }

    sys_mutex_unlock(&rp->lock);
    return handler;
}

/**
 * Remove (the only) strong reference.
 *
 * If these were real strong/weak pointers, we should also call the
 * destructor for the referenced object here, but the object's
 * destruction is scheduled separately on the lwip thread (see
 * pollmgr_refptr_get()).
 */
void
pollmgr_refptr_unref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->strong == 1);
    --rp->strong;

    if (rp->strong > 0) {
        sys_mutex_unlock(&rp->lock);
    }
    else {
        size_t weak;

        /* void *ptr = rp->ptr; */
        rp->ptr = NULL;

        /* delete ptr; // see doc comment */

        weak = rp->weak;
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
    }
}