VirtualBox

source: vbox/trunk/src/VBox/NetworkServices/NAT/proxy_pollmgr.c

Last change on this file was 106061, checked in by vboxsync on 2024-09-16

Copyright year updates by scm.

/* $Id: proxy_pollmgr.c 106061 2024-09-16 14:03:52Z vboxsync $ */
/** @file
 * NAT Network - poll manager.
 */

/*
 * Copyright (C) 2013-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#define LOG_GROUP LOG_GROUP_NAT_SERVICE

#include "winutils.h"

#include "proxy_pollmgr.h"
#include "proxy.h"

#ifndef RT_OS_WINDOWS
#include <sys/socket.h>
#include <netinet/in.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#else
#include <iprt/errcore.h>
#include <stdlib.h>
#include <string.h>
#include "winpoll.h"
#endif

#include <iprt/req.h>
#include <iprt/errcore.h>


#define POLLMGR_GARBAGE (-1)


enum {
    POLLMGR_QUEUE = 0,

    POLLMGR_SLOT_STATIC_COUNT,
    POLLMGR_SLOT_FIRST_DYNAMIC = POLLMGR_SLOT_STATIC_COUNT
};


struct pollmgr_chan {
    struct pollmgr_handler *handler;
    void *arg;
    bool arg_valid;
};

struct pollmgr {
    struct pollfd *fds;
    struct pollmgr_handler **handlers;
    nfds_t capacity;        /* allocated size of the arrays */
    nfds_t nfds;            /* part of the arrays in use */

    /* channels (socketpair) for static slots */
    SOCKET chan[POLLMGR_SLOT_STATIC_COUNT][2];
#define POLLMGR_CHFD_RD 0   /* - pollmgr side */
#define POLLMGR_CHFD_WR 1   /* - client side */


    /* emulate channels with request queue */
    RTREQQUEUE queue;
    struct pollmgr_handler queue_handler;
    struct pollmgr_chan chan_handlers[POLLMGR_CHAN_COUNT];
} pollmgr;
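
/*
 * A layout note (summarizing the code below): fds[] and handlers[]
 * are parallel arrays, so handlers[i] services fds[i].  Slots below
 * POLLMGR_SLOT_STATIC_COUNT are the static channel slots; sockets
 * added at run time occupy slots from POLLMGR_SLOT_FIRST_DYNAMIC up
 * to nfds - 1.
 */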


static int pollmgr_queue_callback(struct pollmgr_handler *, SOCKET, int);
static DECLCALLBACK(void) pollmgr_chan_call_handler(int, void *);

static void pollmgr_loop(void);

static void pollmgr_add_at(int, struct pollmgr_handler *, SOCKET, int);
static void pollmgr_refptr_delete(struct pollmgr_refptr *);


/*
 * We cannot portably peek at the length of the incoming datagram and
 * pre-allocate a pbuf chain to recvmsg() directly into it.  On Linux
 * it's possible to recv with MSG_PEEK|MSG_TRUNC, but the extra
 * syscall is probably more expensive (haven't measured) than doing
 * an extra copy of the data, since typical UDP datagrams are small
 * enough to avoid fragmentation.
 *
 * We can use a shared buffer here since we read from the sockets
 * sequentially in a loop over pollfd.
 */
u8_t pollmgr_udpbuf[64 * 1024];


int
pollmgr_init(void)
{
    struct pollfd *newfds;
    struct pollmgr_handler **newhdls;
    nfds_t newcap;
    int rc, status;
    nfds_t i;

    rc = RTReqQueueCreate(&pollmgr.queue);
    if (RT_FAILURE(rc))
        return -1;

    pollmgr.fds = NULL;
    pollmgr.handlers = NULL;
    pollmgr.capacity = 0;
    pollmgr.nfds = 0;

    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
        pollmgr.chan[i][POLLMGR_CHFD_RD] = INVALID_SOCKET;
        pollmgr.chan[i][POLLMGR_CHFD_WR] = INVALID_SOCKET;
    }

    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
#ifndef RT_OS_WINDOWS
        int j;

        status = socketpair(PF_LOCAL, SOCK_DGRAM, 0, pollmgr.chan[i]);
        if (status < 0) {
            DPRINTF(("socketpair: %R[sockerr]\n", SOCKERRNO()));
            goto cleanup_close;
        }

        /* now manually make them O_NONBLOCK */
        for (j = 0; j < 2; ++j) {
            int s = pollmgr.chan[i][j];
            int sflags;

            sflags = fcntl(s, F_GETFL, 0);
            if (sflags < 0) {
                DPRINTF0(("F_GETFL: %R[sockerr]\n", errno));
                goto cleanup_close;
            }

            status = fcntl(s, F_SETFL, sflags | O_NONBLOCK);
            if (status < 0) {
                DPRINTF0(("O_NONBLOCK: %R[sockerr]\n", errno));
                goto cleanup_close;
            }
        }
#else
        status = RTWinSocketPair(PF_INET, SOCK_DGRAM, 0, pollmgr.chan[i]);
        if (RT_FAILURE(status)) {
            goto cleanup_close;
        }
#endif
    }


    newcap = 16;                /* XXX: magic */
    LWIP_ASSERT1(newcap >= POLLMGR_SLOT_STATIC_COUNT);

    newfds = (struct pollfd *)
        malloc(newcap * sizeof(*pollmgr.fds));
    if (newfds == NULL) {
        DPRINTF(("%s: Failed to allocate fds array\n", __func__));
        goto cleanup_close;
    }

    newhdls = (struct pollmgr_handler **)
        malloc(newcap * sizeof(*pollmgr.handlers));
    if (newhdls == NULL) {
        DPRINTF(("%s: Failed to allocate handlers array\n", __func__));
        free(newfds);
        goto cleanup_close;
    }

    pollmgr.capacity = newcap;
    pollmgr.fds = newfds;
    pollmgr.handlers = newhdls;

    pollmgr.nfds = POLLMGR_SLOT_STATIC_COUNT;

    for (i = 0; i < pollmgr.capacity; ++i) {
        pollmgr.fds[i].fd = INVALID_SOCKET;
        pollmgr.fds[i].events = 0;
        pollmgr.fds[i].revents = 0;
    }

    /* add request queue notification */
    pollmgr.queue_handler.callback = pollmgr_queue_callback;
    pollmgr.queue_handler.data = NULL;
    pollmgr.queue_handler.slot = -1;

    pollmgr_add_at(POLLMGR_QUEUE, &pollmgr.queue_handler,
                   pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_RD],
                   POLLIN);

    return 0;

  cleanup_close:
    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
        SOCKET *chan = pollmgr.chan[i];
        if (chan[POLLMGR_CHFD_RD] != INVALID_SOCKET) {
            closesocket(chan[POLLMGR_CHFD_RD]);
            closesocket(chan[POLLMGR_CHFD_WR]);
        }
    }

    return -1;
}


/*
 * Add a new channel.  We now implement channels with a request
 * queue, so all channels get the same socket that triggers queue
 * processing.
 *
 * Must be called before the pollmgr loop is started, so no locking.
 */
SOCKET
pollmgr_add_chan(int slot, struct pollmgr_handler *handler)
{
    AssertReturn(0 <= slot && slot < POLLMGR_CHAN_COUNT, INVALID_SOCKET);
    AssertReturn(handler != NULL && handler->callback != NULL, INVALID_SOCKET);

    handler->slot = slot;
    pollmgr.chan_handlers[slot].handler = handler;
    return pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_WR];
}
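
/*
 * Usage sketch (illustrative only; the channel id and callback names
 * are hypothetical, see proxy_pollmgr.h for the real slots):
 *
 *     static struct pollmgr_handler my_chan_handler;
 *
 *     my_chan_handler.callback = my_chan_callback;
 *     my_chan_handler.data = NULL;
 *     my_chan_handler.slot = -1;
 *     pollmgr_add_chan(POLLMGR_CHAN_MY_MODULE, &my_chan_handler);
 *
 * Since channels are emulated with the request queue, the returned
 * socket is the same shared POLLMGR_QUEUE write end for every
 * channel.
 */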


/*
 * This used to actually send data over the channel's socket.  Now we
 * queue a request and send a single-byte notification over the
 * shared POLLMGR_QUEUE socket.
 */
ssize_t
pollmgr_chan_send(int slot, void *buf, size_t nbytes)
{
    static const char notification = 0x5a;

    void *ptr;
    SOCKET fd;
    ssize_t nsent;

    AssertReturn(0 <= slot && slot < POLLMGR_CHAN_COUNT, -1);

    /*
     * XXX: Hack alert.  We only ever "sent" a single pointer which
     * was simultaneously both the wakeup event for the poll and the
     * argument for the channel handler that it read from the channel.
     * So now we pass this pointer to the request and arrange for the
     * handler to "read" it when it asks for it.
     */
    if (nbytes != sizeof(void *)) {
        return -1;
    }

    ptr = *(void **)buf;

    int rc = RTReqQueueCallEx(pollmgr.queue, NULL, 0, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
                              (PFNRT)pollmgr_chan_call_handler, 2, slot, ptr);
    if (RT_FAILURE(rc))
    {
        DPRINTF(("Queuing pollmgr_chan_call_handler() on poll manager queue failed with %Rrc\n", rc));
        return -1;
    }

    fd = pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_WR];
    nsent = send(fd, &notification, 1, 0);
    if (nsent == SOCKET_ERROR) {
        DPRINTF(("send on chan %d: %R[sockerr]\n", slot, SOCKERRNO()));
        return -1;
    }
    else if ((size_t)nsent != 1) {
        DPRINTF(("send on chan %d: datagram truncated to %u bytes\n",
                 slot, (unsigned int)nsent));
        return -1;
    }

    /* caller thinks it's sending the pointer */
    return sizeof(void *);
}
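
/*
 * Usage sketch (illustrative; POLLMGR_CHAN_MY_MODULE and
 * make_my_msg() are hypothetical).  The payload is always a single
 * pointer, passed by address:
 *
 *     struct my_msg *msg = make_my_msg();
 *
 *     if (pollmgr_chan_send(POLLMGR_CHAN_MY_MODULE, &msg, sizeof(msg)) < 0) {
 *         // not queued - the caller still owns msg
 *     }
 */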


/*
 * pollmgr_chan_send() sent us a notification, process the queue.
 */
static int
pollmgr_queue_callback(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    RT_NOREF(handler, revents);
    Assert(pollmgr.queue != NIL_RTREQQUEUE);

    ssize_t nread = recv(fd, (char *)pollmgr_udpbuf, sizeof(pollmgr_udpbuf), 0);
    if (nread == SOCKET_ERROR) {
        DPRINTF0(("%s: recv: %R[sockerr]\n", __func__, SOCKERRNO()));
        return POLLIN;
    }

    DPRINTF2(("%s: read %zd\n", __func__, nread));
    if (nread == 0) {
        return POLLIN;
    }

    int rc = RTReqQueueProcess(pollmgr.queue, 0);
    if (RT_UNLIKELY(rc != VERR_TIMEOUT && RT_FAILURE_NP(rc))) {
        DPRINTF0(("%s: RTReqQueueProcess: %Rrc\n", __func__, rc));
    }

    return POLLIN;
}


/*
 * Queued requests use this function to emulate the call to the
 * handler's callback.
 */
static void
pollmgr_chan_call_handler(int slot, void *arg)
{
    struct pollmgr_handler *handler;
    int nevents;

    AssertReturnVoid(0 <= slot && slot < POLLMGR_CHAN_COUNT);

    handler = pollmgr.chan_handlers[slot].handler;
    AssertReturnVoid(handler != NULL && handler->callback != NULL);

    /* arrange for pollmgr_chan_recv_ptr() to "receive" the arg */
    pollmgr.chan_handlers[slot].arg = arg;
    pollmgr.chan_handlers[slot].arg_valid = true;

    nevents = handler->callback(handler, INVALID_SOCKET, POLLIN);
    if (nevents != POLLIN) {
        DPRINTF2(("%s: nevents=0x%x!\n", __func__, nevents));
    }
}


/*
 * "Receive" a pointer "sent" over poll manager channel.
 */
void *
pollmgr_chan_recv_ptr(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    int slot;
    void *ptr;

    RT_NOREF(fd);

    slot = handler->slot;
    Assert(0 <= slot && slot < POLLMGR_CHAN_COUNT);

    if (revents & POLLNVAL) {
        errx(EXIT_FAILURE, "chan %d: fd invalid", (int)handler->slot);
        /* NOTREACHED */
    }

    if (revents & (POLLERR | POLLHUP)) {
        errx(EXIT_FAILURE, "chan %d: fd error", (int)handler->slot);
        /* NOTREACHED */
    }

    LWIP_ASSERT1(revents & POLLIN);

    if (!pollmgr.chan_handlers[slot].arg_valid) {
        err(EXIT_FAILURE, "chan %d: recv", (int)handler->slot);
        /* NOTREACHED */
    }

    ptr = pollmgr.chan_handlers[slot].arg;
    pollmgr.chan_handlers[slot].arg_valid = false;

    return ptr;
}
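
/*
 * Sketch of a channel callback built on the helper above (names are
 * illustrative; fd is INVALID_SOCKET when invoked via the request
 * queue):
 *
 *     static int
 *     my_chan_callback(struct pollmgr_handler *handler, SOCKET fd, int revents)
 *     {
 *         struct my_msg *msg =
 *             (struct my_msg *)pollmgr_chan_recv_ptr(handler, fd, revents);
 *
 *         // ... consume msg ...
 *
 *         return POLLIN;  // keep polling the channel
 *     }
 */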


/*
 * Must be called from pollmgr loop (via callbacks), so no locking.
 */
int
pollmgr_add(struct pollmgr_handler *handler, SOCKET fd, int events)
{
    int slot;

    DPRINTF2(("%s: new fd %d\n", __func__, fd));

    if (pollmgr.nfds == pollmgr.capacity) {
        struct pollfd *newfds;
        struct pollmgr_handler **newhdls;
        nfds_t newcap;
        nfds_t i;

        newcap = pollmgr.capacity * 2;

        newfds = (struct pollfd *)
            realloc(pollmgr.fds, newcap * sizeof(*pollmgr.fds));
        if (newfds == NULL) {
            DPRINTF(("%s: Failed to reallocate fds array\n", __func__));
            handler->slot = -1;
            return -1;
        }

        pollmgr.fds = newfds; /* don't crash/leak if realloc(handlers) fails */
        /* but don't update capacity yet! */

        newhdls = (struct pollmgr_handler **)
            realloc(pollmgr.handlers, newcap * sizeof(*pollmgr.handlers));
        if (newhdls == NULL) {
            DPRINTF(("%s: Failed to reallocate handlers array\n", __func__));
            /* if realloc failed here, then fds already points to the
             * new array, but we pretend we still have the old capacity */
            handler->slot = -1;
            return -1;
        }

        pollmgr.handlers = newhdls;
        pollmgr.capacity = newcap;

        for (i = pollmgr.nfds; i < newcap; ++i) {
            newfds[i].fd = INVALID_SOCKET;
            newfds[i].events = 0;
            newfds[i].revents = 0;
            newhdls[i] = NULL;
        }
    }

    slot = pollmgr.nfds;
    ++pollmgr.nfds;

    pollmgr_add_at(slot, handler, fd, events);
    return slot;
}
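
/*
 * Usage sketch (illustrative; "conn" with an embedded
 * pollmgr_handler is hypothetical).  A callback running on the poll
 * manager thread registers a freshly created socket like this:
 *
 *     conn->pmhdl.callback = my_conn_callback;
 *     conn->pmhdl.data = (void *)conn;
 *     conn->pmhdl.slot = -1;
 *
 *     if (pollmgr_add(&conn->pmhdl, conn->sock, POLLIN) < 0) {
 *         // not registered: close conn->sock and free conn
 *     }
 *
 * The slot is also recorded in conn->pmhdl.slot, to be used later
 * with pollmgr_update_events() or pollmgr_del_slot().
 */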


static void
pollmgr_add_at(int slot, struct pollmgr_handler *handler, SOCKET fd, int events)
{
    pollmgr.fds[slot].fd = fd;
    pollmgr.fds[slot].events = events;
    pollmgr.fds[slot].revents = 0;
    pollmgr.handlers[slot] = handler;

    handler->slot = slot;
}


void
pollmgr_update_events(int slot, int events)
{
    LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
    LWIP_ASSERT1((nfds_t)slot < pollmgr.nfds);

    pollmgr.fds[slot].events = events;
}


void
pollmgr_del_slot(int slot)
{
    LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);

    DPRINTF2(("%s(%d): fd %d ! DELETED\n",
              __func__, slot, pollmgr.fds[slot].fd));

    pollmgr.fds[slot].fd = INVALID_SOCKET; /* see poll loop */
}


void
pollmgr_thread(void *ignored)
{
    LWIP_UNUSED_ARG(ignored);
    pollmgr_loop();
}


static void
pollmgr_loop(void)
{
    int nready;
    SOCKET delfirst;
    SOCKET *pdelprev;
    int i;

    for (;;) {
#ifndef RT_OS_WINDOWS
        nready = poll(pollmgr.fds, pollmgr.nfds, -1);
#else
        int rc = RTWinPoll(pollmgr.fds, pollmgr.nfds, RT_INDEFINITE_WAIT, &nready);
        if (RT_FAILURE(rc)) {
            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED */
        }
#endif

        DPRINTF2(("%s: ready %d fd%s\n",
                  __func__, nready, (nready == 1 ? "" : "s")));

        if (nready < 0) {
            if (errno == EINTR) {
                continue;
            }

            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED */
        }
        else if (nready == 0) { /* cannot happen, we wait forever (-1) */
            continue;           /* - but be defensive */
        }


        delfirst = INVALID_SOCKET;
        pdelprev = &delfirst;

        for (i = 0; (nfds_t)i < pollmgr.nfds && nready > 0; ++i) {
            struct pollmgr_handler *handler;
            SOCKET fd;
            int revents, nevents;

            fd = pollmgr.fds[i].fd;
            revents = pollmgr.fds[i].revents;

            /*
             * Channel handlers can request deletion of dynamic slots
             * by calling pollmgr_del_slot(), which clobbers the
             * slot's fd.
             */
            if (fd == INVALID_SOCKET && i >= POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* adjust the count if events were pending for that slot */
                if (revents != 0) {
                    --nready;
                }

                /* pretend that the slot handler requested deletion */
                nevents = -1;
                goto update_events;
            }

            if (revents == 0) {
                continue; /* next fd */
            }
            --nready;

            handler = pollmgr.handlers[i];

            if (handler != NULL && handler->callback != NULL) {
#ifdef LWIP_PROXY_DEBUG
# if LWIP_PROXY_DEBUG /* DEBUG */
                if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                    if (revents == POLLIN) {
                        DPRINTF2(("%s: ch %d\n", __func__, i));
                    }
                    else {
                        DPRINTF2(("%s: ch %d @ revents 0x%x!\n",
                                  __func__, i, revents));
                    }
                }
                else {
                    DPRINTF2(("%s: fd %d @ revents 0x%x\n",
                              __func__, fd, revents));
                }
# endif /* LWIP_PROXY_DEBUG / DEBUG */
#endif
                nevents = (*handler->callback)(handler, fd, revents);
            }
            else {
                DPRINTF0(("%s: invalid handler for fd %d: ", __func__, fd));
                if (handler == NULL) {
                    DPRINTF0(("NULL\n"));
                }
                else {
                    DPRINTF0(("%p (callback = NULL)\n", (void *)handler));
                }
                nevents = -1; /* delete it */
            }

          update_events:
            if (nevents >= 0) {
                if (nevents != pollmgr.fds[i].events) {
                    DPRINTF2(("%s: fd %d ! nevents 0x%x\n",
                              __func__, fd, nevents));
                }
                pollmgr.fds[i].events = nevents;
            }
            else if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* Don't garbage-collect channels. */
                DPRINTF2(("%s: fd %d ! DELETED (channel %d)\n",
                          __func__, fd, i));
                pollmgr.fds[i].fd = INVALID_SOCKET;
                pollmgr.fds[i].events = 0;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
            else {
                DPRINTF2(("%s: fd %d ! DELETED\n", __func__, fd));

                /* schedule for deletion (see g/c loop for details) */
                *pdelprev = i;  /* make previous entry point to us */
                pdelprev = &pollmgr.fds[i].fd;

                pollmgr.fds[i].fd = INVALID_SOCKET; /* end of list (for now) */
                pollmgr.fds[i].events = POLLMGR_GARBAGE;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
        } /* processing loop */


        /*
         * Garbage collect and compact the array.
         *
         * We overload pollfd::fd of garbage entries to store the
         * index of the next garbage entry.  The garbage list is
         * co-directional with the fds array.  The index of the first
         * entry is in "delfirst", the last entry "points to"
         * INVALID_SOCKET.
         *
         * See the update_events code for nevents < 0 at the end of
         * the processing loop above.
         */
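        /*
         * Worked example: if the processing loop deleted slots 3 and
         * 5, then delfirst == 3, fds[3].fd == 5 and fds[5].fd ==
         * INVALID_SOCKET, with both entries' events set to
         * POLLMGR_GARBAGE.  Each iteration below either drops a
         * garbage entry from the tail of the array or moves the live
         * tail entry into the first freed slot.
         */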
        while (delfirst != INVALID_SOCKET) {
            const int last = pollmgr.nfds - 1;

            /*
             * We want a live entry in the last slot to swap into the
             * freed slot, so make sure we have one.
             */
            if (pollmgr.fds[last].events == POLLMGR_GARBAGE /* garbage */
                || pollmgr.fds[last].fd == INVALID_SOCKET)  /* or killed */
            {
                /* drop garbage entry at the end of the array */
                --pollmgr.nfds;

                if (delfirst == (SOCKET)last) {
                    /* congruent to delnext >= pollmgr.nfds test below */
                    delfirst = INVALID_SOCKET; /* done */
                }
            }
            else {
                const SOCKET delnext = pollmgr.fds[delfirst].fd;

                /* copy live entry at the end to the first slot being freed */
                pollmgr.fds[delfirst] = pollmgr.fds[last]; /* struct copy */
                pollmgr.handlers[delfirst] = pollmgr.handlers[last];
                pollmgr.handlers[delfirst]->slot = (int)delfirst;
                --pollmgr.nfds;

                if ((nfds_t)delnext >= pollmgr.nfds) {
                    delfirst = INVALID_SOCKET; /* done */
                }
                else {
                    delfirst = delnext;
                }
            }

            pollmgr.fds[last].fd = INVALID_SOCKET;
            pollmgr.fds[last].events = 0;
            pollmgr.fds[last].revents = 0;
            pollmgr.handlers[last] = NULL;
        }
    } /* poll loop */
}


/**
 * Create strongly held refptr.
 */
struct pollmgr_refptr *
pollmgr_refptr_create(struct pollmgr_handler *ptr)
{
    struct pollmgr_refptr *rp;

    LWIP_ASSERT1(ptr != NULL);

    rp = (struct pollmgr_refptr *)malloc(sizeof (*rp));
    if (rp == NULL) {
        return NULL;
    }

    sys_mutex_new(&rp->lock);
    rp->ptr = ptr;
    rp->strong = 1;
    rp->weak = 0;

    return rp;
}
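
/*
 * Lifecycle sketch (illustrative; "conn" and the channel id are
 * hypothetical).  The owner creates the refptr next to the handler
 * it wraps and holds the only strong reference:
 *
 *     conn->rp = pollmgr_refptr_create(&conn->pmhdl);
 *
 * A sender takes a weak reference before "sending" the refptr over a
 * channel, and the receiving callback converts it back:
 *
 *     pollmgr_refptr_weak_ref(conn->rp);
 *     pollmgr_chan_send(POLLMGR_CHAN_MY_MODULE, &conn->rp, sizeof(conn->rp));
 *
 *     // ... later, in the channel callback ...
 *     struct pollmgr_handler *h = pollmgr_refptr_get(rp);  // may be NULL
 *
 * The owner eventually drops the strong reference with
 * pollmgr_refptr_unref(); whichever side drops the last reference
 * frees the refptr itself.
 */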


static void
pollmgr_refptr_delete(struct pollmgr_refptr *rp)
{
    if (rp == NULL) {
        return;
    }

    LWIP_ASSERT1(rp->strong == 0);
    LWIP_ASSERT1(rp->weak == 0);

    sys_mutex_free(&rp->lock);
    free(rp);
}


/**
 * Add weak reference before "rp" is sent over a poll manager channel.
 */
void
pollmgr_refptr_weak_ref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->ptr != NULL);
    LWIP_ASSERT1(rp->strong > 0);

    ++rp->weak;

    sys_mutex_unlock(&rp->lock);
}


/**
 * Try to get the pointer from the implicitly weak reference we've
 * got from a channel.
 *
 * If we detect that the object is still strongly referenced, but no
 * longer registered with the poll manager, we abort the
 * strengthening conversion here b/c the lwip thread callback is
 * already scheduled to destruct the object.
 */
struct pollmgr_handler *
pollmgr_refptr_get(struct pollmgr_refptr *rp)
{
    struct pollmgr_handler *handler;
    size_t weak;

    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->weak > 0);
    weak = --rp->weak;

    handler = rp->ptr;
    if (handler == NULL) {
        LWIP_ASSERT1(rp->strong == 0);
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
        return NULL;
    }

    LWIP_ASSERT1(rp->strong == 1);

    /*
     * Here we would do:
     *
     *     ++rp->strong;
     *
     * and then, after the channel handler is done, we would
     * decrement it back.
     *
     * Instead we check that the object is still registered with the
     * poll manager.  If it is, there's no race with the lwip thread
     * trying to drop its strong reference, as the lwip thread
     * callback to destruct the object is always scheduled by its
     * poll manager callback.
     *
     * Conversely, if we detect that the object is no longer
     * registered with the poll manager, we immediately abort, since
     * the channel handler can't do anything useful anyway and would
     * have to return immediately.
     *
     * Since the channel handler would always find rp->strong as it
     * had left it, we just elide the extra strong reference creation
     * to avoid the whole back-and-forth.
     */

    if (handler->slot < 0) { /* no longer polling */
        sys_mutex_unlock(&rp->lock);
        return NULL;
    }

    sys_mutex_unlock(&rp->lock);
    return handler;
}


/**
 * Remove (the only) strong reference.
 *
 * If these were real strong/weak pointers, we would also have to
 * call the destructor for the referenced object here, but the
 * destruction of the referenced object is the responsibility of its
 * owner (cf. the comment in pollmgr_refptr_get()).
 */
void
pollmgr_refptr_unref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->strong == 1);
    --rp->strong;

    if (rp->strong > 0) {
        sys_mutex_unlock(&rp->lock);
    }
    else {
        size_t weak;

        /* void *ptr = rp->ptr; */
        rp->ptr = NULL;

        /* delete ptr; // see doc comment */

        weak = rp->weak;
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
    }
}