VirtualBox

source: vbox/trunk/src/VBox/NetworkServices/NAT/proxy_pollmgr.c @ 103795

Last change on this file since 103795 was 98103, checked in by vboxsync, 21 months ago: copyright year updates by scm.

/* $Id: proxy_pollmgr.c 98103 2023-01-17 14:15:46Z vboxsync $ */
/** @file
 * NAT Network - poll manager.
 */

/*
 * Copyright (C) 2013-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#define LOG_GROUP LOG_GROUP_NAT_SERVICE

#include "winutils.h"

#include "proxy_pollmgr.h"
#include "proxy.h"

#ifndef RT_OS_WINDOWS
#include <sys/socket.h>
#include <netinet/in.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#else
#include <iprt/errcore.h>
#include <stdlib.h>
#include <string.h>
#include "winpoll.h"
#endif

#include <iprt/req.h>
#include <iprt/errcore.h>


#define POLLMGR_GARBAGE (-1)


enum {
    POLLMGR_QUEUE = 0,

    POLLMGR_SLOT_STATIC_COUNT,
    POLLMGR_SLOT_FIRST_DYNAMIC = POLLMGR_SLOT_STATIC_COUNT
};


struct pollmgr_chan {
    struct pollmgr_handler *handler;
    void *arg;
    bool arg_valid;
};

struct pollmgr {
    struct pollfd *fds;
    struct pollmgr_handler **handlers;
    nfds_t capacity; /* allocated size of the arrays */
    nfds_t nfds; /* part of the arrays in use */

    /* channels (socketpair) for static slots */
    SOCKET chan[POLLMGR_SLOT_STATIC_COUNT][2];
#define POLLMGR_CHFD_RD 0 /* - pollmgr side */
#define POLLMGR_CHFD_WR 1 /* - client side */


    /* emulate channels with request queue */
    RTREQQUEUE queue;
    struct pollmgr_handler queue_handler;
    struct pollmgr_chan chan_handlers[POLLMGR_CHAN_COUNT];
} pollmgr;


static int pollmgr_queue_callback(struct pollmgr_handler *, SOCKET, int);
static void pollmgr_chan_call_handler(int, void *);

static void pollmgr_loop(void);

static void pollmgr_add_at(int, struct pollmgr_handler *, SOCKET, int);
static void pollmgr_refptr_delete(struct pollmgr_refptr *);


/*
 * We cannot portably peek at the length of the incoming datagram and
 * pre-allocate a pbuf chain to recvmsg() directly into it.  On Linux
 * it's possible to recv with MSG_PEEK|MSG_TRUNC, but an extra syscall
 * is probably more expensive (we haven't measured) than doing an
 * extra copy of the data, since typical UDP datagrams are small
 * enough to avoid fragmentation.
 *
 * We can use a shared buffer here since we read from the sockets
 * sequentially in a loop over pollfd.
 */
u8_t pollmgr_udpbuf[64 * 1024];

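
/*
 * Editor's note (added; not part of the original source): a minimal
 * sketch of the Linux-only alternative mentioned above.  recv() with
 * MSG_PEEK|MSG_TRUNC reports the full datagram length without
 * consuming the datagram, at the cost of one extra syscall per
 * datagram.  "peek_datagram_size" is a hypothetical helper.
 */
#if 0 /* illustrative sketch only, Linux semantics */
static ssize_t
peek_datagram_size(int s)
{
    char byte;

    /* With MSG_TRUNC the return value is the real datagram length,
     * even though at most sizeof(byte) bytes are copied out. */
    return recv(s, &byte, sizeof(byte), MSG_PEEK | MSG_TRUNC);
}
#endif
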
int
pollmgr_init(void)
{
    struct pollfd *newfds;
    struct pollmgr_handler **newhdls;
    nfds_t newcap;
    int rc, status;
    nfds_t i;

    rc = RTReqQueueCreate(&pollmgr.queue);
    if (RT_FAILURE(rc))
        return -1;

    pollmgr.fds = NULL;
    pollmgr.handlers = NULL;
    pollmgr.capacity = 0;
    pollmgr.nfds = 0;

    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
        pollmgr.chan[i][POLLMGR_CHFD_RD] = INVALID_SOCKET;
        pollmgr.chan[i][POLLMGR_CHFD_WR] = INVALID_SOCKET;
    }

    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
#ifndef RT_OS_WINDOWS
        int j;

        status = socketpair(PF_LOCAL, SOCK_DGRAM, 0, pollmgr.chan[i]);
        if (status < 0) {
            DPRINTF(("socketpair: %R[sockerr]\n", SOCKERRNO()));
            goto cleanup_close;
        }

        /* now manually make them O_NONBLOCK */
        for (j = 0; j < 2; ++j) {
            int s = pollmgr.chan[i][j];
            int sflags;

            sflags = fcntl(s, F_GETFL, 0);
            if (sflags < 0) {
                DPRINTF0(("F_GETFL: %R[sockerr]\n", errno));
                goto cleanup_close;
            }

            status = fcntl(s, F_SETFL, sflags | O_NONBLOCK);
            if (status < 0) {
                DPRINTF0(("O_NONBLOCK: %R[sockerr]\n", errno));
                goto cleanup_close;
            }
        }
#else
        status = RTWinSocketPair(PF_INET, SOCK_DGRAM, 0, pollmgr.chan[i]);
        if (RT_FAILURE(status)) {
            goto cleanup_close;
        }
#endif
    }


    newcap = 16; /* XXX: magic */
    LWIP_ASSERT1(newcap >= POLLMGR_SLOT_STATIC_COUNT);

    newfds = (struct pollfd *)
        malloc(newcap * sizeof(*pollmgr.fds));
    if (newfds == NULL) {
        DPRINTF(("%s: Failed to allocate fds array\n", __func__));
        goto cleanup_close;
    }

    newhdls = (struct pollmgr_handler **)
        malloc(newcap * sizeof(*pollmgr.handlers));
    if (newhdls == NULL) {
        DPRINTF(("%s: Failed to allocate handlers array\n", __func__));
        free(newfds);
        goto cleanup_close;
    }

    pollmgr.capacity = newcap;
    pollmgr.fds = newfds;
    pollmgr.handlers = newhdls;

    pollmgr.nfds = POLLMGR_SLOT_STATIC_COUNT;

    for (i = 0; i < pollmgr.capacity; ++i) {
        pollmgr.fds[i].fd = INVALID_SOCKET;
        pollmgr.fds[i].events = 0;
        pollmgr.fds[i].revents = 0;
    }

    /* add request queue notification */
    pollmgr.queue_handler.callback = pollmgr_queue_callback;
    pollmgr.queue_handler.data = NULL;
    pollmgr.queue_handler.slot = -1;

    pollmgr_add_at(POLLMGR_QUEUE, &pollmgr.queue_handler,
                   pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_RD],
                   POLLIN);

    return 0;

  cleanup_close:
    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
        SOCKET *chan = pollmgr.chan[i];
        if (chan[POLLMGR_CHFD_RD] != INVALID_SOCKET) {
            closesocket(chan[POLLMGR_CHFD_RD]);
            closesocket(chan[POLLMGR_CHFD_WR]);
        }
    }

    return -1;
}


/*
 * Add a new channel.  Channels are now implemented with a request
 * queue, so all channels get the same socket that triggers queue
 * processing.
 *
 * Must be called before the pollmgr loop is started, so no locking.
 */
SOCKET
pollmgr_add_chan(int slot, struct pollmgr_handler *handler)
{
    AssertReturn(0 <= slot && slot < POLLMGR_CHAN_COUNT, INVALID_SOCKET);
    AssertReturn(handler != NULL && handler->callback != NULL, INVALID_SOCKET);

    handler->slot = slot;
    pollmgr.chan_handlers[slot].handler = handler;
    return pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_WR];
}
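
/*
 * Editor's note (added; not part of the original source): a minimal
 * sketch of the registration pattern this API implies.  The handler,
 * the callback and the MY_CHAN slot constant are assumptions; a real
 * slot index must be below POLLMGR_CHAN_COUNT.
 */
#if 0 /* illustrative sketch only */
static struct pollmgr_handler my_chan_handler;

static int
my_chan_callback(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    /* fetch the pointer "sent" with pollmgr_chan_send() */
    void *ptr = pollmgr_chan_recv_ptr(handler, fd, revents);

    (void)ptr; /* ... act on ptr ... */
    return POLLIN; /* keep watching the channel */
}

static void
my_setup(void)
{
    my_chan_handler.callback = my_chan_callback;
    my_chan_handler.data = NULL;
    my_chan_handler.slot = -1;
    pollmgr_add_chan(MY_CHAN, &my_chan_handler); /* before the loop starts */
}
#endif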


/*
 * This used to actually send data over the channel's socket.  Now we
 * queue a request and send a single-byte notification over the shared
 * POLLMGR_QUEUE socket.
 */
ssize_t
pollmgr_chan_send(int slot, void *buf, size_t nbytes)
{
    static const char notification = 0x5a;

    void *ptr;
    SOCKET fd;
    ssize_t nsent;
    int rc;

    AssertReturn(0 <= slot && slot < POLLMGR_CHAN_COUNT, -1);

    /*
     * XXX: Hack alert.  We only ever "sent" a single pointer, which
     * was simultaneously both the wakeup event for the poll and the
     * argument for the channel handler that read it from the channel.
     * So now we pass this pointer to the request and arrange for the
     * handler to "read" it when it asks for it.
     */
    if (nbytes != sizeof(void *)) {
        return -1;
    }

    ptr = *(void **)buf;

    rc = RTReqQueueCallEx(pollmgr.queue, NULL, 0,
                          RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
                          (PFNRT)pollmgr_chan_call_handler, 2,
                          slot, ptr);

    fd = pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_WR];
    nsent = send(fd, &notification, 1, 0);
    if (nsent == SOCKET_ERROR) {
        DPRINTF(("send on chan %d: %R[sockerr]\n", slot, SOCKERRNO()));
        return -1;
    }
    else if ((size_t)nsent != 1) {
        DPRINTF(("send on chan %d: datagram truncated to %u bytes\n",
                 slot, (unsigned int)nsent));
        return -1;
    }

    /* caller thinks it's sending the pointer */
    return sizeof(void *);
}
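
/*
 * Editor's note (added; not part of the original source): what a call
 * looks like from the sending side.  "struct my_msg" and MY_CHAN are
 * assumptions for illustration.
 */
#if 0 /* illustrative sketch only */
static void
my_notify(struct my_msg *msg)
{
    void *ptr = msg;

    /* nbytes must be exactly sizeof(void *); anything else fails with -1 */
    pollmgr_chan_send(MY_CHAN, &ptr, sizeof(ptr));
}
#endif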


/*
 * pollmgr_chan_send() sent us a notification; process the queue.
 */
static int
pollmgr_queue_callback(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    ssize_t nread;
    int sockerr;
    int rc;

    RT_NOREF(handler, revents);
    Assert(pollmgr.queue != NIL_RTREQQUEUE);

    nread = recv(fd, (char *)pollmgr_udpbuf, sizeof(pollmgr_udpbuf), 0);
    sockerr = SOCKERRNO(); /* save now, may be clobbered */

    if (nread == SOCKET_ERROR) {
        DPRINTF0(("%s: recv: %R[sockerr]\n", __func__, sockerr));
        return POLLIN;
    }

    DPRINTF2(("%s: read %zd\n", __func__, nread));
    if (nread == 0) {
        return POLLIN;
    }

    rc = RTReqQueueProcess(pollmgr.queue, 0);
    if (RT_UNLIKELY(rc != VERR_TIMEOUT && RT_FAILURE_NP(rc))) {
        DPRINTF0(("%s: RTReqQueueProcess: %Rrc\n", __func__, rc));
    }

    return POLLIN;
}


/*
 * Queued requests use this function to emulate the call to the
 * handler's callback.
 */
static void
pollmgr_chan_call_handler(int slot, void *arg)
{
    struct pollmgr_handler *handler;
    int nevents;

    AssertReturnVoid(0 <= slot && slot < POLLMGR_CHAN_COUNT);

    handler = pollmgr.chan_handlers[slot].handler;
    AssertReturnVoid(handler != NULL && handler->callback != NULL);

    /* arrange for pollmgr_chan_recv_ptr() to "receive" the arg */
    pollmgr.chan_handlers[slot].arg = arg;
    pollmgr.chan_handlers[slot].arg_valid = true;

    nevents = handler->callback(handler, INVALID_SOCKET, POLLIN);
    if (nevents != POLLIN) {
        DPRINTF2(("%s: nevents=0x%x!\n", __func__, nevents));
    }
}


/*
 * "Receive" a pointer "sent" over a poll manager channel.
 */
void *
pollmgr_chan_recv_ptr(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    int slot;
    void *ptr;

    RT_NOREF(fd);

    slot = handler->slot;
    Assert(0 <= slot && slot < POLLMGR_CHAN_COUNT);

    if (revents & POLLNVAL) {
        errx(EXIT_FAILURE, "chan %d: fd invalid", (int)handler->slot);
        /* NOTREACHED */
    }

    if (revents & (POLLERR | POLLHUP)) {
        errx(EXIT_FAILURE, "chan %d: fd error", (int)handler->slot);
        /* NOTREACHED */
    }

    LWIP_ASSERT1(revents & POLLIN);

    if (!pollmgr.chan_handlers[slot].arg_valid) {
        err(EXIT_FAILURE, "chan %d: recv", (int)handler->slot);
        /* NOTREACHED */
    }

    ptr = pollmgr.chan_handlers[slot].arg;
    pollmgr.chan_handlers[slot].arg_valid = false;

    return ptr;
}


/*
 * Must be called from the pollmgr loop (via callbacks), so no locking.
 */
int
pollmgr_add(struct pollmgr_handler *handler, SOCKET fd, int events)
{
    int slot;

    DPRINTF2(("%s: new fd %d\n", __func__, fd));

    if (pollmgr.nfds == pollmgr.capacity) {
        struct pollfd *newfds;
        struct pollmgr_handler **newhdls;
        nfds_t newcap;
        nfds_t i;

        newcap = pollmgr.capacity * 2;

        newfds = (struct pollfd *)
            realloc(pollmgr.fds, newcap * sizeof(*pollmgr.fds));
        if (newfds == NULL) {
            DPRINTF(("%s: Failed to reallocate fds array\n", __func__));
            handler->slot = -1;
            return -1;
        }

        pollmgr.fds = newfds; /* don't crash/leak if realloc(handlers) fails */
        /* but don't update capacity yet! */

        newhdls = (struct pollmgr_handler **)
            realloc(pollmgr.handlers, newcap * sizeof(*pollmgr.handlers));
        if (newhdls == NULL) {
            DPRINTF(("%s: Failed to reallocate handlers array\n", __func__));
            /* if realloc failed here, then fds points to the new
             * array, but we pretend we still have the old capacity */
            handler->slot = -1;
            return -1;
        }

        pollmgr.handlers = newhdls;
        pollmgr.capacity = newcap;

        for (i = pollmgr.nfds; i < newcap; ++i) {
            newfds[i].fd = INVALID_SOCKET;
            newfds[i].events = 0;
            newfds[i].revents = 0;
            newhdls[i] = NULL;
        }
    }

    slot = pollmgr.nfds;
    ++pollmgr.nfds;

    pollmgr_add_at(slot, handler, fd, events);
    return slot;
}
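
/*
 * Editor's note (added; not part of the original source): a sketch of
 * registering a dynamic slot for a socket from within a poll manager
 * callback.  The handler and callback names are assumptions.
 */
#if 0 /* illustrative sketch only */
static int
my_sock_callback(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    /* ... handle I/O on fd ... */
    return POLLIN; /* events to poll for next; -1 would delete the slot */
}

static void
my_add_socket(SOCKET so)
{
    static struct pollmgr_handler h; /* must outlive the slot */

    h.callback = my_sock_callback;
    h.data = NULL;
    h.slot = -1;

    if (pollmgr_add(&h, so, POLLIN) < 0) {
        /* the slot tables could not be grown */
    }
}
#endif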


static void
pollmgr_add_at(int slot, struct pollmgr_handler *handler, SOCKET fd, int events)
{
    pollmgr.fds[slot].fd = fd;
    pollmgr.fds[slot].events = events;
    pollmgr.fds[slot].revents = 0;
    pollmgr.handlers[slot] = handler;

    handler->slot = slot;
}


void
pollmgr_update_events(int slot, int events)
{
    LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
    LWIP_ASSERT1((nfds_t)slot < pollmgr.nfds);

    pollmgr.fds[slot].events = events;
}


void
pollmgr_del_slot(int slot)
{
    LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);

    DPRINTF2(("%s(%d): fd %d ! DELETED\n",
              __func__, slot, pollmgr.fds[slot].fd));

    pollmgr.fds[slot].fd = INVALID_SOCKET; /* see poll loop */
}


void
pollmgr_thread(void *ignored)
{
    LWIP_UNUSED_ARG(ignored);
    pollmgr_loop();
}
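
/*
 * Editor's note (added; not part of the original source): the implied
 * startup sequence.  pollmgr_init() must run, and channels must be
 * registered, before this thread starts; sys_thread_new() and the
 * DEFAULT_THREAD_* macros are assumptions borrowed from lwip's sys
 * layer.
 */
#if 0 /* illustrative sketch only */
static void
my_start(void)
{
    if (pollmgr_init() != 0) {
        return; /* init failed; it closed its own sockets */
    }
    /* ... pollmgr_add_chan() calls go here ... */
    sys_thread_new("pollmgr", pollmgr_thread, NULL,
                   DEFAULT_THREAD_STACKSIZE, DEFAULT_THREAD_PRIO);
}
#endif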


static void
pollmgr_loop(void)
{
    int nready;
    SOCKET delfirst;
    SOCKET *pdelprev;
    int i;

    for (;;) {
#ifndef RT_OS_WINDOWS
        nready = poll(pollmgr.fds, pollmgr.nfds, -1);
#else
        int rc = RTWinPoll(pollmgr.fds, pollmgr.nfds, RT_INDEFINITE_WAIT, &nready);
        if (RT_FAILURE(rc)) {
            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED */
        }
#endif

        DPRINTF2(("%s: ready %d fd%s\n",
                  __func__, nready, (nready == 1 ? "" : "s")));

        if (nready < 0) {
            if (errno == EINTR) {
                continue;
            }

            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED */
        }
        else if (nready == 0) { /* cannot happen, we wait forever (-1) */
            continue;           /* - but be defensive */
        }


        delfirst = INVALID_SOCKET;
        pdelprev = &delfirst;

        for (i = 0; (nfds_t)i < pollmgr.nfds && nready > 0; ++i) {
            struct pollmgr_handler *handler;
            SOCKET fd;
            int revents, nevents;

            fd = pollmgr.fds[i].fd;
            revents = pollmgr.fds[i].revents;

            /*
             * Channel handlers can request deletion of dynamic slots
             * by calling pollmgr_del_slot() that clobbers slot's fd.
             */
            if (fd == INVALID_SOCKET && i >= POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* adjust count if events were pending for that slot */
                if (revents != 0) {
                    --nready;
                }

                /* pretend that slot handler requested deletion */
                nevents = -1;
                goto update_events;
            }

            if (revents == 0) {
                continue; /* next fd */
            }
            --nready;

            handler = pollmgr.handlers[i];

            if (handler != NULL && handler->callback != NULL) {
#ifdef LWIP_PROXY_DEBUG
# if LWIP_PROXY_DEBUG /* DEBUG */
                if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                    if (revents == POLLIN) {
                        DPRINTF2(("%s: ch %d\n", __func__, i));
                    }
                    else {
                        DPRINTF2(("%s: ch %d @ revents 0x%x!\n",
                                  __func__, i, revents));
                    }
                }
                else {
                    DPRINTF2(("%s: fd %d @ revents 0x%x\n",
                              __func__, fd, revents));
                }
# endif /* LWIP_PROXY_DEBUG / DEBUG */
#endif
                nevents = (*handler->callback)(handler, fd, revents);
            }
            else {
                DPRINTF0(("%s: invalid handler for fd %d: ", __func__, fd));
                if (handler == NULL) {
                    DPRINTF0(("NULL\n"));
                }
                else {
                    DPRINTF0(("%p (callback = NULL)\n", (void *)handler));
                }
                nevents = -1; /* delete it */
            }

          update_events:
            if (nevents >= 0) {
                if (nevents != pollmgr.fds[i].events) {
                    DPRINTF2(("%s: fd %d ! nevents 0x%x\n",
                              __func__, fd, nevents));
                }
                pollmgr.fds[i].events = nevents;
            }
            else if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* Don't garbage-collect channels. */
                DPRINTF2(("%s: fd %d ! DELETED (channel %d)\n",
                          __func__, fd, i));
                pollmgr.fds[i].fd = INVALID_SOCKET;
                pollmgr.fds[i].events = 0;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
            else {
                DPRINTF2(("%s: fd %d ! DELETED\n", __func__, fd));

                /* schedule for deletion (see g/c loop for details) */
                *pdelprev = i; /* make previous entry point to us */
                pdelprev = &pollmgr.fds[i].fd;

                pollmgr.fds[i].fd = INVALID_SOCKET; /* end of list (for now) */
                pollmgr.fds[i].events = POLLMGR_GARBAGE;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
        } /* processing loop */


        /*
         * Garbage collect and compact the array.
         *
         * We overload pollfd::fd of garbage entries to store the
         * index of the next garbage entry.  The garbage list is
         * co-directional with the fds array.  The index of the first
         * entry is in "delfirst", the last entry "points to"
         * INVALID_SOCKET.
         *
         * See update_events code for nevents < 0 at the end of the
         * processing loop above.
         */
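
        /*
         * Editor's illustration (added; not part of the original
         * source): suppose nfds == 5 and the processing loop deleted
         * slots 1 and 3.  Then delfirst == 1, fds[1].fd == 3 and
         * fds[3].fd == INVALID_SOCKET; the loop below fills those
         * holes with live entries swapped in from the tail.
         */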
        while (delfirst != INVALID_SOCKET) {
            const int last = pollmgr.nfds - 1;

            /*
             * We want a live entry in the last slot to swap into the
             * freed slot, so make sure we have one.
             */
            if (pollmgr.fds[last].events == POLLMGR_GARBAGE /* garbage */
                || pollmgr.fds[last].fd == INVALID_SOCKET)  /* or killed */
            {
                /* drop garbage entry at the end of the array */
                --pollmgr.nfds;

                if (delfirst == (SOCKET)last) {
                    /* congruent to delnext >= pollmgr.nfds test below */
                    delfirst = INVALID_SOCKET; /* done */
                }
            }
            else {
                const SOCKET delnext = pollmgr.fds[delfirst].fd;

                /* copy live entry at the end to the first slot being freed */
                pollmgr.fds[delfirst] = pollmgr.fds[last]; /* struct copy */
                pollmgr.handlers[delfirst] = pollmgr.handlers[last];
                pollmgr.handlers[delfirst]->slot = (int)delfirst;
                --pollmgr.nfds;

                if ((nfds_t)delnext >= pollmgr.nfds) {
                    delfirst = INVALID_SOCKET; /* done */
                }
                else {
                    delfirst = delnext;
                }
            }

            pollmgr.fds[last].fd = INVALID_SOCKET;
            pollmgr.fds[last].events = 0;
            pollmgr.fds[last].revents = 0;
            pollmgr.handlers[last] = NULL;
        }
    } /* poll loop */
}


/**
 * Create a strongly held refptr.
 */
struct pollmgr_refptr *
pollmgr_refptr_create(struct pollmgr_handler *ptr)
{
    struct pollmgr_refptr *rp;

    LWIP_ASSERT1(ptr != NULL);

    rp = (struct pollmgr_refptr *)malloc(sizeof (*rp));
    if (rp == NULL) {
        return NULL;
    }

    sys_mutex_new(&rp->lock);
    rp->ptr = ptr;
    rp->strong = 1;
    rp->weak = 0;

    return rp;
}


static void
pollmgr_refptr_delete(struct pollmgr_refptr *rp)
{
    if (rp == NULL) {
        return;
    }

    LWIP_ASSERT1(rp->strong == 0);
    LWIP_ASSERT1(rp->weak == 0);

    sys_mutex_free(&rp->lock);
    free(rp);
}


/**
 * Add a weak reference before "rp" is sent over a poll manager channel.
 */
void
pollmgr_refptr_weak_ref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->ptr != NULL);
    LWIP_ASSERT1(rp->strong > 0);

    ++rp->weak;

    sys_mutex_unlock(&rp->lock);
}


/**
 * Try to get the pointer from the implicitly weak reference we've got
 * from a channel.
 *
 * If we detect that the object is still strongly referenced, but no
 * longer registered with the poll manager, we abort the strengthening
 * conversion here, because the lwip thread callback is already
 * scheduled to destruct the object.
 */
struct pollmgr_handler *
pollmgr_refptr_get(struct pollmgr_refptr *rp)
{
    struct pollmgr_handler *handler;
    size_t weak;

    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->weak > 0);
    weak = --rp->weak;

    handler = rp->ptr;
    if (handler == NULL) {
        LWIP_ASSERT1(rp->strong == 0);
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
        return NULL;
    }

    LWIP_ASSERT1(rp->strong == 1);

    /*
     * Here we would do:
     *
     *     ++rp->strong;
     *
     * and then, after the channel handler is done, we would decrement
     * it back.
     *
     * Instead we check that the object is still registered with the
     * poll manager.  If it is, there's no race with the lwip thread
     * trying to drop its strong reference, as the lwip thread
     * callback to destruct the object is always scheduled by its poll
     * manager callback.
     *
     * Conversely, if we detect that the object is no longer
     * registered with the poll manager, we abort immediately, since
     * the channel handler can't do anything useful anyway and would
     * have to return right away.
     *
     * Since the channel handler would always find rp->strong as it
     * had left it, we just elide the extra strong reference creation
     * to avoid the whole back-and-forth.
     */

    if (handler->slot < 0) { /* no longer polling */
        sys_mutex_unlock(&rp->lock);
        return NULL;
    }

    sys_mutex_unlock(&rp->lock);
    return handler;
}
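
/*
 * Editor's note (added; not part of the original source): the
 * hand-off pattern the two functions above imply.  The lwip side
 * holds the single strong reference; before "sending" the refptr
 * over a channel it takes a weak reference, which the channel
 * handler then tries to convert back.  MY_CHAN is an assumed slot
 * constant.
 */
#if 0 /* illustrative sketch only */
/* sender (lwip thread) */
static void
send_side(struct pollmgr_refptr *rp)
{
    pollmgr_refptr_weak_ref(rp);
    pollmgr_chan_send(MY_CHAN, &rp, sizeof(rp));
}

/* receiver (inside the channel handler on the pollmgr thread) */
static void
recv_side(struct pollmgr_refptr *rp)
{
    struct pollmgr_handler *h = pollmgr_refptr_get(rp);

    if (h == NULL) {
        return; /* object gone, or no longer registered with pollmgr */
    }
    /* ... use h ... */
}
#endif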


/**
 * Remove the (only) strong reference.
 *
 * If these were real strong/weak pointers, we would also have to
 * call the destructor for the referenced object here; instead its
 * destruction is scheduled separately, on the lwip thread (see
 * pollmgr_refptr_get()).
 */
void
pollmgr_refptr_unref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->strong == 1);
    --rp->strong;

    if (rp->strong > 0) {
        sys_mutex_unlock(&rp->lock);
    }
    else {
        size_t weak;

        /* void *ptr = rp->ptr; */
        rp->ptr = NULL;

        /* delete ptr; // see doc comment */

        weak = rp->weak;
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
    }
}