VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/misc.c

Last change on this file was 98103, checked in by vboxsync, 17 months ago

Copyright year updates by scm.

[28449]1/* $Id: misc.c 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * NAT - helpers.
4 */
5
[1]6/*
[98103]7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
[28449]8 *
[96407]9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
[28449]26 */
27
28/*
29 * This code is based on:
30 *
[1]31 * Copyright (c) 1995 Danny Gasparovski.
[1033]32 *
[1]33 * Please read the file COPYRIGHT for the
34 * terms and conditions of the copyright.
35 */
36
[41322]37#ifndef VBOX_NAT_TST_QUEUE
[1]38#include <slirp.h>
[38492]39#include "zone.h"
[1]40
[41322]41# ifndef HAVE_INET_ATON
[1]42int
[14964]43inet_aton(const char *cp, struct in_addr *ia)
[1]44{
[14964]45 u_int32_t addr = inet_addr(cp);
46 if (addr == 0xffffffff)
47 return 0;
48 ia->s_addr = addr;
49 return 1;
[1]50}
[41322]51# endif
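/*
 * A caveat worth noting about the inet_aton() fallback above: inet_addr()
 * returns 0xffffffff both on a parse error and for the valid broadcast
 * address "255.255.255.255", so the fallback rejects the latter.  A minimal
 * sketch of a wrapper that special-cases the broadcast string -- the helper
 * name natInetAtonCompat is hypothetical and assumes the usual <string.h>
 * and <netinet/in.h> declarations are in scope:
 *
 * @code
 *  static int natInetAtonCompat(const char *pszAddr, struct in_addr *pAddr)
 *  {
 *      if (!strcmp(pszAddr, "255.255.255.255"))
 *      {
 *          pAddr->s_addr = INADDR_BROADCAST;   // the one value inet_addr() cannot report
 *          return 1;
 *      }
 *      return inet_aton(pszAddr, pAddr);
 *  }
 * @endcode
 */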
[1]52
53/*
54 * Get our IP address and put it in our_addr
55 */
56void
[1033]57getouraddr(PNATState pData)
[1]58{
[14964]59 our_addr.s_addr = loopback_addr.s_addr;
[1]60}
[41322]61#else /* VBOX_NAT_TST_QUEUE */
[41323]62# include <iprt/cdefs.h>
63# include <iprt/types.h>
[41322]64# include "misc.h"
65#endif
[14964]66struct quehead
67{
68 struct quehead *qh_link;
69 struct quehead *qh_rlink;
[1]70};
71
72void
[1048]73insque(PNATState pData, void *a, void *b)
[1]74{
[14964]75 register struct quehead *element = (struct quehead *) a;
76 register struct quehead *head = (struct quehead *) b;
[39101]77 NOREF(pData);
[14964]78 element->qh_link = head->qh_link;
79 head->qh_link = (struct quehead *)element;
80 element->qh_rlink = (struct quehead *)head;
81 ((struct quehead *)(element->qh_link))->qh_rlink = (struct quehead *)element;
[1]82}
83
84void
[1048]85remque(PNATState pData, void *a)
[1]86{
[14964]87 register struct quehead *element = (struct quehead *) a;
[39101]88 NOREF(pData);
[14964]89 ((struct quehead *)(element->qh_link))->qh_rlink = element->qh_rlink;
90 ((struct quehead *)(element->qh_rlink))->qh_link = element->qh_link;
91 element->qh_rlink = NULL;
92 /* element->qh_link = NULL; TCP FIN1 crashes if you do this. Why ? */
[1]93}
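/*
 * A minimal usage sketch for the queue helpers above: insque() links an
 * element in right after the head of a circular doubly-linked list and
 * remque() unlinks it again.  Any structure whose first two members mirror
 * struct quehead can be queued this way.  The node type and field names
 * below are hypothetical, and a PNATState pData is assumed in scope (the
 * helpers take it only for interface symmetry and ignore it).
 *
 * @code
 *  struct my_node
 *  {
 *      struct my_node *next;       // overlays qh_link
 *      struct my_node *prev;       // overlays qh_rlink
 *      int             iPayload;
 *  };
 *
 *  struct quehead head = { &head, &head };     // an empty queue points back at itself
 *  struct my_node node = { NULL, NULL, 42 };
 *
 *  insque(pData, &node, &head);                // node is now the first element after head
 *  remque(pData, &node);                       // and now it is unlinked again
 * @endcode
 */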
94
[41322]95#ifndef VBOX_NAT_TST_QUEUE
[88759]96
[1]97/*
98 * Set fd non-blocking
99 */
100void
[14964]101fd_nonblock(int fd)
[1]102{
[41322]103# ifdef FIONBIO
[63012]104# ifdef RT_OS_WINDOWS
105 u_long opt = 1;
106# else
[14964]107 int opt = 1;
[63012]108# endif
[14964]109 ioctlsocket(fd, FIONBIO, &opt);
[41322]110# else /* !FIONBIO */
[14964]111 int opt;
[1033]112
[14964]113 opt = fcntl(fd, F_GETFL, 0);
114 opt |= O_NONBLOCK;
115 fcntl(fd, F_SETFL, opt);
[41322]116# endif
[1]117}
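/*
 * A short usage sketch with hypothetical variable names: sockets the NAT
 * engine creates are typically switched to non-blocking before they are
 * handed to the poll loop, so a stalled peer does not block the thread in
 * send()/recv().
 *
 * @code
 *  int s = socket(AF_INET, SOCK_STREAM, 0);
 *  if (s >= 0)
 *  {
 *      fd_nonblock(s);     // FIONBIO where available, otherwise O_NONBLOCK via fcntl()
 *      // ... bind()/connect() and register the socket with the poll loop ...
 *  }
 * @endcode
 */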
118
[1033]119
[88759]120# if defined(VBOX_NAT_MEM_DEBUG)
121# define NATMEM_LOG_FLOW_FUNC(a) LogFlowFunc(a)
122# define NATMEM_LOG_FLOW_FUNC_ENTER() LogFlowFuncEnter()
123# define NATMEM_LOG_FLOW_FUNC_LEAVE() LogFlowFuncLeave()
124# define NATMEM_LOG_2(a) Log2(a)
125# else
126# define NATMEM_LOG_FLOW_FUNC(a) do { } while (0)
127# define NATMEM_LOG_FLOW_FUNC_ENTER() do { } while (0)
128# define NATMEM_LOG_FLOW_FUNC_LEAVE() do { } while (0)
129# define NATMEM_LOG_2(a) do { } while (0)
[41322]130# endif
[40204]131
132
[35955]133/**
134 * Called when memory becomes available; drives pfnXmitPending.
135 *
136 * @note This will LEAVE the critical section of the zone and RE-ENTER it
137 * again. Changes to the zone data should be expected across calls to
138 * this function!
139 *
140 * @param zone The zone.
141 */
142DECLINLINE(void) slirp_zone_check_and_send_pending(uma_zone_t zone)
[35922]143{
[88759]144 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone]\n", zone));
[35955]145 if ( zone->fDoXmitPending
146 && zone->master_zone == NULL)
[35922]147 {
[35955]148 int rc2;
149 zone->fDoXmitPending = false;
150 rc2 = RTCritSectLeave(&zone->csZone); AssertRC(rc2);
151
[35922]152 slirp_output_pending(zone->pData->pvUser);
[35955]153
[35957]154 rc2 = RTCritSectEnter(&zone->csZone); AssertRC(rc2);
[35922]155 }
[88759]156 NATMEM_LOG_FLOW_FUNC_LEAVE();
[35922]157}
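/*
 * The caller pattern implied by the @note above, sketched with a hypothetical
 * helper: because the function leaves and re-enters csZone, any zone state
 * read before the call may be stale afterwards and has to be re-read under
 * the re-acquired lock.
 *
 * @code
 *  static void natZoneExampleCaller(uma_zone_t zone)   // hypothetical, for illustration
 *  {
 *      RTCritSectEnter(&zone->csZone);
 *      slirp_zone_check_and_send_pending(zone);        // csZone is dropped and re-taken in here
 *      // Only now read the counters; values cached before the call may be outdated.
 *      Log2(("natZoneExampleCaller: %d of %d items in use\n", zone->cur_items, zone->max_items));
 *      RTCritSectLeave(&zone->csZone);
 *  }
 * @endcode
 */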
158
[23369]159static void *slirp_uma_alloc(uma_zone_t zone,
[28440]160 int size, uint8_t *pflags, int fWait)
[22942]161{
162 struct item *it;
[28034]163 uint8_t *sub_area;
[28440]164 void *ret = NULL;
[28502]165 int rc;
[28440]166
[88759]167 NATMEM_LOG_FLOW_FUNC(("ENTER: %R[mzone], size:%d, pflags:%p, %RTbool\n", zone, size, pflags, fWait)); RT_NOREF(size, pflags, fWait);
[26404]168 RTCritSectEnter(&zone->csZone);
[28440]169 for (;;)
[22942]170 {
[28440]171 if (!LIST_EMPTY(&zone->free_items))
172 {
173 it = LIST_FIRST(&zone->free_items);
[34040]174 Assert(it->magic == ITEM_MAGIC);
[28502]175 rc = 0;
[28440]176 if (zone->pfInit)
[63012]177 rc = zone->pfInit(zone->pData, (void *)&it[1], (int /*sigh*/)zone->size, M_DONTWAIT);
[28502]178 if (rc == 0)
179 {
180 zone->cur_items++;
181 LIST_REMOVE(it, list);
182 LIST_INSERT_HEAD(&zone->used_items, it, list);
[35955]183 slirp_zone_check_and_send_pending(zone); /* may exit+enter the cs! */
[28502]184 ret = (void *)&it[1];
185 }
186 else
187 {
[35922]188 AssertMsgFailed(("NAT: item initialization failed for zone %s\n", zone->name));
[28502]189 ret = NULL;
190 }
[28440]191 break;
192 }
[26404]193
[28440]194 if (!zone->master_zone)
195 {
[35955]196 /* We're on the master zone and we can't allocate more. */
[88759]197 NATMEM_LOG_2(("NAT: no room on %s zone\n", zone->name));
[35922]198 /* AssertMsgFailed(("NAT: OOM!")); */
[35955]199 zone->fDoXmitPending = true;
[28440]200 break;
201 }
[28034]202
[35955]203 /* We're in a sub-zone; we need to get a chunk from the master zone and split
 204 * it into sub-zone sized chunks (see the layout sketch after this function).
[28440]205 */
[63012]206 sub_area = slirp_uma_alloc(zone->master_zone, (int /*sigh*/)zone->master_zone->size, NULL, 0);
[28440]207 if (!sub_area)
208 {
209 /* No room on master */
[88759]210 NATMEM_LOG_2(("NAT: no room on %s zone for %s zone\n", zone->master_zone->name, zone->name));
[28440]211 break;
212 }
213 zone->max_items++;
214 it = &((struct item *)sub_area)[-1];
[35955]215 /* This is the master zone's chunk descriptor; remove it from the
 216 * master list first.
[28440]217 */
218 Assert((it->zone && it->zone->magic == ZONE_MAGIC));
219 RTCritSectEnter(&it->zone->csZone);
[35955]220 /** @todo should we adjust the master zone's counters? */
[28440]221 LIST_REMOVE(it, list);
222 RTCritSectLeave(&it->zone->csZone);
[35955]223
224 /** @todo '+ zone->size' should depend on the flags */
[28440]225 memset(it, 0, sizeof(struct item));
226 it->zone = zone;
227 it->magic = ITEM_MAGIC;
228 LIST_INSERT_HEAD(&zone->free_items, it, list);
229 if (zone->cur_items >= zone->max_items)
[58077]230 LogRel(("NAT: Zone(%s) has reached its maximum\n", zone->name));
[28034]231 }
[28440]232 RTCritSectLeave(&zone->csZone);
[88759]233 NATMEM_LOG_FLOW_FUNC(("LEAVE: %p\n", ret));
[28440]234 return ret;
[22942]235}
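/*
 * The per-item memory layout the allocator above (and slirp_uma_free() below)
 * relies on; it is laid out by uma_zone_set_max() further down:
 *
 *      [ struct item header | payload of zone->size bytes | 32-bit border magic ]
 *                            ^-- the pointer handed out to callers
 *
 * A hypothetical debug helper that walks back from a payload pointer to the
 * header and checks both magics could look like this; it is a sketch, not
 * part of this file:
 *
 * @code
 *  DECLINLINE(bool) natZoneItemLooksValid(uma_zone_t zone, void *pv)
 *  {
 *      struct item *it = &((struct item *)pv)[-1];
 *      uint32_t u32Border = *(uint32_t *)((uint8_t *)pv + zone->size);
 *      return it->magic == ITEM_MAGIC
 *          && it->zone  == zone
 *          && u32Border == 0xabadbabe;
 *  }
 * @endcode
 */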
236
237static void slirp_uma_free(void *item, int size, uint8_t flags)
238{
239 struct item *it;
240 uma_zone_t zone;
[35955]241
[23369]242 Assert(item);
[22942]243 it = &((struct item *)item)[-1];
[88759]244 NATMEM_LOG_FLOW_FUNC(("ENTER: item:%p(%R[mzoneitem]), size:%d, flags:%RX8\n", item, it, size, flags)); RT_NOREF(size, flags);
[22942]245 Assert(it->magic == ITEM_MAGIC);
246 zone = it->zone;
[35955]247 /* check border magic */
[28394]248 Assert((*(uint32_t *)(((uint8_t *)&it[1]) + zone->size) == 0xabadbabe));
[35955]249
[26404]250 RTCritSectEnter(&zone->csZone);
[22942]251 Assert(zone->magic == ZONE_MAGIC);
[23369]252 LIST_REMOVE(it, list);
[28520]253 if (zone->pfFini)
254 {
[63012]255 zone->pfFini(zone->pData, item, (int /*sigh*/)zone->size);
[28520]256 }
257 if (zone->pfDtor)
258 {
[63012]259 zone->pfDtor(zone->pData, item, (int /*sigh*/)zone->size, NULL);
[28520]260 }
[22942]261 LIST_INSERT_HEAD(&zone->free_items, it, list);
[26404]262 zone->cur_items--;
[35955]263 slirp_zone_check_and_send_pending(zone); /* may exit+enter the cs! */
[26404]264 RTCritSectLeave(&zone->csZone);
[88759]265 NATMEM_LOG_FLOW_FUNC_LEAVE();
[22942]266}
267
[23369]268uma_zone_t uma_zcreate(PNATState pData, char *name, size_t size,
[27571]269 ctor_t ctor, dtor_t dtor, zinit_t init, zfini_t fini, int flags1, int flags2)
[22942]270{
[38492]271 uma_zone_t zone = NULL;
[88759]272 NATMEM_LOG_FLOW_FUNC(("ENTER: name:%s size:%d, ctor:%p, dtor:%p, init:%p, fini:%p, flags1:%RX32, flags2:%RX32\n",
273 name, (int)size, ctor, dtor, init, fini, flags1, flags2)); RT_NOREF(flags1, flags2);
[38492]274 zone = RTMemAllocZ(sizeof(struct uma_zone));
[22942]275 Assert((pData));
276 zone->magic = ZONE_MAGIC;
277 zone->pData = pData;
278 zone->name = name;
279 zone->size = size;
280 zone->pfCtor = ctor;
281 zone->pfDtor = dtor;
282 zone->pfInit = init;
283 zone->pfFini = fini;
284 zone->pfAlloc = slirp_uma_alloc;
285 zone->pfFree = slirp_uma_free;
[26404]286 RTCritSectInit(&zone->csZone);
[88759]287 NATMEM_LOG_FLOW_FUNC(("LEAVE: %R[mzone]\n", zone));
[22942]288 return zone;
[23369]289
[22942]290}
[23369]291uma_zone_t uma_zsecond_create(char *name, ctor_t ctor,
[22942]292 dtor_t dtor, zinit_t init, zfini_t fini, uma_zone_t master)
293{
294 uma_zone_t zone;
[28034]295 Assert(master);
[88759]296 NATMEM_LOG_FLOW_FUNC(("ENTER: name:%s ctor:%p, dtor:%p, init:%p, fini:%p, master:%R[mzone]\n",
[38492]297 name, ctor, dtor, init, fini, master));
[22942]298 zone = RTMemAllocZ(sizeof(struct uma_zone));
[23369]299 if (zone == NULL)
[38492]300 {
[88759]301 NATMEM_LOG_FLOW_FUNC(("LEAVE: %R[mzone]\n", NULL));
[22942]302 return NULL;
[38492]303 }
[27571]304
[22942]305 Assert((master && master->pData));
306 zone->magic = ZONE_MAGIC;
307 zone->pData = master->pData;
308 zone->name = name;
309 zone->pfCtor = ctor;
310 zone->pfDtor = dtor;
311 zone->pfInit = init;
312 zone->pfFini = fini;
313 zone->pfAlloc = slirp_uma_alloc;
314 zone->pfFree = slirp_uma_free;
[23154]315 zone->size = master->size;
[28034]316 zone->master_zone = master;
[26404]317 RTCritSectInit(&zone->csZone);
[88759]318 NATMEM_LOG_FLOW_FUNC(("LEAVE: %R[mzone]\n", zone));
[22942]319 return zone;
320}
[27571]321
[22942]322void uma_zone_set_max(uma_zone_t zone, int max)
323{
[28034]324 int i = 0;
325 struct item *it;
[88759]326 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], max:%d\n", zone, max));
[22942]327 zone->max_items = max;
[28394]328 zone->area = RTMemAllocZ(max * (sizeof(struct item) + zone->size + sizeof(uint32_t)));
[28034]329 for (; i < max; ++i)
330 {
[28394]331 it = (struct item *)(((uint8_t *)zone->area) + i*(sizeof(struct item) + zone->size + sizeof(uint32_t)));
[28034]332 it->magic = ITEM_MAGIC;
333 it->zone = zone;
[28394]334 *(uint32_t *)(((uint8_t *)&it[1]) + zone->size) = 0xabadbabe;
[28034]335 LIST_INSERT_HEAD(&zone->free_items, it, list);
336 }
[88759]337 NATMEM_LOG_FLOW_FUNC_LEAVE();
[22942]338}
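/*
 * How the zone API fits together, sketched with hypothetical names and an
 * assumed PNATState pData in scope (the real zones are created elsewhere in
 * the NAT code): a master zone gets a fixed capacity up front, a secondary
 * zone borrows chunks from it on demand, and items are served through
 * uma_zalloc_arg()/uma_zfree() defined below.
 *
 * @code
 *  uma_zone_t zn = uma_zcreate(pData, (char *)"example", 256,  // 256-byte items, no ctor/dtor/init/fini
 *                              NULL, NULL, NULL, NULL, 0, 0);
 *  uma_zone_set_max(zn, 16);                                   // pre-allocates 16 bordered items
 *
 *  uma_zone_t znSub = uma_zsecond_create((char *)"example-sub",
 *                                        NULL, NULL, NULL, NULL, zn);
 *
 *  void *pv = uma_zalloc_arg(znSub, NULL, 0);  // pulls a chunk from 'zn' the first time
 *  if (pv)
 *      uma_zfree(znSub, pv);
 * @endcode
 */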
[27571]339
[22942]340void uma_zone_set_allocf(uma_zone_t zone, uma_alloc_t pfAlloc)
341{
[88759]342 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], pfAlloc:%Rfn\n", zone, pfAlloc));
[38492]343 zone->pfAlloc = pfAlloc;
[88759]344 NATMEM_LOG_FLOW_FUNC_LEAVE();
[22942]345}
[27571]346
[22942]347void uma_zone_set_freef(uma_zone_t zone, uma_free_t pfFree)
348{
[88759]349 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], pfFree:%Rfn\n", zone, pfFree));
[38492]350 zone->pfFree = pfFree;
[88759]351 NATMEM_LOG_FLOW_FUNC_LEAVE();
[22942]352}
353
354uint32_t *uma_find_refcnt(uma_zone_t zone, void *mem)
355{
[35955]356 /** @todo (vvl) this function is supposed to work with a special zone storing
[23369]357 reference counters */
[38492]358 struct item *it = NULL;
[88759]359 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], mem:%p\n", zone, mem)); RT_NOREF(zone);
[38492]360 it = (struct item *)mem; /* 1st element */
[26404]361 Assert(mem != NULL);
[22942]362 Assert(zone->magic == ZONE_MAGIC);
363 /* to return a pointer to the counter we need element 0 */
364 Assert(it[-1].magic == ITEM_MAGIC);
[88759]365 NATMEM_LOG_FLOW_FUNC(("LEAVE: %p\n", &it[-1].ref_count));
[22942]366 return &it[-1].ref_count;
367}
[27571]368
[22942]369void *uma_zalloc_arg(uma_zone_t zone, void *args, int how)
370{
371 void *mem;
372 Assert(zone->magic == ZONE_MAGIC);
[88759]373 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], args:%p, how:%RX32\n", zone, args, how)); RT_NOREF(how);
[22942]374 if (zone->pfAlloc == NULL)
[38492]375 {
[88759]376 NATMEM_LOG_FLOW_FUNC(("LEAVE: NULL\n"));
[22942]377 return NULL;
[38492]378 }
[26404]379 RTCritSectEnter(&zone->csZone);
[63012]380 mem = zone->pfAlloc(zone, (int /*sigh*/)zone->size, NULL, 0);
[28490]381 if (mem != NULL)
382 {
383 if (zone->pfCtor)
[63012]384 zone->pfCtor(zone->pData, mem, (int /*sigh*/)zone->size, args, M_DONTWAIT);
[28490]385 }
[26404]386 RTCritSectLeave(&zone->csZone);
[88759]387 NATMEM_LOG_FLOW_FUNC(("LEAVE: %p\n", mem));
[22942]388 return mem;
389}
390
391void uma_zfree(uma_zone_t zone, void *item)
392{
[88759]393 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], item:%p\n", zone, item));
[22942]394 uma_zfree_arg(zone, item, NULL);
[88759]395 NATMEM_LOG_FLOW_FUNC_LEAVE();
[22942]396}
397
398void uma_zfree_arg(uma_zone_t zone, void *mem, void *flags)
399{
400 struct item *it;
401 Assert(zone->magic == ZONE_MAGIC);
[28520]402 Assert((zone->pfFree));
403 Assert((mem));
[88759]404 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], mem:%p, flags:%p\n", zone, mem, flags)); RT_NOREF(flags);
[27571]405
[26404]406 RTCritSectEnter(&zone->csZone);
[23369]407 it = &((struct item *)mem)[-1];
[28520]408 Assert((it->magic == ITEM_MAGIC));
[22942]409 Assert((zone->magic == ZONE_MAGIC && zone == it->zone));
[23369]410
[88759]411 zone->pfFree(mem, 0, 0);
[26404]412 RTCritSectLeave(&zone->csZone);
[88759]413
414 NATMEM_LOG_FLOW_FUNC_LEAVE();
[22942]415}
[27571]416
[22942]417int uma_zone_exhausted_nolock(uma_zone_t zone)
418{
[28520]419 int fExhausted;
[88759]420 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone]\n", zone));
[28520]421 RTCritSectEnter(&zone->csZone);
[30013]422 fExhausted = (zone->cur_items == zone->max_items);
[28520]423 RTCritSectLeave(&zone->csZone);
[88759]424 NATMEM_LOG_FLOW_FUNC(("LEAVE: %RTbool\n", fExhausted));
[28520]425 return fExhausted;
[22942]426}
[27571]427
[22942]428void zone_drain(uma_zone_t zone)
429{
[28520]430 struct item *it;
431 uma_zone_t master_zone;
[35955]432
[28520]433 /* vvl: Huh? What should we do with a zone that has no backing store? */
434 Assert((zone->master_zone));
[88759]435 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone]\n", zone));
[28520]436 master_zone = zone->master_zone;
[35955]437 while (!LIST_EMPTY(&zone->free_items))
[28520]438 {
439 it = LIST_FIRST(&zone->free_items);
[34040]440 Assert((it->magic == ITEM_MAGIC));
[35955]441
[28520]442 RTCritSectEnter(&zone->csZone);
443 LIST_REMOVE(it, list);
444 zone->max_items--;
445 RTCritSectLeave(&zone->csZone);
[35955]446
[28520]447 it->zone = master_zone;
[35955]448
[28520]449 RTCritSectEnter(&master_zone->csZone);
450 LIST_INSERT_HEAD(&master_zone->free_items, it, list);
451 master_zone->cur_items--;
[35955]452 slirp_zone_check_and_send_pending(master_zone); /* may exit+enter the cs! */
[28520]453 RTCritSectLeave(&master_zone->csZone);
454 }
[88759]455 NATMEM_LOG_FLOW_FUNC_LEAVE();
[22942]456}
457
458void slirp_null_arg_free(void *mem, void *arg)
459{
[35955]460 /** @todo (vvl) make it wiser */
[88759]461 NATMEM_LOG_FLOW_FUNC(("ENTER: mem:%p, arg:%p\n", mem, arg));
462 RT_NOREF(arg);
[22942]463 Assert(mem);
464 RTMemFree(mem);
[88759]465 NATMEM_LOG_FLOW_FUNC_LEAVE();
[22942]466}
467
468void *uma_zalloc(uma_zone_t zone, int len)
469{
[88759]470 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone], len:%d\n", zone, len));
471 RT_NOREF(zone, len);
472 NATMEM_LOG_FLOW_FUNC(("LEAVE: NULL"));
[22942]473 return NULL;
474}
[26404]475
[26597]476struct mbuf *slirp_ext_m_get(PNATState pData, size_t cbMin, void **ppvBuf, size_t *pcbBuf)
[26404]477{
478 struct mbuf *m;
[63012]479 int size = MCLBYTES;
[88759]480 NATMEM_LOG_FLOW_FUNC(("ENTER: cbMin:%d, ppvBuf:%p, pcbBuf:%p\n", cbMin, ppvBuf, pcbBuf));
[50016]481
[88543]482 *ppvBuf = NULL;
483 *pcbBuf = 0;
484
[50016]485 if (cbMin < MCLBYTES)
[26404]486 size = MCLBYTES;
[26597]487 else if (cbMin < MJUM9BYTES)
[26404]488 size = MJUM9BYTES;
[26597]489 else if (cbMin < MJUM16BYTES)
[26404]490 size = MJUM16BYTES;
491 else
[88543]492 {
493 AssertMsgFailed(("Unsupported size %zu", cbMin));
[88759]494 NATMEM_LOG_FLOW_FUNC(("LEAVE: NULL (bad size %zu)\n", cbMin));
[88543]495 return NULL;
496 }
[26404]497
498 m = m_getjcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR, size);
[28494]499 if (m == NULL)
500 {
[88759]501 NATMEM_LOG_FLOW_FUNC(("LEAVE: NULL\n"));
[28494]502 return NULL;
503 }
[28440]504 m->m_len = size;
[26597]505 *ppvBuf = mtod(m, void *);
506 *pcbBuf = size;
[88759]507 NATMEM_LOG_FLOW_FUNC(("LEAVE: %p\n", m));
[26597]508 return m;
[26404]509}
510
[30421]511void slirp_ext_m_free(PNATState pData, struct mbuf *m, uint8_t *pu8Buf)
[26404]512{
[32431]513
[88759]514 NATMEM_LOG_FLOW_FUNC(("ENTER: m:%p, pu8Buf:%p\n", m, pu8Buf));
[32431]515 if ( pu8Buf
[30421]516 && pu8Buf != mtod(m, uint8_t *))
517 RTMemFree(pu8Buf); /* This buffer was allocated on heap */
[28501]518 m_freem(pData, m);
[88759]519 NATMEM_LOG_FLOW_FUNC_LEAVE();
[26404]520}
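/*
 * A usage sketch for the pair above, with hypothetical variable names and an
 * assumed PNATState pData in scope: the requested minimum is rounded up to
 * one of the three cluster size classes (MCLBYTES, MJUM9BYTES, MJUM16BYTES)
 * and the caller receives both the mbuf and the pointer/size of its data
 * area.
 *
 * @code
 *  void  *pvFrame = NULL;
 *  size_t cbFrame = 0;
 *  struct mbuf *m = slirp_ext_m_get(pData, 1518, &pvFrame, &cbFrame);  // a typical Ethernet frame
 *  if (m)
 *  {
 *      // ... copy at most cbFrame bytes into pvFrame and hand m to the stack;
 *      //     when the mbuf is no longer needed (or on a failure path), release it:
 *      slirp_ext_m_free(pData, m, (uint8_t *)pvFrame);
 *  }
 * @endcode
 */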
521
522static void zone_destroy(uma_zone_t zone)
523{
[26596]524 RTCritSectEnter(&zone->csZone);
[88759]525 NATMEM_LOG_FLOW_FUNC(("ENTER: zone:%R[mzone]\n", zone));
[58077]526 LogRel(("NAT: Zone(nm:%s, used:%d)\n", zone->name, zone->cur_items));
[43233]527 RTMemFree(zone->area);
[26596]528 RTCritSectLeave(&zone->csZone);
[26404]529 RTCritSectDelete(&zone->csZone);
530 RTMemFree(zone);
[88759]531 NATMEM_LOG_FLOW_FUNC_LEAVE();
[26404]532}
[27571]533
[26404]534void m_fini(PNATState pData)
535{
[88759]536 NATMEM_LOG_FLOW_FUNC_ENTER();
[41322]537# define ZONE_DESTROY(zone) do { zone_destroy((zone)); (zone) = NULL;} while (0)
[39362]538 ZONE_DESTROY(pData->zone_clust);
539 ZONE_DESTROY(pData->zone_pack);
540 ZONE_DESTROY(pData->zone_mbuf);
541 ZONE_DESTROY(pData->zone_jumbop);
542 ZONE_DESTROY(pData->zone_jumbo9);
543 ZONE_DESTROY(pData->zone_jumbo16);
[43233]544 ZONE_DESTROY(pData->zone_ext_refcnt);
[41322]545# undef ZONE_DESTROY
[35955]546 /** @todo do the finalization here. */
[88759]547 NATMEM_LOG_FLOW_FUNC_LEAVE();
[26404]548}
[30016]549
550void
551if_init(PNATState pData)
552{
553 /* 14 for ethernet */
554 if_maxlinkhdr = 14;
555 if_comp = IF_AUTOCOMP;
556 if_mtu = 1500;
557 if_mru = 1500;
558}
[88759]559
[41322]560#endif /* VBOX_NAT_TST_QUEUE */