- Timestamp: Aug 16, 2016 1:51:47 PM
- File: 1 copied
Legend:
- Unmodified (context lines, no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
trunk/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c
r63550 → r63558

 /* $Id$ */
 /** @file
- * IPRT - Ring-0 Memory Objects, FreeBSD.
+ * IPRT - Ring-0 Memory Objects, NetBSD.
  */
 
…
  * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
  * Copyright (c) 2011 Andriy Gapon <avg@FreeBSD.org>
+ * Copyright (c) 2014 Arto Huusko
  *
  * Permission is hereby granted, free of charge, to any person
…
 *   Header Files                                                                                                                *
 *********************************************************************************************************************************/
-#include "the-freebsd-kernel.h"
+#include "the-netbsd-kernel.h"
 
 #include <iprt/memobj.h>
…
 *********************************************************************************************************************************/
 /**
- * The FreeBSD version of the memory object structure.
+ * The NetBSD version of the memory object structure.
  */
-typedef struct RTR0MEMOBJFREEBSD
+typedef struct RTR0MEMOBJNETBSD
 {
     /** The core structure. */
     RTR0MEMOBJINTERNAL Core;
-    /** The VM object associated with the allocation. */
-    vm_object_t pObject;
-} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;
-
-
-MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");
+    size_t size;
+    struct pglist pglist;
+} RTR0MEMOBJNETBSD, *PRTR0MEMOBJNETBSD;
+
+
+typedef struct vm_map* vm_map_t;
 
 
…
  * @param pMem The memory object.
  */
-static vm_map_t rtR0MemObjFreeBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
+static vm_map_t rtR0MemObjNetBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
 {
     switch (pMem->enmType)
…
 DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
 {
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;
     int rc;
 
-    switch (pMemFreeBSD->Core.enmType)
+    switch (pMemNetBSD->Core.enmType)
     {
         case RTR0MEMOBJTYPE_PAGE:
+        {
+            kmem_free(pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
+            break;
+        }
         case RTR0MEMOBJTYPE_LOW:
         case RTR0MEMOBJTYPE_CONT:
-            rc = vm_map_remove(kernel_map,
-                               (vm_offset_t)pMemFreeBSD->Core.pv,
-                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
-            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
+        {
+            /* Unmap */
+            pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
+            /* Free the virtual space */
+            uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
+            /* Free the physical pages */
+            uvm_pglistfree(&pMemNetBSD->pglist);
             break;
-
-        case RTR0MEMOBJTYPE_LOCK:
-        {
-            vm_map_t pMap = kernel_map;
-
-            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
-                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
-
-            rc = vm_map_unwire(pMap,
-                               (vm_offset_t)pMemFreeBSD->Core.pv,
-                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
-                               VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
-            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
-            break;
-        }
-
-        case RTR0MEMOBJTYPE_RES_VIRT:
-        {
-            vm_map_t pMap = kernel_map;
-            if (pMemFreeBSD->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
-                pMap = &((struct proc *)pMemFreeBSD->Core.u.ResVirt.R0Process)->p_vmspace->vm_map;
-            rc = vm_map_remove(pMap,
-                               (vm_offset_t)pMemFreeBSD->Core.pv,
-                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
-            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
-            break;
-        }
-
-        case RTR0MEMOBJTYPE_MAPPING:
-        {
-            vm_map_t pMap = kernel_map;
-
-            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
-                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;
-            rc = vm_map_remove(pMap,
-                               (vm_offset_t)pMemFreeBSD->Core.pv,
-                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
-            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
-            break;
-        }
-
+        }
         case RTR0MEMOBJTYPE_PHYS:
         case RTR0MEMOBJTYPE_PHYS_NC:
         {
-#if __FreeBSD_version >= 1000030
-            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
-#else
-            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
-#endif
-#if __FreeBSD_version < 1000000
-            vm_page_lock_queues();
-#endif
-            for (vm_page_t pPage = vm_page_find_least(pMemFreeBSD->pObject, 0);
-                 pPage != NULL;
-                 pPage = vm_page_next(pPage))
+            /* Free the physical pages */
+            uvm_pglistfree(&pMemNetBSD->pglist);
+            break;
+        }
+        case RTR0MEMOBJTYPE_LOCK:
+            if (pMemNetBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
             {
-                vm_page_unwire(pPage, 0);
+                uvm_map_pageable(
+                        &((struct proc *)pMemNetBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map,
+                        (vaddr_t)pMemNetBSD->Core.pv,
+                        ((vaddr_t)pMemNetBSD->Core.pv) + pMemNetBSD->Core.cb,
+                        1, 0);
             }
-#if __FreeBSD_version < 1000000
-            vm_page_unlock_queues();
-#endif
-#if __FreeBSD_version >= 1000030
-            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
-#else
-            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
-#endif
-            vm_object_deallocate(pMemFreeBSD->pObject);
             break;
-        }
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            if (pMemNetBSD->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
+            {
+                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
+            }
+            break;
+        case RTR0MEMOBJTYPE_MAPPING:
+            if (pMemNetBSD->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
+            {
+                pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
+                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
+            }
+            break;
 
         default:
-            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
+            AssertMsgFailed(("enmType=%d\n", pMemNetBSD->Core.enmType));
             return VERR_INTERNAL_ERROR;
     }
…
 }
 
-
-static vm_page_t rtR0MemObjFreeBSDContigPhysAllocHelper(vm_object_t pObject, vm_pindex_t iPIndex,
-                                                        u_long cPages, vm_paddr_t VmPhysAddrHigh,
-                                                        u_long uAlignment, bool fWire)
-{
-    vm_page_t pPages;
-    int cTries = 0;
-
-#if __FreeBSD_version > 1000000
-    int fFlags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
-    if (fWire)
-        fFlags |= VM_ALLOC_WIRED;
-
-    while (cTries <= 1)
-    {
-#if __FreeBSD_version >= 1000030
-        VM_OBJECT_WLOCK(pObject);
-#else
-        VM_OBJECT_LOCK(pObject);
-#endif
-        pPages = vm_page_alloc_contig(pObject, iPIndex, fFlags, cPages, 0,
-                                      VmPhysAddrHigh, uAlignment, 0, VM_MEMATTR_DEFAULT);
-#if __FreeBSD_version >= 1000030
-        VM_OBJECT_WUNLOCK(pObject);
-#else
-        VM_OBJECT_UNLOCK(pObject);
-#endif
-        if (pPages)
-            break;
-        vm_pageout_grow_cache(cTries, 0, VmPhysAddrHigh);
-        cTries++;
-    }
-
-    return pPages;
-#else
-    while (cTries <= 1)
-    {
-        pPages = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);
-        if (pPages)
-            break;
-        vm_contig_grow_cache(cTries, 0, VmPhysAddrHigh);
-        cTries++;
-    }
-
-    if (!pPages)
-        return pPages;
-#if __FreeBSD_version >= 1000030
-    VM_OBJECT_WLOCK(pObject);
-#else
-    VM_OBJECT_LOCK(pObject);
-#endif
-    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
-    {
-        vm_page_t pPage = pPages + iPage;
-        vm_page_insert(pPage, pObject, iPIndex + iPage);
-        pPage->valid = VM_PAGE_BITS_ALL;
-        if (fWire)
-        {
-            pPage->wire_count = 1;
-            atomic_add_int(&cnt.v_wire_count, 1);
-        }
-    }
-#if __FreeBSD_version >= 1000030
-    VM_OBJECT_WUNLOCK(pObject);
-#else
-    VM_OBJECT_UNLOCK(pObject);
-#endif
-    return pPages;
-#endif
-}
-
-static int rtR0MemObjFreeBSDPhysAllocHelper(vm_object_t pObject, u_long cPages,
-                                            vm_paddr_t VmPhysAddrHigh, u_long uAlignment,
-                                            bool fContiguous, bool fWire, int rcNoMem)
-{
-    if (fContiguous)
-    {
-        if (rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, 0, cPages, VmPhysAddrHigh,
-                                                   uAlignment, fWire) != NULL)
-            return VINF_SUCCESS;
-        return rcNoMem;
-    }
-
-    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
-    {
-        vm_page_t pPage = rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, iPage, 1, VmPhysAddrHigh,
-                                                                 uAlignment, fWire);
-        if (!pPage)
-        {
-            /* Free all allocated pages */
-#if __FreeBSD_version >= 1000030
-            VM_OBJECT_WLOCK(pObject);
-#else
-            VM_OBJECT_LOCK(pObject);
-#endif
-            while (iPage-- > 0)
-            {
-                pPage = vm_page_lookup(pObject, iPage);
-#if __FreeBSD_version < 1000000
-                vm_page_lock_queues();
-#endif
-                if (fWire)
-                    vm_page_unwire(pPage, 0);
-                vm_page_free(pPage);
-#if __FreeBSD_version < 1000000
-                vm_page_unlock_queues();
-#endif
-            }
-#if __FreeBSD_version >= 1000030
-            VM_OBJECT_WUNLOCK(pObject);
-#else
-            VM_OBJECT_UNLOCK(pObject);
-#endif
-            return rcNoMem;
-        }
-    }
-    return VINF_SUCCESS;
-}
-
-static int rtR0MemObjFreeBSDAllocHelper(PRTR0MEMOBJFREEBSD pMemFreeBSD, bool fExecutable,
-                                        vm_paddr_t VmPhysAddrHigh, bool fContiguous, int rcNoMem)
-{
-    vm_offset_t MapAddress = vm_map_min(kernel_map);
-    size_t cPages = atop(pMemFreeBSD->Core.cb);
-    int rc;
-
-    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, cPages);
-
-    /* No additional object reference for auto-deallocation upon unmapping. */
-#if __FreeBSD_version >= 1000055
-    rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
-                     &MapAddress, pMemFreeBSD->Core.cb, 0, VMFS_ANY_SPACE,
-                     fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
-#else
-    rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
-                     &MapAddress, pMemFreeBSD->Core.cb, VMFS_ANY_SPACE,
-                     fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
-#endif
-
-    if (rc == KERN_SUCCESS)
-    {
-        rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages,
-                                              VmPhysAddrHigh, PAGE_SIZE, fContiguous,
-                                              false, rcNoMem);
-        if (RT_SUCCESS(rc))
-        {
-            vm_map_wire(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb,
-                        VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
-
-            /* Store start address */
-            pMemFreeBSD->Core.pv = (void *)MapAddress;
-            return VINF_SUCCESS;
-        }
-
-        vm_map_remove(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb);
-    }
-    else
-    {
-        rc = rcNoMem; /** @todo fix translation (borrow from darwin) */
-        vm_object_deallocate(pMemFreeBSD->pObject);
-    }
-
-    rtR0MemObjDelete(&pMemFreeBSD->Core);
-    return rc;
-}
-
+static int rtR0MemObjNetBSDAllocHelper(PRTR0MEMOBJNETBSD pMemNetBSD, size_t cb, bool fExecutable,
+                                       paddr_t VmPhysAddrHigh, bool fContiguous)
+{
+    /* Virtual space first */
+    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0,
+                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    if (virt == 0)
+        return VERR_NO_MEMORY;
+
+    struct pglist *rlist = &pMemNetBSD->pglist;
+
+    int nsegs = fContiguous ? 1 : INT_MAX;
+
+    /* Physical pages */
+    if (uvm_pglistalloc(cb, 0, VmPhysAddrHigh,
+                        PAGE_SIZE, 0, rlist, nsegs, 1) != 0)
+    {
+        uvm_km_free(kernel_map, virt, cb, UVM_KMF_VAONLY);
+        return VERR_NO_MEMORY;
+    }
+
+    /* Map */
+    struct vm_page *page;
+    vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
+    if (fExecutable)
+        prot |= VM_PROT_EXECUTE;
+    vaddr_t virt2 = virt;
+    TAILQ_FOREACH(page, rlist, pageq.queue)
+    {
+        pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
+        virt2 += PAGE_SIZE;
+    }
+
+    pMemNetBSD->Core.pv = (void *)virt;
+    if (fContiguous)
+    {
+        page = TAILQ_FIRST(rlist);
+        pMemNetBSD->Core.u.Cont.Phys = VM_PAGE_TO_PHYS(page);
+    }
+    return VINF_SUCCESS;
+}
+
 DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
 {
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
-                                                                       RTR0MEMOBJTYPE_PAGE, NULL, cb);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, ~(vm_paddr_t)0, false, VERR_NO_MEMORY);
-    if (RT_FAILURE(rc))
-    {
-        rtR0MemObjDelete(&pMemFreeBSD->Core);
-        return rc;
-    }
-
-    *ppMem = &pMemFreeBSD->Core;
-    return rc;
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
+                                                                    RTR0MEMOBJTYPE_PAGE, NULL, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    void *pvMem = kmem_alloc(cb, KM_SLEEP);
+    if (RT_UNLIKELY(!pvMem))
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return VERR_NO_PAGE_MEMORY;
+    }
+    if (fExecutable)
+    {
+        pmap_protect(pmap_kernel(), (vaddr_t)pvMem, ((vaddr_t)pvMem) + cb,
+                     VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
+    }
+
+    pMemNetBSD->Core.pv = pvMem;
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
 }
 
 
 DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
 {
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
-                                                                       RTR0MEMOBJTYPE_LOW, NULL, cb);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, false, VERR_NO_LOW_MEMORY);
-    if (RT_FAILURE(rc))
-    {
-        rtR0MemObjDelete(&pMemFreeBSD->Core);
-        return rc;
-    }
-
-    *ppMem = &pMemFreeBSD->Core;
-    return rc;
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
+                                                                    RTR0MEMOBJTYPE_LOW, NULL, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, false);
+    if (rc)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return rc;
+    }
+
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
 }
 
 
 DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
 {
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
-                                                                       RTR0MEMOBJTYPE_CONT, NULL, cb);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, true, VERR_NO_CONT_MEMORY);
-    if (RT_FAILURE(rc))
-    {
-        rtR0MemObjDelete(&pMemFreeBSD->Core);
-        return rc;
-    }
-
-    pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
-    *ppMem = &pMemFreeBSD->Core;
-    return rc;
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
+                                                                    RTR0MEMOBJTYPE_CONT, NULL, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, true);
+    if (rc)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return rc;
+    }
+
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
 }
 
 
-static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
+static int rtR0MemObjNetBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                            size_t cb,
                                            RTHCPHYS PhysHighest, size_t uAlignment,
-                                           bool fContiguous, int rcNoMem)
-{
-    uint32_t cPages = atop(cb);
-    vm_paddr_t VmPhysAddrHigh;
+                                           bool fContiguous)
+{
+    paddr_t VmPhysAddrHigh;
 
     /* create the object. */
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                      enmType, NULL, cb);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, atop(cb));
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
 
     if (PhysHighest != NIL_RTHCPHYS)
         VmPhysAddrHigh = PhysHighest;
     else
-        VmPhysAddrHigh = ~(vm_paddr_t)0;
-
-    int rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh,
-                                              uAlignment, fContiguous, true, rcNoMem);
-    if (RT_SUCCESS(rc))
-    {
-        if (fContiguous)
-        {
-            Assert(enmType == RTR0MEMOBJTYPE_PHYS);
-#if __FreeBSD_version >= 1000030
-            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
-#else
-            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
-#endif
-            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(vm_page_find_least(pMemFreeBSD->pObject, 0));
-#if __FreeBSD_version >= 1000030
-            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
-#else
-            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
-#endif
-            pMemFreeBSD->Core.u.Phys.fAllocated = true;
-        }
-
-        *ppMem = &pMemFreeBSD->Core;
-    }
-    else
-    {
-        vm_object_deallocate(pMemFreeBSD->pObject);
-        rtR0MemObjDelete(&pMemFreeBSD->Core);
-    }
-
-    return rc;
+        VmPhysAddrHigh = ~(paddr_t)0;
+
+    int nsegs = fContiguous ? 1 : INT_MAX;
+
+    int error = uvm_pglistalloc(cb, 0, VmPhysAddrHigh, uAlignment, 0, &pMemNetBSD->pglist, nsegs, 1);
+    if (error)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return VERR_NO_MEMORY;
+    }
+
+    if (fContiguous)
+    {
+        Assert(enmType == RTR0MEMOBJTYPE_PHYS);
+        const struct vm_page * const pg = TAILQ_FIRST(&pMemNetBSD->pglist);
+        pMemNetBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pg);
+        pMemNetBSD->Core.u.Phys.fAllocated = true;
+    }
+    *ppMem = &pMemNetBSD->Core;
+
+    return VINF_SUCCESS;
 }
…
 DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
 {
-    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true, VERR_NO_MEMORY);
+    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
 }
…
 DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
 {
-    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false, VERR_NO_PHYS_MEMORY);
+    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
 }
…
 DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
 
     /* create the object. */
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
-    if (!pMemFreeBSD)
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    if (!pMemNetBSD)
         return VERR_NO_MEMORY;
 
     /* there is no allocation here, it needs to be mapped somewhere first. */
-    pMemFreeBSD->Core.u.Phys.fAllocated = false;
-    pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
-    pMemFreeBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
-    *ppMem = &pMemFreeBSD->Core;
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Worker locking the memory in either kernel or user maps.
- */
-static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, vm_map_t pVmMap,
-                                     vm_offset_t AddrStart, size_t cb, uint32_t fAccess,
-                                     RTR0PROCESS R0Process, int fFlags)
-{
-    int rc;
-    NOREF(fAccess);
-
-    /* create the object. */
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)AddrStart, cb);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    /*
-     * We could've used vslock here, but we don't wish to be subject to
-     * resource usage restrictions, so we'll call vm_map_wire directly.
-     */
-    rc = vm_map_wire(pVmMap,         /* the map */
-                     AddrStart,      /* start */
-                     AddrStart + cb, /* end */
-                     fFlags);        /* flags */
-    if (rc == KERN_SUCCESS)
-    {
-        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
-        *ppMem = &pMemFreeBSD->Core;
-        return VINF_SUCCESS;
-    }
-    rtR0MemObjDelete(&pMemFreeBSD->Core);
-    return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for freebsd. */
+    pMemNetBSD->Core.u.Phys.fAllocated = false;
+    pMemNetBSD->Core.u.Phys.PhysBase = Phys;
+    pMemNetBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
+    TAILQ_INIT(&pMemNetBSD->pglist);
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
 }
…
 DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
 {
-    return rtR0MemObjNativeLockInMap(ppMem,
-                                     &((struct proc *)R0Process)->p_vmspace->vm_map,
-                                     (vm_offset_t)R3Ptr,
-                                     cb,
-                                     fAccess,
-                                     R0Process,
-                                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    int rc = uvm_map_pageable(
+            &((struct proc *)R0Process)->p_vmspace->vm_map,
+            R3Ptr,
+            R3Ptr + cb,
+            0, 0);
+    if (rc)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return VERR_NO_MEMORY;
+    }
+
+    pMemNetBSD->Core.u.Lock.R0Process = R0Process;
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
 }
…
 DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
 {
-    return rtR0MemObjNativeLockInMap(ppMem,
-                                     kernel_map,
-                                     (vm_offset_t)pv,
-                                     cb,
-                                     fAccess,
-                                     NIL_RTR0PROCESS,
-                                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
-}
-
-
-/**
- * Worker for the two virtual address space reservers.
- *
- * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
- */
-static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
-{
-    int rc;
-
-    /*
-     * The pvFixed address range must be within the VM space when specified.
-     */
-    if (   pvFixed != (void *)-1
-        && (   (vm_offset_t)pvFixed < vm_map_min(pMap)
-            || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
-        return VERR_INVALID_PARAMETER;
-
-    /*
-     * Check that the specified alignment is supported.
-     */
-    if (uAlignment > PAGE_SIZE)
-        return VERR_NOT_SUPPORTED;
-
-    /*
-     * Create the object.
-     */
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    vm_offset_t MapAddress = pvFixed != (void *)-1
-                           ? (vm_offset_t)pvFixed
-                           : vm_map_min(pMap);
-    if (pvFixed != (void *)-1)
-        vm_map_remove(pMap,
-                      MapAddress,
-                      MapAddress + cb);
-
-    rc = vm_map_find(pMap,        /* map */
-                     NULL,        /* object */
-                     0,           /* offset */
-                     &MapAddress, /* addr (IN/OUT) */
-                     cb,          /* length */
-#if __FreeBSD_version >= 1000055
-                     0,           /* max addr */
-#endif
-                     pvFixed == (void *)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
-                                  /* find_space */
-                     VM_PROT_NONE, /* protection */
-                     VM_PROT_ALL,  /* max(_prot) ?? */
-                     0);           /* cow (copy-on-write) */
-    if (rc == KERN_SUCCESS)
-    {
-        if (R0Process != NIL_RTR0PROCESS)
-        {
-            rc = vm_map_inherit(pMap,
-                                MapAddress,
-                                MapAddress + cb,
-                                VM_INHERIT_SHARE);
-            AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
-        }
-        pMemFreeBSD->Core.pv = (void *)MapAddress;
-        pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
-        *ppMem = &pMemFreeBSD->Core;
-        return VINF_SUCCESS;
-    }
-
-    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
-    rtR0MemObjDelete(&pMemFreeBSD->Core);
-    return rc;
-
-}
-
-
-DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
-{
-    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
+    /* Kernel memory (always?) wired; all memory allocated by vbox code is? */
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    pMemNetBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
+    pMemNetBSD->Core.pv = pv;
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
+}
+
+DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
+{
+    if (pvFixed != (void *)-1)
+    {
+        /* can we support this? or can we assume the virtual space is already reserved? */
+        printf("reserve specified kernel virtual address not supported\n");
+        return VERR_NOT_SUPPORTED;
+    }
+
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    vaddr_t virt = uvm_km_alloc(kernel_map, cb, uAlignment,
+                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    if (virt == 0)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return VERR_NO_MEMORY;
+    }
+
+    pMemNetBSD->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
+    pMemNetBSD->Core.pv = (void *)virt;
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
 }
…
 DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
 {
-    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
-                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
+    printf("NativeReserveUser\n");
+    return VERR_NOT_SUPPORTED;
 }
…
 DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                           unsigned fProt, size_t offSub, size_t cbSub)
 {
-//    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
-    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
-
-    /*
-     * Check that the specified alignment is supported.
-     */
-    if (uAlignment > PAGE_SIZE)
+    if (pvFixed != (void *)-1)
+    {
+        /* can we support this? or can we assume the virtual space is already reserved? */
+        printf("map to specified kernel virtual address not supported\n");
         return VERR_NOT_SUPPORTED;
-
-    int rc;
-    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
-
-    /* calc protection */
-    vm_prot_t ProtectionFlags = 0;
-    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
-        ProtectionFlags = VM_PROT_NONE;
+    }
+
+    PRTR0MEMOBJNETBSD pMemNetBSD0 = (PRTR0MEMOBJNETBSD)pMemToMap;
+    if ((pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS)
+        && (pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC))
+    {
+        printf("memory to map is not physical\n");
+        return VERR_NOT_SUPPORTED;
+    }
+    size_t sz = cbSub > 0 ? cbSub : pMemNetBSD0->Core.cb;
+
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_MAPPING, NULL, sz);
+
+    vaddr_t virt = uvm_km_alloc(kernel_map, sz, uAlignment,
+                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    if (virt == 0)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return VERR_NO_MEMORY;
+    }
+
+    vm_prot_t prot = 0;
+
     if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
-        ProtectionFlags |= VM_PROT_READ;
+        prot |= VM_PROT_READ;
     if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
-        ProtectionFlags |= VM_PROT_WRITE;
+        prot |= VM_PROT_WRITE;
     if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
-        ProtectionFlags |= VM_PROT_EXECUTE;
-
-    vm_offset_t Addr = vm_map_min(kernel_map);
-    if (cbSub == 0)
-        cbSub = pMemToMap->cb - offSub;
-
-    vm_object_reference(pMemToMapFreeBSD->pObject);
-    rc = vm_map_find(kernel_map,                /* Map to insert the object in */
-                     pMemToMapFreeBSD->pObject, /* Object to map */
-                     offSub,                    /* Start offset in the object */
-                     &Addr,                     /* Start address IN/OUT */
-                     cbSub,                     /* Size of the mapping */
-#if __FreeBSD_version >= 1000055
-                     0,                         /* Upper bound of mapping */
-#endif
-                     VMFS_ANY_SPACE,            /* Whether a suitable address should be searched for first */
-                     ProtectionFlags,           /* protection flags */
-                     VM_PROT_ALL,               /* Maximum protection flags */
-                     0);                        /* copy-on-write and similar flags */
-
-    if (rc == KERN_SUCCESS)
-    {
-        rc = vm_map_wire(kernel_map, Addr, Addr + cbSub, VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
-        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
-
-        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
-                                                                           RTR0MEMOBJTYPE_MAPPING,
-                                                                           (void *)Addr,
-                                                                           cbSub);
-        if (pMemFreeBSD)
-        {
-            Assert((vm_offset_t)pMemFreeBSD->Core.pv == Addr);
-            pMemFreeBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
-            *ppMem = &pMemFreeBSD->Core;
-            return VINF_SUCCESS;
-        }
-        rc = vm_map_remove(kernel_map, Addr, Addr + cbSub);
-        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
-    }
-    else
-        vm_object_deallocate(pMemToMapFreeBSD->pObject);
-
-    return VERR_NO_MEMORY;
+        prot |= VM_PROT_EXECUTE;
+
+    struct vm_page *page;
+    vaddr_t virt2 = virt;
+    size_t map_pos = 0;
+    TAILQ_FOREACH(page, &pMemNetBSD0->pglist, pageq.queue)
+    {
+        if (map_pos >= offSub)
+        {
+            if (cbSub > 0 && (map_pos >= offSub + cbSub))
+                break;
+
+            pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
+            virt2 += PAGE_SIZE;
+        }
+        map_pos += PAGE_SIZE;
+    }
+
+    pMemNetBSD->Core.pv = (void *)virt;
+    pMemNetBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
+    *ppMem = &pMemNetBSD->Core;
+
+    return VINF_SUCCESS;
 }
…
 DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                         unsigned fProt, RTR0PROCESS R0Process)
 {
-    /*
-     * Check for unsupported stuff.
-     */
-    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
-    if (uAlignment > PAGE_SIZE)
-        return VERR_NOT_SUPPORTED;
-
-    int rc;
-    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
-    struct proc *pProc = (struct proc *)R0Process;
-    struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
-
-    /* calc protection */
-    vm_prot_t ProtectionFlags = 0;
-    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
-        ProtectionFlags = VM_PROT_NONE;
-    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
-        ProtectionFlags |= VM_PROT_READ;
-    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
-        ProtectionFlags |= VM_PROT_WRITE;
-    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
-        ProtectionFlags |= VM_PROT_EXECUTE;
-
-    /* calc mapping address */
-    vm_offset_t AddrR3;
-    if (R3PtrFixed == (RTR3PTR)-1)
-    {
-        /** @todo: is this needed?. */
-        PROC_LOCK(pProc);
-        AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
-        PROC_UNLOCK(pProc);
-    }
-    else
-        AddrR3 = (vm_offset_t)R3PtrFixed;
-
-    /* Insert the pObject in the map. */
-    vm_object_reference(pMemToMapFreeBSD->pObject);
-    rc = vm_map_find(pProcMap,                  /* Map to insert the object in */
-                     pMemToMapFreeBSD->pObject, /* Object to map */
-                     0,                         /* Start offset in the object */
-                     &AddrR3,                   /* Start address IN/OUT */
-                     pMemToMap->cb,             /* Size of the mapping */
-#if __FreeBSD_version >= 1000055
-                     0,                         /* Upper bound of the mapping */
-#endif
-                     R3PtrFixed == (RTR3PTR)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
-                                                /* Whether a suitable address should be searched for first */
-                     ProtectionFlags,           /* protection flags */
-                     VM_PROT_ALL,               /* Maximum protection flags */
-                     0);                        /* copy-on-write and similar flags */
-
-    if (rc == KERN_SUCCESS)
-    {
-        rc = vm_map_wire(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
-        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
-
-        rc = vm_map_inherit(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_INHERIT_SHARE);
-        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
-
-        /*
-         * Create a mapping object for it.
-         */
-        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
-                                                                           RTR0MEMOBJTYPE_MAPPING,
-                                                                           (void *)AddrR3,
-                                                                           pMemToMap->cb);
-        if (pMemFreeBSD)
-        {
-            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
-            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
-            *ppMem = &pMemFreeBSD->Core;
-            return VINF_SUCCESS;
-        }
-
-        rc = vm_map_remove(pProcMap, AddrR3, AddrR3 + pMemToMap->cb);
-        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
-    }
-    else
-        vm_object_deallocate(pMemToMapFreeBSD->pObject);
-
-    return VERR_NO_MEMORY;
+    printf("NativeMapUser\n");
+    return VERR_NOT_SUPPORTED;
 }
…
 DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
 {
     vm_prot_t  ProtectionFlags = 0;
-    vm_offset_t AddrStart = (uintptr_t)pMem->pv + offSub;
-    vm_offset_t AddrEnd = AddrStart + cbSub;
-    vm_map_t pVmMap = rtR0MemObjFreeBSDGetMap(pMem);
+    vaddr_t AddrStart = (vaddr_t)pMem->pv + offSub;
+    vm_map_t pVmMap = rtR0MemObjNetBSDGetMap(pMem);
 
     if (!pVmMap)
         return VERR_NOT_SUPPORTED;
 
-    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
-        ProtectionFlags = VM_PROT_NONE;
     if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
-        ProtectionFlags |= VM_PROT_READ;
+        ProtectionFlags |= UVM_PROT_R;
     if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
-        ProtectionFlags |= VM_PROT_WRITE;
+        ProtectionFlags |= UVM_PROT_W;
     if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
-        ProtectionFlags |= VM_PROT_EXECUTE;
-
-    int krc = vm_map_protect(pVmMap, AddrStart, AddrEnd, ProtectionFlags, FALSE);
-    if (krc == KERN_SUCCESS)
+        ProtectionFlags |= UVM_PROT_X;
+
+    int error = uvm_map_protect(pVmMap, AddrStart, AddrStart + cbSub,
+                                ProtectionFlags, 0);
+    if (!error)
         return VINF_SUCCESS;
…
 DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
 {
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
-
-    switch (pMemFreeBSD->Core.enmType)
-    {
-        case RTR0MEMOBJTYPE_LOCK:
-        {
-            if (    pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
-                &&  pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
-            {
-                /* later */
-                return NIL_RTHCPHYS;
-            }
-
-            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + ptoa(iPage);
-
-            struct proc    *pProc     = (struct proc *)pMemFreeBSD->Core.u.Lock.R0Process;
-            struct vm_map  *pProcMap  = &pProc->p_vmspace->vm_map;
-            pmap_t pPhysicalMap       = vm_map_pmap(pProcMap);
-
-            return pmap_extract(pPhysicalMap, pb);
-        }
-
-        case RTR0MEMOBJTYPE_MAPPING:
-        {
-            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + ptoa(iPage);
-
-            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
-            {
-                struct proc    *pProc     = (struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process;
-                struct vm_map  *pProcMap  = &pProc->p_vmspace->vm_map;
-                pmap_t pPhysicalMap       = vm_map_pmap(pProcMap);
-
-                return pmap_extract(pPhysicalMap, pb);
-            }
-            return vtophys(pb);
-        }
-
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;
+
+    switch (pMemNetBSD->Core.enmType)
+    {
         case RTR0MEMOBJTYPE_PAGE:
         case RTR0MEMOBJTYPE_LOW:
+        {
+            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
+            paddr_t pa = 0;
+            pmap_extract(pmap_kernel(), va, &pa);
+            return pa;
+        }
+        case RTR0MEMOBJTYPE_CONT:
+            return pMemNetBSD->Core.u.Cont.Phys + ptoa(iPage);
+        case RTR0MEMOBJTYPE_PHYS:
+            return pMemNetBSD->Core.u.Phys.PhysBase + ptoa(iPage);
         case RTR0MEMOBJTYPE_PHYS_NC:
         {
-            RTHCPHYS addr;
-#if __FreeBSD_version >= 1000030
-            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
-#else
-            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
-#endif
-            addr = VM_PAGE_TO_PHYS(vm_page_lookup(pMemFreeBSD->pObject, iPage));
-#if __FreeBSD_version >= 1000030
-            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
-#else
-            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
-#endif
-            return addr;
-        }
-
-        case RTR0MEMOBJTYPE_PHYS:
-            return pMemFreeBSD->Core.u.Cont.Phys + ptoa(iPage);
-
-        case RTR0MEMOBJTYPE_CONT:
-            return pMemFreeBSD->Core.u.Phys.PhysBase + ptoa(iPage);
-
+            struct vm_page *page;
+            size_t i = 0;
+            TAILQ_FOREACH(page, &pMemNetBSD->pglist, pageq.queue)
+            {
+                if (i == iPage)
+                    break;
+                i++;
+            }
+            return VM_PAGE_TO_PHYS(page);
+        }
+        case RTR0MEMOBJTYPE_LOCK:
+        case RTR0MEMOBJTYPE_MAPPING:
+        {
+            pmap_t pmap;
+            if (pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
+                pmap = pmap_kernel();
+            else
+                pmap = ((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map.pmap;
+            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
+            paddr_t pa = 0;
+            pmap_extract(pmap, va, &pa);
+            return pa;
+        }
         case RTR0MEMOBJTYPE_RES_VIRT:
+            return NIL_RTHCPHYS;
         default:
             return NIL_RTHCPHYS;
     }
 }
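
The pattern at the heart of the new backend (rtR0MemObjNetBSDAllocHelper above, with the matching teardown in rtR0MemObjNativeFree) is worth seeing in isolation: reserve kernel virtual address space only, allocate physical pages, then hand-enter wired mappings page by page. The sketch below restates it as a self-contained pair of helpers; it assumes a NetBSD kernel build environment, and the example_* names are illustrative, not part of the changeset.

    /*
     * Standalone sketch of the allocation pattern used by
     * rtR0MemObjNetBSDAllocHelper() in the diff above.  Illustrative only.
     */
    #include <sys/param.h>
    #include <uvm/uvm.h>

    static void *
    example_alloc(size_t cb, paddr_t pa_max, bool fContig, struct pglist *plist)
    {
        /* 1) Virtual space first: an address range only, no backing pages. */
        vaddr_t va = uvm_km_alloc(kernel_map, cb, 0,
                                  UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
        if (va == 0)
            return NULL;

        /* 2) Physical pages below pa_max; nsegs == 1 forces one physically
         *    contiguous segment, one segment per page allows scattering. */
        if (uvm_pglistalloc(cb, 0, pa_max, PAGE_SIZE, 0, plist,
                            fContig ? 1 : (int)atop(cb), 1 /* waitok */) != 0)
        {
            uvm_km_free(kernel_map, va, cb, UVM_KMF_VAONLY);
            return NULL;
        }

        /* 3) Enter a wired kernel mapping for each page, in list order. */
        struct vm_page *pg;
        vaddr_t va2 = va;
        TAILQ_FOREACH(pg, plist, pageq.queue)
        {
            pmap_kenter_pa(va2, VM_PAGE_TO_PHYS(pg), VM_PROT_READ | VM_PROT_WRITE, 0);
            va2 += PAGE_SIZE;
        }
        return (void *)va;
    }

    static void
    example_free(void *pv, size_t cb, struct pglist *plist)
    {
        /* Teardown mirrors rtR0MemObjNativeFree(): unmap, drop VA, free pages. */
        pmap_kremove((vaddr_t)pv, cb);
        uvm_km_free(kernel_map, (vaddr_t)pv, cb, UVM_KMF_VAONLY);
        uvm_pglistfree(plist);
    }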
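Locking of user memory rests on uvm_map_pageable(): the new rtR0MemObjNativeLockUser() passes new_pageable = 0 to wire the range, and the free path passes 1 to unwire it again. A minimal sketch of that pairing, assuming p points at the target process and with error handling abbreviated:

    /* Sketch: wire a user range, use it, unwire it.  Illustrative only. */
    #include <sys/param.h>
    #include <sys/proc.h>
    #include <uvm/uvm.h>

    static int
    example_wire_user_range(struct proc *p, vaddr_t start, size_t cb)
    {
        struct vm_map *map = &p->p_vmspace->vm_map;

        /* new_pageable = 0: wire, the pages must stay resident. */
        int error = uvm_map_pageable(map, start, start + cb, 0, 0);
        if (error)
            return error;

        /* ... access the wired range, e.g. hand it to a device ... */

        /* new_pageable = 1: unwire, the range may be paged out again. */
        return uvm_map_pageable(map, start, start + cb, 1, 0);
    }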
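For orientation, the rtR0MemObjNative* functions in this file are never called directly by drivers; they back the public IPRT ring-0 API declared in iprt/memobj.h. A sketch of the caller's view (exampleUseMemObj is a made-up name, and error handling is abbreviated):

    /* Sketch: ring-0 IPRT code exercising the backend above.
     * RTR0MemObjAllocPage() dispatches to rtR0MemObjNativeAllocPage(),
     * RTR0MemObjGetPagePhysAddr() to rtR0MemObjNativeGetPagePhysAddr(). */
    #include <iprt/memobj.h>
    #include <iprt/param.h>
    #include <iprt/err.h>

    static int exampleUseMemObj(void)
    {
        RTR0MEMOBJ hMemObj;
        int rc = RTR0MemObjAllocPage(&hMemObj, 4 * PAGE_SIZE, false /* fExecutable */);
        if (RT_FAILURE(rc))
            return rc;

        void    *pv    = RTR0MemObjAddress(hMemObj);         /* kernel mapping */
        RTHCPHYS Phys0 = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        NOREF(pv); NOREF(Phys0);

        return RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }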