VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_ttm.c

Last change on this file was 104057, checked in by vboxsync, 8 weeks ago:

Additions: Linux: vboxvideo: Introduce initial support for kernel 6.9, bugref:10630.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.7 KB
/* $Id: vbox_ttm.c 104057 2024-03-26 10:50:45Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2023 Oracle and/or its affiliates.
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Michael Thayer <michael.thayer@oracle.com>
 */
#include "vbox_drv.h"

#if RTLNX_VER_MIN(6,3,0) || RTLNX_RHEL_RANGE(8,9, 8,99) || RTLNX_RHEL_MAJ_PREREQ(9,3)
# include <drm/ttm/ttm_tt.h>
#endif

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_MAJ_PREREQ(8,5)
# include <drm/drm_gem.h>
# include <drm/drm_gem_ttm_helper.h>
# include <drm/drm_gem_vram_helper.h>
#else
# include <drm/ttm/ttm_page_alloc.h>
#endif

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
# include <drm/ttm/ttm_range_manager.h>
#endif

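/*
 * Up to Linux 3.18 a TTM placement was a bare flags word; newer kernels
 * wrap it in struct ttm_place with a .flags member.  PLACEMENT_FLAGS()
 * lets the placement code below address the flags the same way in both
 * cases.
 */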
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
#define PLACEMENT_FLAGS(placement) (placement)
#else
#define PLACEMENT_FLAGS(placement) ((placement).flags)
#endif

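/*
 * Recover our driver-private structure from the ttm(_bo)_device embedded
 * in it; TTM callbacks only hand us the device pointer.
 */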
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static inline struct vbox_private *vbox_bdev(struct ttm_device *bd)
#else
static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
#endif
{
        return container_of(bd, struct vbox_private, ttm.bdev);
}

#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
        struct drm_global_reference *global_ref;
        int ret;

#if RTLNX_VER_MAX(5,0,0)
        global_ref = &vbox->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &vbox_ttm_mem_global_init;
        global_ref->release = &vbox_ttm_mem_global_release;
        ret = drm_global_item_ref(global_ref);
        if (ret) {
                DRM_ERROR("Failed setting up TTM memory subsystem.\n");
                return ret;
        }

        vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
#endif
        global_ref = &vbox->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (ret) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
#if RTLNX_VER_MAX(5,0,0)
                drm_global_item_unref(&vbox->ttm.mem_global_ref);
#endif
                return ret;
        }

        return 0;
}

/**
 * Removes the vbox memory manager object from the global memory manager.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
        drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
}
#endif

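/*
 * Final-release callback for our buffer objects: tear down the embedded
 * GEM object and free the vbox_bo wrapper allocated in vbox_bo_create().
 */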
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct vbox_bo *bo;

        bo = container_of(tbo, struct vbox_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &vbox_bo_ttm_destroy)
                return true;

        return false;
}

#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }

        return 0;
}
#endif

static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct vbox_bo *vboxbo = vbox_bo(bo);

        if (!vbox_ttm_bo_is_vbox_bo(bo))
                return;

        vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_SYSTEM);
        *pl = vboxbo->placement;
}

#if RTLNX_VER_MAX(5,14,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
        return 0;
}
#endif

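/*
 * Describe how CPU mappings of each memory type are backed: system memory
 * needs no aperture setup, while VRAM mappings point into PCI BAR 0 of the
 * virtual graphics device and are mapped write-combined where the kernel
 * lets us say so.
 */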
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
{
        struct vbox_private *vbox = vbox_bdev(bdev);
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
#else
# if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_resource *mem)
# else /* >= 5.13.0 */
static int vbox_ttm_io_mem_reserve(struct ttm_device *bdev,
                                   struct ttm_resource *mem)
# endif /* >= 5.13.0 */
{
        struct vbox_private *vbox = vbox_bdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
# if RTLNX_VER_MAX(5,12,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        mem->size = mem->num_pages << PAGE_SHIFT;
# endif
        mem->start = 0;
        mem->bus.is_iomem = false;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
# if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                mem->bus.caching = ttm_write_combined;
# endif
# if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                mem->bus.offset = (mem->start << PAGE_SHIFT) +
                                  pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0);
# else
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->start = pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0);
# endif
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
#endif

#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static void vbox_ttm_io_mem_free(struct ttm_device *bdev,
                                 struct ttm_resource *mem)
{
}
#elif RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_resource *mem)
{
}
#else
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_mem_reg *mem)
{
}
#endif

#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static void vbox_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}
#elif RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static void vbox_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}
#else
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
        .destroy = &vbox_ttm_backend_destroy,
};
#endif

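/*
 * Allocate and initialise the TTM backing store for a buffer object.  The
 * ttm_tt_init() signature has changed several times, hence the cascade of
 * version checks.
 */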
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                                         unsigned long size,
                                         u32 page_flags,
                                         struct page *dummy_read_page)
#else
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
                                         u32 page_flags)
#endif
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
        tt->func = &vbox_tt_backend_func;
#endif
#if RTLNX_VER_MIN(5,19,0) || RTLNX_RHEL_RANGE(8,8, 8,99) || RTLNX_RHEL_RANGE(9,2, 9,99) || RTLNX_SUSE_MAJ_PREREQ(15,5)
        if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined, 0)) {
#elif RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined)) {
#elif RTLNX_VER_MIN(4,17,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
        if (ttm_tt_init(tt, bo, page_flags)) {
#else
        if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
#endif
                kfree(tt);
                return NULL;
        }

        return tt;
}

#if RTLNX_VER_MAX(4,17,0)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}
# else
static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
                                struct ttm_operation_ctx *ctx)
{
        return ttm_pool_populate(ttm, ctx);
}
# endif

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}
#endif

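/*
 * Buffer moves are plain memcpy moves.  From 6.4 a buffer may reach this
 * callback without a resource attached; anything not already headed for
 * system memory is then bounced through a temporary system placement by
 * returning -EMULTIHOP.
 */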
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static int vbox_bo_move(struct ttm_buffer_object *bo, bool evict,
                        struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem,
                        struct ttm_place *hop)
{
# if RTLNX_VER_MIN(6,4,0)
        if (!bo->resource) {
                if (new_mem->mem_type != TTM_PL_SYSTEM) {
                        hop->mem_type = TTM_PL_SYSTEM;
                        hop->flags = TTM_PL_FLAG_TEMPORARY;
                        return -EMULTIHOP;
                }
                ttm_bo_move_null(bo, new_mem);
                return 0;
        }
# endif
        return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
#endif

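/* The TTM callback table; entries come and go with the kernel's TTM API. */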
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static struct ttm_device_funcs vbox_bo_driver = {
#else /* < 5.13.0 */
static struct ttm_bo_driver vbox_bo_driver = {
#endif /* < 5.13.0 */
        .ttm_tt_create = vbox_ttm_tt_create,
#if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        .ttm_tt_destroy = vbox_ttm_tt_destroy,
#endif
#if RTLNX_VER_MAX(4,17,0)
        .ttm_tt_populate = vbox_ttm_tt_populate,
        .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
#endif
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        .init_mem_type = vbox_bo_init_mem_type,
#endif
#if RTLNX_VER_MIN(4,10,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)
        .eviction_valuable = ttm_bo_eviction_valuable,
#endif
        .evict_flags = vbox_bo_evict_flags,
#if RTLNX_VER_MAX(5,14,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
        .verify_access = vbox_bo_verify_access,
#endif
        .io_mem_reserve = &vbox_ttm_io_mem_reserve,
        .io_mem_free = &vbox_ttm_io_mem_free,
#if RTLNX_VER_MIN(4,12,0) || RTLNX_RHEL_MAJ_PREREQ(7,5)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,
# endif
#endif
#if (RTLNX_VER_RANGE(4,7,0, 4,11,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)) && !RTLNX_RHEL_MAJ_PREREQ(7,5)
        .lru_tail = &ttm_bo_default_lru_tail,
        .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
#endif
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        .move = &vbox_bo_move,
#endif
};

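/*
 * Set up the TTM device and a VRAM range manager covering the usable video
 * memory of the virtual graphics card, then mark the framebuffer BAR
 * write-combined (via MTRR on kernels that still use it).
 */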
int vbox_mm_init(struct vbox_private *vbox)
{
        int ret;
        struct drm_device *dev = vbox->dev;
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        struct ttm_device *bdev = &vbox->ttm.bdev;
#else
        struct ttm_bo_device *bdev = &vbox->ttm.bdev;
#endif

#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
        ret = vbox_ttm_global_init(vbox);
        if (ret)
                return ret;
#endif
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        ret = ttm_device_init(&vbox->ttm.bdev,
#else
        ret = ttm_bo_device_init(&vbox->ttm.bdev,
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
                              vbox->ttm.bo_global_ref.ref.object,
#endif
                              &vbox_bo_driver,
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                              dev->dev,
#endif
#if RTLNX_VER_MIN(3,15,0) || RTLNX_RHEL_MAJ_PREREQ(7,1)
                              dev->anon_inode->i_mapping,
#endif
#if RTLNX_VER_MIN(5,5,0) || RTLNX_RHEL_MIN(8,3) || RTLNX_SUSE_MAJ_PREREQ(15,3)
                              dev->vma_offset_manager,
#elif RTLNX_VER_MAX(5,2,0) && !RTLNX_RHEL_MAJ_PREREQ(8,2)
                              DRM_FILE_PAGE_OFFSET,
#endif
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                              false,
#endif
                              true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver; %d\n", ret);
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
                goto err_ttm_global_release;
#else
                return ret;
#endif
        }

#if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ret = ttm_range_man_init(bdev, TTM_PL_VRAM, false,
                                 vbox->available_vram_size >> PAGE_SHIFT);
#else
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             vbox->available_vram_size >> PAGE_SHIFT);
#endif
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                goto err_device_release;
        }

#ifdef DRM_MTRR_WC
        vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                     pci_resource_len(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                     DRM_MTRR_WC);
#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                         pci_resource_len(VBOX_DRM_TO_PCI_DEV(dev), 0));
#endif
        return 0;

err_device_release:
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        ttm_device_fini(&vbox->ttm.bdev);
#else
        ttm_bo_device_release(&vbox->ttm.bdev);
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
err_ttm_global_release:
        vbox_ttm_global_release(vbox);
#endif
        return ret;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
        drm_mtrr_del(vbox->fb_mtrr,
                     pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0),
                     pci_resource_len(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0), DRM_MTRR_WC);
#else
        arch_phys_wc_del(vbox->fb_mtrr);
#endif
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        ttm_device_fini(&vbox->ttm.bdev);
#else
        ttm_bo_device_release(&vbox->ttm.bdev);
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
        vbox_ttm_global_release(vbox);
#endif
}

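/*
 * Fill in bo->placement for the domains requested in mem_type (VRAM,
 * system, or both); with no domain given, fall back to system memory.
 * From 6.9 the separate busy-placement list is gone and preferred
 * placements are marked with TTM_PL_FLAG_DESIRED instead.
 */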
void vbox_ttm_placement(struct vbox_bo *bo, u32 mem_type)
{
        u32 c = 0;
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
        bo->placement.fpfn = 0;
        bo->placement.lpfn = 0;
#else
        unsigned int i;
#endif

        bo->placement.placement = bo->placements;
#if RTLNX_VER_MAX(6,9,0)
        bo->placement.busy_placement = bo->placements;
#endif

        if (mem_type & VBOX_MEM_TYPE_VRAM) {
#if RTLNX_VER_MIN(6,9,0)
                bo->placements[c].mem_type = TTM_PL_VRAM;
                PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_FLAG_DESIRED;
#elif RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_VRAM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_VRAM;
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
#endif
        }
        if (mem_type & VBOX_MEM_TYPE_SYSTEM) {
#if RTLNX_VER_MIN(6,9,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_FLAG_DESIRED;
#elif RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_MASK_CACHING;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
        }
        if (!c) {
#if RTLNX_VER_MIN(6,9,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_FLAG_DESIRED;
#elif RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_MASK_CACHING;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
        }

        bo->placement.num_placement = c;
#if RTLNX_VER_MAX(6,9,0)
        bo->placement.num_busy_placement = c;
#endif

#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
        for (i = 0; i < c; ++i) {
                bo->placements[i].fpfn = 0;
                bo->placements[i].lpfn = 0;
        }
#endif
}

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static const struct drm_gem_object_funcs vbox_drm_gem_object_funcs = {
        .free = vbox_gem_free_object,
        .print_info = drm_gem_ttm_print_info,
# if RTLNX_VER_MIN(6,5,0)
        .vmap = drm_gem_ttm_vmap,
        .vunmap = drm_gem_ttm_vunmap,
# endif
# if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        .mmap = drm_gem_ttm_mmap,
# endif
};
#endif

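/*
 * Create a GEM-backed TTM buffer object of the given size.  The object is
 * created unpinned and may live in VRAM or system memory.
 */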
int vbox_bo_create(struct drm_device *dev, int size, int align,
                   u32 flags, struct vbox_bo **pvboxbo)
{
        struct vbox_private *vbox = dev->dev_private;
        struct vbox_bo *vboxbo;
#if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
        size_t acc_size;
#endif
        int ret;

        vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
        if (!vboxbo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &vboxbo->gem, size);
        if (ret)
                goto err_free_vboxbo;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        if (!vboxbo->gem.funcs)
                vboxbo->gem.funcs = &vbox_drm_gem_object_funcs;
#endif
        vboxbo->bo.bdev = &vbox->ttm.bdev;
#if RTLNX_VER_MAX(3,15,0) && !RTLNX_RHEL_MAJ_PREREQ(7,1)
        vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

        vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_VRAM | VBOX_MEM_TYPE_SYSTEM);

#if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
        acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                       sizeof(struct vbox_bo));
#endif

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        /* Initialization of the following was removed from the DRM stack
         * in 5.14, so we need to do it manually. */
        vboxbo->bo.base.funcs = &vbox_drm_gem_object_funcs;
        kref_init(&vboxbo->bo.base.refcount);
        vboxbo->bo.base.size = size;
        vboxbo->bo.base.dev = dev;
        dma_resv_init(&vboxbo->bo.base._resv);
        drm_vma_node_reset(&vboxbo->bo.base.vma_node);
#endif

#if RTLNX_VER_MIN(6,1,0) || RTLNX_RHEL_RANGE(8,9, 8,99) || RTLNX_RHEL_RANGE(9,3, 9,99) || RTLNX_SUSE_MAJ_PREREQ(15,5)
        ret = ttm_bo_init_validate(&vbox->ttm.bdev, &vboxbo->bo,
#else
        ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
#endif /* < 6.1.0 */
                          ttm_bo_type_device, &vboxbo->placement,
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
                          align >> PAGE_SHIFT, false, NULL, acc_size,
#elif RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99) /* < 5.13.0, < RHEL(8.6, 8.99) */
                          align >> PAGE_SHIFT, false, acc_size,
#else /* >= 5.13.0 */
                          align >> PAGE_SHIFT, false,
#endif /* >= 5.13.0 */
#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
                          NULL, NULL, vbox_bo_ttm_destroy);
#else
                          NULL, vbox_bo_ttm_destroy);
#endif
        if (ret) {
                /* On failure, ttm_bo_init() is expected to call
                 * vbox_bo_ttm_destroy(), which in turn frees @vboxbo. */
                goto err_exit;
        }

        *pvboxbo = vboxbo;

        return 0;

err_free_vboxbo:
        kfree(vboxbo);
err_exit:
        return ret;
}

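/* Offset of the buffer object within VRAM, as seen by the (virtual) GPU. */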
static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        return bo->bo.resource->start << PAGE_SHIFT;
#elif RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
        return bo->bo.mem.start << PAGE_SHIFT;
#else
        return bo->bo.offset;
#endif
}

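/*
 * Pin a buffer object into the given memory domain, optionally returning
 * its GPU offset.  Pinning is reference counted; before 5.11 eviction was
 * blocked by setting TTM_PL_FLAG_NO_EVICT on every placement, afterwards
 * via ttm_bo_pin().
 */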
int vbox_bo_pin(struct vbox_bo *bo, u32 mem_type, u64 *gpu_addr)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
        struct ttm_operation_ctx ctx = { false, false };
#endif
        int ret;
#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        int i;
#endif

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = vbox_bo_gpu_offset(bo);

                return 0;
        }

        vbox_ttm_placement(bo, mem_type);

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
#endif

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#else
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
        if (ret)
                return ret;

        bo->pin_count = 1;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ttm_bo_pin(&bo->bo);
#endif

        if (gpu_addr)
                *gpu_addr = vbox_bo_gpu_offset(bo);

        return 0;
}

int vbox_bo_unpin(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
# if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        struct ttm_operation_ctx ctx = { false, false };
# endif
#endif
        int ret = 0;
#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        int i;
#endif

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;
#endif

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#elif RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
        if (ret)
                return ret;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ttm_bo_unpin(&bo->bo);
#endif

        return 0;
}

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned. The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
# if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
        struct ttm_operation_ctx ctx = { false, false };
# endif
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        vbox_ttm_placement(bo, VBOX_MEM_TYPE_SYSTEM);

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
# else
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
# endif
        if (ret) {
                DRM_ERROR("pushing to system memory failed\n");
                return ret;
        }

        return 0;
}
#endif

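/*
 * mmap entry point: from 5.14 mappings go through the generic GEM mmap
 * helper, while older kernels map through ttm_bo_mmap() directly.
 */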
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vbox_private *vbox;
        int ret = -EINVAL;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        vbox = file_priv->minor->dev->dev_private;

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        (void)vbox;
        if (drm_dev_is_unplugged(file_priv->minor->dev))
                return -ENODEV;
        ret = drm_gem_mmap(filp, vma);
#else
        ret = ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
#endif
        return ret;
}