VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_main.c@ 69564

Last change on this file since 69564 was 68334, checked in by vboxsync, 7 years ago

Additions/linux/drm: refined the RHEL_7 test to distinguish between RHEL_73 (DRM patchlevel 4.6.5) and RHEL_74 (DRM patchlevel 4.10.13)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 13.9 KB
/* $Id: vbox_main.c 68334 2017-08-08 09:26:39Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_main.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>,
 *          Michael Thayer <michael.thayer@oracle.com>,
 *          Hans de Goede <hdegoede@redhat.com>
 */
#include "vbox_drv.h"

#include <VBoxVideoGuest.h>
#include <VBoxVideoVBE.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

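/**
 * Framebuffer destroy callback: drop the reference we hold on the backing
 * GEM object, then unregister and free the framebuffer itself.
 */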
static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
        struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);

        if (vbox_fb->obj)
                drm_gem_object_unreference_unlocked(vbox_fb->obj);

        drm_framebuffer_cleanup(fb);
        kfree(fb);
}

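/**
 * Enable VBVA (video acceleration) for every CRTC that does not yet have it,
 * using the per-CRTC command buffers set up at the end of usable VRAM by
 * vbox_accel_init().
 */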
void vbox_enable_accel(struct vbox_private *vbox)
{
        unsigned int i;
        struct VBVABUFFER *vbva;

        if (!vbox->vbva_info || !vbox->vbva_buffers) {
                /* Should never happen... */
                DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
                return;
        }

        for (i = 0; i < vbox->num_crtcs; ++i) {
                if (!vbox->vbva_info[i].pVBVA) {
                        vbva = (struct VBVABUFFER *)
                                ((u8 *)vbox->vbva_buffers +
                                 i * VBVA_MIN_BUFFER_SIZE);
                        if (!VBoxVBVAEnable(&vbox->vbva_info[i],
                                            vbox->guest_pool, vbva, i)) {
                                /* very old host or driver error. */
                                DRM_ERROR("vboxvideo: VBoxVBVAEnable failed - heap allocation error.\n");
                                return;
                        }
                }
        }
}

void vbox_disable_accel(struct vbox_private *vbox)
{
        unsigned int i;

        for (i = 0; i < vbox->num_crtcs; ++i)
                VBoxVBVADisable(&vbox->vbva_info[i], vbox->guest_pool, i);
}

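/**
 * Tell the host which guest capabilities we support.  Video mode hints are
 * only advertised once the initial mode has been queried.
 */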
void vbox_report_caps(struct vbox_private *vbox)
{
        u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION
                   | VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
        if (vbox->initial_mode_queried)
                caps |= VBVACAPS_VIDEO_MODE_HINTS;
        VBoxHGSMISendCapsInfo(vbox->guest_pool, caps);
}

/**
 * Send information about dirty rectangles to VBVA. If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?)
 */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
                                       struct drm_clip_rect *rects,
                                       unsigned int num_rects)
{
        struct vbox_private *vbox = fb->dev->dev_private;
        struct drm_crtc *crtc;
        unsigned int i;

        mutex_lock(&vbox->hw_mutex);
        list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
                if (CRTC_FB(crtc) == fb) {
                        vbox_enable_accel(vbox);
                        for (i = 0; i < num_rects; ++i) {
                                VBVACMDHDR cmd_hdr;
                                unsigned int crtc_id =
                                        to_vbox_crtc(crtc)->crtc_id;

                                if ((rects[i].x1 >
                                        crtc->x + crtc->hwmode.hdisplay) ||
                                    (rects[i].y1 >
                                        crtc->y + crtc->hwmode.vdisplay) ||
                                    (rects[i].x2 < crtc->x) ||
                                    (rects[i].y2 < crtc->y))
                                        continue;

                                cmd_hdr.x = (s16)rects[i].x1;
                                cmd_hdr.y = (s16)rects[i].y1;
                                cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
                                cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;

                                if (VBoxVBVABufferBeginUpdate(
                                            &vbox->vbva_info[crtc_id],
                                            vbox->guest_pool)) {
                                        VBoxVBVAWrite(&vbox->vbva_info[crtc_id],
                                                      vbox->guest_pool,
                                                      &cmd_hdr,
                                                      sizeof(cmd_hdr));
                                        VBoxVBVABufferEndUpdate(
                                                &vbox->vbva_info[crtc_id]);
                                }
                        }
                }
        }
        mutex_unlock(&vbox->hw_mutex);
}

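/** DIRTYFB ioctl handler: pass the user's clip rectangles on to the host. */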
static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                       struct drm_file *file_priv,
                                       unsigned int flags, unsigned int color,
                                       struct drm_clip_rect *rects,
                                       unsigned int num_rects)
{
        vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

        return 0;
}

static const struct drm_framebuffer_funcs vbox_fb_funcs = {
        .destroy = vbox_user_framebuffer_destroy,
        .dirty = vbox_user_framebuffer_dirty,
};

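/**
 * Fill in a vbox_framebuffer from the mode command, attach the backing GEM
 * object and register the framebuffer with the DRM core.
 */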
int vbox_framebuffer_init(struct drm_device *dev,
                          struct vbox_framebuffer *vbox_fb,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) || defined(RHEL_73)
                          const
#endif
                          struct DRM_MODE_FB_CMD *mode_cmd,
                          struct drm_gem_object *obj)
{
        int ret;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
        drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
#else
        drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
#endif
        vbox_fb->obj = obj;
        ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                return ret;
        }

        return 0;
}

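/**
 * .fb_create hook: look up the GEM object named by the first handle in the
 * mode command and wrap it in a new vbox_framebuffer.
 */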
static struct drm_framebuffer *vbox_user_framebuffer_create(
                struct drm_device *dev,
                struct drm_file *filp,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) || defined(RHEL_73)
                const struct drm_mode_fb_cmd2 *mode_cmd)
#else
                struct drm_mode_fb_cmd2 *mode_cmd)
#endif
{
        struct drm_gem_object *obj;
        struct vbox_framebuffer *vbox_fb;
        int ret;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || defined(RHEL_74)
        obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
#else
        obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
#endif
        if (!obj)
                return ERR_PTR(-ENOENT);

        vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
        if (!vbox_fb) {
                drm_gem_object_unreference_unlocked(obj);
                return ERR_PTR(-ENOMEM);
        }

        ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
                kfree(vbox_fb);
                return ERR_PTR(ret);
        }

        return &vbox_fb->base;
}

static const struct drm_mode_config_funcs vbox_mode_funcs = {
        .fb_create = vbox_user_framebuffer_create,
};

static void vbox_accel_fini(struct vbox_private *vbox)
{
        if (vbox->vbva_info) {
                vbox_disable_accel(vbox);
                kfree(vbox->vbva_info);
                vbox->vbva_info = NULL;
        }
        if (vbox->vbva_buffers) {
                pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
                vbox->vbva_buffers = NULL;
        }
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) && !defined(RHEL_73)
#define pci_iomap_range(dev, bar, offset, maxlen) \
        ioremap(pci_resource_start(dev, bar) + (offset), maxlen)
#endif

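/**
 * Allocate the per-CRTC VBVA state and map the command buffers, which are
 * taken from the end of usable VRAM (one VBVA_MIN_BUFFER_SIZE buffer per
 * CRTC).
 */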
static int vbox_accel_init(struct vbox_private *vbox)
{
        unsigned int i;

        vbox->vbva_info = kcalloc(vbox->num_crtcs, sizeof(*vbox->vbva_info),
                                  GFP_KERNEL);
        if (!vbox->vbva_info)
                return -ENOMEM;

        /* Take a command buffer for each screen from the end of usable VRAM. */
        vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

        vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
                                             vbox->available_vram_size,
                                             vbox->num_crtcs *
                                             VBVA_MIN_BUFFER_SIZE);
        if (!vbox->vbva_buffers)
                return -ENOMEM;

        for (i = 0; i < vbox->num_crtcs; ++i)
                VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
                                           vbox->available_vram_size +
                                           i * VBVA_MIN_BUFFER_SIZE,
                                           VBVA_MIN_BUFFER_SIZE);

        return 0;
}

/** Do we support the 4.3 plus mode hint reporting interface? */
static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
        u32 have_hints, have_cursor;
        int ret;

        ret = VBoxQueryConfHGSMI(vbox->guest_pool,
                                 VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
                                 &have_hints);
        if (RT_FAILURE(ret))
                return false;

        ret = VBoxQueryConfHGSMI(vbox->guest_pool,
                                 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
                                 &have_cursor);
        if (RT_FAILURE(ret))
                return false;

        return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}

/**
 * Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 */
static int vbox_hw_init(struct vbox_private *vbox)
{
        int ret;

        vbox->full_vram_size = VBoxVideoGetVRAMSize();
        vbox->any_pitch = VBoxVideoAnyWidthAllowed();

        DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

        /* Map the guest heap at the end of VRAM. */
        vbox->guest_heap =
                pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_SIZE);
        if (!vbox->guest_heap)
                return -ENOMEM;

        /* Create a guest-heap memory pool using 2^4 = 16 byte chunks. */
        vbox->guest_pool = gen_pool_create(4, -1);
        if (!vbox->guest_pool)
                return -ENOMEM;

        ret = gen_pool_add_virt(vbox->guest_pool,
                                (unsigned long)vbox->guest_heap,
                                GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_USABLE_SIZE, -1);
        if (ret)
                return ret;

        /* Reduce available VRAM size to reflect the guest heap. */
        vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
        /* Linux drm represents monitors as a 32-bit array. */
        vbox->num_crtcs = min_t(u32, VBoxHGSMIGetMonitorCount(vbox->guest_pool),
                                VBOX_MAX_SCREENS);

        if (!have_hgsmi_mode_hints(vbox))
                return -ENOTSUPP;

        vbox->last_mode_hints =
                kcalloc(vbox->num_crtcs, sizeof(VBVAMODEHINT), GFP_KERNEL);
        if (!vbox->last_mode_hints)
                return -ENOMEM;

        return vbox_accel_init(vbox);
}

static void vbox_hw_fini(struct vbox_private *vbox)
{
        vbox_accel_fini(vbox);
        kfree(vbox->last_mode_hints);
        vbox->last_mode_hints = NULL;
}

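/**
 * Driver load callback: allocate and initialise the device-private data,
 * then bring up the hardware, memory manager, mode setting, IRQ handling
 * and fbdev emulation.  On any failure the partially initialised state is
 * torn down again via vbox_driver_unload().
 */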
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct vbox_private *vbox;
        int ret = 0;

        if (!VBoxHGSMIIsSupported())
                return -ENODEV;

        vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
        if (!vbox)
                return -ENOMEM;

        dev->dev_private = vbox;
        vbox->dev = dev;

        mutex_init(&vbox->hw_mutex);

        ret = vbox_hw_init(vbox);
        if (ret)
                goto out_free;

        ret = vbox_mm_init(vbox);
        if (ret)
                goto out_free;

        drm_mode_config_init(dev);

        dev->mode_config.funcs = (void *)&vbox_mode_funcs;
        dev->mode_config.min_width = 64;
        dev->mode_config.min_height = 64;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
        dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

        ret = vbox_mode_init(dev);
        if (ret)
                goto out_free;

        ret = vbox_irq_init(vbox);
        if (ret)
                goto out_free;

        ret = vbox_fbdev_init(dev);
        if (ret)
                goto out_free;

        return 0;

out_free:
        vbox_driver_unload(dev);
        return ret;
}

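/**
 * Driver unload callback: tear everything down in roughly the reverse order
 * of vbox_driver_load().  The return type changed to void in Linux 4.11,
 * hence the conditional prototype below.
 */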
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
void vbox_driver_unload(struct drm_device *dev)
#else
int vbox_driver_unload(struct drm_device *dev)
#endif
{
        struct vbox_private *vbox = dev->dev_private;

        vbox_fbdev_fini(dev);
        vbox_irq_fini(vbox);
        vbox_mode_fini(dev);
        if (dev->mode_config.funcs)
                drm_mode_config_cleanup(dev);

        vbox_hw_fini(vbox);
        vbox_mm_fini(vbox);
        if (vbox->guest_pool)
                gen_pool_destroy(vbox->guest_pool);
        if (vbox->guest_heap)
                pci_iounmap(dev->pdev, vbox->guest_heap);
        kfree(vbox);
        dev->dev_private = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
        return 0;
#endif
}

/**
 * @note this is described in the DRM framework documentation. AST does not
 * have it, but we get an oops on driver unload if it is not present.
 */
void vbox_driver_lastclose(struct drm_device *dev)
{
        struct vbox_private *vbox = dev->dev_private;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) || defined(RHEL_73)
        if (vbox->fbdev)
                drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
        drm_modeset_lock_all(dev);
        if (vbox->fbdev)
                drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
        drm_modeset_unlock_all(dev);
#endif
}

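/**
 * Create a TTM-backed GEM object.  The requested size is rounded up to a
 * multiple of the page size; a zero size is rejected.
 */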
int vbox_gem_create(struct drm_device *dev,
                    u32 size, bool iskernel, struct drm_gem_object **obj)
{
        struct vbox_bo *vboxbo;
        int ret;

        *obj = NULL;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("failed to allocate GEM object\n");
                return ret;
        }

        *obj = &vboxbo->gem;

        return 0;
}

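/**
 * Dumb-buffer create ioctl: compute the pitch and size from the requested
 * width, height and bits per pixel, create a GEM object of that size and
 * return a handle to it.
 */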
int vbox_dumb_create(struct drm_file *file,
                     struct drm_device *dev, struct drm_mode_create_dumb *args)
{
        int ret;
        struct drm_gem_object *gobj;
        u32 handle;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = vbox_gem_create(dev, args->size, false, &gobj);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file, gobj, &handle);
        drm_gem_object_unreference_unlocked(gobj);
        if (ret)
                return ret;

        args->handle = handle;

        return 0;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && !defined(RHEL_73)
int vbox_dumb_destroy(struct drm_file *file,
                      struct drm_device *dev, u32 handle)
{
        return drm_gem_handle_delete(file, handle);
}
#endif

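/** Drop one TTM reference on the buffer object and clear the caller's pointer. */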
static void vbox_bo_unref(struct vbox_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;

        tbo = &((*bo)->bo);
        ttm_bo_unref(&tbo);
        if (!tbo)
                *bo = NULL;
}

void vbox_gem_free_object(struct drm_gem_object *obj)
{
        struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

        vbox_bo_unref(&vbox_bo);
}

static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && !defined(RHEL_73)
        return bo->bo.addr_space_offset;
#else
        return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif
}

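/**
 * Dumb-buffer mmap-offset ioctl: look up the GEM object for the given handle
 * and return the fake offset at which user space can mmap it.
 */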
int
vbox_dumb_mmap_offset(struct drm_file *file,
                      struct drm_device *dev,
                      u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;
        struct vbox_bo *bo;

        mutex_lock(&dev->struct_mutex);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || defined(RHEL_74)
        obj = drm_gem_object_lookup(file, handle);
#else
        obj = drm_gem_object_lookup(dev, file, handle);
#endif
        if (!obj) {
                ret = -ENOENT;
                goto out_unlock;
        }

        bo = gem_to_vbox_bo(obj);
        *offset = vbox_bo_mmap_offset(bo);

        drm_gem_object_unreference(obj);
        ret = 0;

out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}