From 35ec0eafc7f96bf49f1cd4c1996027b52b0ebfb2 Mon Sep 17 00:00:00 2001 From: Daniel van Vugt Date: Tue, 30 Jan 2018 17:47:17 +0800 Subject: [PATCH 1/4] renderer-native: Reference count front buffers. Start reference counting front buffers instead of assuming we know their (scanout) lifetimes. Functionally, this should not change anything. But it is a prerequisite for other changes coming later (https://gitlab.gnome.org/GNOME/mutter/issues/3). --- src/backends/native/meta-kms-buffer.c | 287 +++++++++++++++++++++ src/backends/native/meta-kms-buffer.h | 50 ++++ src/backends/native/meta-renderer-native.c | 235 ++++------------- src/meson.build | 2 + 4 files changed, 390 insertions(+), 184 deletions(-) create mode 100644 src/backends/native/meta-kms-buffer.c create mode 100644 src/backends/native/meta-kms-buffer.h diff --git a/src/backends/native/meta-kms-buffer.c b/src/backends/native/meta-kms-buffer.c new file mode 100644 index 00000000000..e39d143b7d1 --- /dev/null +++ b/src/backends/native/meta-kms-buffer.c @@ -0,0 +1,287 @@ +/* + * Copyright (C) 2011 Intel Corporation. + * Copyright (C) 2016 Red Hat + * Copyright (C) 2018 DisplayLink (UK) Ltd. + * Copyright (C) 2018 Canonical Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ * + * Author: Daniel van Vugt + */ + +#include "backends/native/meta-kms-buffer.h" + +#include "config.h" + +#include +#include +#include +#include + +#define INVALID_FB_ID 0U + +typedef enum _MetaKmsBufferType +{ + META_KMS_BUFFER_TYPE_GBM, + META_KMS_BUFFER_TYPE_WRAPPED_DUMB +} MetaKmsBufferType; + +struct _MetaKmsBuffer +{ + GObject parent; + + MetaKmsBufferType type; + + union + { + uint32_t fb_id; + + struct + { + uint32_t fb_id; + struct gbm_surface *surface; + struct gbm_bo *bo; + MetaGpuKms *gpu_kms; + } gbm; + + struct + { + uint32_t fb_id; + } wrapped_dumb; + }; +}; + +G_DEFINE_TYPE (MetaKmsBuffer, meta_kms_buffer, G_TYPE_OBJECT) + +static gboolean +meta_kms_buffer_acquire_swapped_buffer (MetaKmsBuffer *kms_buffer, + gboolean use_modifiers, + GError **error) +{ + uint32_t handles[4] = {0, 0, 0, 0}; + uint32_t strides[4] = {0, 0, 0, 0}; + uint32_t offsets[4] = {0, 0, 0, 0}; + uint64_t modifiers[4] = {0, 0, 0, 0}; + uint32_t width, height, format; + struct gbm_bo *bo; + int i; + int drm_fd; + + g_return_val_if_fail (META_IS_KMS_BUFFER (kms_buffer), FALSE); + g_return_val_if_fail (kms_buffer->type == META_KMS_BUFFER_TYPE_GBM, FALSE); + g_return_val_if_fail (kms_buffer->gbm.bo == NULL, FALSE); + g_return_val_if_fail (kms_buffer->gbm.surface != NULL, FALSE); + g_return_val_if_fail (kms_buffer->gbm.gpu_kms != NULL, FALSE); + g_return_val_if_fail (error == NULL || *error == NULL, FALSE); + + drm_fd = meta_gpu_kms_get_fd (kms_buffer->gbm.gpu_kms); + g_return_val_if_fail (drm_fd >= 0, FALSE); + + bo = gbm_surface_lock_front_buffer (kms_buffer->gbm.surface); + if (!bo) + { + g_set_error (error, + G_IO_ERROR, + G_IO_ERROR_FAILED, + "gbm_surface_lock_front_buffer failed"); + return FALSE; + } + + if (gbm_bo_get_handle_for_plane (bo, 0).s32 == -1) + { + /* Failed to fetch handle to plane, falling back to old method */ + strides[0] = gbm_bo_get_stride (bo); + handles[0] = gbm_bo_get_handle (bo).u32; + offsets[0] = 0; + modifiers[0] = DRM_FORMAT_MOD_INVALID; + } + 
else + { + for (i = 0; i < gbm_bo_get_plane_count (bo); i++) + { + strides[i] = gbm_bo_get_stride_for_plane (bo, i); + handles[i] = gbm_bo_get_handle_for_plane (bo, i).u32; + offsets[i] = gbm_bo_get_offset (bo, i); + modifiers[i] = gbm_bo_get_modifier (bo); + } + } + + width = gbm_bo_get_width (bo); + height = gbm_bo_get_height (bo); + format = gbm_bo_get_format (bo); + + if (use_modifiers && modifiers[0] != DRM_FORMAT_MOD_INVALID) + { + if (drmModeAddFB2WithModifiers (drm_fd, + width, + height, + format, + handles, + strides, + offsets, + modifiers, + &kms_buffer->fb_id, + DRM_MODE_FB_MODIFIERS)) + { + g_set_error (error, + G_IO_ERROR, + g_io_error_from_errno (errno), + "drmModeAddFB2WithModifiers failed: %s", + g_strerror (errno)); + gbm_surface_release_buffer (kms_buffer->gbm.surface, bo); + return FALSE; + } + } + else if (drmModeAddFB2 (drm_fd, + width, + height, + format, + handles, + strides, + offsets, + &kms_buffer->fb_id, + 0)) + { + if (format != DRM_FORMAT_XRGB8888) + { + g_set_error (error, + G_IO_ERROR, + G_IO_ERROR_FAILED, + "drmModeAddFB does not support format 0x%x", + format); + gbm_surface_release_buffer (kms_buffer->gbm.surface, bo); + return FALSE; + } + + if (drmModeAddFB (drm_fd, + width, + height, + 24, + 32, + strides[0], + handles[0], + &kms_buffer->fb_id)) + { + g_set_error (error, + G_IO_ERROR, + g_io_error_from_errno (errno), + "drmModeAddFB failed: %s", + g_strerror (errno)); + gbm_surface_release_buffer (kms_buffer->gbm.surface, bo); + return FALSE; + } + } + + kms_buffer->gbm.bo = bo; + + return TRUE; +} + +MetaKmsBuffer * +meta_kms_buffer_new_from_gbm (MetaGpuKms *gpu_kms, + struct gbm_surface *gbm_surface, + gboolean use_modifiers, + GError **error) +{ + MetaKmsBuffer *kms_buffer; + + g_return_val_if_fail (META_IS_GPU_KMS (gpu_kms), NULL); + g_return_val_if_fail (error == NULL || *error == NULL, NULL); + + kms_buffer = g_object_new (META_TYPE_KMS_BUFFER, NULL); + kms_buffer->type = META_KMS_BUFFER_TYPE_GBM; + 
kms_buffer->gbm.gpu_kms = gpu_kms; + kms_buffer->gbm.surface = gbm_surface; + + if (!meta_kms_buffer_acquire_swapped_buffer (kms_buffer, + use_modifiers, + error)) + { + g_object_unref (kms_buffer); + return NULL; + } + + return kms_buffer; +} + +MetaKmsBuffer * +meta_kms_buffer_new_from_dumb (uint32_t dumb_fb_id) +{ + MetaKmsBuffer *kms_buffer; + + kms_buffer = g_object_new (META_TYPE_KMS_BUFFER, NULL); + kms_buffer->type = META_KMS_BUFFER_TYPE_WRAPPED_DUMB; + kms_buffer->wrapped_dumb.fb_id = dumb_fb_id; + + return kms_buffer; +} + +uint32_t +meta_kms_buffer_get_fb_id (const MetaKmsBuffer *kms_buffer) +{ + g_return_val_if_fail (kms_buffer != NULL, INVALID_FB_ID); + + return kms_buffer->fb_id; +} + +struct gbm_bo * +meta_kms_buffer_get_bo (const MetaKmsBuffer *kms_buffer) +{ + g_return_val_if_fail (kms_buffer != NULL, NULL); + g_return_val_if_fail (kms_buffer->type == META_KMS_BUFFER_TYPE_GBM, NULL); + + return kms_buffer->gbm.bo; +} + +static void +meta_kms_buffer_init (MetaKmsBuffer *kms_buffer) +{ +} + +static void +meta_kms_buffer_finalize (GObject *object) +{ + MetaKmsBuffer *kms_buffer = META_KMS_BUFFER (object); + + if (kms_buffer->type == META_KMS_BUFFER_TYPE_GBM) + { + if (kms_buffer->gbm.gpu_kms != NULL && + kms_buffer->gbm.fb_id != INVALID_FB_ID) + { + int drm_fd = meta_gpu_kms_get_fd (kms_buffer->gbm.gpu_kms); + + drmModeRmFB (drm_fd, kms_buffer->fb_id); + kms_buffer->fb_id = INVALID_FB_ID; + } + + if (kms_buffer->gbm.surface && + kms_buffer->gbm.bo) + { + gbm_surface_release_buffer (kms_buffer->gbm.surface, + kms_buffer->gbm.bo); + } + } + + G_OBJECT_CLASS (meta_kms_buffer_parent_class)->finalize (object); +} + +static void +meta_kms_buffer_class_init (MetaKmsBufferClass *klass) +{ + GObjectClass *object_class = G_OBJECT_CLASS (klass); + + object_class->finalize = meta_kms_buffer_finalize; +} diff --git a/src/backends/native/meta-kms-buffer.h b/src/backends/native/meta-kms-buffer.h new file mode 100644 index 00000000000..74d02d54323 --- /dev/null +++ 
b/src/backends/native/meta-kms-buffer.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2018 Canonical Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + * + * Author: Daniel van Vugt + */ + +#ifndef META_KMS_BUFFER_H +#define META_KMS_BUFFER_H + +#include +#include + +#include "backends/native/meta-gpu-kms.h" + +#define META_TYPE_KMS_BUFFER (meta_kms_buffer_get_type ()) +G_DECLARE_FINAL_TYPE (MetaKmsBuffer, + meta_kms_buffer, + META, + KMS_BUFFER, + GObject) + +MetaKmsBuffer * +meta_kms_buffer_new_from_gbm (MetaGpuKms *gpu_kms, + struct gbm_surface *gbm_surface, + gboolean use_modifiers, + GError **error); + +MetaKmsBuffer * +meta_kms_buffer_new_from_dumb (uint32_t dumb_fb_id); + +uint32_t meta_kms_buffer_get_fb_id (const MetaKmsBuffer *kms_buffer); + +struct gbm_bo *meta_kms_buffer_get_bo (const MetaKmsBuffer *kms_buffer); + +#endif /* META_KMS_BUFFER_H */ diff --git a/src/backends/native/meta-renderer-native.c b/src/backends/native/meta-renderer-native.c index 771ca0872af..8c3275b1eff 100644 --- a/src/backends/native/meta-renderer-native.c +++ b/src/backends/native/meta-renderer-native.c @@ -59,6 +59,7 @@ #include "backends/meta-renderer-view.h" #include "backends/native/meta-crtc-kms.h" #include "backends/native/meta-gpu-kms.h" +#include "backends/native/meta-kms-buffer.h" #include 
"backends/native/meta-monitor-manager-kms.h" #include "backends/native/meta-renderer-native-gles3.h" #include "backends/native/meta-renderer-native.h" @@ -146,10 +147,8 @@ typedef struct _MetaOnscreenNativeSecondaryGpuState struct { struct gbm_surface *surface; - uint32_t current_fb_id; - uint32_t next_fb_id; - struct gbm_bo *current_bo; - struct gbm_bo *next_bo; + MetaKmsBuffer *current_fb; + MetaKmsBuffer *next_fb; } gbm; struct { @@ -170,10 +169,8 @@ typedef struct _MetaOnscreenNative struct { struct gbm_surface *surface; - uint32_t current_fb_id; - uint32_t next_fb_id; - struct gbm_bo *current_bo; - struct gbm_bo *next_bo; + MetaKmsBuffer *current_fb; + MetaKmsBuffer *next_fb; } gbm; #ifdef HAVE_EGL_DEVICE @@ -961,25 +958,12 @@ free_current_secondary_bo (MetaGpuKms *gpu_kms, MetaOnscreenNativeSecondaryGpuState *secondary_gpu_state) { MetaRendererNativeGpuData *renderer_gpu_data; - int kms_fd; - - kms_fd = meta_gpu_kms_get_fd (gpu_kms); renderer_gpu_data = secondary_gpu_state->renderer_gpu_data; switch (renderer_gpu_data->secondary.copy_mode) { case META_SHARED_FRAMEBUFFER_COPY_MODE_GPU: - if (secondary_gpu_state->gbm.current_fb_id) - { - drmModeRmFB (kms_fd, secondary_gpu_state->gbm.current_fb_id); - secondary_gpu_state->gbm.current_fb_id = 0; - } - if (secondary_gpu_state->gbm.current_bo) - { - gbm_surface_release_buffer (secondary_gpu_state->gbm.surface, - secondary_gpu_state->gbm.current_bo); - secondary_gpu_state->gbm.current_bo = NULL; - } + g_clear_object (&secondary_gpu_state->gbm.current_fb); break; case META_SHARED_FRAMEBUFFER_COPY_MODE_CPU: break; @@ -991,22 +975,8 @@ free_current_bo (CoglOnscreen *onscreen) { CoglOnscreenEGL *onscreen_egl = onscreen->winsys; MetaOnscreenNative *onscreen_native = onscreen_egl->platform; - MetaGpuKms *render_gpu = onscreen_native->render_gpu; - int kms_fd; - - kms_fd = meta_gpu_kms_get_fd (render_gpu); - if (onscreen_native->gbm.current_fb_id) - { - drmModeRmFB (kms_fd, onscreen_native->gbm.current_fb_id); - 
onscreen_native->gbm.current_fb_id = 0; - } - if (onscreen_native->gbm.current_bo) - { - gbm_surface_release_buffer (onscreen_native->gbm.surface, - onscreen_native->gbm.current_bo); - onscreen_native->gbm.current_bo = NULL; - } + g_clear_object (&onscreen_native->gbm.current_fb); g_hash_table_foreach (onscreen_native->secondary_gpu_states, (GHFunc) free_current_secondary_bo, @@ -1300,11 +1270,9 @@ static void swap_secondary_drm_fb (MetaGpuKms *gpu_kms, MetaOnscreenNativeSecondaryGpuState *secondary_gpu_state) { - secondary_gpu_state->gbm.current_fb_id = secondary_gpu_state->gbm.next_fb_id; - secondary_gpu_state->gbm.next_fb_id = 0; - - secondary_gpu_state->gbm.current_bo = secondary_gpu_state->gbm.next_bo; - secondary_gpu_state->gbm.next_bo = NULL; + g_set_object (&secondary_gpu_state->gbm.current_fb, + secondary_gpu_state->gbm.next_fb); + g_clear_object (&secondary_gpu_state->gbm.next_fb); } static void @@ -1315,11 +1283,8 @@ meta_onscreen_native_swap_drm_fb (CoglOnscreen *onscreen) free_current_bo (onscreen); - onscreen_native->gbm.current_fb_id = onscreen_native->gbm.next_fb_id; - onscreen_native->gbm.next_fb_id = 0; - - onscreen_native->gbm.current_bo = onscreen_native->gbm.next_bo; - onscreen_native->gbm.next_bo = NULL; + g_set_object (&onscreen_native->gbm.current_fb, onscreen_native->gbm.next_fb); + g_clear_object (&onscreen_native->gbm.next_fb); g_hash_table_foreach (onscreen_native->secondary_gpu_states, (GHFunc) swap_secondary_drm_fb, @@ -1403,17 +1368,7 @@ free_next_secondary_bo (MetaGpuKms *gpu_kms, switch (renderer_gpu_data->secondary.copy_mode) { case META_SHARED_FRAMEBUFFER_COPY_MODE_GPU: - if (secondary_gpu_state->gbm.next_fb_id) - { - int kms_fd; - - kms_fd = meta_gpu_kms_get_fd (gpu_kms); - drmModeRmFB (kms_fd, secondary_gpu_state->gbm.next_fb_id); - gbm_surface_release_buffer (secondary_gpu_state->gbm.surface, - secondary_gpu_state->gbm.next_bo); - secondary_gpu_state->gbm.next_fb_id = 0; - secondary_gpu_state->gbm.next_bo = NULL; - } + 
g_clear_object (&secondary_gpu_state->gbm.next_fb); break; case META_SHARED_FRAMEBUFFER_COPY_MODE_CPU: break; @@ -1438,17 +1393,7 @@ flip_closure_destroyed (MetaRendererView *view) switch (renderer_gpu_data->mode) { case META_RENDERER_NATIVE_MODE_GBM: - if (onscreen_native->gbm.next_fb_id) - { - int kms_fd; - - kms_fd = meta_gpu_kms_get_fd (render_gpu); - drmModeRmFB (kms_fd, onscreen_native->gbm.next_fb_id); - gbm_surface_release_buffer (onscreen_native->gbm.surface, - onscreen_native->gbm.next_bo); - onscreen_native->gbm.next_bo = NULL; - onscreen_native->gbm.next_fb_id = 0; - } + g_clear_object (&onscreen_native->gbm.next_fb); g_hash_table_foreach (onscreen_native->secondary_gpu_states, (GHFunc) free_next_secondary_bo, @@ -1739,12 +1684,12 @@ meta_onscreen_native_flip_crtc (CoglOnscreen *onscreen, case META_RENDERER_NATIVE_MODE_GBM: if (gpu_kms == render_gpu) { - fb_id = onscreen_native->gbm.next_fb_id; + fb_id = meta_kms_buffer_get_fb_id (onscreen_native->gbm.next_fb); } else { secondary_gpu_state = get_secondary_gpu_state (onscreen, gpu_kms); - fb_id = secondary_gpu_state->gbm.next_fb_id; + fb_id = meta_kms_buffer_get_fb_id (secondary_gpu_state->gbm.next_fb); } if (!meta_gpu_kms_flip_crtc (gpu_kms, @@ -1810,7 +1755,7 @@ set_crtc_fb (CoglOnscreen *onscreen, if (!secondary_gpu_state) return; - fb_id = secondary_gpu_state->gbm.next_fb_id; + fb_id = meta_kms_buffer_get_fb_id (secondary_gpu_state->gbm.next_fb); } x = crtc->rect.x - logical_monitor->rect.x; @@ -1853,7 +1798,7 @@ meta_onscreen_native_set_crtc_modes (CoglOnscreen *onscreen) switch (renderer_gpu_data->mode) { case META_RENDERER_NATIVE_MODE_GBM: - fb_id = onscreen_native->gbm.next_fb_id; + fb_id = meta_kms_buffer_get_fb_id (onscreen_native->gbm.next_fb); break; #ifdef HAVE_EGL_DEVICE case META_RENDERER_NATIVE_MODE_EGL_DEVICE: @@ -1921,7 +1866,7 @@ crtc_mode_set_fallback (CoglOnscreen *onscreen, return FALSE; } - fb_id = onscreen_native->gbm.next_fb_id; + fb_id = meta_kms_buffer_get_fb_id 
(onscreen_native->gbm.next_fb); set_crtc_fb (onscreen, logical_monitor, crtc, fb_id); return TRUE; } @@ -2012,101 +1957,6 @@ meta_onscreen_native_flip_crtcs (CoglOnscreen *onscreen) g_closure_unref (flip_closure); } -static gboolean -gbm_get_next_fb_id (MetaGpuKms *gpu_kms, - struct gbm_surface *gbm_surface, - struct gbm_bo **out_next_bo, - uint32_t *out_next_fb_id) -{ - MetaRendererNative *renderer_native = meta_renderer_native_from_gpu (gpu_kms); - struct gbm_bo *next_bo; - uint32_t next_fb_id; - int kms_fd; - uint32_t handles[4] = { 0, }; - uint32_t strides[4] = { 0, }; - uint32_t offsets[4] = { 0, }; - uint64_t modifiers[4] = { 0, }; - int i; - - /* Now we need to set the CRTC to whatever is the front buffer */ - next_bo = gbm_surface_lock_front_buffer (gbm_surface); - - if (!next_bo) - { - g_error ("Impossible to lock surface front buffer: %m"); - return FALSE; - } - - if (gbm_bo_get_handle_for_plane (next_bo, 0).s32 == -1) - { - /* Failed to fetch handle to plane, falling back to old method */ - strides[0] = gbm_bo_get_stride (next_bo); - handles[0] = gbm_bo_get_handle (next_bo).u32; - offsets[0] = 0; - modifiers[0] = DRM_FORMAT_MOD_INVALID; - } - else - { - for (i = 0; i < gbm_bo_get_plane_count (next_bo); i++) - { - strides[i] = gbm_bo_get_stride_for_plane (next_bo, i); - handles[i] = gbm_bo_get_handle_for_plane (next_bo, i).u32; - offsets[i] = gbm_bo_get_offset (next_bo, i); - modifiers[i] = gbm_bo_get_modifier (next_bo); - } - } - - kms_fd = meta_gpu_kms_get_fd (gpu_kms); - - if (renderer_native->use_modifiers && - modifiers[0] != DRM_FORMAT_MOD_INVALID) - { - if (drmModeAddFB2WithModifiers (kms_fd, - gbm_bo_get_width (next_bo), - gbm_bo_get_height (next_bo), - gbm_bo_get_format (next_bo), - handles, - strides, - offsets, - modifiers, - &next_fb_id, - DRM_MODE_FB_MODIFIERS)) - { - g_warning ("Failed to create new back buffer handle: %m"); - gbm_surface_release_buffer (gbm_surface, next_bo); - return FALSE; - } - } - else if (drmModeAddFB2 (kms_fd, - 
gbm_bo_get_width (next_bo), - gbm_bo_get_height (next_bo), - gbm_bo_get_format (next_bo), - handles, - strides, - offsets, - &next_fb_id, - 0)) - { - if (drmModeAddFB (kms_fd, - gbm_bo_get_width (next_bo), - gbm_bo_get_height (next_bo), - 24, /* depth */ - 32, /* bpp */ - strides[0], - handles[0], - &next_fb_id)) - { - g_warning ("Failed to create new back buffer handle: %m"); - gbm_surface_release_buffer (gbm_surface, next_bo); - return FALSE; - } - } - - *out_next_bo = next_bo; - *out_next_fb_id = next_fb_id; - return TRUE; -} - static void wait_for_pending_flips (CoglOnscreen *onscreen) { @@ -2159,7 +2009,7 @@ copy_shared_framebuffer_gpu (CoglOnscreen *onscreen, renderer_gpu_data->egl_display, renderer_gpu_data->secondary.egl_context, secondary_gpu_state->egl_surface, - onscreen_native->gbm.next_bo, + meta_kms_buffer_get_bo (onscreen_native->gbm.next_fb), &error)) { g_warning ("Failed to blit shared framebuffer: %s", error->message); @@ -2177,10 +2027,19 @@ copy_shared_framebuffer_gpu (CoglOnscreen *onscreen, return; } - gbm_get_next_fb_id (secondary_gpu_state->gpu_kms, - secondary_gpu_state->gbm.surface, - &secondary_gpu_state->gbm.next_bo, - &secondary_gpu_state->gbm.next_fb_id); + g_clear_object (&secondary_gpu_state->gbm.next_fb); + secondary_gpu_state->gbm.next_fb = + meta_kms_buffer_new_from_gbm (secondary_gpu_state->gpu_kms, + secondary_gpu_state->gbm.surface, + renderer_native->use_modifiers, + &error); + if (!secondary_gpu_state->gbm.next_fb) + { + g_warning ("meta_kms_buffer_new_from_gbm failed: %s", + error->message); + g_error_free (error); + return; + } } typedef struct _PixelFormatMap { @@ -2299,7 +2158,9 @@ copy_shared_framebuffer_cpu (CoglOnscreen *onscreen, cogl_object_unref (dumb_bitmap); - secondary_gpu_state->gbm.next_fb_id = target_fb_id; + g_clear_object (&secondary_gpu_state->gbm.next_fb); + secondary_gpu_state->gbm.next_fb = + meta_kms_buffer_new_from_dumb (target_fb_id); } static void @@ -2383,6 +2244,7 @@ 
meta_onscreen_native_swap_buffers_with_damage (CoglOnscreen *onscreen, MetaGpuKms *render_gpu = onscreen_native->render_gpu; CoglFrameInfo *frame_info; gboolean egl_context_changed = FALSE; + g_autoptr (GError) error = NULL; /* * Wait for the flip callback before continuing, as we might have started the @@ -2404,14 +2266,19 @@ meta_onscreen_native_swap_buffers_with_damage (CoglOnscreen *onscreen, switch (renderer_gpu_data->mode) { case META_RENDERER_NATIVE_MODE_GBM: - g_warn_if_fail (onscreen_native->gbm.next_bo == NULL && - onscreen_native->gbm.next_fb_id == 0); - - if (!gbm_get_next_fb_id (render_gpu, - onscreen_native->gbm.surface, - &onscreen_native->gbm.next_bo, - &onscreen_native->gbm.next_fb_id)) - return; + g_warn_if_fail (onscreen_native->gbm.next_fb == NULL); + g_clear_object (&onscreen_native->gbm.next_fb); + onscreen_native->gbm.next_fb = + meta_kms_buffer_new_from_gbm (render_gpu, + onscreen_native->gbm.surface, + renderer_native->use_modifiers, + &error); + if (!onscreen_native->gbm.next_fb) + { + g_warning ("meta_kms_buffer_new_from_gbm failed: %s", + error->message); + return; + } break; #ifdef HAVE_EGL_DEVICE @@ -2992,7 +2859,7 @@ meta_renderer_native_release_onscreen (CoglOnscreen *onscreen) case META_RENDERER_NATIVE_MODE_GBM: /* flip state takes a reference on the onscreen so there should * never be outstanding flips when we reach here. 
*/ - g_return_if_fail (onscreen_native->gbm.next_fb_id == 0); + g_return_if_fail (onscreen_native->gbm.next_fb == NULL); free_current_bo (onscreen); diff --git a/src/meson.build b/src/meson.build index 8779c956ed5..5fd8c134724 100644 --- a/src/meson.build +++ b/src/meson.build @@ -564,6 +564,8 @@ if have_native_backend 'backends/native/meta-gpu-kms.h', 'backends/native/meta-input-settings-native.c', 'backends/native/meta-input-settings-native.h', + 'backends/native/meta-kms-buffer.c', + 'backends/native/meta-kms-buffer.h', 'backends/native/meta-launcher.c', 'backends/native/meta-launcher.h', 'backends/native/meta-monitor-manager-kms.c', -- GitLab From ce0834ba9515ffbb4640e7dc3c0a49e16220f40e Mon Sep 17 00:00:00 2001 From: Daniel van Vugt Date: Fri, 24 Aug 2018 15:50:17 +0800 Subject: [PATCH 2/4] renderer-native: Count pending CRTCs per flip attempt Instead of using a global counter. This is necessary to support the future possibility of having multiple flips pending concurrently, so as to support multiple monitors each at full speed to eventually solve https://gitlab.gnome.org/GNOME/mutter/issues/3. 
--- src/backends/native/meta-renderer-native.c | 48 +++++++++++++++------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/src/backends/native/meta-renderer-native.c b/src/backends/native/meta-renderer-native.c index 8c3275b1eff..1d6a55383dc 100644 --- a/src/backends/native/meta-renderer-native.c +++ b/src/backends/native/meta-renderer-native.c @@ -159,6 +159,12 @@ typedef struct _MetaOnscreenNativeSecondaryGpuState int pending_flips; } MetaOnscreenNativeSecondaryGpuState; +typedef struct _ConcurrentFlip +{ + MetaRendererView *view; + int pending_crtcs_count; +} ConcurrentFlip; + typedef struct _MetaOnscreenNative { MetaRendererNative *renderer_native; @@ -193,7 +199,7 @@ typedef struct _MetaOnscreenNative GSource *retry_page_flips_source; MetaRendererView *view; - int total_pending_flips; + ConcurrentFlip *last_flip; } MetaOnscreenNative; struct _MetaRendererNative @@ -1292,12 +1298,13 @@ meta_onscreen_native_swap_drm_fb (CoglOnscreen *onscreen) } static void -on_crtc_flipped (GClosure *closure, - MetaGpuKms *gpu_kms, - MetaCrtc *crtc, - int64_t page_flip_time_ns, - MetaRendererView *view) +on_crtc_flipped (GClosure *closure, + MetaGpuKms *gpu_kms, + MetaCrtc *crtc, + int64_t page_flip_time_ns, + ConcurrentFlip *flip) { + MetaRendererView *view = flip->view; ClutterStageView *stage_view = CLUTTER_STAGE_VIEW (view); CoglFramebuffer *framebuffer = clutter_stage_view_get_onscreen (stage_view); @@ -1333,8 +1340,8 @@ on_crtc_flipped (GClosure *closure, secondary_gpu_state->pending_flips--; } - onscreen_native->total_pending_flips--; - if (onscreen_native->total_pending_flips == 0) + flip->pending_crtcs_count--; + if (flip->pending_crtcs_count == 0) { MetaRendererNativeGpuData *renderer_gpu_data; @@ -1376,8 +1383,9 @@ free_next_secondary_bo (MetaGpuKms *gpu_kms, } static void -flip_closure_destroyed (MetaRendererView *view) +flip_closure_destroyed (ConcurrentFlip *flip) { + MetaRendererView *view = flip->view; ClutterStageView *stage_view = 
CLUTTER_STAGE_VIEW (view); CoglFramebuffer *framebuffer = clutter_stage_view_get_onscreen (stage_view); @@ -1412,7 +1420,11 @@ flip_closure_destroyed (MetaRendererView *view) onscreen_native->pending_queue_swap_notify = FALSE; } - g_object_unref (view); + if (onscreen_native->last_flip == flip) + onscreen_native->last_flip = NULL; + + g_object_unref (flip->view); + g_free (flip); } #ifdef HAVE_EGL_DEVICE @@ -1552,7 +1564,7 @@ retry_page_flips (gpointer user_data) secondary_gpu_state->pending_flips--; } - onscreen_native->total_pending_flips--; + onscreen_native->last_flip->pending_crtcs_count--; } retry_page_flip_data_free (retry_page_flip_data); @@ -1712,7 +1724,7 @@ meta_onscreen_native_flip_crtc (CoglOnscreen *onscreen, } } - onscreen_native->total_pending_flips++; + onscreen_native->last_flip->pending_crtcs_count++; if (secondary_gpu_state) secondary_gpu_state->pending_flips++; @@ -1721,7 +1733,7 @@ meta_onscreen_native_flip_crtc (CoglOnscreen *onscreen, case META_RENDERER_NATIVE_MODE_EGL_DEVICE: if (flip_egl_stream (onscreen_native, flip_closure)) - onscreen_native->total_pending_flips++; + onscreen_native->last_flip->pending_crtcs_count++; break; #endif } @@ -1918,6 +1930,11 @@ meta_onscreen_native_flip_crtcs (CoglOnscreen *onscreen) MetaRendererView *view = onscreen_native->view; GClosure *flip_closure; MetaLogicalMonitor *logical_monitor; + ConcurrentFlip *flip; + + flip = g_new0 (ConcurrentFlip, 1); + flip->view = g_object_ref (view); + onscreen_native->last_flip = flip; /* * Create a closure that either will be invoked or destructed. @@ -1930,7 +1947,7 @@ meta_onscreen_native_flip_crtcs (CoglOnscreen *onscreen) * closure will be destructed before this function goes out of scope. 
*/ flip_closure = g_cclosure_new (G_CALLBACK (on_crtc_flipped), - g_object_ref (view), + flip, (GClosureNotify) flip_closure_destroyed); g_closure_set_marshal (flip_closure, meta_marshal_VOID__OBJECT_OBJECT_INT64); @@ -1974,7 +1991,8 @@ wait_for_pending_flips (CoglOnscreen *onscreen) meta_gpu_kms_wait_for_flip (secondary_gpu_state->gpu_kms, NULL); } - while (onscreen_native->total_pending_flips) + while (onscreen_native->last_flip && + onscreen_native->last_flip->pending_crtcs_count > 0) meta_gpu_kms_wait_for_flip (onscreen_native->render_gpu, NULL); } -- GitLab From 03bae11967821cff39301ace17999cbf25ef97f0 Mon Sep 17 00:00:00 2001 From: Daniel van Vugt Date: Fri, 30 Nov 2018 16:07:32 +0800 Subject: [PATCH 3/4] clutter-master-clock: Check for CLUTTER_FEATURE_SWAP_EVENTS Superficially `CLUTTER_FEATURE_SWAP_EVENTS` might sound like it implies `CLUTTER_FEATURE_SWAP_THROTTLE` because it allows the master clock to implement another throttling mechanism. However `SWAP_THROTTLE` actually means the backend will block and therefore provides implicit throttling. So we need to check for both flags because they're potentially independent throttling mechanisms and may not be available simultaneously in future after fixing https://gitlab.gnome.org/GNOME/mutter/issues/3. 
--- clutter/clutter/clutter-master-clock-default.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clutter/clutter/clutter-master-clock-default.c b/clutter/clutter/clutter-master-clock-default.c index a0909ff95a4..f173ee80624 100644 --- a/clutter/clutter/clutter-master-clock-default.c +++ b/clutter/clutter/clutter-master-clock-default.c @@ -298,7 +298,8 @@ master_clock_next_frame_delay (ClutterMasterClockDefault *master_clock) * (NB: if there aren't even any timelines running then the master clock will * be completely stopped in master_clock_is_running()) */ - if (clutter_feature_available (CLUTTER_FEATURE_SWAP_THROTTLE) && + if ((clutter_feature_available (CLUTTER_FEATURE_SWAP_THROTTLE) || + clutter_feature_available (CLUTTER_FEATURE_SWAP_EVENTS)) && !master_clock->idle) { CLUTTER_NOTE (SCHEDULER, "swap throttling available and updated stages"); -- GitLab From 202530c9f63637da2ab91c83ac6a82086aef78d0 Mon Sep 17 00:00:00 2001 From: Daniel van Vugt Date: Tue, 30 Jan 2018 17:47:17 +0800 Subject: [PATCH 4/4] renderer-native: Accept frames without ever blocking CRTCs now hold references to front buffer objects for the duration they are required for scanouts. This allows for automatic release of a front buffer object at exactly the right time without needing to block on explicit synchronization. So `wait_for_pending_flips` is now removed. In modern Vulkan terminology this new algorithm is the *mailbox* presentation mode and the old algorithm was *FIFO* mode. Multi-monitor performance benefits the most from this change. Previously for identical monitors mutter would be blocked around 50% of the time on average (worst case 100%, best case 0%). For monitors of different frequencies the average could easily be higher than 50%. However this patch makes that figure 0% in all cases, never blocking. 
CPU/GPU frequency scaling should also behave better (go higher when required) now as we're no longer blocking accidentally in the wrong place and fooling the kernel into thinking we're intentionally idle. Where previously mutter could only render at the frequency of the slowest monitor, it now renders at the frequency of the fastest. For the slower monitors they just get whatever the latest frame was according to the rate of the fastest. So we no longer starve any monitor of new frames and they are all able to update at their maximum frequencies. Fixes: https://gitlab.gnome.org/GNOME/mutter/issues/3 --- src/backends/native/meta-crtc-kms.c | 21 ++ src/backends/native/meta-crtc-kms.h | 10 + src/backends/native/meta-gpu-kms.c | 231 +++++++++--- src/backends/native/meta-gpu-kms.h | 28 +- src/backends/native/meta-renderer-native.c | 388 ++++++--------------- src/meta-marshal.list | 2 +- 6 files changed, 348 insertions(+), 332 deletions(-) diff --git a/src/backends/native/meta-crtc-kms.c b/src/backends/native/meta-crtc-kms.c index 79a9b1e769b..56c0d206b45 100644 --- a/src/backends/native/meta-crtc-kms.c +++ b/src/backends/native/meta-crtc-kms.c @@ -55,6 +55,8 @@ typedef struct _MetaCrtcKms * value: owned GArray* (uint64_t modifier), or NULL */ GHashTable *formats_modifiers; + + MetaCrtcKmsScanouts scanouts; } MetaCrtcKms; /** @@ -292,6 +294,14 @@ meta_crtc_kms_supports_format (MetaCrtc *crtc, NULL); } +MetaCrtcKmsScanouts * +meta_crtc_kms_get_scanouts (MetaCrtc *crtc) +{ + MetaCrtcKms *crtc_kms = crtc->driver_private; + + return &crtc_kms->scanouts; +} + static inline uint32_t * formats_ptr (struct drm_format_modifier_blob *blob) { @@ -575,7 +585,18 @@ meta_crtc_destroy_notify (MetaCrtc *crtc) { MetaCrtcKms *crtc_kms = crtc->driver_private; + if (crtc_kms->scanouts.next_closure) + { + g_closure_unref (crtc_kms->scanouts.next_closure); + crtc_kms->scanouts.next_closure = NULL; + } + + g_clear_object (&crtc_kms->scanouts.next); + g_clear_object 
(&crtc_kms->scanouts.current); + g_clear_object (&crtc_kms->scanouts.previous); + g_hash_table_destroy (crtc_kms->formats_modifiers); + g_free (crtc->driver_private); } diff --git a/src/backends/native/meta-crtc-kms.h b/src/backends/native/meta-crtc-kms.h index 622ea5502c2..7c9acceeb56 100644 --- a/src/backends/native/meta-crtc-kms.h +++ b/src/backends/native/meta-crtc-kms.h @@ -30,6 +30,14 @@ #include "backends/meta-crtc.h" #include "backends/native/meta-gpu-kms.h" +typedef struct _MetaCrtcKmsScanouts +{ + /* These fields are maintained by MetaGpuKms on a per-CRTC basis. */ + MetaKmsBuffer *previous, *current, *next; + GClosure *next_closure; + int next_x, next_y; +} MetaCrtcKmsScanouts; + typedef struct _MetaDrmFormatBuf { char s[5]; @@ -61,4 +69,6 @@ MetaCrtc * meta_create_kms_crtc (MetaGpuKms *gpu_kms, drmModeCrtc *drm_crtc, unsigned int crtc_index); +MetaCrtcKmsScanouts * meta_crtc_kms_get_scanouts (MetaCrtc *crtc); + #endif /* META_CRTC_KMS_H */ diff --git a/src/backends/native/meta-gpu-kms.c b/src/backends/native/meta-gpu-kms.c index d924466f862..4ced5fc5bea 100644 --- a/src/backends/native/meta-gpu-kms.c +++ b/src/backends/native/meta-gpu-kms.c @@ -135,18 +135,74 @@ get_crtc_drm_connectors (MetaGpu *gpu, *connectors = (uint32_t *) g_array_free (connectors_array, FALSE); } +static void +invoke_flip_closure (GClosure *flip_closure, + MetaGpuKms *gpu_kms, + MetaCrtc *crtc, + int64_t page_flip_time_ns) +{ + GValue params[] = { + G_VALUE_INIT, + G_VALUE_INIT, + G_VALUE_INIT, + G_VALUE_INIT, + G_VALUE_INIT, + }; + + g_value_init (¶ms[0], G_TYPE_POINTER); + g_value_set_pointer (¶ms[0], flip_closure); + g_value_init (¶ms[1], G_TYPE_OBJECT); + g_value_set_object (¶ms[1], gpu_kms); + g_value_init (¶ms[2], G_TYPE_OBJECT); + g_value_set_object (¶ms[2], crtc); + g_value_init (¶ms[3], G_TYPE_BOOLEAN); + g_value_set_boolean (¶ms[3], TRUE); + g_value_init (¶ms[4], G_TYPE_INT64); + g_value_set_int64 (¶ms[4], page_flip_time_ns); + g_closure_invoke (flip_closure, NULL, 5, 
params, NULL); + g_closure_unref (flip_closure); +} + +static void +invoke_dropped_flip_closure (GClosure *flip_closure, + MetaGpuKms *gpu_kms, + MetaCrtc *crtc) +{ + GValue params[] = { + G_VALUE_INIT, + G_VALUE_INIT, + G_VALUE_INIT, + G_VALUE_INIT, + G_VALUE_INIT, + }; + + g_value_init (¶ms[0], G_TYPE_POINTER); + g_value_set_pointer (¶ms[0], flip_closure); + g_value_init (¶ms[1], G_TYPE_OBJECT); + g_value_set_object (¶ms[1], gpu_kms); + g_value_init (¶ms[2], G_TYPE_OBJECT); + g_value_set_object (¶ms[2], crtc); + g_value_init (¶ms[3], G_TYPE_BOOLEAN); + g_value_set_boolean (¶ms[3], FALSE); + g_value_init (¶ms[4], G_TYPE_INT64); + g_closure_invoke (flip_closure, NULL, 5, params, NULL); + g_closure_unref (flip_closure); +} + gboolean -meta_gpu_kms_apply_crtc_mode (MetaGpuKms *gpu_kms, - MetaCrtc *crtc, - int x, - int y, - uint32_t fb_id) +meta_gpu_kms_apply_crtc_mode (MetaGpuKms *gpu_kms, + MetaCrtc *crtc, + int x, + int y, + MetaKmsBuffer *kms_fb) { MetaGpu *gpu = meta_crtc_get_gpu (crtc); int kms_fd = meta_gpu_kms_get_fd (gpu_kms); uint32_t *connectors; unsigned int n_connectors; drmModeModeInfo *mode; + MetaCrtcKmsScanouts *scanouts; + uint32_t fb_id = kms_fb ? 
meta_kms_buffer_get_fb_id (kms_fb) : 0; get_crtc_drm_connectors (gpu, crtc, &connectors, &n_connectors); @@ -170,35 +226,21 @@ meta_gpu_kms_apply_crtc_mode (MetaGpuKms *gpu_kms, return FALSE; } + scanouts = meta_crtc_kms_get_scanouts (crtc); + g_set_object (&scanouts->previous, scanouts->current); + g_set_object (&scanouts->current, kms_fb); + if (scanouts->next_closure) + { + invoke_dropped_flip_closure (scanouts->next_closure, gpu_kms, crtc); + scanouts->next_closure = NULL; + } + g_clear_object (&scanouts->next); + g_free (connectors); return TRUE; } -static void -invoke_flip_closure (GClosure *flip_closure, - MetaGpuKms *gpu_kms, - MetaCrtc *crtc, - int64_t page_flip_time_ns) -{ - GValue params[] = { - G_VALUE_INIT, - G_VALUE_INIT, - G_VALUE_INIT, - G_VALUE_INIT, - }; - - g_value_init (¶ms[0], G_TYPE_POINTER); - g_value_set_pointer (¶ms[0], flip_closure); - g_value_init (¶ms[1], G_TYPE_OBJECT); - g_value_set_object (¶ms[1], gpu_kms); - g_value_init (¶ms[2], G_TYPE_OBJECT); - g_value_set_object (¶ms[2], crtc); - g_value_init (¶ms[3], G_TYPE_INT64); - g_value_set_int64 (¶ms[3], page_flip_time_ns); - g_closure_invoke (flip_closure, NULL, 4, params, NULL); -} - gboolean meta_gpu_kms_is_crtc_active (MetaGpuKms *gpu_kms, MetaCrtc *crtc) @@ -247,27 +289,31 @@ meta_gpu_kms_wrap_flip_closure (MetaGpuKms *gpu_kms, .crtc = crtc }; + if (crtc) + g_object_ref (crtc); + return closure_container; } void meta_gpu_kms_flip_closure_container_free (MetaGpuKmsFlipClosureContainer *closure_container) { - g_closure_unref (closure_container->flip_closure); + g_clear_object (&closure_container->crtc); g_free (closure_container); } gboolean -meta_gpu_kms_flip_crtc (MetaGpuKms *gpu_kms, - MetaCrtc *crtc, - uint32_t fb_id, - GClosure *flip_closure, - GError **error) +meta_gpu_kms_flip_crtc (MetaGpuKms *gpu_kms, + MetaCrtc *crtc, + int x, + int y, + MetaKmsBuffer *kms_fb, + GClosure *flip_closure, + gboolean *fb_in_use) { MetaGpu *gpu = META_GPU (gpu_kms); MetaMonitorManager 
*monitor_manager = meta_gpu_get_monitor_manager (gpu); - MetaGpuKmsFlipClosureContainer *closure_container; - int kms_fd = meta_gpu_kms_get_fd (gpu_kms); + MetaCrtcKmsScanouts *scanouts = meta_crtc_kms_get_scanouts (crtc); uint32_t *connectors; unsigned int n_connectors; int ret = -1; @@ -276,14 +322,53 @@ meta_gpu_kms_flip_crtc (MetaGpuKms *gpu_kms, g_assert (monitor_manager->power_save_mode == META_POWER_SAVE_ON); get_crtc_drm_connectors (gpu, crtc, &connectors, &n_connectors); - g_assert (n_connectors > 0); g_free (connectors); - g_assert (fb_id != 0); + /* + * If a monitor was unplugged while we had a deferred frame (scanouts->next) + * then this may happen as we are called from page_flip_handler. But we + * can recover; just ignore the frame we can't display. The caller will + * free it. + */ + if (n_connectors == 0) + return FALSE; - closure_container = meta_gpu_kms_wrap_flip_closure (gpu_kms, - crtc, - flip_closure); + if (!gpu_kms->page_flips_not_supported) + { + MetaGpuKmsFlipClosureContainer *closure_container; + int kms_fd = meta_gpu_kms_get_fd (gpu_kms); + + closure_container = meta_gpu_kms_wrap_flip_closure (gpu_kms, + crtc, + flip_closure); + + ret = drmModePageFlip (kms_fd, + crtc->crtc_id, + meta_kms_buffer_get_fb_id (kms_fb), + DRM_MODE_PAGE_FLIP_EVENT, + closure_container); + if (ret == -EBUSY) + { + meta_gpu_kms_flip_closure_container_free (closure_container); + + /* Drop previously queued frame (if any) */ + g_set_object (&scanouts->next, kms_fb); + if (scanouts->next_closure) + invoke_dropped_flip_closure (scanouts->next_closure, gpu_kms, crtc); + scanouts->next_closure = g_closure_ref (flip_closure); + scanouts->next_x = x; + scanouts->next_y = y; + + *fb_in_use = TRUE; + return TRUE; + } + if (ret != 0 && ret != -EACCES) + { + meta_gpu_kms_flip_closure_container_free (closure_container); + g_warning ("Failed to flip: %s", strerror (-ret)); + gpu_kms->page_flips_not_supported = TRUE; + } + } ret = drmModePageFlip (kms_fd, crtc->crtc_id, @@ 
-292,12 +377,34 @@ meta_gpu_kms_flip_crtc (MetaGpuKms *gpu_kms, closure_container); if (ret != 0) { - meta_gpu_kms_flip_closure_container_free (closure_container); - g_set_error (error, G_IO_ERROR, - g_io_error_from_errno (-ret), - "drmModePageFlip failed: %s", g_strerror (-ret)); - return FALSE; + if (meta_gpu_kms_apply_crtc_mode (gpu_kms, crtc, x, y, kms_fb)) + { + *fb_in_use = TRUE; + return FALSE; + } + } + + if (ret != 0) + return FALSE; + + /* + * If scanouts->next is set then we won a race against MetaKmsSource before it + * could invoke page_flip_handler. That's OK because the frame we just + * scheduled is newer. Just make sure we drop that older frame which was + * queued. We no longer need to display it at all. + */ + if (scanouts->next_closure) + { + invoke_dropped_flip_closure (scanouts->next_closure, gpu_kms, crtc); + scanouts->next_closure = NULL; } + g_clear_object (&scanouts->next); + + g_set_object (&scanouts->previous, scanouts->current); + g_set_object (&scanouts->current, kms_fb); + + *fb_in_use = TRUE; + g_closure_ref (flip_closure); return TRUE; } @@ -330,11 +437,39 @@ page_flip_handler (int fd, GClosure *flip_closure = closure_container->flip_closure; MetaGpuKms *gpu_kms = closure_container->gpu_kms; struct timeval page_flip_time = {sec, usec}; + MetaCrtc *crtc = closure_container->crtc; invoke_flip_closure (flip_closure, gpu_kms, - closure_container->crtc, + crtc, timeval_to_nanoseconds (&page_flip_time)); + + if (crtc) + { + MetaCrtcKmsScanouts *scanouts = meta_crtc_kms_get_scanouts (crtc); + + if (scanouts->next) + { + gboolean fb_in_use; + MetaKmsBuffer *next_fb = scanouts->next; + GClosure *next_closure = scanouts->next_closure; + + scanouts->next = NULL; + scanouts->next_closure = NULL; + + meta_gpu_kms_flip_crtc (gpu_kms, + crtc, + scanouts->next_x, + scanouts->next_y, + next_fb, + next_closure, + &fb_in_use); + + g_object_unref (next_fb); + g_closure_unref (next_closure); + } + } + meta_gpu_kms_flip_closure_container_free
(closure_container); } diff --git a/src/backends/native/meta-gpu-kms.h b/src/backends/native/meta-gpu-kms.h index 1f7a939e276..3b5c6c4bacd 100644 --- a/src/backends/native/meta-gpu-kms.h +++ b/src/backends/native/meta-gpu-kms.h @@ -29,6 +29,7 @@ #include "backends/meta-gpu.h" #include "backends/native/meta-monitor-manager-kms.h" +#include "backends/native/meta-kms-buffer.h" #define META_TYPE_GPU_KMS (meta_gpu_kms_get_type ()) G_DECLARE_FINAL_TYPE (MetaGpuKms, meta_gpu_kms, META, GPU_KMS, MetaGpu) @@ -51,16 +52,21 @@ typedef enum _MetaGpuKmsFlag META_GPU_KMS_FLAG_PLATFORM_DEVICE = (1 << 1), } MetaGpuKmsFlag; +/* We can't include meta-kms-framebuffer.h due to a cyclic dependency */ +#ifndef META_TYPE_KMS_FRAMEBUFFER +typedef struct _MetaKmsBuffer MetaKmsBuffer; +#endif + MetaGpuKms * meta_gpu_kms_new (MetaMonitorManagerKms *monitor_manager_kms, const char *kms_file_path, MetaGpuKmsFlag flags, GError **error); -gboolean meta_gpu_kms_apply_crtc_mode (MetaGpuKms *gpu_kms, - MetaCrtc *crtc, - int x, - int y, - uint32_t fb_id); +gboolean meta_gpu_kms_apply_crtc_mode (MetaGpuKms *gpu_kms, + MetaCrtc *crtc, + int x, + int y, + MetaKmsBuffer *kms_fb); gboolean meta_gpu_kms_can_have_outputs (MetaGpuKms *gpu_kms); @@ -70,11 +76,13 @@ gboolean meta_gpu_kms_is_crtc_active (MetaGpuKms *gpu_kms, gboolean meta_gpu_kms_is_boot_vga (MetaGpuKms *gpu_kms); gboolean meta_gpu_kms_is_platform_device (MetaGpuKms *gpu_kms); -gboolean meta_gpu_kms_flip_crtc (MetaGpuKms *gpu_kms, - MetaCrtc *crtc, - uint32_t fb_id, - GClosure *flip_closure, - GError **error); +gboolean meta_gpu_kms_flip_crtc (MetaGpuKms *gpu_kms, + MetaCrtc *crtc, + int x, + int y, + MetaKmsBuffer *kms_fb, + GClosure *flip_closure, + gboolean *fb_in_use); gboolean meta_gpu_kms_wait_for_flip (MetaGpuKms *gpu_kms, GError **error); diff --git a/src/backends/native/meta-renderer-native.c b/src/backends/native/meta-renderer-native.c index 1d6a55383dc..87bca77807b 100644 --- a/src/backends/native/meta-renderer-native.c +++ 
b/src/backends/native/meta-renderer-native.c @@ -163,6 +163,7 @@ typedef struct _ConcurrentFlip { MetaRendererView *view; int pending_crtcs_count; + float max_refresh_rate; } ConcurrentFlip; typedef struct _MetaOnscreenNative @@ -175,8 +176,6 @@ typedef struct _MetaOnscreenNative struct { struct gbm_surface *surface; - MetaKmsBuffer *current_fb; - MetaKmsBuffer *next_fb; } gbm; #ifdef HAVE_EGL_DEVICE @@ -982,8 +981,6 @@ free_current_bo (CoglOnscreen *onscreen) CoglOnscreenEGL *onscreen_egl = onscreen->winsys; MetaOnscreenNative *onscreen_native = onscreen_egl->platform; - g_clear_object (&onscreen_native->gbm.current_fb); - g_hash_table_foreach (onscreen_native->secondary_gpu_states, (GHFunc) free_current_secondary_bo, NULL); @@ -1289,9 +1286,6 @@ meta_onscreen_native_swap_drm_fb (CoglOnscreen *onscreen) free_current_bo (onscreen); - g_set_object (&onscreen_native->gbm.current_fb, onscreen_native->gbm.next_fb); - g_clear_object (&onscreen_native->gbm.next_fb); - g_hash_table_foreach (onscreen_native->secondary_gpu_states, (GHFunc) swap_secondary_drm_fb, NULL); @@ -1301,6 +1295,7 @@ static void on_crtc_flipped (GClosure *closure, MetaGpuKms *gpu_kms, MetaCrtc *crtc, + gboolean flip_was_used, int64_t page_flip_time_ns, ConcurrentFlip *flip) { @@ -1326,10 +1321,19 @@ on_crtc_flipped (GClosure *closure, * fastest monitor(s) we direct it to produce new frames fast enough to * satisfy all monitors. */ - if (refresh_rate >= frame_info->refresh_rate) + if (refresh_rate >= frame_info->refresh_rate && + flip_was_used) { frame_info->presentation_time = page_flip_time_ns; frame_info->refresh_rate = refresh_rate; + + /* If this is the fastest expected output, or first of the equal fastest + * ones, then notify immediately. This is the perfect time to emit a + * swap notification and we don't want to wait for the others because + * doing so would hurt performance. 
+ */ + if (refresh_rate == flip->max_refresh_rate) + flip->pending_crtcs_count = 1; } if (gpu_kms != render_gpu) @@ -1401,8 +1405,6 @@ flip_closure_destroyed (ConcurrentFlip *flip) switch (renderer_gpu_data->mode) { case META_RENDERER_NATIVE_MODE_GBM: - g_clear_object (&onscreen_native->gbm.next_fb); - g_hash_table_foreach (onscreen_native->secondary_gpu_states, (GHFunc) free_next_secondary_bo, NULL); @@ -1498,179 +1500,13 @@ typedef struct _RetryPageFlipData } RetryPageFlipData; static void -retry_page_flip_data_free (RetryPageFlipData *retry_page_flip_data) -{ - g_closure_unref (retry_page_flip_data->flip_closure); - g_free (retry_page_flip_data); -} - -static gboolean -retry_page_flips (gpointer user_data) -{ - MetaOnscreenNative *onscreen_native = user_data; - uint64_t now_us; - GList *l; - - now_us = g_source_get_time (onscreen_native->retry_page_flips_source); - - l = onscreen_native->pending_page_flip_retries; - while (l) - { - RetryPageFlipData *retry_page_flip_data = l->data; - MetaCrtc *crtc = retry_page_flip_data->crtc; - MetaGpuKms *gpu_kms = META_GPU_KMS (meta_crtc_get_gpu (crtc)); - GList *l_next = l->next; - g_autoptr (GError) error = NULL; - gboolean did_flip; - - if (is_timestamp_earlier_than (now_us, - retry_page_flip_data->retry_time_us)) - { - l = l_next; - continue; - } - - did_flip = meta_gpu_kms_flip_crtc (gpu_kms, - crtc, - retry_page_flip_data->fb_id, - retry_page_flip_data->flip_closure, - &error); - if (!did_flip && - g_error_matches (error, G_IO_ERROR, G_IO_ERROR_BUSY)) - { - retry_page_flip_data->retry_time_us += - G_USEC_PER_SEC / crtc->current_mode->refresh_rate; - l = l_next; - continue; - } - - onscreen_native->pending_page_flip_retries = - g_list_remove_link (onscreen_native->pending_page_flip_retries, l); - - if (!did_flip) - { - if (!g_error_matches (error, - G_IO_ERROR, - G_IO_ERROR_PERMISSION_DENIED)) - g_critical ("Failed to page flip: %s", error->message); - - if (gpu_kms != onscreen_native->render_gpu) - { - 
MetaOnscreenNativeSecondaryGpuState *secondary_gpu_state; - - secondary_gpu_state = - meta_onscreen_native_get_secondary_gpu_state (onscreen_native, - gpu_kms); - secondary_gpu_state->pending_flips--; - } - - onscreen_native->last_flip->pending_crtcs_count--; - } - - retry_page_flip_data_free (retry_page_flip_data); - - l = l_next; - } - - if (onscreen_native->pending_page_flip_retries) - { - GList *l; - uint64_t earliest_retry_time_us = 0; - - for (l = onscreen_native->pending_page_flip_retries; l; l = l->next) - { - RetryPageFlipData *retry_page_flip_data = l->data; - - if (l == onscreen_native->pending_page_flip_retries || - is_timestamp_earlier_than (retry_page_flip_data->retry_time_us, - earliest_retry_time_us)) - earliest_retry_time_us = retry_page_flip_data->retry_time_us; - } - - g_source_set_ready_time (onscreen_native->retry_page_flips_source, - earliest_retry_time_us); - return G_SOURCE_CONTINUE; - } - else - { - g_clear_pointer (&onscreen_native->retry_page_flips_source, - g_source_unref); - return G_SOURCE_REMOVE; - } -} - -static gboolean -retry_page_flips_source_dispatch (GSource *source, - GSourceFunc callback, - gpointer user_data) -{ - return callback (user_data); -} - -static GSourceFuncs retry_page_flips_source_funcs = { - .dispatch = retry_page_flips_source_dispatch, -}; - -static void -schedule_retry_page_flip (MetaOnscreenNative *onscreen_native, - MetaCrtc *crtc, - uint32_t fb_id, - GClosure *flip_closure) -{ - RetryPageFlipData *retry_page_flip_data; - uint64_t now_us; - uint64_t retry_time_us; - - now_us = g_get_monotonic_time (); - retry_time_us = - now_us + (G_USEC_PER_SEC / crtc->current_mode->refresh_rate); - - retry_page_flip_data = g_new0 (RetryPageFlipData, 1); - retry_page_flip_data->crtc = crtc; - retry_page_flip_data->fb_id = fb_id; - retry_page_flip_data->flip_closure = g_closure_ref (flip_closure); - retry_page_flip_data->retry_time_us = retry_time_us; - - if (!onscreen_native->retry_page_flips_source) - { - GSource *source; - 
- source = g_source_new (&retry_page_flips_source_funcs, sizeof (GSource)); - g_source_set_callback (source, retry_page_flips, onscreen_native, NULL); - g_source_set_ready_time (source, retry_time_us); - g_source_attach (source, NULL); - - onscreen_native->retry_page_flips_source = source; - } - else - { - GList *l; - - for (l = onscreen_native->pending_page_flip_retries; l; l = l->next) - { - RetryPageFlipData *pending_retry_page_flip_data = l->data; - uint64_t pending_retry_time_us = - pending_retry_page_flip_data->retry_time_us; - - if (is_timestamp_earlier_than (retry_time_us, pending_retry_time_us)) - { - g_source_set_ready_time (onscreen_native->retry_page_flips_source, - retry_time_us); - break; - } - } - } - - onscreen_native->pending_page_flip_retries = - g_list_append (onscreen_native->pending_page_flip_retries, - retry_page_flip_data); -} - -static gboolean meta_onscreen_native_flip_crtc (CoglOnscreen *onscreen, GClosure *flip_closure, MetaCrtc *crtc, - GError **error) + int x, + int y, + MetaKmsBuffer *next_fb, + gboolean *fb_in_use) { CoglOnscreenEGL *onscreen_egl = onscreen->winsys; MetaOnscreenNative *onscreen_native = onscreen_egl->platform; @@ -1679,7 +1515,8 @@ meta_onscreen_native_flip_crtc (CoglOnscreen *onscreen, MetaRendererNativeGpuData *renderer_gpu_data; MetaGpuKms *gpu_kms; MetaOnscreenNativeSecondaryGpuState *secondary_gpu_state = NULL; - uint32_t fb_id; + MetaKmsBuffer *kms_fb; + float refresh_rate; gpu_kms = META_GPU_KMS (meta_crtc_get_gpu (crtc)); if (!meta_gpu_kms_is_crtc_active (gpu_kms, crtc)) @@ -1689,6 +1526,13 @@ meta_onscreen_native_flip_crtc (CoglOnscreen *onscreen, return FALSE; } + refresh_rate = crtc && crtc->current_mode ? 
+ crtc->current_mode->refresh_rate : + 0.0f; + + if (refresh_rate > onscreen_native->last_flip->max_refresh_rate) + onscreen_native->last_flip->max_refresh_rate = refresh_rate; + renderer_gpu_data = meta_renderer_native_get_gpu_data (renderer_native, render_gpu); switch (renderer_gpu_data->mode) @@ -1696,17 +1540,22 @@ meta_onscreen_native_flip_crtc (CoglOnscreen *onscreen, case META_RENDERER_NATIVE_MODE_GBM: if (gpu_kms == render_gpu) { - fb_id = meta_kms_buffer_get_fb_id (onscreen_native->gbm.next_fb); + kms_fb = next_fb; } else { secondary_gpu_state = get_secondary_gpu_state (onscreen, gpu_kms); - fb_id = meta_kms_buffer_get_fb_id (secondary_gpu_state->gbm.next_fb); + kms_fb = secondary_gpu_state->gbm.next_fb; } + /* Secondary GPU using EGL streams? How else can this happen? */ + if (!kms_fb) + return; + if (!meta_gpu_kms_flip_crtc (gpu_kms, crtc, - fb_id, + x, y, + kms_fb, flip_closure, error)) { @@ -1738,8 +1587,12 @@ meta_onscreen_native_flip_crtc (CoglOnscreen *onscreen, #endif } - return TRUE; -} +typedef struct _SetCrtcFbData +{ + MetaGpuKms *render_gpu; + CoglOnscreen *onscreen; + MetaKmsBuffer *kms_fb; +} SetCrtcFbData; static void set_crtc_fb (CoglOnscreen *onscreen, @@ -1751,13 +1604,14 @@ set_crtc_fb (CoglOnscreen *onscreen, MetaOnscreenNative *onscreen_native = onscreen_egl->platform; MetaGpuKms *render_gpu = onscreen_native->render_gpu; MetaGpuKms *gpu_kms; + MetaKmsBuffer *kms_fb; int x, y; uint32_t fb_id; gpu_kms = META_GPU_KMS (meta_crtc_get_gpu (crtc)); if (gpu_kms == render_gpu) { - fb_id = render_fb_id; + kms_fb = data->kms_fb; } else { @@ -1767,13 +1621,13 @@ set_crtc_fb (CoglOnscreen *onscreen, if (!secondary_gpu_state) return; - fb_id = meta_kms_buffer_get_fb_id (secondary_gpu_state->gbm.next_fb); + kms_fb = secondary_gpu_state->gbm.next_fb; } x = crtc->rect.x - logical_monitor->rect.x; y = crtc->rect.y - logical_monitor->rect.y; - meta_gpu_kms_apply_crtc_mode (gpu_kms, crtc, x, y, fb_id); + meta_gpu_kms_apply_crtc_mode (gpu_kms, crtc, x, 
y, kms_fb); } typedef struct _SetCrtcFbData @@ -1794,7 +1648,8 @@ set_crtc_fb_cb (MetaLogicalMonitor *logical_monitor, } static void -meta_onscreen_native_set_crtc_modes (CoglOnscreen *onscreen) +meta_onscreen_native_set_crtc_modes (CoglOnscreen *onscreen, + MetaKmsBuffer *next_fb) { CoglOnscreenEGL *onscreen_egl = onscreen->winsys; MetaOnscreenNative *onscreen_native = onscreen_egl->platform; @@ -1802,7 +1657,7 @@ meta_onscreen_native_set_crtc_modes (CoglOnscreen *onscreen) MetaGpuKms *render_gpu = onscreen_native->render_gpu; MetaRendererNativeGpuData *renderer_gpu_data; MetaRendererView *view = onscreen_native->view; - uint32_t fb_id = 0; + MetaKmsBuffer *dumb_fb = NULL; MetaLogicalMonitor *logical_monitor; renderer_gpu_data = meta_renderer_native_get_gpu_data (renderer_native, @@ -1810,23 +1665,23 @@ meta_onscreen_native_set_crtc_modes (CoglOnscreen *onscreen) switch (renderer_gpu_data->mode) { case META_RENDERER_NATIVE_MODE_GBM: - fb_id = meta_kms_buffer_get_fb_id (onscreen_native->gbm.next_fb); break; #ifdef HAVE_EGL_DEVICE case META_RENDERER_NATIVE_MODE_EGL_DEVICE: - fb_id = onscreen_native->egl.dumb_fb.fb_id; + next_fb = dumb_fb = + meta_kms_buffer_new_from_dumb (onscreen_native->egl.dumb_fb.fb_id); break; #endif } - g_assert (fb_id != 0); + g_assert (next_fb); logical_monitor = meta_renderer_view_get_logical_monitor (view); if (logical_monitor) { SetCrtcFbData data = { .onscreen = onscreen, - .fb_id = fb_id + .kms_fb = next_fb }; meta_logical_monitor_foreach_crtc (logical_monitor, @@ -1844,9 +1699,11 @@ meta_onscreen_native_set_crtc_modes (CoglOnscreen *onscreen) meta_gpu_kms_apply_crtc_mode (render_gpu, crtc, crtc->rect.x, crtc->rect.y, - fb_id); + next_fb); } } + + g_clear_object (&dumb_fb); } static gboolean @@ -1887,6 +1744,7 @@ typedef struct _FlipCrtcData { CoglOnscreen *onscreen; GClosure *flip_closure; + MetaKmsBuffer *kms_fb; gboolean did_flip; gboolean did_mode_set; @@ -1900,30 +1758,16 @@ flip_crtc (MetaLogicalMonitor *logical_monitor, 
FlipCrtcData *data = user_data; GError *error = NULL; - if (!meta_onscreen_native_flip_crtc (data->onscreen, - data->flip_closure, - crtc, - &error)) - { - if (g_error_matches (error, G_IO_ERROR, G_IO_ERROR_INVALID_ARGUMENT)) - { - if (crtc_mode_set_fallback (data->onscreen, logical_monitor, crtc)) - data->did_mode_set = TRUE; - } - else if (!g_error_matches (error, G_IO_ERROR, G_IO_ERROR_PERMISSION_DENIED)) - { - g_warning ("Failed to flip onscreen: %s", error->message); - } - g_error_free (error); - } - else - { - data->did_flip = TRUE; - } + meta_onscreen_native_flip_crtc (data->onscreen, + data->flip_closure, + crtc, x, y, + data->kms_fb, + &data->out_fb_in_use); } static void -meta_onscreen_native_flip_crtcs (CoglOnscreen *onscreen) +meta_onscreen_native_flip_crtcs (CoglOnscreen *onscreen, + MetaKmsBuffer *next_fb) { CoglOnscreenEGL *onscreen_egl = onscreen->winsys; MetaOnscreenNative *onscreen_native = onscreen_egl->platform; @@ -1934,6 +1778,7 @@ meta_onscreen_native_flip_crtcs (CoglOnscreen *onscreen) flip = g_new0 (ConcurrentFlip, 1); flip->view = g_object_ref (view); + flip->max_refresh_rate = 0.f; onscreen_native->last_flip = flip; /* @@ -1949,7 +1794,8 @@ meta_onscreen_native_flip_crtcs (CoglOnscreen *onscreen) flip_closure = g_cclosure_new (G_CALLBACK (on_crtc_flipped), flip, (GClosureNotify) flip_closure_destroyed); - g_closure_set_marshal (flip_closure, meta_marshal_VOID__OBJECT_OBJECT_INT64); + g_closure_set_marshal (flip_closure, + meta_marshal_VOID__OBJECT_OBJECT_BOOLEAN_INT64); /* Either flip the CRTC's of the monitor info, if we are drawing just part * of the stage, or all of the CRTC's if we are drawing the whole stage. 
@@ -1959,7 +1805,33 @@ meta_onscreen_native_flip_crtcs (CoglOnscreen *onscreen) .flip_closure = flip_closure, }; logical_monitor = meta_renderer_view_get_logical_monitor (view); - meta_logical_monitor_foreach_crtc (logical_monitor, flip_crtc, &data); + if (logical_monitor) + { + FlipCrtcData data = { + .onscreen = onscreen, + .flip_closure = flip_closure, + .kms_fb = next_fb + }; + + meta_logical_monitor_foreach_crtc (logical_monitor, + flip_crtc, + &data); + fb_in_use = data.out_fb_in_use; + } + else + { + GList *l; + + for (l = meta_gpu_get_crtcs (META_GPU (render_gpu)); l; l = l->next) + { + MetaCrtc *crtc = l->data; + + meta_onscreen_native_flip_crtc (onscreen, flip_closure, + crtc, crtc->rect.x, crtc->rect.y, + next_fb, + &fb_in_use); + } + } /* * If we didn't queue a page flip, but instead directly changed the mode due @@ -1974,36 +1846,13 @@ meta_onscreen_native_flip_crtcs (CoglOnscreen *onscreen) g_closure_unref (flip_closure); } -static void -wait_for_pending_flips (CoglOnscreen *onscreen) -{ - CoglOnscreenEGL *onscreen_egl = onscreen->winsys; - MetaOnscreenNative *onscreen_native = onscreen_egl->platform; - MetaOnscreenNativeSecondaryGpuState *secondary_gpu_state; - GHashTableIter iter; - - g_hash_table_iter_init (&iter, onscreen_native->secondary_gpu_states); - while (g_hash_table_iter_next (&iter, - NULL, - (gpointer *) &secondary_gpu_state)) - { - while (secondary_gpu_state->pending_flips) - meta_gpu_kms_wait_for_flip (secondary_gpu_state->gpu_kms, NULL); - } - - while (onscreen_native->last_flip && - onscreen_native->last_flip->pending_crtcs_count > 0) - meta_gpu_kms_wait_for_flip (onscreen_native->render_gpu, NULL); -} - static void copy_shared_framebuffer_gpu (CoglOnscreen *onscreen, MetaOnscreenNativeSecondaryGpuState *secondary_gpu_state, MetaRendererNativeGpuData *renderer_gpu_data, - gboolean *egl_context_changed) + gboolean *egl_context_changed, + MetaKmsBuffer *next_fb) { - CoglOnscreenEGL *onscreen_egl = onscreen->winsys; - MetaOnscreenNative 
*onscreen_native = onscreen_egl->platform; MetaRendererNative *renderer_native = renderer_gpu_data->renderer_native; MetaEgl *egl = meta_renderer_native_get_egl (renderer_native); GError *error = NULL; @@ -2027,7 +1876,7 @@ copy_shared_framebuffer_gpu (CoglOnscreen *onscreen, renderer_gpu_data->egl_display, renderer_gpu_data->secondary.egl_context, secondary_gpu_state->egl_surface, - meta_kms_buffer_get_bo (onscreen_native->gbm.next_fb), + meta_kms_buffer_get_bo (next_fb), &error)) { g_warning ("Failed to blit shared framebuffer: %s", error->message); @@ -2212,8 +2061,9 @@ update_secondary_gpu_state_pre_swap_buffers (CoglOnscreen *onscreen) } static void -update_secondary_gpu_state_post_swap_buffers (CoglOnscreen *onscreen, - gboolean *egl_context_changed) +update_secondary_gpu_state_post_swap_buffers (CoglOnscreen *onscreen, + gboolean *egl_context_changed, + MetaKmsBuffer *next_fb) { CoglOnscreenEGL *onscreen_egl = onscreen->winsys; MetaOnscreenNative *onscreen_native = onscreen_egl->platform; @@ -2237,7 +2087,8 @@ update_secondary_gpu_state_post_swap_buffers (CoglOnscreen *onscreen, copy_shared_framebuffer_gpu (onscreen, secondary_gpu_state, renderer_gpu_data, - egl_context_changed); + egl_context_changed, + next_fb); break; case META_SHARED_FRAMEBUFFER_COPY_MODE_CPU: /* Done before eglSwapBuffers. */ @@ -2263,12 +2114,7 @@ meta_onscreen_native_swap_buffers_with_damage (CoglOnscreen *onscreen, CoglFrameInfo *frame_info; gboolean egl_context_changed = FALSE; g_autoptr (GError) error = NULL; - - /* - * Wait for the flip callback before continuing, as we might have started the - * animation earlier due to the animation being driven by some other monitor. 
- */ - wait_for_pending_flips (onscreen); + g_autoptr (MetaKmsBuffer) new_fb = NULL; frame_info = g_queue_peek_tail (&onscreen->pending_frame_infos); frame_info->global_frame_counter = renderer_native->frame_counter; @@ -2284,14 +2130,11 @@ meta_onscreen_native_swap_buffers_with_damage (CoglOnscreen *onscreen, switch (renderer_gpu_data->mode) { case META_RENDERER_NATIVE_MODE_GBM: - g_warn_if_fail (onscreen_native->gbm.next_fb == NULL); - g_clear_object (&onscreen_native->gbm.next_fb); - onscreen_native->gbm.next_fb = - meta_kms_buffer_new_from_gbm (render_gpu, - onscreen_native->gbm.surface, - renderer_native->use_modifiers, - &error); - if (!onscreen_native->gbm.next_fb) + new_fb = meta_kms_buffer_new_from_gbm (render_gpu, + onscreen_native->gbm.surface, + renderer_native->use_modifiers, + &error); + if (!new_fb) { g_warning ("meta_kms_buffer_new_from_gbm failed: %s", error->message); @@ -2301,22 +2144,25 @@ meta_onscreen_native_swap_buffers_with_damage (CoglOnscreen *onscreen, break; #ifdef HAVE_EGL_DEVICE case META_RENDERER_NATIVE_MODE_EGL_DEVICE: + /* XXX new_fb is NULL. Is that safe? 
*/ break; #endif } - update_secondary_gpu_state_post_swap_buffers (onscreen, &egl_context_changed); + update_secondary_gpu_state_post_swap_buffers (onscreen, + &egl_context_changed, + new_fb); /* If this is the first framebuffer to be presented then we now setup the * crtc modes, else we flip from the previous buffer */ if (onscreen_native->pending_set_crtc) { - meta_onscreen_native_set_crtc_modes (onscreen); + meta_onscreen_native_set_crtc_modes (onscreen, new_fb); onscreen_native->pending_set_crtc = FALSE; } onscreen_native->pending_queue_swap_notify_frame_count = renderer_native->frame_counter; - meta_onscreen_native_flip_crtcs (onscreen); + meta_onscreen_native_flip_crtcs (onscreen, new_fb); /* * If we changed EGL context, cogl will have the wrong idea about what is @@ -2353,12 +2199,12 @@ meta_renderer_native_init_egl_context (CoglContext *cogl_context, COGL_WINSYS_FEATURE_MULTIPLE_ONSCREEN, TRUE); - /* COGL_WINSYS_FEATURE_SWAP_THROTTLE is always true for this renderer - * because we have the call to wait_for_pending_flips on every frame. + /* We don't throttle swaps, but we do have the above _EVENT features, so + * using those will still prevent the master clock from running too fast. + */ COGL_FLAGS_SET (cogl_context->winsys_features, COGL_WINSYS_FEATURE_SWAP_THROTTLE, - TRUE); + FALSE); #ifdef HAVE_EGL_DEVICE if (renderer_gpu_data->mode == META_RENDERER_NATIVE_MODE_EGL_DEVICE) @@ -2875,10 +2721,6 @@ meta_renderer_native_release_onscreen (CoglOnscreen *onscreen) switch (renderer_gpu_data->mode) { case META_RENDERER_NATIVE_MODE_GBM: - /* flip state takes a reference on the onscreen so there should - * never be outstanding flips when we reach here.
*/ - g_return_if_fail (onscreen_native->gbm.next_fb == NULL); - free_current_bo (onscreen); if (onscreen_native->gbm.surface) @@ -3343,7 +3185,7 @@ meta_renderer_native_finish_frame (MetaRendererNative *renderer_native) if (crtc->current_mode) continue; - meta_gpu_kms_apply_crtc_mode (gpu_kms, crtc, 0, 0, 0); + meta_gpu_kms_apply_crtc_mode (gpu_kms, crtc, 0, 0, NULL); } } diff --git a/src/meta-marshal.list b/src/meta-marshal.list index c1f4781d2d8..1eeecee8dc2 100644 --- a/src/meta-marshal.list +++ b/src/meta-marshal.list @@ -1 +1 @@ -VOID:OBJECT,OBJECT,INT64 +VOID:OBJECT,OBJECT,BOOLEAN,INT64 -- GitLab