diff --git a/gdk/gdkdisplayprivate.h b/gdk/gdkdisplayprivate.h index bfd23b58f99e26c386efcc495e3983ab5ce45d60..2a9a231cdc1a78fb4cee5ece95a76fe3b8bd5ab8 100644 --- a/gdk/gdkdisplayprivate.h +++ b/gdk/gdkdisplayprivate.h @@ -47,16 +47,17 @@ typedef struct _GdkDisplayClass GdkDisplayClass; typedef enum { GDK_VULKAN_FEATURE_DUAL_SOURCE_BLEND = 1 << 0, - GDK_VULKAN_FEATURE_DMABUF = 1 << 1, - GDK_VULKAN_FEATURE_WIN32 = 1 << 2, - GDK_VULKAN_FEATURE_YCBCR = 1 << 3, - GDK_VULKAN_FEATURE_TIMELINE_SEMAPHORE = 1 << 4, - GDK_VULKAN_FEATURE_SEMAPHORE_EXPORT = 1 << 5, - GDK_VULKAN_FEATURE_SEMAPHORE_IMPORT = 1 << 6, - GDK_VULKAN_FEATURE_WIN32_SEMAPHORE = 1 << 7, - GDK_VULKAN_FEATURE_INCREMENTAL_PRESENT = 1 << 8, - GDK_VULKAN_FEATURE_SWAPCHAIN_MAINTENANCE = 1 << 9, - GDK_VULKAN_FEATURE_PORTABILITY_SUBSET = 1 << 10, + GDK_VULKAN_FEATURE_PROFILE = 1 << 1, + GDK_VULKAN_FEATURE_DMABUF = 1 << 2, + GDK_VULKAN_FEATURE_WIN32 = 1 << 3, + GDK_VULKAN_FEATURE_YCBCR = 1 << 4, + GDK_VULKAN_FEATURE_TIMELINE_SEMAPHORE = 1 << 5, + GDK_VULKAN_FEATURE_SEMAPHORE_EXPORT = 1 << 6, + GDK_VULKAN_FEATURE_SEMAPHORE_IMPORT = 1 << 7, + GDK_VULKAN_FEATURE_WIN32_SEMAPHORE = 1 << 8, + GDK_VULKAN_FEATURE_INCREMENTAL_PRESENT = 1 << 9, + GDK_VULKAN_FEATURE_SWAPCHAIN_MAINTENANCE = 1 << 10, + GDK_VULKAN_FEATURE_PORTABILITY_SUBSET = 1 << 11, } GdkVulkanFeatures; -#define GDK_VULKAN_N_FEATURES 11 +#define GDK_VULKAN_N_FEATURES 12 diff --git a/gdk/gdkvulkancontext.c b/gdk/gdkvulkancontext.c index 394b96364a2cfe8174fffe6596fb0935bc663c3e..3d7cc13fc691084fb847d0f9e9784913cd5e13b1 100644 --- a/gdk/gdkvulkancontext.c +++ b/gdk/gdkvulkancontext.c @@ -41,6 +41,7 @@ #ifdef GDK_RENDERING_VULKAN const GdkDebugKey gdk_vulkan_feature_keys[] = { { "dual-source-blend", GDK_VULKAN_FEATURE_DUAL_SOURCE_BLEND, "Disable dual source blending" }, + { "profile", GDK_VULKAN_FEATURE_PROFILE, "Disable profiling support" }, { "dmabuf", GDK_VULKAN_FEATURE_DMABUF, "Never import Dmabufs" }, { "win32", GDK_VULKAN_FEATURE_WIN32, "Never import Windows resources" }, { "ycbcr", 
GDK_VULKAN_FEATURE_YCBCR, "Do not support Ycbcr textures (also disables dmabufs)" }, @@ -644,6 +645,9 @@ physical_device_check_features (VkPhysicalDevice device) if (v10_features.features.dualSrcBlend) features |= GDK_VULKAN_FEATURE_DUAL_SOURCE_BLEND; + if (v10_features.features.pipelineStatisticsQuery) + features |= GDK_VULKAN_FEATURE_PROFILE; + if (ycbcr_features.samplerYcbcrConversion || physical_device_supports_extension (device, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME)) features |= GDK_VULKAN_FEATURE_YCBCR; @@ -1869,6 +1873,7 @@ gdk_display_create_vulkan_device (GdkDisplay *display, .ppEnabledExtensionNames = (const char * const *) device_extensions->pdata, .pEnabledFeatures = &(VkPhysicalDeviceFeatures) { .dualSrcBlend = ENABLE_IF (GDK_VULKAN_FEATURE_DUAL_SOURCE_BLEND), + .pipelineStatisticsQuery = ENABLE_IF (GDK_VULKAN_FEATURE_PROFILE), }, .pNext = &(VkPhysicalDeviceVulkan11Features) { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES, diff --git a/gsk/gpu/gskglrenderer.c b/gsk/gpu/gskglrenderer.c index 0b3141f416f4fa8d802388d82751c267c99b5605..79e7ec5e8c76cd4eaa17dd1b8f33d3f79985e77d 100644 --- a/gsk/gpu/gskglrenderer.c +++ b/gsk/gpu/gskglrenderer.c @@ -66,7 +66,7 @@ gsk_gl_renderer_create_context (GskGpuRenderer *renderer, return NULL; } - *supported = -1; + *supported = ~GSK_GPU_OPTIMIZE_PROFILE; if (!gdk_gl_context_has_feature (context, GDK_GL_FEATURE_BLEND_FUNC_EXTENDED)) *supported &= ~GSK_GPU_OPTIMIZE_DUAL_BLEND; @@ -138,6 +138,7 @@ gsk_gl_renderer_class_init (GskGLRendererClass *klass) GskRendererClass *renderer_class = GSK_RENDERER_CLASS (klass); gpu_renderer_class->frame_type = GSK_TYPE_GL_FRAME; + gpu_renderer_class->profile_frame_type = GSK_TYPE_GL_FRAME; /* unused */ gpu_renderer_class->get_device = gsk_gl_device_get_for_display; gpu_renderer_class->create_context = gsk_gl_renderer_create_context; diff --git a/gsk/gpu/gskgpublendop.c b/gsk/gpu/gskgpublendop.c index 
995f1f8b1a0bab4fa056bc2e42913e27e9694ba4..4b939220254a65c907e335c1c2a56db131e24f65 100644 --- a/gsk/gpu/gskgpublendop.c +++ b/gsk/gpu/gskgpublendop.c @@ -2,6 +2,7 @@ #include "gskgpublendopprivate.h" +#include "gskgpuframeprivate.h" #include "gskgpuopprivate.h" #include "gskgpuprintprivate.h" @@ -140,7 +141,7 @@ gsk_gpu_blend_op (GskGpuFrame *frame, { GskGpuBlendOp *self; - self = (GskGpuBlendOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_BLEND_OP_CLASS); + self = (GskGpuBlendOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_BLEND_OP_CLASS); self->blend = blend; } diff --git a/gsk/gpu/gskgpublitop.c b/gsk/gpu/gskgpublitop.c index 6bb2e732ca9aef34a918429480f9742abf0ea7b0..5982c156a2d0ea6bcc2285a55c7b4fed6ed8f9db 100644 --- a/gsk/gpu/gskgpublitop.c +++ b/gsk/gpu/gskgpublitop.c @@ -3,6 +3,7 @@ #include "gskgpublitopprivate.h" #include "gskglimageprivate.h" +#include "gskgpuframeprivate.h" #include "gskgpuprintprivate.h" #ifdef GDK_RENDERING_VULKAN #include "gskvulkanimageprivate.h" @@ -211,7 +212,7 @@ gsk_gpu_blit_op (GskGpuFrame *frame, g_assert (filter != GSK_GPU_BLIT_LINEAR || (gsk_gpu_image_get_flags (src_image) & GSK_GPU_IMAGE_FILTERABLE) == GSK_GPU_IMAGE_FILTERABLE); g_assert ((gsk_gpu_image_get_flags (dest_image) & GSK_GPU_IMAGE_RENDERABLE) == GSK_GPU_IMAGE_RENDERABLE); - self = (GskGpuBlitOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_BLIT_OP_CLASS); + self = (GskGpuBlitOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_BLIT_OP_CLASS); self->src_image = g_object_ref (src_image); self->dest_image = g_object_ref (dest_image); diff --git a/gsk/gpu/gskgpuclearop.c b/gsk/gpu/gskgpuclearop.c index 9e82836c0e5c384d850eca6e40f26686e5bdcb82..0783f661f98095518da3f9108b02e8f46eb0bb54 100644 --- a/gsk/gpu/gskgpuclearop.c +++ b/gsk/gpu/gskgpuclearop.c @@ -2,6 +2,7 @@ #include "gskgpuclearopprivate.h" +#include "gskgpuframeprivate.h" #include "gskgpuopprivate.h" #include "gskgpuprintprivate.h" @@ -109,7 +110,7 @@ gsk_gpu_clear_op (GskGpuFrame *frame, { GskGpuClearOp *self; - self = (GskGpuClearOp 
*) gsk_gpu_op_alloc (frame, &GSK_GPU_CLEAR_OP_CLASS); + self = (GskGpuClearOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_CLEAR_OP_CLASS); self->rect = *rect; memcpy (self->color, color, sizeof (float) * 4); diff --git a/gsk/gpu/gskgpudownloadop.c b/gsk/gpu/gskgpudownloadop.c index acef7a6f728c35cf15174398f876eb82f6abd719..b87817456a3dedb1b418cef618e9fae148334ed6 100644 --- a/gsk/gpu/gskgpudownloadop.c +++ b/gsk/gpu/gskgpudownloadop.c @@ -399,7 +399,7 @@ gsk_gpu_download_op (GskGpuFrame *frame, g_assert (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_DOWNLOADABLE); g_assert (out_texture != NULL && *out_texture == NULL); - self = (GskGpuDownloadOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_DOWNLOAD_OP_CLASS); + self = (GskGpuDownloadOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_DOWNLOAD_OP_CLASS); self->image = g_object_ref (image); self->color_state = gdk_color_state_ref (color_state); @@ -535,7 +535,7 @@ gsk_gpu_download_into_op (GskGpuFrame *frame, g_assert (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_DOWNLOADABLE); - self = (GskGpuDownloadIntoOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_DOWNLOAD_INTO_OP_CLASS); + self = (GskGpuDownloadIntoOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_DOWNLOAD_INTO_OP_CLASS); self->image = g_object_ref (image); self->image_color_state = gdk_color_state_ref (image_color_state); diff --git a/gsk/gpu/gskgpuframe.c b/gsk/gpu/gskgpuframe.c index 0c30817b154eb4aee9250ae5484f71d5caf9ec2e..854f99ada3b012d394dc415e005fee27876eeed1 100644 --- a/gsk/gpu/gskgpuframe.c +++ b/gsk/gpu/gskgpuframe.c @@ -35,6 +35,19 @@ #define GDK_ARRAY_BY_VALUE 1 #include "gdk/gdkarrayimpl.c" +typedef struct _GskNodeStackNode GskNodeStackNode; +struct _GskNodeStackNode { + GskRenderNode *node; + guint pos; +}; + +#define GDK_ARRAY_NAME gsk_node_stack +#define GDK_ARRAY_TYPE_NAME GskNodeStack +#define GDK_ARRAY_ELEMENT_TYPE GskNodeStackNode +#define GDK_ARRAY_BY_VALUE 1 +#define GDK_ARRAY_PREALLOC 64 +#include "gdk/gdkarrayimpl.c" + typedef struct _GskGpuFramePrivate 
GskGpuFramePrivate; struct _GskGpuFramePrivate @@ -58,6 +71,8 @@ struct _GskGpuFramePrivate GskGpuBuffer *storage_buffer; guchar *storage_buffer_data; gsize storage_buffer_used; + + GskNodeStack node_stack; }; G_DEFINE_TYPE_WITH_PRIVATE (GskGpuFrame, gsk_gpu_frame, G_TYPE_OBJECT) @@ -74,6 +89,8 @@ gsk_gpu_frame_default_cleanup (GskGpuFrame *self) GskGpuOp *op; gsize i; + g_assert (gsk_node_stack_get_size (&priv->node_stack) == 0); + priv->n_globals = 0; for (i = 0; i < gsk_gpu_ops_get_size (&priv->ops); i += op->op_class->size) @@ -135,6 +152,49 @@ gsk_gpu_frame_default_upload_texture (GskGpuFrame *self, return NULL; } +static gpointer +gsk_gpu_frame_default_alloc_op (GskGpuFrame *self, + const GskGpuOpClass *op_class) +{ + GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self); + GskGpuOp *op; + gsize pos; + + pos = gsk_gpu_ops_get_size (&priv->ops); + + gsk_gpu_ops_splice (&priv->ops, + pos, + 0, FALSE, + NULL, + op_class->size); + + op = (GskGpuOp *) gsk_gpu_ops_index (&priv->ops, pos); + + op->op_class = op_class; + + priv->last_op = op; + + return op; +} + +static void +gsk_gpu_frame_default_start_node (GskGpuFrame *self, + GskRenderNode *node, + gsize pos) +{ + GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self); + + gsk_node_stack_append (&priv->node_stack, &(GskNodeStackNode) { node, pos }); +} + +static void +gsk_gpu_frame_default_end_node (GskGpuFrame *self) +{ + GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self); + + gsk_node_stack_set_size (&priv->node_stack, gsk_node_stack_get_size (&priv->node_stack) - 1); +} + static void gsk_gpu_frame_dispose (GObject *object) { @@ -152,6 +212,7 @@ gsk_gpu_frame_finalize (GObject *object) GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self); gsk_gpu_ops_clear (&priv->ops); + gsk_node_stack_clear (&priv->node_stack); g_clear_object (&priv->vertex_buffer); g_clear_object (&priv->globals_buffer); @@ -173,6 +234,9 @@ gsk_gpu_frame_class_init 
(GskGpuFrameClass *klass) klass->end = gsk_gpu_frame_default_end; klass->sync = gsk_gpu_frame_default_sync; klass->upload_texture = gsk_gpu_frame_default_upload_texture; + klass->alloc_op = gsk_gpu_frame_default_alloc_op; + klass->start_node = gsk_gpu_frame_default_start_node; + klass->end_node = gsk_gpu_frame_default_end_node; object_class->dispose = gsk_gpu_frame_dispose; object_class->finalize = gsk_gpu_frame_finalize; @@ -184,6 +248,7 @@ gsk_gpu_frame_init (GskGpuFrame *self) GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self); gsk_gpu_ops_init (&priv->ops); + gsk_node_stack_init (&priv->node_stack); } void @@ -480,23 +545,10 @@ gsk_gpu_frame_sort_ops (GskGpuFrame *self) } gpointer -gsk_gpu_frame_alloc_op (GskGpuFrame *self, - gsize size) +gsk_gpu_frame_alloc_op (GskGpuFrame *self, + const GskGpuOpClass *op_class) { - GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self); - gsize pos; - - pos = gsk_gpu_ops_get_size (&priv->ops); - - gsk_gpu_ops_splice (&priv->ops, - pos, - 0, FALSE, - NULL, - size); - - priv->last_op = (GskGpuOp *) gsk_gpu_ops_index (&priv->ops, pos); - - return priv->last_op; + return GSK_GPU_FRAME_GET_CLASS (self)->alloc_op (self, op_class); } GskGpuOp * @@ -755,8 +807,12 @@ gsk_gpu_frame_record (GskGpuFrame *self, priv->timestamp = timestamp; gsk_gpu_cache_set_time (gsk_gpu_device_get_cache (priv->device), timestamp); + gsk_gpu_frame_start_node (self, node, 0); + gsk_gpu_node_processor_process (self, target, target_color_state, clip, node, viewport, pass_type); + gsk_gpu_frame_end_node (self); + if (texture) gsk_gpu_download_op (self, target, target_color_state, texture); } @@ -904,3 +960,62 @@ gsk_gpu_frame_download_texture (GskGpuFrame *self, return TRUE; } + +/* + * gsk_gpu_frame_start_node: + * @self: the frame + * @node: the rendernode to track + * @pos: the position in the parent node + * + * Starts rendering the given node, which is the child of + * the currently rendered node at the given position. 
+ * + * To end rendering that node, call gsk_gpu_frame_end_node(). + **/ +void +gsk_gpu_frame_start_node (GskGpuFrame *self, + GskRenderNode *node, + gsize pos) +{ +#ifndef G_DISABLE_ASSERT + GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self); + gsize n = gsk_node_stack_get_size (&priv->node_stack); + + if (n > 0) + { + GskNodeStackNode *stack; + GskRenderNode **children; + gsize n_children; + + stack = gsk_node_stack_get (&priv->node_stack, n - 1); + children = gsk_render_node_get_children (stack->node, &n_children); + g_assert (pos < n_children); + g_assert (children[pos] == node); + } + else + { + g_assert (pos == 0); + } +#endif + + GSK_GPU_FRAME_GET_CLASS (self)->start_node (self, node, pos); +} + +/* + * gsk_gpu_frame_end_node: + * @self: the frame + * + * Ends the current node and continues with its parent. + **/ +void +gsk_gpu_frame_end_node (GskGpuFrame *self) +{ +#ifndef G_DISABLE_ASSERT + GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self); + + g_assert (gsk_node_stack_get_size (&priv->node_stack) > 0); +#endif + + GSK_GPU_FRAME_GET_CLASS (self)->end_node (self); +} + diff --git a/gsk/gpu/gskgpuframeprivate.h b/gsk/gpu/gskgpuframeprivate.h index 468558a9194ead2b095c812e23d7e79d9906a8ae..9909ca6919f1c2f35e43e3479deb719f9c87493a 100644 --- a/gsk/gpu/gskgpuframeprivate.h +++ b/gsk/gpu/gskgpuframeprivate.h @@ -54,6 +54,12 @@ struct _GskGpuFrameClass GskGpuBuffer *vertex_buffer, GskGpuBuffer *globals_buffer, GskGpuOp *op); + gpointer (* alloc_op) (GskGpuFrame *self, + const GskGpuOpClass *op_class); + void (* start_node) (GskGpuFrame *self, + GskRenderNode *node, + gsize pos); + void (* end_node) (GskGpuFrame *self); }; GType gsk_gpu_frame_get_type (void) G_GNUC_CONST; @@ -84,7 +90,7 @@ gboolean gsk_gpu_frame_should_optimize (GskGpuF GskGpuOptimizations optimization) G_GNUC_PURE; gpointer gsk_gpu_frame_alloc_op (GskGpuFrame *self, - gsize size); + const GskGpuOpClass *op_class); GskGpuImage * gsk_gpu_frame_upload_texture 
(GskGpuFrame *self, gboolean with_mipmap, GdkTexture *texture); @@ -123,6 +129,11 @@ gboolean gsk_gpu_frame_download_texture (GskGpuF GdkColorState *color_state); GskGpuOp *gsk_gpu_frame_get_last_op (GskGpuFrame *self); +void gsk_gpu_frame_start_node (GskGpuFrame *self, + GskRenderNode *node, + gsize pos); +void gsk_gpu_frame_end_node (GskGpuFrame *self); + G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuFrame, g_object_unref) G_END_DECLS diff --git a/gsk/gpu/gskgpuglobalsop.c b/gsk/gpu/gskgpuglobalsop.c index c232364ecf7815a5d9c04929360114cb57fe078f..ad6daca7c63a4836f20f341bb657e2b2a6d723df 100644 --- a/gsk/gpu/gskgpuglobalsop.c +++ b/gsk/gpu/gskgpuglobalsop.c @@ -98,7 +98,7 @@ gsk_gpu_globals_op (GskGpuFrame *frame, { GskGpuGlobalsOp *self; - self = (GskGpuGlobalsOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_GLOBALS_OP_CLASS); + self = (GskGpuGlobalsOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_GLOBALS_OP_CLASS); graphene_matrix_to_float (mvp, self->instance.mvp); gsk_rounded_rect_to_float (clip, graphene_point_zero (), self->instance.clip); diff --git a/gsk/gpu/gskgpumipmapop.c b/gsk/gpu/gskgpumipmapop.c index 6eba04cd2f04098d1bb44a24a108db7ba019a2f5..28e36319d436072896344dd5394ffdd830cc15f5 100644 --- a/gsk/gpu/gskgpumipmapop.c +++ b/gsk/gpu/gskgpumipmapop.c @@ -3,6 +3,7 @@ #include "gskgpumipmapopprivate.h" #include "gskglimageprivate.h" +#include "gskgpuframeprivate.h" #include "gskgpuprintprivate.h" #include "gskgpuutilsprivate.h" #ifdef GDK_RENDERING_VULKAN @@ -185,7 +186,7 @@ gsk_gpu_mipmap_op (GskGpuFrame *frame, g_assert ((gsk_gpu_image_get_flags (image) & (GSK_GPU_IMAGE_CAN_MIPMAP | GSK_GPU_IMAGE_MIPMAP)) == GSK_GPU_IMAGE_CAN_MIPMAP); - self = (GskGpuMipmapOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_MIPMAP_OP_CLASS); + self = (GskGpuMipmapOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_MIPMAP_OP_CLASS); self->image = g_object_ref (image); diff --git a/gsk/gpu/gskgpunodeprocessor.c b/gsk/gpu/gskgpunodeprocessor.c index 
28552b4322b9f2440e018e73d953a16df2baefa7..1fd3b04e09e4644db77052c17eff956c8add1fcf 100644 --- a/gsk/gpu/gskgpunodeprocessor.c +++ b/gsk/gpu/gskgpunodeprocessor.c @@ -205,11 +205,12 @@ struct _GskGpuFirstNodeInfo guint has_started_rendering : 1; }; -static void gsk_gpu_node_processor_add_node (GskGpuNodeProcessor *self, +static void gsk_gpu_node_processor_add_node_untracked (GskGpuNodeProcessor *self, GskRenderNode *node); static gboolean gsk_gpu_node_processor_add_first_node (GskGpuNodeProcessor *self, GskGpuFirstNodeInfo *info, - GskRenderNode *node); + GskRenderNode *node, + gsize pos); static GskGpuImage * gsk_gpu_get_node_as_image (GskGpuFrame *frame, GskGpuAsImageFlags flags, GdkColorState *ccs, @@ -769,6 +770,18 @@ gsk_gpu_node_processor_create_offscreen (GskGpuFrame *frame, return image; } +static void +gsk_gpu_node_processor_add_node (GskGpuNodeProcessor *self, + GskRenderNode *node, + gsize pos) +{ + gsk_gpu_frame_start_node (self->frame, node, pos); + + gsk_gpu_node_processor_add_node_untracked (self, node); + + gsk_gpu_frame_end_node (self->frame); +} + static GskGpuImage * gsk_gpu_get_node_as_image_via_offscreen (GskGpuFrame *frame, GskGpuAsImageFlags flags, @@ -883,29 +896,12 @@ gsk_gpu_copy_image (GskGpuFrame *frame, return copy; } -/* - * gsk_gpu_node_processor_get_node_as_image: - * @self: a node processor - * @flags: flags for the image - * @clip_bounds: (nullable): clip rectangle to use or NULL to use - * the current clip - * @node: the node to turn into an image - * @out_bounds: bounds of the the image in node space - * - * Generates an image for the given node. The image is restricted to the - * region in the clip bounds. - * - * The resulting image is guaranteed to be premultiplied. 
- * - * Returns: (nullable): The node as an image or %NULL if the node is fully - * clipped - **/ static GskGpuImage * -gsk_gpu_node_processor_get_node_as_image (GskGpuNodeProcessor *self, - GskGpuAsImageFlags flags, - const graphene_rect_t *clip_bounds, - GskRenderNode *node, - graphene_rect_t *out_bounds) +gsk_gpu_node_processor_get_node_as_image_untracked (GskGpuNodeProcessor *self, + GskGpuAsImageFlags flags, + const graphene_rect_t *clip_bounds, + GskRenderNode *node, + graphene_rect_t *out_bounds) { graphene_rect_t clip; @@ -941,6 +937,48 @@ gsk_gpu_node_processor_get_node_as_image (GskGpuNodeProcessor *self, out_bounds); } +/* + * gsk_gpu_node_processor_get_node_as_image: + * @self: a node processor + * @flags: flags for the image + * @clip_bounds: (nullable): clip rectangle to use or NULL to use + * the current clip + * @node: the node to turn into an image + * @pos: position of the node in the parent for tracking purposes or + * -1 to not do tracking + * @out_bounds: bounds of the image in node space + * + * Generates an image for the given node. The image is restricted to the + * region in the clip bounds. + * + * The resulting image is guaranteed to be premultiplied. 
+ * + * Returns: (nullable): The node as an image or %NULL if the node is fully + * clipped + **/ +static GskGpuImage * +gsk_gpu_node_processor_get_node_as_image (GskGpuNodeProcessor *self, + GskGpuAsImageFlags flags, + const graphene_rect_t *clip_bounds, + GskRenderNode *node, + gsize pos, + graphene_rect_t *out_bounds) +{ + GskGpuImage *result; + + gsk_gpu_frame_start_node (self->frame, node, pos); + + result = gsk_gpu_node_processor_get_node_as_image_untracked (self, + flags, + clip_bounds, + node, + out_bounds); + + gsk_gpu_frame_end_node (self->frame); + + return result; +} + static void gsk_gpu_node_processor_blur_op (GskGpuNodeProcessor *self, const graphene_rect_t *rect, @@ -1073,11 +1111,11 @@ gsk_gpu_node_processor_add_with_offscreen (GskGpuNodeProcessor *self, gsk_gpu_node_processor_sync_globals (self, 0); - image = gsk_gpu_node_processor_get_node_as_image (self, - 0, - NULL, - node, - &tex_rect); + image = gsk_gpu_node_processor_get_node_as_image_untracked (self, + 0, + NULL, + node, + &tex_rect); if (image == NULL) return; @@ -1094,6 +1132,7 @@ gsk_gpu_node_processor_add_with_offscreen (GskGpuNodeProcessor *self, static void gsk_gpu_node_processor_add_node_clipped (GskGpuNodeProcessor *self, GskRenderNode *node, + gsize pos, const graphene_rect_t *clip_bounds) { GskGpuClip old_clip; @@ -1102,7 +1141,7 @@ gsk_gpu_node_processor_add_node_clipped (GskGpuNodeProcessor *self, if (gsk_rect_contains_rect (clip_bounds, &node->bounds)) { - gsk_gpu_node_processor_add_node (self, node); + gsk_gpu_node_processor_add_node (self, node, pos); return; } @@ -1136,7 +1175,7 @@ gsk_gpu_node_processor_add_node_clipped (GskGpuNodeProcessor *self, self->scissor = scissor; self->pending_globals |= GSK_GPU_GLOBAL_SCISSOR | GSK_GPU_GLOBAL_CLIP; - gsk_gpu_node_processor_add_node (self, node); + gsk_gpu_node_processor_add_node (self, node, pos); gsk_gpu_clip_init_copy (&self->clip, &old_clip); self->scissor = old_scissor; @@ -1149,7 +1188,7 @@ 
gsk_gpu_node_processor_add_node_clipped (GskGpuNodeProcessor *self, gsk_gpu_clip_init_copy (&self->clip, &old_clip); - gsk_gpu_node_processor_add_node (self, node); + gsk_gpu_node_processor_add_node (self, node, pos); self->scissor = old_scissor; self->pending_globals |= GSK_GPU_GLOBAL_SCISSOR; @@ -1184,6 +1223,7 @@ gsk_gpu_node_processor_add_node_clipped (GskGpuNodeProcessor *self, 0, &bounds, node, + pos, &tex_rect); else image = NULL; @@ -1208,7 +1248,7 @@ gsk_gpu_node_processor_add_node_clipped (GskGpuNodeProcessor *self, self->pending_globals |= GSK_GPU_GLOBAL_CLIP; - gsk_gpu_node_processor_add_node (self, node); + gsk_gpu_node_processor_add_node (self, node, pos); gsk_gpu_clip_init_copy (&self->clip, &old_clip); self->pending_globals |= GSK_GPU_GLOBAL_CLIP; @@ -1221,6 +1261,7 @@ gsk_gpu_node_processor_add_clip_node (GskGpuNodeProcessor *self, { gsk_gpu_node_processor_add_node_clipped (self, gsk_clip_node_get_child (node), + 0, gsk_clip_node_get_clip (node)); } @@ -1333,7 +1374,8 @@ static gboolean gsk_gpu_node_processor_add_first_node_clipped (GskGpuNodeProcessor *self, GskGpuFirstNodeInfo *info, const graphene_rect_t *clip, - GskRenderNode *node) + GskRenderNode *node, + gsize pos) { GskGpuClip old_clip; cairo_rectangle_int_t old_scissor; @@ -1346,7 +1388,8 @@ gsk_gpu_node_processor_add_first_node_clipped (GskGpuNodeProcessor *self, if (gsk_gpu_node_processor_add_first_node (self, info, - node)) + node, + pos)) { /* don't revert clip here, the add_first_node() adjusted it to a correct value */ return TRUE; @@ -1366,7 +1409,8 @@ gsk_gpu_node_processor_add_first_clip_node (GskGpuNodeProcessor *self, return gsk_gpu_node_processor_add_first_node_clipped (self, info, &node->bounds, - gsk_clip_node_get_child (node)); + gsk_clip_node_get_child (node), + 0); } static void @@ -1385,6 +1429,7 @@ gsk_gpu_node_processor_add_rounded_clip_node_with_mask (GskGpuNodeProcessor *sel 0, &clip_bounds, gsk_rounded_clip_node_get_child (node), + 0, &child_rect); if (child_image == 
NULL) return; @@ -1495,7 +1540,7 @@ gsk_gpu_node_processor_add_rounded_clip_node (GskGpuNodeProcessor *self, self->pending_globals |= GSK_GPU_GLOBAL_CLIP; - gsk_gpu_node_processor_add_node (self, gsk_rounded_clip_node_get_child (node)); + gsk_gpu_node_processor_add_node (self, gsk_rounded_clip_node_get_child (node), 0); gsk_gpu_clip_init_copy (&self->clip, &old_clip); self->pending_globals |= GSK_GPU_GLOBAL_CLIP; @@ -1516,7 +1561,8 @@ gsk_gpu_node_processor_add_first_rounded_clip_node (GskGpuNodeProcessor *self, return gsk_gpu_node_processor_add_first_node_clipped (self, info, &cover, - gsk_rounded_clip_node_get_child (node)); + gsk_rounded_clip_node_get_child (node), + 0); } static void @@ -1543,7 +1589,7 @@ gsk_gpu_node_processor_add_transform_node (GskGpuNodeProcessor *self, old_offset = self->offset; self->offset.x += dx; self->offset.y += dy; - gsk_gpu_node_processor_add_node (self, child); + gsk_gpu_node_processor_add_node (self, child, 0); self->offset = old_offset; } return; @@ -1626,11 +1672,11 @@ gsk_gpu_node_processor_add_transform_node (GskGpuNodeProcessor *self, gsk_transform_unref (clip_transform); /* This cannot loop because the next time we'll hit the branch above */ gsk_gpu_node_processor_sync_globals (self, 0); - image = gsk_gpu_node_processor_get_node_as_image (self, - 0, - NULL, - node, - &tex_rect); + image = gsk_gpu_node_processor_get_node_as_image_untracked (self, + 0, + NULL, + node, + &tex_rect); if (image != NULL) { gsk_gpu_node_processor_image_op (self, @@ -1701,7 +1747,7 @@ gsk_gpu_node_processor_add_transform_node (GskGpuNodeProcessor *self, if (self->modelview != old_modelview) self->pending_globals |= GSK_GPU_GLOBAL_MATRIX; - gsk_gpu_node_processor_add_node (self, child); + gsk_gpu_node_processor_add_node (self, child, 0); self->offset = old_offset; self->scale = old_scale; @@ -1739,7 +1785,8 @@ gsk_gpu_node_processor_add_first_transform_node (GskGpuNodeProcessor *self, self->offset.y += dy; result = 
gsk_gpu_node_processor_add_first_node (self, info, - gsk_transform_node_get_child (node)); + gsk_transform_node_get_child (node), + 0); self->offset = old_offset; return result; @@ -1760,7 +1807,8 @@ gsk_gpu_node_processor_add_first_transform_node (GskGpuNodeProcessor *self, result = gsk_gpu_node_processor_add_first_node (self, info, - gsk_transform_node_get_child (node)); + gsk_transform_node_get_child (node), + 0); self->offset = old_offset; self->scale = old_scale; @@ -1801,7 +1849,8 @@ gsk_gpu_node_processor_add_first_transform_node (GskGpuNodeProcessor *self, result = gsk_gpu_node_processor_add_first_node (self, info, - gsk_transform_node_get_child (node)); + gsk_transform_node_get_child (node), + 0); self->offset = old_offset; self->scale = old_scale; @@ -1837,10 +1886,14 @@ gsk_gpu_node_processor_add_opacity_node (GskGpuNodeProcessor *self, child = gsk_opacity_node_get_child (node); + gsk_gpu_frame_start_node (self->frame, child, 0); + if (gsk_render_node_clears_background (child)) gsk_gpu_node_processor_add_with_offscreen (self, child); else - gsk_gpu_node_processor_add_node (self, child); + gsk_gpu_node_processor_add_node_untracked (self, child); + + gsk_gpu_frame_end_node (self->frame); self->opacity = old_opacity; } @@ -2334,7 +2387,7 @@ gsk_gpu_node_processor_add_first_node_no_blend (GskGpuNodeProcessor *self, self->blend = GSK_GPU_BLEND_NONE; self->pending_globals |= GSK_GPU_GLOBAL_BLEND; - gsk_gpu_node_processor_add_node (self, node); + gsk_gpu_node_processor_add_node_untracked (self, node); self->blend = GSK_GPU_BLEND_OVER; self->pending_globals |= GSK_GPU_GLOBAL_BLEND; @@ -3053,7 +3106,7 @@ gsk_gpu_node_processor_add_blur_node (GskGpuNodeProcessor *self, blur_radius = gsk_blur_node_get_radius (node); if (blur_radius <= 0.f) { - gsk_gpu_node_processor_add_node (self, child); + gsk_gpu_node_processor_add_node (self, child, 0); return; } @@ -3064,6 +3117,7 @@ gsk_gpu_node_processor_add_blur_node (GskGpuNodeProcessor *self, 
GSK_GPU_AS_IMAGE_SAMPLED_OUT_OF_BOUNDS, &clip_rect, child, + 0, &tex_rect); if (image == NULL) return; @@ -3103,6 +3157,7 @@ gsk_gpu_node_processor_add_shadow_node (GskGpuNodeProcessor *self, GSK_GPU_AS_IMAGE_SAMPLED_OUT_OF_BOUNDS, &clip_bounds, child, + 0, &tex_rect); if (image == NULL) return; @@ -3186,11 +3241,13 @@ gsk_gpu_node_processor_add_blend_node (GskGpuNodeProcessor *self, 0, NULL, bottom_child, + 0, &bottom_rect); top_image = gsk_gpu_node_processor_get_node_as_image (self, 0, NULL, top_child, + 1, &top_rect); if (bottom_image == NULL) @@ -3244,11 +3301,13 @@ gsk_gpu_node_processor_add_arithmetic_node (GskGpuNodeProcessor *self, 0, NULL, first_child, + 0, &first_rect); second_image = gsk_gpu_node_processor_get_node_as_image (self, 0, NULL, second_child, + 1, &second_rect); if (first_image == NULL) @@ -3299,12 +3358,12 @@ gsk_gpu_node_processor_add_cross_fade_node (GskGpuNodeProcessor *self, if (progress <= 0.0) { - gsk_gpu_node_processor_add_node (self, start_child); + gsk_gpu_node_processor_add_node (self, start_child, 0); return; } if (progress >= 1.0) { - gsk_gpu_node_processor_add_node (self, end_child); + gsk_gpu_node_processor_add_node (self, end_child, 1); return; } @@ -3312,11 +3371,13 @@ gsk_gpu_node_processor_add_cross_fade_node (GskGpuNodeProcessor *self, 0, NULL, start_child, + 0, &start_rect); end_image = gsk_gpu_node_processor_get_node_as_image (self, 0, NULL, end_child, + 1, &end_rect); if (start_image == NULL) @@ -3395,6 +3456,7 @@ gsk_gpu_node_processor_add_displacement_node (GskGpuNodeProcessor *self, 0, &child_bounds, child, + 0, &child_rect); if (child_image == NULL) return; @@ -3403,6 +3465,7 @@ gsk_gpu_node_processor_add_displacement_node (GskGpuNodeProcessor *self, 0, &bounds, displacement_child, + 1, &displacement_rect); if (displacement_image == NULL) return; /* technically we have to render TRANSPARENT everywhere */ @@ -3449,11 +3512,12 @@ gsk_gpu_node_processor_add_mask_node (GskGpuNodeProcessor *self, 0, &bounds, mask_child, + 
1, &mask_rect); if (mask_image == NULL) { if (mask_mode == GSK_MASK_MODE_INVERTED_ALPHA) - gsk_gpu_node_processor_add_node (self, source_child); + gsk_gpu_node_processor_add_node (self, source_child, 0); return; } @@ -3482,6 +3546,7 @@ gsk_gpu_node_processor_add_mask_node (GskGpuNodeProcessor *self, 0, &bounds, source_child, + 0, &source_rect); if (source_image == NULL) { @@ -3665,6 +3730,7 @@ gsk_gpu_node_processor_add_color_matrix_node (GskGpuNodeProcessor *self, 0, NULL, child, + 0, &tex_rect); if (image == NULL) return; @@ -3741,6 +3807,7 @@ gsk_gpu_node_processor_add_component_transfer_node (GskGpuNodeProcessor *self, 0, NULL, child, + 0, &tex_rect); if (image == NULL) return; @@ -3810,6 +3877,7 @@ gsk_gpu_node_processor_repeat_tile (GskGpuNodeProcessor *self, GSK_GPU_AS_IMAGE_EXACT_SIZE, &clipped_child_bounds, child, + 0, &clipped_child_bounds); g_return_if_fail (image); @@ -3851,6 +3919,7 @@ gsk_gpu_node_processor_add_repeat_node (GskGpuNodeProcessor *self, { gsk_gpu_node_processor_add_node_clipped (self, child, + 0, &node->bounds); return; } @@ -3877,6 +3946,7 @@ gsk_gpu_node_processor_add_repeat_node (GskGpuNodeProcessor *self, GSK_GPU_AS_IMAGE_EXACT_SIZE, &clipped_child_bounds, child, + 0, &clipped_child_bounds); g_return_if_fail (image); gsk_gpu_texture_op (self->frame, @@ -3903,6 +3973,7 @@ gsk_gpu_node_processor_add_repeat_node (GskGpuNodeProcessor *self, GSK_GPU_AS_IMAGE_EXACT_SIZE, &clipped_child_bounds, child, + 0, &clipped_child_bounds); g_return_if_fail (image); clipped_child_bounds.origin = pos; @@ -4008,6 +4079,7 @@ gsk_gpu_node_processor_add_repeat_node (GskGpuNodeProcessor *self, continue; gsk_gpu_node_processor_add_node_clipped (self, child, + 0, &clip_bounds); } } @@ -4069,6 +4141,7 @@ gsk_gpu_node_processor_add_fill_node (GskGpuNodeProcessor *self, 0, &clip_bounds, child, + 0, &source_rect); if (source_image != NULL) @@ -4146,6 +4219,7 @@ gsk_gpu_node_processor_add_stroke_node (GskGpuNodeProcessor *self, 0, &clip_bounds, child, + 0, 
&source_rect); if (source_image != NULL) { @@ -4181,7 +4255,7 @@ gsk_gpu_node_processor_add_subsurface_node (GskGpuNodeProcessor *self, gdk_subsurface_get_texture (subsurface) == NULL || gdk_subsurface_get_parent (subsurface) != gdk_draw_context_get_surface (gsk_gpu_frame_get_context (self->frame))) { - gsk_gpu_node_processor_add_node (self, gsk_subsurface_node_get_child (node)); + gsk_gpu_node_processor_add_node (self, gsk_subsurface_node_get_child (node), 0); return; } @@ -4243,7 +4317,8 @@ gsk_gpu_node_processor_add_first_subsurface_node (GskGpuNodeProcessor *self, { return gsk_gpu_node_processor_add_first_node (self, info, - gsk_subsurface_node_get_child (node)); + gsk_subsurface_node_get_child (node), + 0); } if (gdk_subsurface_is_above_parent (subsurface)) @@ -4266,6 +4341,8 @@ gsk_gpu_get_subsurface_node_as_image (GskGpuFrame *frame, GskRenderNode *node, graphene_rect_t *out_bounds) { + GskGpuImage *result; + GskRenderNode *child; #ifndef G_DISABLE_ASSERT GdkSubsurface *subsurface; @@ -4275,13 +4352,21 @@ gsk_gpu_get_subsurface_node_as_image (GskGpuFrame *frame, gdk_subsurface_get_parent (subsurface) != gdk_draw_context_get_surface (gsk_gpu_frame_get_context (frame))); #endif - return gsk_gpu_get_node_as_image (frame, - flags, - ccs, - clip_bounds, - scale, - gsk_subsurface_node_get_child (node), - out_bounds); + child = gsk_subsurface_node_get_child (node); + + gsk_gpu_frame_start_node (frame, child, 0); + + result = gsk_gpu_get_node_as_image (frame, + flags, + ccs, + clip_bounds, + scale, + child, + out_bounds); + + gsk_gpu_frame_end_node (frame); + + return result; } static void @@ -4393,6 +4478,7 @@ gsk_gpu_node_processor_add_composite_node (GskGpuNodeProcessor *self, 0, &bounds, gsk_composite_node_get_mask (node), + 1, &mask_rect); if (mask_image == NULL) return; @@ -4417,6 +4503,7 @@ gsk_gpu_node_processor_add_composite_node (GskGpuNodeProcessor *self, 0, &bounds, child, + 0, &child_rect); if (child_image == NULL) /* FIXME */ @@ -4480,12 +4567,14 @@ 
gsk_gpu_node_processor_add_isolation_node (GskGpuNodeProcessor *self, if (gsk_render_node_get_copy_mode (child) != GSK_COPY_NONE || gsk_render_node_clears_background (child)) { + gsk_gpu_frame_start_node (self->frame, child, 0); gsk_gpu_node_processor_add_with_offscreen (self, child); + gsk_gpu_frame_end_node (self->frame); return; } } - gsk_gpu_node_processor_add_node (self, child); + gsk_gpu_node_processor_add_node (self, child, 0); } static void @@ -4519,7 +4608,7 @@ gsk_gpu_node_processor_add_container_node (GskGpuNodeProcessor *self, i = 0; for (; i < n_children; i++) - gsk_gpu_node_processor_add_node (self, children[i]); + gsk_gpu_node_processor_add_node (self, children[i], i); } static gboolean @@ -4542,7 +4631,7 @@ gsk_gpu_node_processor_add_first_container_node (GskGpuNodeProcessor *self, for (i = n; i-- > 0; ) { - if (gsk_gpu_node_processor_add_first_node (self, info, children[i])) + if (gsk_gpu_node_processor_add_first_node (self, info, children[i], i)) break; } @@ -4550,7 +4639,7 @@ gsk_gpu_node_processor_add_first_container_node (GskGpuNodeProcessor *self, gsk_gpu_first_node_begin_rendering (self, info, GSK_VEC4_TRANSPARENT); for (i++; i < n; i++) - gsk_gpu_node_processor_add_node (self, children[i]); + gsk_gpu_node_processor_add_node (self, children[i], i); return TRUE; } @@ -4559,7 +4648,7 @@ static void gsk_gpu_node_processor_add_debug_node (GskGpuNodeProcessor *self, GskRenderNode *node) { - gsk_gpu_node_processor_add_node (self, gsk_debug_node_get_child (node)); + gsk_gpu_node_processor_add_node (self, gsk_debug_node_get_child (node), 0); } static gboolean @@ -4569,7 +4658,8 @@ gsk_gpu_node_processor_add_first_debug_node (GskGpuNodeProcessor *self, { return gsk_gpu_node_processor_add_first_node (self, info, - gsk_debug_node_get_child (node)); + gsk_debug_node_get_child (node), + 0); } static GskGpuImage * @@ -4581,13 +4671,24 @@ gsk_gpu_get_debug_node_as_image (GskGpuFrame *frame, GskRenderNode *node, graphene_rect_t *out_bounds) { - return 
gsk_gpu_get_node_as_image (frame, - flags, - ccs, - clip_bounds, - scale, - gsk_debug_node_get_child (node), - out_bounds); + GskGpuImage *result; + GskRenderNode *child; + + child = gsk_debug_node_get_child (node); + + gsk_gpu_frame_start_node (frame, child, 0); + + result = gsk_gpu_get_node_as_image (frame, + flags, + ccs, + clip_bounds, + scale, + child, + out_bounds); + + gsk_gpu_frame_end_node (frame); + + return result; } typedef enum { @@ -4880,8 +4981,8 @@ static const struct }; static void -gsk_gpu_node_processor_add_node (GskGpuNodeProcessor *self, - GskRenderNode *node) +gsk_gpu_node_processor_add_node_untracked (GskGpuNodeProcessor *self, + GskRenderNode *node) { GskRenderNodeType node_type; @@ -4924,9 +5025,9 @@ gsk_gpu_node_processor_add_node (GskGpuNodeProcessor *self, } static gboolean -gsk_gpu_node_processor_add_first_node (GskGpuNodeProcessor *self, - GskGpuFirstNodeInfo *info, - GskRenderNode *node) +gsk_gpu_node_processor_add_first_node_untracked (GskGpuNodeProcessor *self, + GskGpuFirstNodeInfo *info, + GskRenderNode *node) { GskRenderNodeType node_type; graphene_rect_t opaque; @@ -4956,11 +5057,28 @@ gsk_gpu_node_processor_add_first_node (GskGpuNodeProcessor *self, return FALSE; gsk_gpu_first_node_begin_rendering (self, info, GSK_VEC4_TRANSPARENT); - gsk_gpu_node_processor_add_node (self, node); + gsk_gpu_node_processor_add_node_untracked (self, node); return TRUE; } +static gboolean +gsk_gpu_node_processor_add_first_node (GskGpuNodeProcessor *self, + GskGpuFirstNodeInfo *info, + GskRenderNode *node, + gsize pos) +{ + gboolean result; + + gsk_gpu_frame_start_node (self->frame, node, pos); + + result = gsk_gpu_node_processor_add_first_node_untracked (self, info, node); + + gsk_gpu_frame_end_node (self->frame); + + return result; +} + /* * gsk_gpu_get_node_as_image: * @frame: frame to render in @@ -4969,6 +5087,7 @@ gsk_gpu_node_processor_add_first_node (GskGpuNodeProcessor *self, * @clip_bounds: region of node that must be included in image * 
@scale: scale factor to use for the image * @node: the node to render + * @pos: position in child to do tracking with or -1 for no tracking * @out_bounds: the actual bounds of the result * * Get the part of the node indicated by the clip bounds as an image. @@ -5086,12 +5205,12 @@ gsk_gpu_node_processor_render (GskGpuNodeProcessor *self, gsk_gpu_node_processor_set_scissor (self, &rect); - if (!gsk_gpu_node_processor_add_first_node (self, - &info, - node)) + if (!gsk_gpu_node_processor_add_first_node_untracked (self, + &info, + node)) { gsk_gpu_first_node_begin_rendering (self, &info, GSK_VEC4_TRANSPARENT); - gsk_gpu_node_processor_add_node (self, node); + gsk_gpu_node_processor_add_node_untracked (self, node); do_culling = FALSE; } else if (GSK_DEBUG_CHECK (OCCLUSION)) @@ -5119,12 +5238,12 @@ gsk_gpu_node_processor_render (GskGpuNodeProcessor *self, /* only run pass if it's covering the whole rectangle */ info.min_occlusion_pixels = rect.width * rect.height; - if (!gsk_gpu_node_processor_add_first_node (self, - &info, - node)) + if (!gsk_gpu_node_processor_add_first_node_untracked (self, + &info, + node)) { gsk_gpu_first_node_begin_rendering (self, &info, GSK_VEC4_TRANSPARENT); - gsk_gpu_node_processor_add_node (self, node); + gsk_gpu_node_processor_add_node_untracked (self, node); } } diff --git a/gsk/gpu/gskgpuop.c b/gsk/gpu/gskgpuop.c index caa7cd4b76e058757658b6c0dce833858c307dc5..8ecce171f965bb42a24a6497a067873c903e9135 100644 --- a/gsk/gpu/gskgpuop.c +++ b/gsk/gpu/gskgpuop.c @@ -4,18 +4,6 @@ #include "gskgpuframeprivate.h" -GskGpuOp * -gsk_gpu_op_alloc (GskGpuFrame *frame, - const GskGpuOpClass *op_class) -{ - GskGpuOp *op; - - op = gsk_gpu_frame_alloc_op (frame, op_class->size); - op->op_class = op_class; - - return op; -} - void gsk_gpu_op_finish (GskGpuOp *op) { diff --git a/gsk/gpu/gskgpuopprivate.h b/gsk/gpu/gskgpuopprivate.h index 47007133f21bdbc82232394adc8c16648e36d3f3..62c7e7e695bc6887798df09bf198d5175201ce35 100644 --- a/gsk/gpu/gskgpuopprivate.h 
+++ b/gsk/gpu/gskgpuopprivate.h @@ -52,6 +52,7 @@ struct _GskGpuOp { const GskGpuOpClass *op_class; + gsize node_id; /* debug info maintained by frame, usually 0 */ GskGpuOp *next; }; @@ -80,8 +81,6 @@ struct _GskGpuOpClass /* ensures alignment of ops to multiples of 16 bytes - and that makes graphene happy */ #define GSK_GPU_OP_SIZE(struct_name) ((sizeof(struct_name) + 15) & ~15) -GskGpuOp * gsk_gpu_op_alloc (GskGpuFrame *frame, - const GskGpuOpClass *op_class); void gsk_gpu_op_finish (GskGpuOp *op); void gsk_gpu_op_print (GskGpuOp *op, diff --git a/gsk/gpu/gskgpurenderer.c b/gsk/gpu/gskgpurenderer.c index b0d2dfe91db33ee63fbd81a162cbbda631543860..fd5d0a78be4cae6fde298537dc4097406eef8fc1 100644 --- a/gsk/gpu/gskgpurenderer.c +++ b/gsk/gpu/gskgpurenderer.c @@ -33,6 +33,7 @@ static const GdkDebugKey gsk_gpu_optimization_keys[] = { { "to-image", GSK_GPU_OPTIMIZE_TO_IMAGE, "Don't fast-path creation of images for nodes" }, { "occlusion", GSK_GPU_OPTIMIZE_OCCLUSION_CULLING, "Disable occlusion culling via opaque node tracking" }, { "repeat", GSK_GPU_OPTIMIZE_REPEAT, "Repeat drawing operations instead of using offscreen and GL_REPEAT" }, + { "profile", GSK_GPU_OPTIMIZE_PROFILE, "Disable profiling support" }, }; typedef struct _GskGpuRendererPrivate GskGpuRendererPrivate; @@ -79,7 +80,11 @@ gsk_gpu_renderer_create_frame (GskGpuRenderer *self) GskGpuRendererClass *klass = GSK_GPU_RENDERER_GET_CLASS (self); GskGpuFrame *result; - result = g_object_new (klass->frame_type, NULL); + if (GSK_RENDERER_DEBUG_CHECK (GSK_RENDERER (self), PROFILE) && + priv->optimizations & GSK_GPU_OPTIMIZE_PROFILE) + result = g_object_new (klass->profile_frame_type, NULL); + else + result = g_object_new (klass->frame_type, NULL); gsk_gpu_frame_setup (result, self, priv->device, priv->optimizations); diff --git a/gsk/gpu/gskgpurendererprivate.h b/gsk/gpu/gskgpurendererprivate.h index a0c5e24830b6af06cdb57a248973e83756840bc7..ed45c9a80604398def8a445eed2bdc15577f6f7b 100644 --- 
a/gsk/gpu/gskgpurendererprivate.h +++ b/gsk/gpu/gskgpurendererprivate.h @@ -23,6 +23,7 @@ struct _GskGpuRendererClass GskRendererClass parent_class; GType frame_type; + GType profile_frame_type; GskGpuOptimizations optimizations; /* subclasses cannot override this */ GskGpuDevice * (* get_device) (GdkDisplay *display, diff --git a/gsk/gpu/gskgpurenderpassop.c b/gsk/gpu/gskgpurenderpassop.c index cd0a23e6ab6fcc9a651c31a4dcb26b778ede0db0..d1f2ccb5d30aa8e4afafcf0379515225225408eb 100644 --- a/gsk/gpu/gskgpurenderpassop.c +++ b/gsk/gpu/gskgpurenderpassop.c @@ -186,15 +186,7 @@ gsk_gpu_render_pass_op_vk_command (GskGpuOp *op, }, VK_SUBPASS_CONTENTS_INLINE); - op = op->next; - while (op->op_class->stage != GSK_GPU_STAGE_END_PASS) - { - op = gsk_gpu_op_vk_command (op, frame, state); - } - - op = gsk_gpu_op_vk_command (op, frame, state); - - return op; + return op->next; } #endif @@ -229,15 +221,7 @@ gsk_gpu_render_pass_op_gl_command (GskGpuOp *op, glClear (GL_COLOR_BUFFER_BIT); } - op = op->next; - while (op->op_class->stage != GSK_GPU_STAGE_END_PASS) - { - op = gsk_gpu_op_gl_command (op, frame, state); - } - - op = gsk_gpu_op_gl_command (op, frame, state); - - return op; + return op->next; } static const GskGpuOpClass GSK_GPU_RENDER_PASS_OP_CLASS = { @@ -366,7 +350,7 @@ gsk_gpu_render_pass_begin_op (GskGpuFrame *frame, g_assert (load_op != GSK_GPU_LOAD_OP_CLEAR || clear_color != NULL); - self = (GskGpuRenderPassOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_RENDER_PASS_OP_CLASS); + self = (GskGpuRenderPassOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_RENDER_PASS_OP_CLASS); self->target = g_object_ref (image); self->area = *area; @@ -388,7 +372,7 @@ gsk_gpu_render_pass_end_op (GskGpuFrame *frame, { GskGpuFramePassEndOp *self; - self = (GskGpuFramePassEndOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_RENDER_PASS_END_OP_CLASS); + self = (GskGpuFramePassEndOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_RENDER_PASS_END_OP_CLASS); self->target = g_object_ref (image); self->pass_type = 
pass_type; diff --git a/gsk/gpu/gskgpuscissorop.c b/gsk/gpu/gskgpuscissorop.c index 34325082bb9818491e0cd04430c90205d6b454fc..62a905dfa1cdfe8034c17c6aab726a68b6a4faeb 100644 --- a/gsk/gpu/gskgpuscissorop.c +++ b/gsk/gpu/gskgpuscissorop.c @@ -2,6 +2,7 @@ #include "gskgpuscissoropprivate.h" +#include "gskgpuframeprivate.h" #include "gskgpuopprivate.h" #include "gskgpuprintprivate.h" @@ -84,7 +85,7 @@ gsk_gpu_scissor_op (GskGpuFrame *frame, { GskGpuScissorOp *self; - self = (GskGpuScissorOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_SCISSOR_OP_CLASS); + self = (GskGpuScissorOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_SCISSOR_OP_CLASS); self->rect = *rect; } diff --git a/gsk/gpu/gskgpushaderop.c b/gsk/gpu/gskgpushaderop.c index 902306f9ddc0d3c69bbd36f9df4e402ba26f095d..a2af7c0e572f0427fc82fc1f3c82968da2aff096 100644 --- a/gsk/gpu/gskgpushaderop.c +++ b/gsk/gpu/gskgpushaderop.c @@ -85,6 +85,7 @@ gsk_gpu_shader_op_vk_command (GskGpuOp *op, GskGpuShaderOp *next_shader = (GskGpuShaderOp *) next; if (next->op_class != op->op_class || + next->node_id != op->node_id || next_shader->flags != self->flags || next_shader->color_states != self->color_states || next_shader->variation != self->variation || @@ -197,6 +198,7 @@ gsk_gpu_shader_op_gl_command (GskGpuOp *op, GskGpuShaderOp *next_shader = (GskGpuShaderOp *) next; if (next->op_class != op->op_class || + next->node_id != op->node_id || next_shader->flags != self->flags || next_shader->color_states != self->color_states || next_shader->variation != self->variation || @@ -282,7 +284,7 @@ gsk_gpu_shader_op_alloc (GskGpuFrame *frame, else { GskGpuShaderOp *self; - self = (GskGpuShaderOp *) gsk_gpu_op_alloc (frame, &op_class->parent_class); + self = (GskGpuShaderOp *) gsk_gpu_frame_alloc_op (frame, &op_class->parent_class); self->flags = flags; self->color_states = color_states; diff --git a/gsk/gpu/gskgputypesprivate.h b/gsk/gpu/gskgputypesprivate.h index 
9e6c852611d9d2d23ec69097d69c9823ad786e18..22569b35c72656d82955307b5e950b8c87d408f8 100644 --- a/gsk/gpu/gskgputypesprivate.h +++ b/gsk/gpu/gskgputypesprivate.h @@ -158,5 +158,6 @@ typedef enum { GSK_GPU_OPTIMIZE_OCCLUSION_CULLING = 1 << 6, GSK_GPU_OPTIMIZE_REPEAT = 1 << 7, GSK_GPU_OPTIMIZE_DUAL_BLEND = 1 << 8, + GSK_GPU_OPTIMIZE_PROFILE = 1 << 9, } GskGpuOptimizations; diff --git a/gsk/gpu/gskgpuuploadop.c b/gsk/gpu/gskgpuuploadop.c index ae3b25912b1772212cbcd185dab7b434943c0b47..ff6a47a4ea6776ead7d38613e9ea4881a440dcf6 100644 --- a/gsk/gpu/gskgpuuploadop.c +++ b/gsk/gpu/gskgpuuploadop.c @@ -420,7 +420,7 @@ gsk_gpu_upload_texture_op_try (GskGpuFrame *frame, g_type_class_unref (enum_class); } - self = (GskGpuUploadTextureOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_UPLOAD_TEXTURE_OP_CLASS); + self = (GskGpuUploadTextureOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_UPLOAD_TEXTURE_OP_CLASS); self->texture = g_object_ref (texture); self->lod_level = lod_level; @@ -599,7 +599,7 @@ gsk_gpu_upload_cairo_into_op (GskGpuFrame *frame, { GskGpuUploadCairoOp *self; - self = (GskGpuUploadCairoOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_UPLOAD_CAIRO_OP_CLASS); + self = (GskGpuUploadCairoOp *) gsk_gpu_frame_alloc_op (frame, &GSK_GPU_UPLOAD_CAIRO_OP_CLASS); self->image = g_object_ref (image); self->area = *area; diff --git a/gsk/gpu/gskvulkandebugframe.c b/gsk/gpu/gskvulkandebugframe.c new file mode 100644 index 0000000000000000000000000000000000000000..160cd6891f9a828708cad15e4e91be6d52b0de70 --- /dev/null +++ b/gsk/gpu/gskvulkandebugframe.c @@ -0,0 +1,438 @@ +#include "config.h" + +#include "gskvulkandebugframeprivate.h" + +#include "gskgpuopprivate.h" +#include "gskdebugnodeprivate.h" +#include "gskrendernodeprivate.h" +#include "gskrenderreplay.h" + +#include "gtk/inspector/window.h" + +typedef struct _GskVulkanDebugEntry GskVulkanDebugEntry; + +/* for position tracking, we can't use 0 because that's a valid index */ +#define NO_ITEM G_MAXSIZE + +struct _GskVulkanDebugEntry { + 
GskRenderNode *node; + guint pos; + /* positions in the debug array or NO_ITEM if none */ + gsize parent; + gsize first_child; + GskDebugProfile profile; +}; + +#define GDK_ARRAY_NAME gsk_vulkan_debug +#define GDK_ARRAY_TYPE_NAME GskVulkanDebug +#define GDK_ARRAY_ELEMENT_TYPE GskVulkanDebugEntry +#define GDK_ARRAY_BY_VALUE 1 +#include "gdk/gdkarrayimpl.c" + +struct _GskVulkanDebugFrame +{ + GskVulkanFrame parent_instance; + + GskRenderNode *node; + + gsize n_ops; + GskVulkanDebug debug; + gsize debug_current; + + float vk_timestamp_scale; + gsize pool_size; + + VkQueryPool vk_timestamp_pool; + uint64_t *timestamp_pool_values; + gsize *timestamp_pool_nodes; + VkQueryPool vk_pixels_pool; + uint64_t *pixels_pool_values; +}; + +struct _GskVulkanDebugFrameClass +{ + GskVulkanFrameClass parent_class; +}; + +G_DEFINE_TYPE (GskVulkanDebugFrame, gsk_vulkan_debug_frame, GSK_TYPE_VULKAN_FRAME) + +static void +gsk_vulkan_debug_frame_submit_ops (GskVulkanFrame *frame, + GskVulkanCommandState *state, + GskGpuOp *op) +{ + GskVulkanDebugFrame *self = GSK_VULKAN_DEBUG_FRAME (frame); + + if (self->n_ops > self->pool_size) + { + GskVulkanDevice *device; + VkDevice vk_device; + + device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self))); + vk_device = gsk_vulkan_device_get_vk_device (device); + + g_free (self->timestamp_pool_values); + g_free (self->timestamp_pool_nodes); + vkDestroyQueryPool (vk_device, + self->vk_timestamp_pool, + NULL); + /* reserve 50% more than needed */ + self->pool_size = 3 * self->n_ops / 2; + + self->timestamp_pool_values = g_new (uint64_t, self->pool_size * 2); + self->timestamp_pool_nodes = g_new (uint64_t, self->pool_size); + GSK_VK_CHECK (vkCreateQueryPool, vk_device, + &(VkQueryPoolCreateInfo) { + .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, + .flags = 0, + VK_QUERY_TYPE_TIMESTAMP, + .queryCount = self->pool_size * 2, + }, + NULL, + &self->vk_timestamp_pool); + + self->pixels_pool_values = g_new (uint64_t, self->pool_size); + 
GSK_VK_CHECK (vkCreateQueryPool, vk_device, + &(VkQueryPoolCreateInfo) { + .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, + .flags = 0, + VK_QUERY_TYPE_PIPELINE_STATISTICS, + .queryCount = self->pool_size, + .pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT, + }, + NULL, + &self->vk_pixels_pool); + } + + vkCmdResetQueryPool (state->vk_command_buffer, self->vk_timestamp_pool, 0, self->n_ops * 2); + self->n_ops = 0; + + while (op) + { + if (op->node_id == NO_ITEM) + { + op = gsk_gpu_op_vk_command (op, GSK_GPU_FRAME (frame), state); + } + else + { + GskVulkanDebugEntry *entry; + + self->timestamp_pool_nodes[self->n_ops] = op->node_id; + vkCmdBeginQuery (state->vk_command_buffer, + self->vk_pixels_pool, + self->n_ops, + 0); + vkCmdWriteTimestamp (state->vk_command_buffer, + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + self->vk_timestamp_pool, + self->n_ops * 2); + entry = gsk_vulkan_debug_get (&self->debug, op->node_id); + entry->profile.self.cpu_submit_ns -= g_get_monotonic_time () * 1000; + + op = gsk_gpu_op_vk_command (op, GSK_GPU_FRAME (frame), state); + + entry->profile.self.cpu_submit_ns += g_get_monotonic_time () * 1000; + + vkCmdWriteTimestamp (state->vk_command_buffer, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, + self->vk_timestamp_pool, + self->n_ops * 2 + 1); + vkCmdEndQuery (state->vk_command_buffer, + self->vk_pixels_pool, + self->n_ops); + self->n_ops++; + } + } +} + +static void +gsk_vulkan_debug_frame_setup (GskGpuFrame *frame) +{ + GskVulkanDebugFrame *self = GSK_VULKAN_DEBUG_FRAME (frame); + GskVulkanDevice *device; + VkPhysicalDeviceProperties vk_props; + + GSK_GPU_FRAME_CLASS (gsk_vulkan_debug_frame_parent_class)->setup (frame); + + device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)); + + vkGetPhysicalDeviceProperties (gsk_vulkan_device_get_vk_physical_device (device), &vk_props); + self->vk_timestamp_scale = vk_props.limits.timestampPeriod; +} + +static GskRenderNode * +gsk_vulkan_debug_frame_filter_node 
(GskRenderReplay *replay, + GskRenderNode *node, + gpointer user_data) +{ + GskVulkanDebugFrame *self = user_data; + GskVulkanDebugEntry *entry; + GskRenderNode *result, *child; + gsize pos; + + pos = self->debug_current; + /* node wasn't rendered */ + if (pos == NO_ITEM) + return gsk_render_replay_default (replay, node); + + entry = gsk_vulkan_debug_get (&self->debug, pos); + + self->debug_current = entry->first_child; + child = gsk_render_replay_default (replay, node); + + entry->profile.self.cpu_record_ns = entry->profile.total.cpu_record_ns; + entry->profile.total.cpu_submit_ns = entry->profile.self.cpu_submit_ns; + entry->profile.total.gpu_ns = entry->profile.self.gpu_ns; + entry->profile.total.gpu_pixels = entry->profile.self.gpu_pixels; + if (entry->first_child != NO_ITEM) + { + gsize i, n_children; + + gsk_render_node_get_children (entry->node, &n_children); + for (i = 0; i < n_children; i++) + { + GskVulkanDebugEntry *child_entry = gsk_vulkan_debug_get (&self->debug, entry->first_child + i); + entry->profile.self.cpu_record_ns -= child_entry->profile.total.cpu_record_ns; + entry->profile.total.cpu_submit_ns += child_entry->profile.total.cpu_submit_ns; + entry->profile.total.gpu_ns += child_entry->profile.total.gpu_ns; + entry->profile.total.gpu_pixels += child_entry->profile.total.gpu_pixels; + } + } + entry->profile.self.cpu_ns = entry->profile.self.cpu_record_ns + entry->profile.self.cpu_submit_ns; + entry->profile.total.cpu_ns = entry->profile.total.cpu_record_ns + entry->profile.total.cpu_submit_ns; + + result = gsk_debug_node_new_profile (child, + &entry->profile, + g_strdup_printf ("record total: %lluns\n" + "record self : %lluns\n" + "submit total: %lluns\n" + "submit self : %lluns\n" + "GPU total : %lluns\n" + "GPU self : %lluns\n" + "pixels total: %llu\n" + "pixels self : %llu", + (long long unsigned) entry->profile.total.cpu_record_ns, + (long long unsigned) entry->profile.self.cpu_record_ns, + (long long unsigned) 
entry->profile.total.cpu_submit_ns, + (long long unsigned) entry->profile.self.cpu_submit_ns, + (long long unsigned) entry->profile.total.gpu_ns, + (long long unsigned) entry->profile.self.gpu_ns, + (long long unsigned) entry->profile.total.gpu_pixels, + (long long unsigned) entry->profile.self.gpu_pixels)); + gsk_render_node_unref (child); + + self->debug_current = pos + 1; + + return result; +} + +static void +gsk_vulkan_debug_frame_process (GskVulkanDebugFrame *self) +{ + GskRenderReplay *replay; + GskRenderNode *result; + + g_assert (self->debug_current == NO_ITEM); + + self->debug_current = 0; + replay = gsk_render_replay_new (); + gsk_render_replay_set_node_filter (replay, + gsk_vulkan_debug_frame_filter_node, + self, + NULL); + result = gsk_render_replay_filter_node (replay, self->node); + gsk_render_replay_free (replay); + + gtk_inspector_add_profile_node (gsk_gpu_device_get_display (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self))), + self->node, + result); + + gsk_render_node_unref (result); + self->debug_current = NO_ITEM; +} + +static void +gsk_vulkan_debug_frame_cleanup (GskGpuFrame *frame) +{ + GskVulkanDebugFrame *self = GSK_VULKAN_DEBUG_FRAME (frame); + GskVulkanDevice *device; + VkDevice vk_device; + gsize i; + + device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)); + vk_device = gsk_vulkan_device_get_vk_device (device); + + GSK_VK_CHECK (vkGetQueryPoolResults, vk_device, + self->vk_timestamp_pool, + 0, + 2 * self->n_ops, + 2 * self->n_ops * sizeof (uint64_t), + self->timestamp_pool_values, + sizeof (uint64_t), + VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); + GSK_VK_CHECK (vkGetQueryPoolResults, vk_device, + self->vk_pixels_pool, + 0, + self->n_ops, + self->n_ops * sizeof (uint64_t), + self->pixels_pool_values, + sizeof (uint64_t), + VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); + for (i = 0; i < self->n_ops; i++) + { + GskVulkanDebugEntry *entry; + + g_assert (self->timestamp_pool_nodes[i] < gsk_vulkan_debug_get_size 
(&self->debug)); + entry = gsk_vulkan_debug_get (&self->debug, self->timestamp_pool_nodes[i]); + entry->profile.self.gpu_ns += (self->timestamp_pool_values[2 * i + 1] - self->timestamp_pool_values[2 * i]) + * self->vk_timestamp_scale; + entry->profile.self.gpu_pixels += self->pixels_pool_values[i]; + } + + if (self->node) + gsk_vulkan_debug_frame_process (self); + + gsk_vulkan_debug_clear (&self->debug); + g_clear_pointer (&self->node, gsk_render_node_unref); + + self->n_ops = 0; + g_assert (self->debug_current == NO_ITEM); + + GSK_GPU_FRAME_CLASS (gsk_vulkan_debug_frame_parent_class)->cleanup (frame); +} + +static gpointer +gsk_vulkan_debug_frame_alloc_op (GskGpuFrame *frame, + const GskGpuOpClass *op_class) +{ + GskVulkanDebugFrame *self = GSK_VULKAN_DEBUG_FRAME (frame); + GskGpuOp *op; + + self->n_ops++; + + op = GSK_GPU_FRAME_CLASS (gsk_vulkan_debug_frame_parent_class)->alloc_op (frame, op_class); + op->node_id = self->debug_current; + + return op; +} + +static void +gsk_vulkan_debug_frame_start_node (GskGpuFrame *frame, + GskRenderNode *node, + gsize pos) +{ + GskVulkanDebugFrame *self = GSK_VULKAN_DEBUG_FRAME (frame); + GskVulkanDebugEntry *entry; + + GSK_GPU_FRAME_CLASS (gsk_vulkan_debug_frame_parent_class)->start_node (frame, node, pos); + + if (self->debug_current == NO_ITEM) + { + if (gsk_vulkan_debug_get_size (&self->debug) == 0) + { + gsk_vulkan_debug_append (&self->debug, + &(GskVulkanDebugEntry) { + .node = node, + .pos = pos, + .parent = NO_ITEM, + .first_child = NO_ITEM + }); + self->node = gsk_render_node_ref (node); + } + self->debug_current = 0; + } + else + { + GskVulkanDebugEntry *cur; + + cur = gsk_vulkan_debug_get (&self->debug, self->debug_current); + if (cur->first_child == NO_ITEM) + { + GskRenderNode **children; + gsize i, n_children; + + children = gsk_render_node_get_children (cur->node, &n_children); + g_assert (n_children > 0); + cur->first_child = gsk_vulkan_debug_get_size (&self->debug); + + for (i = 0; i < n_children; i++) + { + 
gsk_vulkan_debug_append (&self->debug, + &(GskVulkanDebugEntry) { + .node = children[i], + .pos = i, + .parent = self->debug_current, + .first_child = NO_ITEM, + }); + } + } + + self->debug_current = cur->first_child + pos; + } + + entry = gsk_vulkan_debug_get (&self->debug, self->debug_current); + + entry->profile.total.cpu_record_ns -= g_get_monotonic_time () * 1000; +} + +static void +gsk_vulkan_debug_frame_end_node (GskGpuFrame *frame) +{ + GskVulkanDebugFrame *self = GSK_VULKAN_DEBUG_FRAME (frame); + GskVulkanDebugEntry *entry; + + entry = gsk_vulkan_debug_get (&self->debug, self->debug_current); + + entry->profile.total.cpu_record_ns += g_get_monotonic_time () * 1000; + + self->debug_current = entry->parent; + + GSK_GPU_FRAME_CLASS (gsk_vulkan_debug_frame_parent_class)->end_node (frame); +} + +static void +gsk_vulkan_debug_frame_finalize (GObject *object) +{ + GskVulkanDebugFrame *self = GSK_VULKAN_DEBUG_FRAME (object); + GskVulkanDevice *device; + VkDevice vk_device; + + device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self))); + vk_device = gsk_vulkan_device_get_vk_device (device); + + g_free (self->timestamp_pool_values); + g_free (self->timestamp_pool_nodes); + vkDestroyQueryPool (vk_device, + self->vk_timestamp_pool, + NULL); + + gsk_vulkan_debug_clear (&self->debug); + + G_OBJECT_CLASS (gsk_vulkan_debug_frame_parent_class)->finalize (object); +} + +static void +gsk_vulkan_debug_frame_class_init (GskVulkanDebugFrameClass *klass) +{ + GskVulkanFrameClass *vulkan_frame_class = GSK_VULKAN_FRAME_CLASS (klass); + GskGpuFrameClass *gpu_frame_class = GSK_GPU_FRAME_CLASS (klass); + GObjectClass *object_class = G_OBJECT_CLASS (klass); + + vulkan_frame_class->submit_ops = gsk_vulkan_debug_frame_submit_ops; + + gpu_frame_class->setup = gsk_vulkan_debug_frame_setup; + gpu_frame_class->cleanup = gsk_vulkan_debug_frame_cleanup; + gpu_frame_class->alloc_op = gsk_vulkan_debug_frame_alloc_op; + gpu_frame_class->start_node = 
gsk_vulkan_debug_frame_start_node; + gpu_frame_class->end_node = gsk_vulkan_debug_frame_end_node; + + object_class->finalize = gsk_vulkan_debug_frame_finalize; +} + +static void +gsk_vulkan_debug_frame_init (GskVulkanDebugFrame *self) +{ + self->debug_current = NO_ITEM; + gsk_vulkan_debug_init (&self->debug); +} + diff --git a/gsk/gpu/gskvulkandebugframeprivate.h b/gsk/gpu/gskvulkandebugframeprivate.h new file mode 100644 index 0000000000000000000000000000000000000000..fa51d3327ec3b7ebbd55a36fffced49eef27f468 --- /dev/null +++ b/gsk/gpu/gskvulkandebugframeprivate.h @@ -0,0 +1,12 @@ +#pragma once + +#include "gskvulkanframeprivate.h" + +G_BEGIN_DECLS + +#define GSK_TYPE_VULKAN_DEBUG_FRAME (gsk_vulkan_debug_frame_get_type ()) + +G_DECLARE_FINAL_TYPE (GskVulkanDebugFrame, gsk_vulkan_debug_frame, GSK, VULKAN_DEBUG_FRAME, GskVulkanFrame) + + +G_END_DECLS diff --git a/gsk/gpu/gskvulkanframe.c b/gsk/gpu/gskvulkanframe.c index fa44a6ad63cb53b7caa13b00c70536a12351a561..59b97825b7e427829afb273e0e5910b44e20c9ea 100644 --- a/gsk/gpu/gskvulkanframe.c +++ b/gsk/gpu/gskvulkanframe.c @@ -45,10 +45,10 @@ struct _GskVulkanSemaphores GskSemaphores signal_semaphores; }; -struct _GskVulkanFrame -{ - GskGpuFrame parent_instance; +typedef struct _GskVulkanFramePrivate GskVulkanFramePrivate; +struct _GskVulkanFramePrivate +{ VkSemaphore vk_acquire_semaphore; VkFence vk_fence; VkCommandBuffer vk_command_buffer; @@ -58,23 +58,19 @@ struct _GskVulkanFrame gsize pool_n_buffers; }; -struct _GskVulkanFrameClass -{ - GskGpuFrameClass parent_class; -}; - -G_DEFINE_TYPE (GskVulkanFrame, gsk_vulkan_frame, GSK_TYPE_GPU_FRAME) +G_DEFINE_TYPE_WITH_PRIVATE (GskVulkanFrame, gsk_vulkan_frame, GSK_TYPE_GPU_FRAME) static gboolean gsk_vulkan_frame_is_busy (GskGpuFrame *frame) { GskVulkanFrame *self = GSK_VULKAN_FRAME (frame); + GskVulkanFramePrivate *priv = gsk_vulkan_frame_get_instance_private (self); VkDevice device; VkResult res; device = gsk_vulkan_device_get_vk_device (GSK_VULKAN_DEVICE 
(gsk_gpu_frame_get_device (frame))); - res = vkGetFenceStatus (device, self->vk_fence); + res = vkGetFenceStatus (device, priv->vk_fence); if (res == VK_NOT_READY) return TRUE; @@ -86,13 +82,14 @@ static void gsk_vulkan_frame_wait (GskGpuFrame *frame) { GskVulkanFrame *self = GSK_VULKAN_FRAME (frame); + GskVulkanFramePrivate *priv = gsk_vulkan_frame_get_instance_private (self); VkDevice vk_device; vk_device = gsk_vulkan_device_get_vk_device (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame))); GSK_VK_CHECK (vkWaitForFences, vk_device, 1, - &self->vk_fence, + &priv->vk_fence, VK_FALSE, INT64_MAX); } @@ -101,6 +98,7 @@ static void gsk_vulkan_frame_setup (GskGpuFrame *frame) { GskVulkanFrame *self = GSK_VULKAN_FRAME (frame); + GskVulkanFramePrivate *priv = gsk_vulkan_frame_get_instance_private (self); GskVulkanDevice *device; VkDevice vk_device; VkCommandPool vk_command_pool; @@ -116,27 +114,28 @@ gsk_vulkan_frame_setup (GskGpuFrame *frame) .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, .commandBufferCount = 1, }, - &self->vk_command_buffer); + &priv->vk_command_buffer); GDK_VK_CHECK (vkCreateSemaphore, vk_device, &(VkSemaphoreCreateInfo) { .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, }, NULL, - &self->vk_acquire_semaphore); + &priv->vk_acquire_semaphore); GSK_VK_CHECK (vkCreateFence, vk_device, &(VkFenceCreateInfo) { .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, }, NULL, - &self->vk_fence); + &priv->vk_fence); } static void gsk_vulkan_frame_cleanup (GskGpuFrame *frame) { GskVulkanFrame *self = GSK_VULKAN_FRAME (frame); + GskVulkanFramePrivate *priv = gsk_vulkan_frame_get_instance_private (self); GskVulkanDevice *device; VkDevice vk_device; @@ -145,15 +144,15 @@ gsk_vulkan_frame_cleanup (GskGpuFrame *frame) GSK_VK_CHECK (vkWaitForFences, vk_device, 1, - &self->vk_fence, + &priv->vk_fence, VK_TRUE, INT64_MAX); GSK_VK_CHECK (vkResetFences, vk_device, 1, - &self->vk_fence); + &priv->vk_fence); - GSK_VK_CHECK (vkResetCommandBuffer, self->vk_command_buffer, + 
GSK_VK_CHECK (vkResetCommandBuffer, priv->vk_command_buffer, 0); GSK_GPU_FRAME_CLASS (gsk_vulkan_frame_parent_class)->cleanup (frame); @@ -167,10 +166,11 @@ gsk_vulkan_frame_begin (GskGpuFrame *frame, const graphene_rect_t *opaque) { GskVulkanFrame *self = GSK_VULKAN_FRAME (frame); + GskVulkanFramePrivate *priv = gsk_vulkan_frame_get_instance_private (self); gdk_draw_context_begin_frame_full (context, /* We pass a pointer here for 32bit architectures */ - &self->vk_acquire_semaphore, + &priv->vk_acquire_semaphore, depth, region, opaque); @@ -291,6 +291,17 @@ gsk_vulkan_frame_write_texture_vertex_data (GskGpuFrame *self, { } +static void +gsk_vulkan_frame_submit_ops (GskVulkanFrame *frame, + GskVulkanCommandState *state, + GskGpuOp *op) +{ + while (op) + { + op = gsk_gpu_op_vk_command (op, GSK_GPU_FRAME (frame), state); + } +} + static void gsk_vulkan_frame_submit (GskGpuFrame *frame, GskRenderPassType pass_type, @@ -299,20 +310,21 @@ gsk_vulkan_frame_submit (GskGpuFrame *frame, GskGpuOp *op) { GskVulkanFrame *self = GSK_VULKAN_FRAME (frame); + GskVulkanFramePrivate *priv = gsk_vulkan_frame_get_instance_private (self); GskVulkanDevice *device; GskVulkanSemaphores semaphores; GskVulkanCommandState state = { 0, }; device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame)); - GSK_VK_CHECK (vkBeginCommandBuffer, self->vk_command_buffer, + GSK_VK_CHECK (vkBeginCommandBuffer, priv->vk_command_buffer, &(VkCommandBufferBeginInfo) { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, }); if (vertex_buffer) - vkCmdBindVertexBuffers (self->vk_command_buffer, + vkCmdBindVertexBuffers (priv->vk_command_buffer, 0, 1, (VkBuffer[1]) { @@ -329,32 +341,29 @@ gsk_vulkan_frame_submit (GskGpuFrame *frame, { GdkVulkanContext *context = GDK_VULKAN_CONTEXT (gsk_gpu_frame_get_context (frame)); gsk_vulkan_semaphores_add_wait (&semaphores, - self->vk_acquire_semaphore, + priv->vk_acquire_semaphore, 0, 
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT); gsk_vulkan_semaphores_add_signal (&semaphores, gdk_vulkan_context_get_present_semaphore (context)); } - state.vk_command_buffer = self->vk_command_buffer; + state.vk_command_buffer = priv->vk_command_buffer; state.vk_render_pass = VK_NULL_HANDLE; state.vk_format = VK_FORMAT_UNDEFINED; state.blend = GSK_GPU_BLEND_OVER; /* should we have a BLEND_NONE? */ state.semaphores = &semaphores; - while (op) - { - op = gsk_gpu_op_vk_command (op, frame, &state); - } + GSK_VULKAN_FRAME_GET_CLASS (self)->submit_ops (self, &state, op); - GSK_VK_CHECK (vkEndCommandBuffer, self->vk_command_buffer); + GSK_VK_CHECK (vkEndCommandBuffer, priv->vk_command_buffer); GSK_VK_CHECK (vkQueueSubmit, gsk_vulkan_device_get_vk_queue (device), 1, &(VkSubmitInfo) { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .commandBufferCount = 1, - .pCommandBuffers = &self->vk_command_buffer, + .pCommandBuffers = &priv->vk_command_buffer, .pWaitSemaphores = gsk_semaphores_get_data (&semaphores.wait_semaphores), .pWaitDstStageMask = gsk_pipeline_stages_get_data (&semaphores.wait_stages), .waitSemaphoreCount = gsk_semaphores_get_size (&semaphores.wait_semaphores), @@ -366,7 +375,7 @@ gsk_vulkan_frame_submit (GskGpuFrame *frame, .pWaitSemaphoreValues = gsk_semaphore_values_get_data (&semaphores.wait_semaphore_values), } : NULL, }, - self->vk_fence); + priv->vk_fence); gsk_semaphores_clear (&semaphores.wait_semaphores); gsk_semaphore_values_clear (&semaphores.wait_semaphore_values); @@ -378,6 +387,7 @@ static void gsk_vulkan_frame_finalize (GObject *object) { GskVulkanFrame *self = GSK_VULKAN_FRAME (object); + GskVulkanFramePrivate *priv = gsk_vulkan_frame_get_instance_private (self); GskVulkanDevice *device; VkDevice vk_device; VkCommandPool vk_command_pool; @@ -388,12 +398,12 @@ gsk_vulkan_frame_finalize (GObject *object) vkFreeCommandBuffers (vk_device, vk_command_pool, - 1, &self->vk_command_buffer); + 1, &priv->vk_command_buffer); vkDestroySemaphore (vk_device, - 
self->vk_acquire_semaphore, + priv->vk_acquire_semaphore, NULL); vkDestroyFence (vk_device, - self->vk_fence, + priv->vk_fence, NULL); G_OBJECT_CLASS (gsk_vulkan_frame_parent_class)->finalize (object); @@ -405,6 +415,8 @@ gsk_vulkan_frame_class_init (GskVulkanFrameClass *klass) GskGpuFrameClass *gpu_frame_class = GSK_GPU_FRAME_CLASS (klass); GObjectClass *object_class = G_OBJECT_CLASS (klass); + klass->submit_ops = gsk_vulkan_frame_submit_ops; + gpu_frame_class->is_busy = gsk_vulkan_frame_is_busy; gpu_frame_class->wait = gsk_vulkan_frame_wait; gpu_frame_class->setup = gsk_vulkan_frame_setup; @@ -423,9 +435,11 @@ gsk_vulkan_frame_class_init (GskVulkanFrameClass *klass) static void gsk_vulkan_frame_init (GskVulkanFrame *self) { - self->pool_n_sets = 4; - self->pool_n_images = 8; - self->pool_n_buffers = 8; + GskVulkanFramePrivate *priv = gsk_vulkan_frame_get_instance_private (self); + + priv->pool_n_sets = 4; + priv->pool_n_images = 8; + priv->pool_n_buffers = 8; } void diff --git a/gsk/gpu/gskvulkanframeprivate.h b/gsk/gpu/gskvulkanframeprivate.h index 457ff3ba54f9e5526b52ed74994df397c64c7820..4104a38feae19ce1887b706a327f039f0b0c2311 100644 --- a/gsk/gpu/gskvulkanframeprivate.h +++ b/gsk/gpu/gskvulkanframeprivate.h @@ -2,13 +2,36 @@ #include "gskgpuframeprivate.h" +#include "gskgpuopprivate.h" #include "gskvulkandeviceprivate.h" G_BEGIN_DECLS -#define GSK_TYPE_VULKAN_FRAME (gsk_vulkan_frame_get_type ()) +#define GSK_TYPE_VULKAN_FRAME (gsk_vulkan_frame_get_type ()) +#define GSK_VULKAN_FRAME(o) (G_TYPE_CHECK_INSTANCE_CAST ((o), GSK_TYPE_VULKAN_FRAME, GskVulkanFrame)) +#define GSK_VULKAN_FRAME_CLASS(k) (G_TYPE_CHECK_CLASS_CAST ((k), GSK_TYPE_VULKAN_FRAME, GskVulkanFrameClass)) +#define GSK_IS_VULKAN_FRAME(o) (G_TYPE_CHECK_INSTANCE_TYPE ((o), GSK_TYPE_VULKAN_FRAME)) +#define GSK_IS_VULKAN_FRAME_CLASS(k) (G_TYPE_CHECK_CLASS_TYPE ((k), GSK_TYPE_VULKAN_FRAME)) +#define GSK_VULKAN_FRAME_GET_CLASS(o) (G_TYPE_INSTANCE_GET_CLASS ((o), GSK_TYPE_VULKAN_FRAME, 
GskVulkanFrameClass)) -G_DECLARE_FINAL_TYPE (GskVulkanFrame, gsk_vulkan_frame, GSK, VULKAN_FRAME, GskGpuFrame) +typedef struct _GskVulkanFrame GskVulkanFrame; +typedef struct _GskVulkanFrameClass GskVulkanFrameClass; + +struct _GskVulkanFrame +{ + GskGpuFrame parent_instance; +}; + +struct _GskVulkanFrameClass +{ + GskGpuFrameClass parent_class; + + void (* submit_ops) (GskVulkanFrame *frame, + GskVulkanCommandState *state, + GskGpuOp *op); +}; + +GType gsk_vulkan_frame_get_type (void) G_GNUC_CONST; void gsk_vulkan_semaphores_add_wait (GskVulkanSemaphores *self, VkSemaphore semaphore, @@ -17,4 +40,6 @@ void gsk_vulkan_semaphores_add_wait (GskVulk void gsk_vulkan_semaphores_add_signal (GskVulkanSemaphores *self, VkSemaphore semaphore); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskVulkanFrame, g_object_unref) + G_END_DECLS diff --git a/gsk/gpu/gskvulkanrenderer.c b/gsk/gpu/gskvulkanrenderer.c index a9d67ffdf5c474ba9a6d5fb23156e1974020ce15..46b8980f1965f2440333936ccedf573e513e0b00 100644 --- a/gsk/gpu/gskvulkanrenderer.c +++ b/gsk/gpu/gskvulkanrenderer.c @@ -9,6 +9,7 @@ #ifdef GDK_RENDERING_VULKAN #include "gskvulkandeviceprivate.h" +#include "gskvulkandebugframeprivate.h" #include "gskvulkanframeprivate.h" #include "gskvulkanimageprivate.h" @@ -103,6 +104,8 @@ gsk_vulkan_renderer_create_context (GskGpuRenderer *renderer, *supported = -1; if (!(display->vulkan_features & GDK_VULKAN_FEATURE_DUAL_SOURCE_BLEND)) *supported &= ~GSK_GPU_OPTIMIZE_DUAL_BLEND; + if (!(display->vulkan_features & GDK_VULKAN_FEATURE_PROFILE)) + *supported &= ~GSK_GPU_OPTIMIZE_PROFILE; return GDK_DRAW_CONTEXT (context); } @@ -172,6 +175,7 @@ gsk_vulkan_renderer_class_init (GskVulkanRendererClass *klass) GskGpuRendererClass *gpu_renderer_class = GSK_GPU_RENDERER_CLASS (klass); gpu_renderer_class->frame_type = GSK_TYPE_VULKAN_FRAME; + gpu_renderer_class->profile_frame_type = GSK_TYPE_VULKAN_DEBUG_FRAME; gpu_renderer_class->get_device = gsk_vulkan_device_get_for_display; gpu_renderer_class->create_context = 
gsk_vulkan_renderer_create_context; diff --git a/gsk/gskdebug.c b/gsk/gskdebug.c index 8470cef660b7464b469a0e07a16fd52abfc2419f..339b2dce7db7d0057c74864efdfda9f1b4ed1f7c 100644 --- a/gsk/gskdebug.c +++ b/gsk/gskdebug.c @@ -16,6 +16,7 @@ static const GdkDebugKey gsk_debug_keys[] = { { "staging", GSK_DEBUG_STAGING, "Use a staging image for texture upload (Vulkan only)" }, { "cairo", GSK_DEBUG_CAIRO, "Overlay error pattern over Cairo drawing (finds fallbacks)" }, { "occlusion", GSK_DEBUG_OCCLUSION, "Overlay highlight over areas optimized via occlusion culling" }, + { "profile", GSK_DEBUG_PROFILE, "Enable profiling (Vulkan only)" }, }; static guint gsk_debug_flags; diff --git a/gsk/gskdebugnode.c b/gsk/gskdebugnode.c index b67b97581bf29bb362f0b81b709bf66d200f2ebc..0834041eb6dfa1fb56b409db44fe2c4596f00be8 100644 --- a/gsk/gskdebugnode.c +++ b/gsk/gskdebugnode.c @@ -18,7 +18,7 @@ #include "config.h" -#include "gskdebugnode.h" +#include "gskdebugnodeprivate.h" #include "gskrectprivate.h" #include "gskrendernodeprivate.h" @@ -35,6 +35,7 @@ struct _GskDebugNode GskRenderNode render_node; GskRenderNode *child; + GskDebugProfile *profile; char *message; }; @@ -46,6 +47,7 @@ gsk_debug_node_finalize (GskRenderNode *node) gsk_render_node_unref (self->child); g_free (self->message); + g_free (self->profile); parent_class->finalize (node); } @@ -116,7 +118,7 @@ gsk_debug_node_replay (GskRenderNode *node, if (child == self->child) result = gsk_render_node_ref (node); else - result = gsk_debug_node_new (child, g_strdup (self->message)); + result = gsk_debug_node_new_profile (child, self->profile, g_strdup (self->message)); gsk_render_node_unref (child); @@ -142,21 +144,10 @@ gsk_debug_node_class_init (gpointer g_class, GSK_DEFINE_RENDER_NODE_TYPE (GskDebugNode, gsk_debug_node) -/** - * gsk_debug_node_new: - * @child: The child to add debug info for - * @message: (transfer full): The debug message - * - * Creates a `GskRenderNode` that will add debug information about - * the given 
@child. - * - * Adding this node has no visual effect. - * - * Returns: (transfer full) (type GskDebugNode): A new `GskRenderNode` - */ GskRenderNode * -gsk_debug_node_new (GskRenderNode *child, - char *message) +gsk_debug_node_new_profile (GskRenderNode *child, + const GskDebugProfile *perf, + char * message) { GskDebugNode *self; GskRenderNode *node; @@ -169,6 +160,8 @@ gsk_debug_node_new (GskRenderNode *child, self->child = gsk_render_node_ref (child); self->message = message; + if (perf) + self->profile = g_memdup2 (perf, sizeof (GskDebugProfile)); gsk_rect_init_from_rect (&node->bounds, &child->bounds); @@ -182,6 +175,25 @@ gsk_debug_node_new (GskRenderNode *child, return node; } +/** + * gsk_debug_node_new: + * @child: The child to add debug info for + * @message: (transfer full): The debug message + * + * Creates a `GskRenderNode` that will add debug information about + * the given @child. + * + * Adding this node has no visual effect. + * + * Returns: (transfer full) (type GskDebugNode): A new `GskRenderNode` + */ +GskRenderNode * +gsk_debug_node_new (GskRenderNode *child, + char *message) +{ + return gsk_debug_node_new_profile (child, NULL, message); +} + /** * gsk_debug_node_get_child: * @node: (type GskDebugNode): a debug `GskRenderNode` @@ -214,3 +226,18 @@ gsk_debug_node_get_message (const GskRenderNode *node) return self->message; } +/* + * gsk_debug_node_get_profile: + * @node: the node + * + * Gets the profile information carried by this debug node if available. 
+ * + * Returns: (nullable) (transfer none): the profile information + **/ +const GskDebugProfile * +gsk_debug_node_get_profile (GskRenderNode *node) +{ + const GskDebugNode *self = (const GskDebugNode *) node; + + return self->profile; +} diff --git a/gsk/gskdebugnodeprivate.h b/gsk/gskdebugnodeprivate.h new file mode 100644 index 0000000000000000000000000000000000000000..28e27d3369e69ef44403975aaef3ecb15194d7bf --- /dev/null +++ b/gsk/gskdebugnodeprivate.h @@ -0,0 +1,26 @@ +#pragma once + +#include "gskdebugnode.h" + +G_BEGIN_DECLS + +typedef struct _GskDebugProfile GskDebugProfile; + +struct _GskDebugProfile { + struct { + guint64 cpu_ns; + guint64 cpu_record_ns; + guint64 cpu_submit_ns; + guint64 gpu_ns; + guint64 gpu_pixels; + } total, self; +}; + +GskRenderNode * gsk_debug_node_new_profile (GskRenderNode *child, + const GskDebugProfile *profile, + char * message); + +const GskDebugProfile * gsk_debug_node_get_profile (GskRenderNode *node) G_GNUC_PURE; + + +G_END_DECLS diff --git a/gsk/gskdebugprivate.h b/gsk/gskdebugprivate.h index 0bc63d933b91591c429fd9ac433060049a4fee2c..e4e5d4b2d4aab8c054e7d63e566b5deaa1447f4c 100644 --- a/gsk/gskdebugprivate.h +++ b/gsk/gskdebugprivate.h @@ -19,6 +19,7 @@ typedef enum { GSK_DEBUG_STAGING = 1 << 9, GSK_DEBUG_CAIRO = 1 << 10, GSK_DEBUG_OCCLUSION = 1 << 11, + GSK_DEBUG_PROFILE = 1 << 12, } GskDebugFlags; #define GSK_DEBUG_ANY ((1 << 12) - 1) diff --git a/gsk/meson.build b/gsk/meson.build index c9191656f9f7facf1c1ed3285b6fdd714153142b..cce40904dbbe9d47baa43d3da3f160e9d19ac29c 100644 --- a/gsk/meson.build +++ b/gsk/meson.build @@ -181,6 +181,7 @@ gsk_private_vulkan_shader_headers = [] if have_vulkan gsk_private_sources += files([ 'gpu/gskvulkanbuffer.c', + 'gpu/gskvulkandebugframe.c', 'gpu/gskvulkandevice.c', 'gpu/gskvulkanframe.c', 'gpu/gskvulkanimage.c', diff --git a/gtk/inspector/meson.build b/gtk/inspector/meson.build index 7493bfd89c18ebbfb0230953c7de066266a16714..394de0dc3d8362d04f93d73bfd5c95cf51309173 100644 --- 
a/gtk/inspector/meson.build +++ b/gtk/inspector/meson.build @@ -27,6 +27,7 @@ inspector_sources = files( 'measuregraph.c', 'menu.c', 'misc-info.c', + 'nodewrapper.c', 'object-tree.c', 'prop-editor.c', 'prop-holder.c', diff --git a/gtk/inspector/nodewrapper.c b/gtk/inspector/nodewrapper.c new file mode 100644 index 0000000000000000000000000000000000000000..e718656317c80467ba7b973a0606512f22e17d46 --- /dev/null +++ b/gtk/inspector/nodewrapper.c @@ -0,0 +1,655 @@ +/* + * Copyright (c) 2025 Benjamin Otte + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library. If not, see . 
+ */ + +#include "config.h" + +#include "nodewrapper.h" + +#include "gsk/gskdebugnodeprivate.h" +#include "gsk/gskdisplacementnodeprivate.h" +#include "gsk/gskrectprivate.h" +#include "gsk/gskrendernodeprivate.h" +#include "gdk/gdkcairoprivate.h" +#include "gdk/gdktextureprivate.h" + +#include + +struct _GtkInspectorNodeWrapper +{ + GObject parent; + + GskRenderNode *node; + GskRenderNode *profile_node; + GskRenderNode *draw_node; + char *role; +}; + +struct _GtkInspectorNodeWrapperClass +{ + GObjectClass parent; +}; + +enum +{ + PROP_0, + PROP_DRAW_NODE, + PROP_NODE, + PROP_PROFILE_NODE, + PROP_ROLE, + + N_PROPS, +}; + +static GParamSpec *props[N_PROPS] = { NULL, }; + + +G_DEFINE_TYPE (GtkInspectorNodeWrapper, gtk_inspector_node_wrapper, G_TYPE_OBJECT) + +static void +gtk_inspector_node_wrapper_get_property (GObject *object, + guint param_id, + GValue *value, + GParamSpec *pspec) +{ + GtkInspectorNodeWrapper *self = GTK_INSPECTOR_NODE_WRAPPER (object); + + switch (param_id) + { + case PROP_DRAW_NODE: + g_value_set_boxed (value, self->draw_node); + break; + + case PROP_NODE: + g_value_set_boxed (value, self->node); + break; + + case PROP_PROFILE_NODE: + g_value_set_boxed (value, self->profile_node); + break; + + case PROP_ROLE: + g_value_set_string (value, self->role); + break; + + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, param_id, pspec); + break; + } +} + +static void +gtk_inspector_node_wrapper_set_property (GObject *object, + guint param_id, + const GValue *value, + GParamSpec *pspec) +{ + GtkInspectorNodeWrapper *self = GTK_INSPECTOR_NODE_WRAPPER (object); + + switch (param_id) + { + case PROP_DRAW_NODE: + if (g_value_get_pointer (value)) + self->draw_node = gsk_render_node_ref (g_value_get_pointer (value)); + break; + + case PROP_NODE: + self->node = gsk_render_node_ref (g_value_get_pointer (value)); + break; + + case PROP_PROFILE_NODE: + if (g_value_get_pointer (value)) + self->profile_node = gsk_render_node_ref (g_value_get_pointer (value)); + 
break; + + case PROP_ROLE: + self->role = g_value_dup_string (value); + break; + + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, param_id, pspec); + break; + } +} + +static void +gtk_inspector_node_wrapper_dispose (GObject *object) +{ + GtkInspectorNodeWrapper *self = GTK_INSPECTOR_NODE_WRAPPER (object); + + g_clear_pointer (&self->node, gsk_render_node_unref); + g_clear_pointer (&self->profile_node, gsk_render_node_unref); + g_clear_pointer (&self->draw_node, gsk_render_node_unref); + + G_OBJECT_CLASS (gtk_inspector_node_wrapper_parent_class)->dispose (object); +} + +static void +gtk_inspector_node_wrapper_class_init (GtkInspectorNodeWrapperClass *klass) +{ + GObjectClass *object_class = G_OBJECT_CLASS (klass); + + object_class->dispose = gtk_inspector_node_wrapper_dispose; + object_class->get_property = gtk_inspector_node_wrapper_get_property; + object_class->set_property = gtk_inspector_node_wrapper_set_property; + + props[PROP_DRAW_NODE] = + g_param_spec_pointer ("draw-node", NULL, NULL, + G_PARAM_READWRITE | G_PARAM_CONSTRUCT); + props[PROP_NODE] = + g_param_spec_pointer ("node", NULL, NULL, + G_PARAM_READWRITE | G_PARAM_CONSTRUCT); + props[PROP_PROFILE_NODE] = + g_param_spec_pointer ("profile-node", NULL, NULL, + G_PARAM_READWRITE | G_PARAM_CONSTRUCT); + props[PROP_ROLE] = + g_param_spec_string ("role", NULL, NULL, + NULL, + G_PARAM_READWRITE | G_PARAM_CONSTRUCT); + + g_object_class_install_properties (object_class, N_PROPS, props); +} + +static void +gtk_inspector_node_wrapper_init (GtkInspectorNodeWrapper *vis) +{ +} + +GtkInspectorNodeWrapper * +gtk_inspector_node_wrapper_new (GskRenderNode *node, + GskRenderNode *profile_node, + GskRenderNode *draw_node, + const char *role) +{ + return g_object_new (GTK_TYPE_INSPECTOR_NODE_WRAPPER, + "node", node, + "profile-node", profile_node, + "draw-node", draw_node, + "role", role, + NULL); +} + +GskRenderNode * +gtk_inspector_node_wrapper_get_node (GtkInspectorNodeWrapper *self) +{ + return self->node; +} + 
+GskRenderNode * +gtk_inspector_node_wrapper_get_profile_node (GtkInspectorNodeWrapper *self) +{ + return self->profile_node; +} + +const GskDebugProfile * +gtk_inspector_node_wrapper_get_profile (GtkInspectorNodeWrapper *self) +{ + if (self->profile_node == NULL || + gsk_render_node_get_node_type (self->profile_node) != GSK_DEBUG_NODE) + return NULL; + + return gsk_debug_node_get_profile (self->profile_node); +} + +GskRenderNode * +gtk_inspector_node_wrapper_get_draw_node (GtkInspectorNodeWrapper *self) +{ + return self->draw_node; +} + +const char * +gtk_inspector_node_wrapper_get_role (GtkInspectorNodeWrapper *self) +{ + return self->role; +} + +static const char ** +get_roles (GskRenderNodeType node_type) +{ + static const char *blend_node_roles[] = { "Bottom", "Top", NULL }; + static const char *mask_node_roles[] = { "Source", "Mask", NULL }; + static const char *cross_fade_node_roles[] = { "Start", "End", NULL }; + static const char *composite_node_roles[] = { "Child", "Mask", NULL }; + static const char *displacement_node_roles[] = { "Child", "Displacement", NULL }; + static const char *arithmetic_node_roles[] = { "First", "Second", NULL }; + + switch (node_type) + { + case GSK_BLEND_NODE: + return blend_node_roles; + case GSK_MASK_NODE: + return mask_node_roles; + case GSK_CROSS_FADE_NODE: + return cross_fade_node_roles; + case GSK_COMPOSITE_NODE: + return composite_node_roles; + case GSK_DISPLACEMENT_NODE: + return displacement_node_roles; + case GSK_ARITHMETIC_NODE: + return arithmetic_node_roles; + case GSK_CONTAINER_NODE: + case GSK_CAIRO_NODE: + case GSK_LINEAR_GRADIENT_NODE: + case GSK_REPEATING_LINEAR_GRADIENT_NODE: + case GSK_RADIAL_GRADIENT_NODE: + case GSK_REPEATING_RADIAL_GRADIENT_NODE: + case GSK_CONIC_GRADIENT_NODE: + case GSK_BORDER_NODE: + case GSK_INSET_SHADOW_NODE: + case GSK_OUTSET_SHADOW_NODE: + case GSK_TRANSFORM_NODE: + case GSK_OPACITY_NODE: + case GSK_COLOR_MATRIX_NODE: + case GSK_REPEAT_NODE: + case GSK_CLIP_NODE: + case 
GSK_ROUNDED_CLIP_NODE: + case GSK_FILL_NODE: + case GSK_STROKE_NODE: + case GSK_SHADOW_NODE: + case GSK_TEXT_NODE: + case GSK_BLUR_NODE: + case GSK_GL_SHADER_NODE: + case GSK_SUBSURFACE_NODE: + case GSK_COMPONENT_TRANSFER_NODE: + case GSK_COPY_NODE: + case GSK_PASTE_NODE: + case GSK_DEBUG_NODE: + case GSK_COLOR_NODE: + case GSK_TEXTURE_NODE: + case GSK_TEXTURE_SCALE_NODE: + case GSK_ISOLATION_NODE: + case GSK_NOT_A_RENDER_NODE: + default: + return NULL; + } +}; + +GListModel * +gtk_inspector_node_wrapper_create_children_model (GtkInspectorNodeWrapper *self) +{ + GskRenderNode **children, **draw_children, **profile_children; + gsize i, n_children, n_draw_children, n_profile_children; + GListStore *store; + const char **roles; + + children = gsk_render_node_get_children (self->node, &n_children); + + if (n_children == 0) + return NULL; + + store = g_list_store_new (GTK_TYPE_INSPECTOR_NODE_WRAPPER); + + if (self->draw_node) + { + if (gsk_render_node_get_node_type (self->node) == GSK_COPY_NODE) + { + draw_children = &self->draw_node; + n_draw_children = 1; + } + else if (gsk_render_node_get_node_type (self->node) == GSK_PASTE_NODE) + { + draw_children = NULL; + n_draw_children = 0; + } + else + { + draw_children = gsk_render_node_get_children (self->draw_node, &n_draw_children); + } + } + else + { + draw_children = NULL; + n_draw_children = 0; + } + + if (self->profile_node) + { + if (gsk_render_node_get_node_type (self->profile_node) == GSK_DEBUG_NODE && + gsk_debug_node_get_profile (self->profile_node)) + { + profile_children = gsk_render_node_get_children (gsk_debug_node_get_child (self->profile_node), &n_profile_children); + } + else + { + profile_children = gsk_render_node_get_children (self->profile_node, &n_profile_children); + } + g_assert (n_profile_children == n_children); + } + else + { + profile_children = NULL; + n_profile_children = 0; + } + + roles = get_roles (gsk_render_node_get_node_type (self->node)); + + for (i = 0; i < n_children; i++) + { + 
GtkInspectorNodeWrapper *child; + + child = gtk_inspector_node_wrapper_new (children[i], + i < n_profile_children ? profile_children[i] : NULL, + i < n_draw_children ? draw_children[i] : NULL, + roles ? roles[i] : NULL); + g_list_store_append (store, child); + g_object_unref (child); + } + + return G_LIST_MODEL (store); +} + +static void +render_heatmap_node (cairo_t *cr, + GskRenderNode *node, + const graphene_size_t *scale, + const graphene_rect_t *clip, + gsize max_value) +{ + switch (gsk_render_node_get_node_type (node)) + { + case GSK_TRANSFORM_NODE: + { + float xx, yx, xy, yy, dx, dy; + GskTransform *transform, *inverse; + cairo_matrix_t ctm; + graphene_rect_t new_clip; + + transform = gsk_transform_node_get_transform (node); + if (gsk_transform_get_category (transform) < GSK_TRANSFORM_CATEGORY_2D) + break; + + gsk_transform_to_2d (transform, &xx, &yx, &xy, &yy, &dx, &dy); + cairo_matrix_init (&ctm, xx, yx, xy, yy, dx, dy); + if (xx * yy == xy * yx) + break; + + inverse = gsk_transform_invert (gsk_transform_ref (transform)); + gsk_transform_transform_bounds (inverse, clip, &new_clip); + gsk_transform_unref (inverse); + cairo_save (cr); + cairo_transform (cr, &ctm); + render_heatmap_node (cr, + gsk_transform_node_get_child (node), + &GRAPHENE_SIZE_INIT (scale->width * xx, scale->height * yy), + &new_clip, + max_value); + cairo_restore (cr); + } + break; + + case GSK_DEBUG_NODE: + { + const GskDebugProfile *profile = gsk_debug_node_get_profile (node); + graphene_rect_t bounds; + double val; + + gsk_render_node_get_bounds (node, &bounds); + if (!gsk_rect_intersection (&bounds, clip, &bounds)) + break; + if (profile && profile->self.gpu_ns) + { + gdk_cairo_rect (cr, &bounds); + val = ((double) profile->self.gpu_ns) / (bounds.size.width * scale->width * bounds.size.height * scale->height) / max_value; + cairo_set_source_rgb (cr, val, val, val); + cairo_fill (cr); + } + render_heatmap_node (cr, gsk_debug_node_get_child (node), scale, &bounds, max_value); + } + 
break; + + case GSK_CLIP_NODE: + { + graphene_rect_t new_clip; + + cairo_save (cr); + gdk_cairo_rect (cr, gsk_clip_node_get_clip (node)); + if (!gsk_rect_intersection (clip, gsk_clip_node_get_clip (node), &new_clip)) + break; + cairo_clip (cr); + render_heatmap_node (cr, gsk_clip_node_get_child (node), scale, &new_clip, max_value); + cairo_restore (cr); + } + break; + + case GSK_ROUNDED_CLIP_NODE: + { + graphene_rect_t new_clip; + + cairo_save (cr); + gdk_cairo_rect (cr, &gsk_rounded_clip_node_get_clip (node)->bounds); + if (!gsk_rect_intersection (clip, &gsk_rounded_clip_node_get_clip (node)->bounds, &new_clip)) + break; + cairo_clip (cr); + render_heatmap_node (cr, gsk_rounded_clip_node_get_child (node), scale, &new_clip, max_value); + cairo_restore (cr); + } + break; + + case GSK_CONTAINER_NODE: + case GSK_CAIRO_NODE: + case GSK_COLOR_NODE: + case GSK_LINEAR_GRADIENT_NODE: + case GSK_REPEATING_LINEAR_GRADIENT_NODE: + case GSK_RADIAL_GRADIENT_NODE: + case GSK_REPEATING_RADIAL_GRADIENT_NODE: + case GSK_CONIC_GRADIENT_NODE: + case GSK_BORDER_NODE: + case GSK_TEXTURE_NODE: + case GSK_INSET_SHADOW_NODE: + case GSK_OUTSET_SHADOW_NODE: + case GSK_OPACITY_NODE: + case GSK_COLOR_MATRIX_NODE: + case GSK_REPEAT_NODE: + case GSK_SHADOW_NODE: + case GSK_BLEND_NODE: + case GSK_CROSS_FADE_NODE: + case GSK_TEXT_NODE: + case GSK_BLUR_NODE: + case GSK_GL_SHADER_NODE: + case GSK_TEXTURE_SCALE_NODE: + case GSK_MASK_NODE: + case GSK_FILL_NODE: + case GSK_STROKE_NODE: + case GSK_SUBSURFACE_NODE: + case GSK_COMPONENT_TRANSFER_NODE: + case GSK_COPY_NODE: + case GSK_PASTE_NODE: + case GSK_COMPOSITE_NODE: + case GSK_ISOLATION_NODE: + case GSK_DISPLACEMENT_NODE: + case GSK_ARITHMETIC_NODE: + { + GskRenderNode **children; + gsize i, n_children; + graphene_rect_t bounds; + + cairo_save (cr); + gsk_render_node_get_bounds (node, &bounds); + gdk_cairo_rect (cr, &bounds); + cairo_clip (cr); + if (!gsk_rect_intersection (&bounds, clip, &bounds)) + break; + children = gsk_render_node_get_children 
(node, &n_children); + for (i = 0; i < n_children; i++) + render_heatmap_node (cr, children[i], scale, &bounds, max_value); + cairo_restore (cr); + } + break; + + case GSK_NOT_A_RENDER_NODE: + default: + g_return_if_reached (); + } +} + +static void +scale_surface (cairo_surface_t *surface) +{ + float max = 0; + guint8 *data; + gsize width, height, stride; + gsize x, y; + + cairo_surface_flush (surface); + + data = cairo_image_surface_get_data (surface); + width = cairo_image_surface_get_width (surface); + height = cairo_image_surface_get_height (surface); + stride = cairo_image_surface_get_stride (surface); + + for (y = 0; y < height; y++) + { + float *row = (float *) (data + y * stride); + + for (x = 0; x < width; x++) + max = MAX (max, row[3 * x]); + } + + if (max >= 1.0) + return; + + for (y = 0; y < height; y++) + { + float *row = (float *) (data + y * stride); + + for (x = 0; x < width; x++) + { + float val = row[3 * x] / max; + row[3 * x + 0] = val; + row[3 * x + 1] = val; + row[3 * x + 2] = val; + } + } + + cairo_surface_mark_dirty (surface); +} + +static GdkTexture * +render_heatmap_mask (GskRenderNode *node) +{ + cairo_surface_t *surface; + cairo_t *cr; + graphene_rect_t bounds; + GdkTexture *texture; + gsize max_value, n_pixels; + + gsk_render_node_get_bounds (node, &bounds); + + n_pixels = ceil (bounds.size.width) * ceil (bounds.size.height); + max_value = 100 * 1024 * n_pixels; + if (gsk_render_node_get_node_type (node) == GSK_DEBUG_NODE) + { + const GskDebugProfile *profile = gsk_debug_node_get_profile (node); + + if (profile != NULL) + max_value = 100 * profile->total.gpu_ns / n_pixels; + } + + surface = cairo_image_surface_create (CAIRO_FORMAT_RGB96F, + ceil (bounds.size.width), + ceil (bounds.size.height)); + + cr = cairo_create (surface); + cairo_set_operator (cr, CAIRO_OPERATOR_ADD); + cairo_translate (cr, - bounds.origin.x, - bounds.origin.y); + + render_heatmap_node (cr, node, &GRAPHENE_SIZE_INIT (1.0, 1.0), &bounds, max_value); + + 
scale_surface (surface); + + cairo_destroy (cr); + + texture = gdk_texture_new_for_surface (surface); + cairo_surface_destroy (surface); + + return texture; +} + +static GskRenderNode * +heatmap_from_mask (GskRenderNode *mask) +{ + GskRenderNode *mask_gradient, *container, *gradient_node, *displacement; + graphene_rect_t bounds; + + gsk_render_node_get_bounds (mask, &bounds); + + mask_gradient = gsk_linear_gradient_node_new (&bounds, + &GRAPHENE_POINT_INIT (bounds.origin.x, + bounds.origin.y), + &GRAPHENE_POINT_INIT (bounds.origin.x + bounds.size.width, + bounds.origin.y), + (GskColorStop[2]) { + { 0, { 1, 1, 1, 0.5 } }, + { 1, { 0, 0, 0, 0.5 } }, + }, + 2); + container = gsk_container_node_new ((GskRenderNode *[2]) { mask, mask_gradient }, 2); + gsk_render_node_unref (mask_gradient); + + gradient_node = gsk_linear_gradient_node_new (&GRAPHENE_RECT_INIT (bounds.origin.x - 10, + bounds.origin.y, + bounds.size.width + 20, + bounds.size.height), + &GRAPHENE_POINT_INIT (bounds.origin.x, + bounds.origin.y), + &GRAPHENE_POINT_INIT (bounds.origin.x + bounds.size.width, + bounds.origin.y), + (GskColorStop[4]) { + { 0.0, { 0.3, 0.7, 0, 0.0 } }, + { 0.1, { 0.3, 0.7, 0, 0.2 } }, + { 0.5, { 1, 1, 0, 0.8 } }, + { 1.0, { 1, 0, 0, 0.8 } }, + }, + 4); + + displacement = gsk_displacement_node_new (&bounds, + gradient_node, + container, + (GdkColorChannel[2]) { GDK_COLOR_CHANNEL_RED, GDK_COLOR_CHANNEL_GREEN }, + &GRAPHENE_SIZE_INIT (bounds.size.width * 2, 0.1), + &GRAPHENE_SIZE_INIT (bounds.size.width * 2, 0), + &GRAPHENE_POINT_INIT (0.5, 0)); + gsk_render_node_unref (container); + gsk_render_node_unref (gradient_node); + + return displacement; +} + +GskRenderNode * +gtk_inspector_node_wrapper_create_heat_map (GtkInspectorNodeWrapper *self) +{ + GdkTexture *texture; + GskRenderNode *result, *heatmap, *texture_node; + graphene_rect_t bounds; + + if (self->profile_node == NULL) + return self->draw_node; + + gsk_render_node_get_bounds (self->profile_node, &bounds); + + texture = 
render_heatmap_mask (self->profile_node); + texture_node = gsk_texture_node_new (texture, &bounds); + g_object_unref (texture); + + heatmap = heatmap_from_mask (texture_node); + gsk_render_node_unref (texture_node); + + result = gsk_container_node_new ((GskRenderNode *[2]) { self->draw_node, heatmap }, 2); + gsk_render_node_unref (heatmap); + + return result; +} + diff --git a/gtk/inspector/nodewrapper.h b/gtk/inspector/nodewrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..4dff0de8f0771a82ad5b1ce796be423453305e0d --- /dev/null +++ b/gtk/inspector/nodewrapper.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2025 Benjamin Otte + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library. If not, see . 
+ */ + +#pragma once + +#include + +#include "gsk/gskdebugnodeprivate.h" + +G_BEGIN_DECLS + +#define GTK_TYPE_INSPECTOR_NODE_WRAPPER gtk_inspector_node_wrapper_get_type () + +G_DECLARE_FINAL_TYPE (GtkInspectorNodeWrapper, gtk_inspector_node_wrapper, GTK, INSPECTOR_NODE_WRAPPER, GObject) + + +GtkInspectorNodeWrapper * + gtk_inspector_node_wrapper_new (GskRenderNode *node, + GskRenderNode *perf_node, + GskRenderNode *draw_node, + const char *role); + +GskRenderNode * gtk_inspector_node_wrapper_get_node (GtkInspectorNodeWrapper *self); +GskRenderNode * gtk_inspector_node_wrapper_get_draw_node (GtkInspectorNodeWrapper *self); +GskRenderNode * gtk_inspector_node_wrapper_get_profile_node (GtkInspectorNodeWrapper *self); +const GskDebugProfile * gtk_inspector_node_wrapper_get_profile (GtkInspectorNodeWrapper *self); +const char * gtk_inspector_node_wrapper_get_role (GtkInspectorNodeWrapper *self); + +GskRenderNode * gtk_inspector_node_wrapper_create_heat_map (GtkInspectorNodeWrapper *self); +GListModel * gtk_inspector_node_wrapper_create_children_model(GtkInspectorNodeWrapper *self); + +G_END_DECLS diff --git a/gtk/inspector/recorder.c b/gtk/inspector/recorder.c index f21c5614ef237c59f70bffdb48965d575df14050..19035dfcc4bdd856e9541f5a654da86cdf90be78 100644 --- a/gtk/inspector/recorder.c +++ b/gtk/inspector/recorder.c @@ -79,6 +79,7 @@ #include "gtk/gtkrendernodepaintableprivate.h" #include "gdk/gdkcairoprivate.h" +#include "nodewrapper.h" #include "recording.h" #include "renderrecording.h" #include "startrecording.h" @@ -270,158 +271,6 @@ static GParamSpec *props[LAST_PROP] = { NULL, }; G_DEFINE_TYPE (GtkInspectorRecorder, gtk_inspector_recorder, GTK_TYPE_WIDGET) -static const char ** -get_roles (GskRenderNodeType node_type) -{ - static const char *blend_node_roles[] = { "Bottom", "Top", NULL }; - static const char *mask_node_roles[] = { "Source", "Mask", NULL }; - static const char *cross_fade_node_roles[] = { "Start", "End", NULL }; - static const char 
*composite_node_roles[] = { "Child", "Mask", NULL }; - static const char *displacement_node_roles[] = { "Child", "Displacement", NULL }; - static const char *arithmetic_node_roles[] = { "First", "Second", NULL }; - - switch (node_type) - { - case GSK_BLEND_NODE: - return blend_node_roles; - case GSK_MASK_NODE: - return mask_node_roles; - case GSK_CROSS_FADE_NODE: - return cross_fade_node_roles; - case GSK_COMPOSITE_NODE: - return composite_node_roles; - case GSK_DISPLACEMENT_NODE: - return displacement_node_roles; - case GSK_ARITHMETIC_NODE: - return arithmetic_node_roles; - case GSK_CONTAINER_NODE: - case GSK_CAIRO_NODE: - case GSK_LINEAR_GRADIENT_NODE: - case GSK_REPEATING_LINEAR_GRADIENT_NODE: - case GSK_RADIAL_GRADIENT_NODE: - case GSK_REPEATING_RADIAL_GRADIENT_NODE: - case GSK_CONIC_GRADIENT_NODE: - case GSK_BORDER_NODE: - case GSK_INSET_SHADOW_NODE: - case GSK_OUTSET_SHADOW_NODE: - case GSK_TRANSFORM_NODE: - case GSK_OPACITY_NODE: - case GSK_COLOR_MATRIX_NODE: - case GSK_REPEAT_NODE: - case GSK_CLIP_NODE: - case GSK_ROUNDED_CLIP_NODE: - case GSK_FILL_NODE: - case GSK_STROKE_NODE: - case GSK_SHADOW_NODE: - case GSK_TEXT_NODE: - case GSK_BLUR_NODE: - case GSK_GL_SHADER_NODE: - case GSK_SUBSURFACE_NODE: - case GSK_COMPONENT_TRANSFER_NODE: - case GSK_COPY_NODE: - case GSK_PASTE_NODE: - case GSK_DEBUG_NODE: - case GSK_COLOR_NODE: - case GSK_TEXTURE_NODE: - case GSK_TEXTURE_SCALE_NODE: - case GSK_ISOLATION_NODE: - case GSK_NOT_A_RENDER_NODE: - default: - return NULL; - } -}; - -static GListModel * -create_list_model_for_render_node (GskRenderNode *node, - GskRenderNode *draw_node) -{ - GskRenderNode **children, **draw_children; - gsize i, n_children, n_draw_children; - GListStore *store; - const char **roles; - - /* can't put render nodes into list models - they're not GObjects */ - store = g_list_store_new (GDK_TYPE_PAINTABLE); - - children = gsk_render_node_get_children (node, &n_children); - - if (draw_node) - { - if (gsk_render_node_get_node_type (node) == 
GSK_COPY_NODE) - { - draw_children = &draw_node; - n_draw_children = 1; - } - else if (gsk_render_node_get_node_type (node) == GSK_PASTE_NODE) - { - draw_children = NULL; - n_draw_children = 0; - } - else - { - draw_children = gsk_render_node_get_children (draw_node, &n_draw_children); - } - } - else - { - draw_children = NULL; - n_draw_children = 0; - } - roles = get_roles (gsk_render_node_get_node_type (node)); - - for (i = 0; i < n_children; i++) - { - graphene_rect_t bounds; - GdkPaintable *paintable; - - gsk_render_node_get_bounds (children[i], &bounds); - paintable = gtk_render_node_paintable_new (children[i], &bounds); - if (roles) - { - if (roles[i]) - g_object_set_data (G_OBJECT (paintable), "role", (gpointer) roles[i]); - else - roles = NULL; - } - if (i < n_draw_children) - { - GdkPaintable *draw_paintable; - - gsk_render_node_get_bounds (node, &bounds); - draw_paintable = gtk_render_node_paintable_new (draw_children[i], &bounds); - - g_object_set_data_full (G_OBJECT (paintable), - "draw-paintable", - (gpointer) draw_paintable, - g_object_unref); - } - else - { - g_assert_not_reached (); - } - g_list_store_append (store, paintable); - g_object_unref (paintable); - } - - return G_LIST_MODEL (store); -} - -static GListModel * -create_list_model_for_render_node_paintable (gpointer paintable, - gpointer unused) -{ - GskRenderNode *node = gtk_render_node_paintable_get_render_node (paintable); - GtkRenderNodePaintable *draw_paintable = g_object_get_data (G_OBJECT (paintable), "draw-paintable"); - GskRenderNode *draw_node; - - if (draw_paintable) - draw_node = gtk_render_node_paintable_get_render_node (draw_paintable); - else - draw_node = NULL; - - return create_list_model_for_render_node (node, draw_node); -} - static void recordings_clear_all (GtkButton *button, GtkInspectorRecorder *recorder) @@ -584,15 +433,18 @@ prepare_render_node_drag (GtkDragSource *source, GtkListItem *list_item) { GtkTreeListRow *row_item; - GdkPaintable *paintable; + 
GtkInspectorNodeWrapper *wrapper; GskRenderNode *node; row_item = gtk_list_item_get_item (list_item); if (row_item == NULL) return NULL; - paintable = gtk_tree_list_row_get_item (row_item); - node = gtk_render_node_paintable_get_render_node (GTK_RENDER_NODE_PAINTABLE (paintable)); + wrapper = gtk_tree_list_row_get_item (row_item); + node = gtk_inspector_node_wrapper_get_profile_node (wrapper); + if (node == NULL) + node = gtk_inspector_node_wrapper_get_node (wrapper); + g_object_unref (wrapper); return gdk_content_provider_new_typed (GSK_TYPE_RENDER_NODE, node); } @@ -628,16 +480,18 @@ static void bind_widget_for_render_node (GtkSignalListItemFactory *factory, GtkListItem *list_item) { - GdkPaintable *paintable, *draw_paintable; - GskRenderNode *node; + GtkInspectorNodeWrapper *wrapper; + GskRenderNode *node, *draw_node; + GdkPaintable *paintable; + graphene_rect_t bounds; GtkTreeListRow *row_item; GtkWidget *expander, *box, *child; char *name; row_item = gtk_list_item_get_item (list_item); - paintable = gtk_tree_list_row_get_item (row_item); - draw_paintable = g_object_get_data (G_OBJECT (paintable), "draw-paintable"); - node = gtk_render_node_paintable_get_render_node (GTK_RENDER_NODE_PAINTABLE (paintable)); + wrapper = gtk_tree_list_row_get_item (row_item); + node = gtk_inspector_node_wrapper_get_node (wrapper); + draw_node = gtk_inspector_node_wrapper_get_draw_node (wrapper); /* expander */ expander = gtk_list_item_get_child (list_item); @@ -645,8 +499,11 @@ bind_widget_for_render_node (GtkSignalListItemFactory *factory, box = gtk_tree_expander_get_child (GTK_TREE_EXPANDER (expander)); /* icon */ + gsk_render_node_get_bounds (draw_node, &bounds); + paintable = gtk_render_node_paintable_new (draw_node, &bounds); child = gtk_widget_get_first_child (box); - gtk_image_set_from_paintable (GTK_IMAGE (child), draw_paintable); + gtk_image_set_from_paintable (GTK_IMAGE (child), paintable); + g_object_unref (paintable); /* name */ name = node_name (node); @@ -654,39 
+511,39 @@ bind_widget_for_render_node (GtkSignalListItemFactory *factory, gtk_inscription_set_text (GTK_INSCRIPTION (child), name); g_free (name); - g_object_unref (paintable); + g_object_unref (wrapper); } static void show_render_node (GtkInspectorRecorder *recorder, - GskRenderNode *node) + GskRenderNode *node, + GskRenderNode *profile_node) { - graphene_rect_t bounds; - GdkPaintable *paintable, *draw_paintable; + GtkInspectorNodeWrapper *wrapper; GskRenderNode *draw_node; - - gsk_render_node_get_bounds (node, &bounds); - paintable = gtk_render_node_paintable_new (node, &bounds); - draw_node = gsk_render_node_replace_copy_paste (gsk_render_node_ref(node)); - draw_paintable = gtk_render_node_paintable_new (draw_node, &bounds); - gsk_render_node_unref (draw_node); - g_object_set_data_full (G_OBJECT (paintable), "draw-paintable", (gpointer) draw_paintable, g_object_unref); + GdkPaintable *paintable; + graphene_rect_t bounds; if (strcmp (gtk_stack_get_visible_child_name (GTK_STACK (recorder->recording_data_stack)), "frame_data") == 0) { - gtk_picture_set_paintable (GTK_PICTURE (recorder->render_node_view), draw_paintable); + draw_node = gsk_render_node_replace_copy_paste (gsk_render_node_ref (node)); + wrapper = gtk_inspector_node_wrapper_new (node, profile_node, draw_node, "Root"); g_list_store_splice (recorder->render_node_root_model, 0, g_list_model_get_n_items (G_LIST_MODEL (recorder->render_node_root_model)), - (gpointer[1]) { paintable }, + (gpointer[1]) { wrapper }, 1); + + g_object_unref (wrapper); + gsk_render_node_unref (draw_node); } else { + gsk_render_node_get_bounds (node, &bounds); + paintable = gtk_render_node_paintable_new (node, &bounds); gtk_picture_set_paintable (GTK_PICTURE (recorder->event_view), paintable); + g_object_unref (paintable); } - - g_object_unref (paintable); } static void @@ -842,12 +699,11 @@ recording_selected (GtkSingleSelection *selection, if (GTK_INSPECTOR_IS_RENDER_RECORDING (recording)) { - GskRenderNode *node; - 
gtk_stack_set_visible_child_name (GTK_STACK (recorder->recording_data_stack), "frame_data"); - node = gtk_inspector_render_recording_get_node (GTK_INSPECTOR_RENDER_RECORDING (recording)); - show_render_node (recorder, node); + show_render_node (recorder, + gtk_inspector_render_recording_get_node (GTK_INSPECTOR_RENDER_RECORDING (recording)), + gtk_inspector_render_recording_get_profile_node (GTK_INSPECTOR_RENDER_RECORDING (recording))); } else if (GTK_INSPECTOR_IS_EVENT_RECORDING (recording)) { @@ -873,7 +729,7 @@ recording_selected (GtkSingleSelection *selection, GskRenderNode *temp; temp = make_event_node (event, node, NULL); - show_render_node (recorder, temp); + show_render_node (recorder, temp, NULL); gsk_render_node_unref (temp); } @@ -1159,9 +1015,10 @@ add_rect_row (GListStore *store, } static void -populate_render_node_properties (GListStore *store, - GskRenderNode *node, - const char *role) +populate_render_node_properties (GListStore *store, + GskRenderNode *node, + const char *role, + const GskDebugProfile *profile) { graphene_rect_t bounds, opaque; @@ -1803,6 +1660,17 @@ G_GNUC_END_IGNORE_DEPRECATIONS default: break; } + + if (profile) + { + add_text_row (store, "Performance", "%s", ""); + add_text_row (store, "CPU total", "%'lluns", (unsigned long long) profile->total.cpu_ns); + add_text_row (store, "CPU self", "%'lluns", (unsigned long long) profile->self.cpu_ns); + add_text_row (store, "GPU total", "%'lluns", (unsigned long long) profile->total.gpu_ns); + add_text_row (store, "GPU self", "%'lluns", (unsigned long long) profile->self.gpu_ns); + add_text_row (store, "pixels total", "%'llu", (unsigned long long) profile->total.gpu_pixels); + add_text_row (store, "pixels self", "%'llu", (unsigned long long) profile->self.gpu_pixels); + } } static const char * @@ -2200,29 +2068,32 @@ static GskRenderNode * get_selected_node (GtkInspectorRecorder *recorder) { GtkTreeListRow *row_item; - GdkPaintable *paintable; + GtkInspectorNodeWrapper *wrapper; 
GskRenderNode *node; row_item = gtk_single_selection_get_selected_item (recorder->render_node_selection); if (row_item == NULL) return NULL; - paintable = gtk_tree_list_row_get_item (row_item); - node = gtk_render_node_paintable_get_render_node (GTK_RENDER_NODE_PAINTABLE (paintable)); - g_object_unref (paintable); + wrapper = gtk_tree_list_row_get_item (row_item); + node = gtk_inspector_node_wrapper_get_profile_node (wrapper); + if (node == NULL) + node = gtk_inspector_node_wrapper_get_node (wrapper); + g_object_unref (wrapper); return node; } static void -render_node_list_selection_changed (GtkListBox *list, - GtkListBoxRow *row, +render_node_list_selection_changed (GtkSingleSelection *selection, + GParamSpec *pspec, GtkInspectorRecorder *recorder) { - GskRenderNode *node; - GdkPaintable *paintable, *draw_paintable; + GskRenderNode *draw_node; + GdkPaintable *paintable; GtkTreeListRow *row_item; - const char *role; + GtkInspectorNodeWrapper *wrapper; + graphene_rect_t bounds; row_item = gtk_single_selection_get_selected_item (recorder->render_node_selection); @@ -2232,15 +2103,21 @@ render_node_list_selection_changed (GtkListBox *list, if (row_item == NULL) return; - paintable = gtk_tree_list_row_get_item (row_item); - draw_paintable = g_object_get_data (G_OBJECT (paintable), "draw-paintable"); + wrapper = gtk_tree_list_row_get_item (row_item); + draw_node = gtk_inspector_node_wrapper_create_heat_map (wrapper); + if (draw_node == NULL) + draw_node = gsk_render_node_ref (gtk_inspector_node_wrapper_get_draw_node (wrapper)); - gtk_picture_set_paintable (GTK_PICTURE (recorder->render_node_view), draw_paintable); - node = gtk_render_node_paintable_get_render_node (GTK_RENDER_NODE_PAINTABLE (paintable)); - role = g_object_get_data (G_OBJECT (paintable), "role"); - populate_render_node_properties (recorder->render_node_properties, node, role); + gsk_render_node_get_bounds (draw_node, &bounds); + paintable = gtk_render_node_paintable_new (draw_node, &bounds); + 
gtk_picture_set_paintable (GTK_PICTURE (recorder->render_node_view), paintable); + populate_render_node_properties (recorder->render_node_properties, + gtk_inspector_node_wrapper_get_node (wrapper), + gtk_inspector_node_wrapper_get_role (wrapper), + gtk_inspector_node_wrapper_get_profile (wrapper)); g_object_unref (paintable); + gsk_render_node_unref (draw_node); } static void @@ -2256,7 +2133,7 @@ event_properties_list_selection_changed (GtkSelectionModel *model, return; if (prop->node) - show_render_node (recorder, prop->node); + show_render_node (recorder, prop->node, NULL); } static void @@ -2684,11 +2561,11 @@ gtk_inspector_recorder_init (GtkInspectorRecorder *recorder) gtk_list_view_set_factory (GTK_LIST_VIEW (recorder->recordings_list), factory); g_object_unref (factory); - recorder->render_node_root_model = g_list_store_new (GDK_TYPE_PAINTABLE); + recorder->render_node_root_model = g_list_store_new (GTK_TYPE_INSPECTOR_NODE_WRAPPER); recorder->render_node_model = gtk_tree_list_model_new (g_object_ref (G_LIST_MODEL (recorder->render_node_root_model)), FALSE, TRUE, - create_list_model_for_render_node_paintable, + (GtkTreeListModelCreateModelFunc) gtk_inspector_node_wrapper_create_children_model, NULL, NULL); recorder->render_node_selection = gtk_single_selection_new (g_object_ref (G_LIST_MODEL (recorder->render_node_model))); g_signal_connect (recorder->render_node_selection, "notify::selected-item", G_CALLBACK (render_node_list_selection_changed), recorder); @@ -3003,4 +2880,29 @@ gtk_inspector_recorder_set_selected_sequence (GtkInspectorRecorder *recorder, g_object_notify_by_pspec (G_OBJECT (recorder), props[PROP_SELECTED_SEQUENCE]); } +void +gtk_inspector_recorder_add_profile_node (GtkInspectorRecorder *self, + GskRenderNode *node, + GskRenderNode *profile_node) +{ + guint i; + + for (i = g_list_model_get_n_items (self->recordings); + i-- > 0;) + { + GtkInspectorRecording *rec = g_list_model_get_item (self->recordings, i); + + if 
(!GTK_INSPECTOR_IS_RENDER_RECORDING (rec)) + { + g_object_unref (rec); + continue; + } + + if (node == gtk_inspector_render_recording_get_node (GTK_INSPECTOR_RENDER_RECORDING (rec))) + gtk_inspector_render_recording_set_profile_node (GTK_INSPECTOR_RENDER_RECORDING (rec), profile_node); + + g_object_unref (rec); + } +} + /* vim:set foldmethod=marker: */ diff --git a/gtk/inspector/recorder.h b/gtk/inspector/recorder.h index 9f134e1ef618fe99db74d11f91f8f470a098d34d..1f0a7b3edf576a81b03b3f28a55b272f02faaaf4 100644 --- a/gtk/inspector/recorder.h +++ b/gtk/inspector/recorder.h @@ -61,6 +61,9 @@ void gtk_inspector_recorder_trace_event (GtkInspectorRec GtkEventController *controller, GtkWidget *target, gboolean handled); +void gtk_inspector_recorder_add_profile_node (GtkInspectorRecorder *self, + GskRenderNode *node, + GskRenderNode *profile_node); G_END_DECLS diff --git a/gtk/inspector/renderrecording.c b/gtk/inspector/renderrecording.c index eefe96b4bb019633ce408e7383ea5a8ae07c4c25..bda2afe4daf723c349be7c897817cc67640d3a7f 100644 --- a/gtk/inspector/renderrecording.c +++ b/gtk/inspector/renderrecording.c @@ -29,6 +29,7 @@ gtk_inspector_render_recording_finalize (GObject *object) g_clear_pointer (&recording->clip_region, cairo_region_destroy); g_clear_pointer (&recording->node, gsk_render_node_unref); + g_clear_pointer (&recording->profile_node, gsk_render_node_unref); G_OBJECT_CLASS (gtk_inspector_render_recording_parent_class)->finalize (object); } @@ -73,6 +74,21 @@ gtk_inspector_render_recording_get_node (GtkInspectorRenderRecording *recording) return recording->node; } +GskRenderNode * +gtk_inspector_render_recording_get_profile_node (GtkInspectorRenderRecording *recording) +{ + return recording->profile_node; +} + +void +gtk_inspector_render_recording_set_profile_node (GtkInspectorRenderRecording *recording, + GskRenderNode *profile_node) +{ + g_clear_pointer (&recording->profile_node, gsk_render_node_unref); + if (profile_node) + recording->profile_node = 
gsk_render_node_ref (profile_node); +} + const cairo_region_t * gtk_inspector_render_recording_get_clip_region (GtkInspectorRenderRecording *recording) { diff --git a/gtk/inspector/renderrecording.h b/gtk/inspector/renderrecording.h index 250f5299cadac9288334ea09359393a9436829db..94485086972b13606ac3c7a57439213882834ead 100644 --- a/gtk/inspector/renderrecording.h +++ b/gtk/inspector/renderrecording.h @@ -41,6 +41,7 @@ typedef struct _GtkInspectorRenderRecording GdkRectangle area; cairo_region_t *clip_region; GskRenderNode *node; + GskRenderNode *profile_node; gpointer surface; } GtkInspectorRenderRecording; @@ -49,23 +50,26 @@ typedef struct _GtkInspectorRenderRecordingClass GtkInspectorRecordingClass parent; } GtkInspectorRenderRecordingClass; -GType gtk_inspector_render_recording_get_type (void); +GType gtk_inspector_render_recording_get_type (void); GtkInspectorRecording * - gtk_inspector_render_recording_new (gint64 timestamp, - const GdkRectangle *area, - const cairo_region_t *clip_region, - GskRenderNode *node, - gpointer surface); - -GskRenderNode * gtk_inspector_render_recording_get_node (GtkInspectorRenderRecording *recording); + gtk_inspector_render_recording_new (gint64 timestamp, + const GdkRectangle *area, + const cairo_region_t *clip_region, + GskRenderNode *node, + gpointer surface); + +GskRenderNode * gtk_inspector_render_recording_get_node (GtkInspectorRenderRecording *recording); +GskRenderNode * gtk_inspector_render_recording_get_profile_node (GtkInspectorRenderRecording *recording); +void gtk_inspector_render_recording_set_profile_node (GtkInspectorRenderRecording *recording, + GskRenderNode *profile_node); const cairo_region_t * - gtk_inspector_render_recording_get_clip_region (GtkInspectorRenderRecording *recording); + gtk_inspector_render_recording_get_clip_region (GtkInspectorRenderRecording *recording); const cairo_rectangle_int_t * - gtk_inspector_render_recording_get_area (GtkInspectorRenderRecording *recording); + 
gtk_inspector_render_recording_get_area (GtkInspectorRenderRecording *recording); gpointer - gtk_inspector_render_recording_get_surface (GtkInspectorRenderRecording *recording); + gtk_inspector_render_recording_get_surface (GtkInspectorRenderRecording *recording); G_END_DECLS diff --git a/gtk/inspector/window.c b/gtk/inspector/window.c index fb80a272a02df966134f3c1f758651e9f469d9e6..06b29339fb1ebab7c15eec69abf9087f709ae1a0 100644 --- a/gtk/inspector/window.c +++ b/gtk/inspector/window.c @@ -1121,5 +1121,23 @@ gtk_inspector_window_set_object (GtkInspectorWindow *iw, update_go_buttons (iw); } -// vim: set et sw=2 ts=2: +void +gtk_inspector_add_profile_node (GdkDisplay *display, + GskRenderNode *node, + GskRenderNode *profile_node) +{ + GtkInspectorWindow *iw; + + if (!any_inspector_window_constructed) + return; + iw = gtk_inspector_window_get_for_display (display); + if (iw == NULL) + return; + + gtk_inspector_recorder_add_profile_node (GTK_INSPECTOR_RECORDER (iw->widget_recorder), + node, + profile_node); +} + +// vim: set et sw=2 ts=2: diff --git a/gtk/inspector/window.h b/gtk/inspector/window.h index 5f2e6b6087b28df9a7fcb6fb032bd76b3597dad4..f2d70badfea674f29babc49b95308f45f40db592 100644 --- a/gtk/inspector/window.h +++ b/gtk/inspector/window.h @@ -163,6 +163,9 @@ void gtk_inspector_trace_event (GdkEvent GtkEventController *controller, GtkWidget *target, gboolean handled); +void gtk_inspector_add_profile_node (GdkDisplay *display, + GskRenderNode *node, + GskRenderNode *profile_node); G_END_DECLS