
A GPU (driver) is now responsible for reading and writing pixels from and to user data. The client (LibGL) is responsible for specifying how the user data must be interpreted or written to.

This allows us to centralize all pixel format conversion in one class, `LibSoftGPU::PixelConverter`. For both the input and output image, it takes a specification containing the image dimensions, the pixel type and the selection (basically a clipping rect), and converts the pixels from the input image to the output image.

Effectively this means we now support almost all OpenGL 1.5 formats, and all custom logic has disappeared from:

- `glDrawPixels`
- `glReadPixels`
- `glTexImage2D`
- `glTexSubImage2D`

The new logic is still unoptimized, but on my machine I experienced no noticeable slowdown. :^)
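For reference, these are the layout types added to `LibGPU` that describe how user-provided pixel data is laid out: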
```cpp
/*
 * Copyright (c) 2021, Stephan Unverwerth <s.unverwerth@serenityos.org>
 * Copyright (c) 2022, Jelle Raaijmakers <jelle@gmta.nl>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <LibGPU/ImageFormat.h>

namespace GPU {

// Order of bytes within a single component
enum class ComponentBytesOrder {
    Normal,
    Reversed,
};

struct PackingSpecification final {
    u32 depth_stride { 0 };
    u32 row_stride { 0 };
    u8 byte_alignment { 1 };
    ComponentBytesOrder component_bytes_order { ComponentBytesOrder::Normal };
};

// Full dimensions of the image
struct DimensionSpecification final {
    u32 width;
    u32 height;
    u32 depth;
};

// Subselection (source or target) within the image
struct ImageSelection final {
    i32 offset_x { 0 };
    i32 offset_y { 0 };
    i32 offset_z { 0 };
    u32 width;
    u32 height;
    u32 depth;
};

struct ImageDataLayout final {
    PixelType pixel_type;
    PackingSpecification packing {};
    DimensionSpecification dimensions;
    ImageSelection selection;
};

}
```
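To make the interplay concrete, here is a sketch of how a `glReadPixels`-style request (read a 64×64 rectangle at offset (16, 16) out of a 256×256 buffer) might be described with these structs. This is an illustration only: the `PixelType` initializer is left empty because its fields are defined in `LibGPU/ImageFormat.h` and aren't shown above, and the assumption that a stride of 0 means "derive from the dimensions and pixel size" is mine, based on the `{ 0 }` defaults.

```cpp
// Hypothetical usage sketch, assuming the header above is included.
GPU::ImageDataLayout output_layout {
    .pixel_type = {}, // e.g. RGBA + unsigned byte; fields live in LibGPU/ImageFormat.h
    .packing = {
        // depth_stride and row_stride stay 0: presumably "tightly packed".
        .byte_alignment = 4, // rows start on 4-byte boundaries (OpenGL's GL_PACK_ALIGNMENT default)
    },
    // Full size of the user-provided buffer...
    .dimensions = { .width = 256, .height = 256, .depth = 1 },
    // ...and the rectangle within it that is actually written to.
    .selection = {
        .offset_x = 16,
        .offset_y = 16,
        .width = 64,
        .height = 64,
        .depth = 1,
    },
};
```

Splitting the full `dimensions` from the `selection` mirrors OpenGL's pack/unpack state (row lengths, skipped pixels and rows), which is presumably what allows the per-call custom logic in the four entry points above to be replaced by a single converter.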