I'm working on a C++ graphics app and have put together a set of RAII classes to manage the initialization boilerplate for SDL3 and OpenGL. My goal is to make the setup process safer and more modular.

The basic structure is a three-tier hierarchy:

  1. ctx_sdl: A base class that handles the global SDL_Init() and SDL_Quit().
  2. window_manager: Manages the SDL_Window and holds a ctx_sdl instance to ensure SDL is ready.
  3. gl_window: Inherits from window_manager and adds the SDL_GLContext to create a complete, ready-to-use OpenGL window.

I've also experimented with a set_state helper function and some aliases to try and make configuration calls a bit cleaner. I'd appreciate a review of the overall approach, idioms, any potential pitfalls, etc.

Here is the code:

ctx_sdl.h

#ifndef SPACE_EXPLORER_CTX_SDL_H
#define SPACE_EXPLORER_CTX_SDL_H
#include <SDL3/SDL.h>
#include <algorithm>
#include <cstddef>
#include <exception>
#include <glm/glm.hpp>
#include <string>
#include <utility>
namespace raw {
class ctx_sdl {
public:
 ctx_sdl();
 virtual ~ctx_sdl() noexcept;
 ctx_sdl(const ctx_sdl&) = delete;
 ctx_sdl& operator=(const ctx_sdl&) = delete;
 ctx_sdl(ctx_sdl&&) = delete;
 ctx_sdl& operator=(ctx_sdl&&) = delete;
};
} // namespace raw
#endif // SPACE_EXPLORER_CTX_SDL_H

ctx_sdl.cpp

#include "ctx_sdl.h"
#include <SDL3/SDL.h>
#include <iostream>
namespace raw {
// first function to call in graphics application
ctx_sdl::ctx_sdl() {
 if (!SDL_Init(SDL_INIT_VIDEO)) {
 std::cerr << "SDL could not initialize! SDL_Error: " << SDL_GetError() << std::endl;
 throw std::runtime_error(
 "SDL could not initialize! SDL_Error: " + std::string(SDL_GetError()) + " in " +
 __FILE__ + " on " + std::to_string(__LINE__));
 }
}
ctx_sdl::~ctx_sdl() noexcept {
 SDL_Quit();
}
} // namespace raw

helper_macros.h

#ifndef SPACE_EXPLORER_HELPER_MACROS_H
#define SPACE_EXPLORER_HELPER_MACROS_H
#include <chrono>
#include <vector>
namespace raw {
#define PASSIVE_VALUE static inline constexpr auto
using std_clock = std::chrono::high_resolution_clock;
using UI = unsigned int;
template<typename T>
using vec = std::vector<T>;
} // namespace raw
#endif // SPACE_EXPLORER_HELPER_MACROS_H

helper_functions.h

#ifndef SPACE_EXPLORER_HELPER_FUNCTIONS_H
#define SPACE_EXPLORER_HELPER_FUNCTIONS_H
#include <glad/glad.h>
#include <SDL3/SDL.h>
#include <iostream>
namespace raw {
void init_glad() {
 if (!gladLoadGLLoader((GLADloadproc)SDL_GL_GetProcAddress)) {
 std::cerr << "Failed to initialize GLAD" << std::endl;
 throw std::runtime_error("Failed to initialize GLAD: " + std::string(__FILE__) + " - " +
 std::to_string(__LINE__));
 }
}
} // namespace raw
#endif // SPACE_EXPLORER_HELPER_FUNCTIONS_H

window_manager.h

#ifndef SPACE_EXPLORER_WINDOW_MANAGER_H
#define SPACE_EXPLORER_WINDOW_MANAGER_H
#include <SDL3/SDL.h>
#include <glad/glad.h>
#include <glm/glm.hpp>
#include <string>
#include "ctx_sdl.h"
#include "helper_macros.h"
namespace raw {
namespace gl {
// add more if you need
PASSIVE_VALUE& ATTR = SDL_GL_SetAttribute;
PASSIVE_VALUE& RULE = glEnable;
PASSIVE_VALUE& VIEW = glViewport;
PASSIVE_VALUE& MOUSE_GRAB = SDL_SetWindowMouseGrab;
PASSIVE_VALUE& RELATIVE_MOUSE_MODE = SDL_SetWindowRelativeMouseMode;
PASSIVE_VALUE& CLEAR_COLOR = glClearColor;
} // namespace gl
class window_manager {
protected:
 SDL_Window* window = nullptr;
 ctx_sdl sdl_ctx;
public:
 window_manager() = delete;
 window_manager(const std::string& window_name);
 window_manager(const window_manager&) = delete;
 window_manager& operator=(const window_manager&) = delete;
 window_manager(window_manager&&) = delete;
 window_manager& operator=(window_manager&&) = delete;
 // if you don't like what predefined attributes I have, you could set what you want manually.
 template<typename F, typename... Ts>
 void set_state(F&& func, Ts... values) {
 std::forward<F>(func)(std::forward<Ts>(values)...);
 }
 [[nodiscard]] bool poll_event(SDL_Event* event);
 SDL_Window* get();
 glm::ivec2 get_window_size();
 virtual ~window_manager() noexcept;
};
} // namespace raw
#endif // SPACE_EXPLORER_WINDOW_MANAGER_H

window_manager.cpp

#include "window_manager.h"
#include <glad/glad.h>
#include <iostream>
namespace raw {
window_manager::window_manager(const std::string& window_name) : sdl_ctx() {
 SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
 SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
 SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
 SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
 SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);
 window = SDL_CreateWindow(window_name.c_str(), 2560, 1440,
 (SDL_WINDOW_OPENGL | SDL_WINDOW_FULLSCREEN));
 if (!window) {
 std::cerr << "Window could not be created! SDL_Error: " << SDL_GetError() << std::endl;
 throw std::runtime_error(
 "Window could not be created! SDL_Error: " + std::string(SDL_GetError()) + " in " +
 __FILE__ + " on " + std::to_string(__LINE__));
 }
}
window_manager::~window_manager() noexcept {
 SDL_DestroyWindow(window);
}
bool window_manager::poll_event(SDL_Event* event) {
 return SDL_PollEvent(event);
}
SDL_Window* window_manager::get() {
 return window;
}
glm::ivec2 window_manager::get_window_size() {
 glm::ivec2 resolution;
 SDL_GetWindowSize(window, &resolution.x, &resolution.y);
 return resolution;
}
} // namespace raw

gl_window.h

#ifndef SPACE_EXPLORER_GL_WINDOW_H
#define SPACE_EXPLORER_GL_WINDOW_H
#include "window_manager.h"
namespace raw {
class gl_window final : public window_manager {
private:
 SDL_GLContext ctx = nullptr;
public:
 gl_window(std::string window_name);
 ~gl_window() noexcept final;
};
} // namespace raw
#endif // SPACE_EXPLORER_GL_WINDOW_H

gl_window.cpp

#include "gl_window.h"
#include <iostream>
#include "helper_functions.h"
namespace raw {
gl_window::gl_window(std::string window_name) : window_manager(window_name) {
 ctx = SDL_GL_CreateContext(window);
 if (!ctx) {
 std::cerr << "OpenGL context could not be created! SDL_Error: " << SDL_GetError()
 << std::endl;
 throw std::runtime_error(
 "OpenGL context could not be created! SDL_Error: " + std::string(SDL_GetError()) +
 " in " + std::string(__FILE__) + " on " + std::to_string(__LINE__) + " line");
 }
 init_glad();
}
gl_window::~gl_window() noexcept {
 SDL_GL_DestroyContext(ctx);
}
} // namespace raw

My main questions are:

  1. Overall Design (Inheritance vs. Composition): My thinking was that a window_manager has an sdl_ctx (composition), while a gl_window is a specialized window_manager (public inheritance). Does this feel like a solid design, or would you recommend sticking to composition throughout for more flexibility or clearer ownership?

  2. The set_state function and gl aliases: I created the set_state function and the aliases in the gl namespace to make configuration calls cleaner (e.g., set_state(gl::RULE, GL_DEPTH_TEST)). While I still like it, I'm questioning if this is a good approach. Is there a more standard way to achieve pretty much the same result more C++ like? (my question on stack overflow didn't produce anything helpful as you can see)

  3. Use of helper macros and type aliases: What's the general view on helpers like my PASSIVE_VALUE macro (for static inline constexpr auto) and type aliases?

I'm open to all feedback on best practices, potential bugs, or any design improvements you can suggest.

asked Jun 30 at 10:19

2 Answers


Design review

Your model is very confused.

So, ctx_sdl is the class representing "SDL"—the SDL library. Fine, no problems so far.

Then window_manager is the class that "manages" the main/only SDL window to be created, and here’s where things start to get weird. Is window_manager a window manager, or is it just a window? The name suggests a manager—potentially of multiple windows (because why would one need a manager for a single window?)—but the code itself suggests it’s just a single window: it creates the window in the constructor and destroys it in the destructor.

And then things get really confusing, because gl_window inherits from window_manager. Is gl_window a window or a window manager?

But that’s just the tip of a very twisty, spaghettified iceberg. Because even though window_manager (assuming it’s a window and not a window manager) is not a gl_window, it has to do gl_window setup in its constructor.

And—and this is getting ahead to the first question—apparently, every window (manager?) and OpenGL window actually contains its own instance of the entire SDL library. So if you have two (OpenGL) window (managers), then you need two instances of SDL.

My friend... you have a mess.

C++ is a modelling language. It works well when you properly model the problem you are trying to solve. It turns into a disaster if you don’t think carefully about what you’re modelling.

Far too often I see coders getting tunnel vision and fixating on jargon like "composition vs. inheritance", "single responsibility principle", yadda yadda, and failing to see the blatantly obvious structure of what they’re modelling. For example:

My thinking was that a window_manager has an sdl_ctx (composition)...

Is that really your thinking? That something that manages SDL windows (or is an SDL window) holds all of SDL within it? A quick peek at what SDL3 has shows me:

  • logging
  • clipboard support
  • keyboard/mouse/joystick support
  • audio
  • threading
  • I/O
  • shared object/DLL management
  • power management
  • ... and more.

All of that is contained within a window manager? (Or a window?)

Forget everything you’ve read about software design, and just think about the basics. Does a window manager contain an entire media library? Is a window manager a window? I think the answers to those questions are obvious even to someone who knows no software design principles, or C++.

Pretending I know nothing about SDL, I would assume that if you were going to make a window using SDL, you would first create the SDL library instance, and then use that to make a window. So, something like:

auto s = sdl{};
auto w = s.create_window();

Or, with a "window manager":

auto s = sdl{};
auto wm = s.get_window_manager();
auto w = wm.create_window();

In other words, SDL is not "inside" of a window, or a window manager. A window (manager) is created from or out of SDL.

Pretending I know just a teensy bit more about SDL, I discover in the documentation that it has multiple subsystems that can be independently initialized. So now I would imagine that the code would look something more like this:

auto s = sdl{};
auto v = s.get_video_subsystem();
auto w = v.create_window();

In any case, because we’ve modelled the problem correctly, things work out properly. You can’t create a SDL window without first creating/initializing SDL itself. Or you can’t create a window without first creating/initializing the video subsystem, and you can’t do that without creating/initializing SDL. Everything just works.

(If it sounds like I’m harping on the obvious, it’s because I’m foreshadowing a potential bug I will be mentioning later.)

Forget the jargon. Just think about the problem. Think about the various components and how they interact. You can’t create a window (or window manager) without first creating/initializing the library that creates/manages windows. Initializing the library obviously has to happen first, and then you use the library to make the window (manager). (And of course, you could initialize the library and then never create any windows—you could just use SDL for its audio functionality for a music player, for example—so it should be obvious that the SDL library should neither be within nor attached to any windows (or window managers).)

So your current model has it back asswards. The "SDL context" is not a property of the window (manager); it is the thing used to create the window (manager). The window does not "HAS-A" SDL context; the SDL context "HAS-A" window.

The questions

Overall Design (Inheritance vs. Composition): My thinking was that a window_manager has an sdl_ctx (composition), while a gl_window is a specialized window_manager (public inheritance). Does this feel like a solid design, or would you recommend sticking to composition throughout for more flexibility or clearer ownership?

I’ve already answered most of this, except I focused mostly on the relationship between sdl_ctx and window_manager, and not so much on the relationship between window_manager and gl_window. But to even begin to speak on that, I have to throw away your terminology, because it’s massively confused.

Either we’re talking about window managers, or we’re talking about windows. Pick a lane. They are not the same thing. I’m going to assume we’re talking about windows, not window managers. So I am going to reparse your model so that gl_window inherits from window... which are both windows, not "managers".

Now, should gl_window inherit from window? That depends. Is a gl_window really just a plain old window with some extra capabilities?

In theory it could go either way. But from what I know about SDL (and I admit I am not hip on SDL3 yet, so I’m relying on SDL2 knowledge), the answer is no. A gl_window is not a plain window. An OpenGL window is created with the same function(s), yes (more or less)... but different parameters. Depending on the parameters you pass to SDL_CreateWindow(), you will get either a plain window or an OpenGL window. An OpenGL window is not just a plain window with extra capabilities; it is a specifically-created OpenGL window.
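
To make the difference concrete, here is a minimal sketch using the SDL3 SDL_CreateWindow() signature that already appears in the question (the titles and sizes are placeholders):

// Same function, different flags: only the second window is OpenGL-capable,
// and it has to be requested that way at creation time.
SDL_Window* plain_window = SDL_CreateWindow("plain", 800, 600, 0);
SDL_Window* gl_capable_window = SDL_CreateWindow("gl", 800, 600, SDL_WINDOW_OPENGL);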

Inheritance is the correct model here, but not the way you’ve done it. Instead, what you want is an abstract base window class, that can be made concrete either as a plain window or an OpenGL window (or a Vulkan window, or...).

(It’s actually more complicated than that, because before you create any OpenGL windows, you first have to set up the global OpenGL context. That clearly isn’t necessary if you’re just creating non-OpenGL windows. So an OpenGL window not only needs to be a completely separate class from non-OpenGL windows, it needs to require somehow that OpenGL has been set up first.)

It really comes down to what window (or window_manager) is actually supposed to be modelling. If window is supposed to be a concrete window, created in the constructor, then it cannot be both an OpenGL window and a non-OpenGL window at the same time. It is either created as an OpenGL window, or it is not. On the other hand, if window is just modelling a handle to a window—that is, it is more of a window_handle or window_ptr than a window per se—then sure, it could be a handle to a window created by any method, so it could be a handle to both OpenGL windows and non-OpenGL windows.

So if window is supposed to model a window (which would make sense), then it must be abstract, because there are different kinds of windows, with different capabilities. Not every window is created the same. (Literally so. Even if they use the same function, they don’t use the same parameters.)

The set_state function and gl aliases: I created the set_state function and the aliases in the gl namespace to make configuration calls cleaner (e.g., set_state(gl::RULE, GL_DEPTH_TEST)). While I still like it, I'm questioning if this is a good approach. Is there a more standard way to achieve pretty much the same result more C++ like? (my question on stack overflow didn't produce anything helpful as you can see)

I’ll be frank... this looks like gibberish to me. I don’t think you’ve seriously thought this through, and I definitely don’t think you’ve actually tried to use it. I’m not even sure you understand what the word "helper" means.

Let’s say I’m a game programmer, using this library to make a game. Let’s further say that I am knowledgeable enough about OpenGL to know that I want to enable GL_DEPTH_TEST for the current context.

Having the knowledge of what the depth test is and why it has to be enabled means I pretty much certainly know enough to know that the way to do it is this:

glEnable(GL_DEPTH_TEST);

So why in the hell would I choose to write this instead:

window_mgr.set_state(glEnable, GL_DEPTH_TEST);

And worse, it seems you intend to have some macros or defined constants to further obfuscate things:

window_mgr.set_state(gl::RULE, GL_DEPTH_TEST);

Indeed for any:

window_mgr.set_state(f, x, y, z, ...);

... why would I not just write:

f(x, y, z, ...);

If .set_state() actually did something internally, then it might be useful (albeit ugly and dangerous). But it doesn’t. It is literally defined to just unravel .set_state(f, x, y, z, ...) to f(x, y, z, ...). So what is the point of it?

I don’t know about you, but I don’t want extra, unnecessary cognitive burden when using a library. I want a library that makes things easier... not harder. If I already know some technology—indeed, I need to already know that technology to even use the library—I don’t want a library that throws away everything I know and forces me to learn a whole new API just for shits and giggles. If a library changes an API I am familiar with (and need to know to use the library), it had better have a damn good reason for doing so. Otherwise, just let me write the code I know.

Speaking of...

Use of helper macros and type aliases: What's the general view on helpers like my PASSIVE_VALUE macro (for static inline constexpr auto) and type aliases?

The general view is: no.

Let’s ignore the specific "helpers" here, and instead focus on the "idea" of "helpers".

I know C++. I have to know C++ to use your C++ library. That’s a given. I don’t necessarily need to be a guru-level expert, but I do need to know the C++ language.

Knowing the C++ language means that I know what static constexpr auto or inline constexpr auto (foreshadowing!) is. What I don’t know is what the hell PASSIVE_VALUE is.

So who exactly are these "helpers" helping?

It can’t be C++/SDL/OpenGL novices, because if someone doesn’t know how to, say, set the number of bits in the depth buffer, how would they figure that out? Assuming they have the documentation for your library, they will see that to set an attribute of the current context, they would have to call window_mgr.set_state(gl::ATTR, ...). Okay... but what goes in the ...? Presumably your library documentation doesn’t document every single possible state of SDL AND OpenGL that exists now and forever. So instead, the coder will have to discover that gl::ATTR is actually SDL_GL_SetAttribute... and then take a second trip into documentation, this time SDL documentation, to discover that the real call is SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24), and THEN they have to translate that BACK into window_mgr.set_state(gl::ATTR, SDL_GL_DEPTH_SIZE, 24), while wondering why they couldn’t just write SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24).

It also can’t be C++/SDL/OpenGL experts that these "helpers" are helping, because they would probably know without even consulting any documentation how to do something. Or at least, they would have familiarity with documentation for C++, SDL, or OpenGL. But now they have extra work to do, because they have to learn that this library does something differently, how to do it, and why... and when they learn that it serves literally no purpose other than obfuscation, expect to hear them curse up a blue streak at having their time wasted.

(To make things even loopier, the comment implies that it is intended that users go out, find the real way to do something, then translate that into this verbose, indirect form... with no indication whatsoever as to how that form is better in any way, or why the user wouldn’t/shouldn’t just do it the natural way. The only clue I’ve been able to suss out is that this nonsense is supposed to be "more C++ like"... which is true on its face, because using templates and perfect forwarding is indeed very much more C++-like than not using those things... but using advanced C++ features just for the sake of using them—when they serve literally no purpose and add nothing—is silly.)

Every layer of code you create, and every abstraction and obfuscation, COSTS. It costs experts far more time to review, maintain, and extend code that has unnecessary bafflegab, and that means a much, much higher chance of bugs. Not to mention frustration.

So who are these "helpers" helping?

Who, exactly, will breathe a sigh of relief when they see UI in the code (!!!) and think: "Wow, that’s actually an unsigned int... and boy am I glad that was hidden from me! Everything is so much clearer!"

So, generally, your "helpers" are a terrible idea. But the specific "helpers" are even worse.

Like this:

#define PASSIVE_VALUE static inline constexpr auto

What the hell is a "passive value"? I’ve been programming C++ for almost 30 years, and I have never heard that term before. I had to DuckDuckGo it, and all I got were pages about financial investing. So apparently no-one else has ever heard the term either.

Now, macros are, in general, evil. But macros that obfuscate perfectly clear and understandable code, and replace it with nonsensical, nonexistent jargon? There has to be a special place in coding hell for those.

Or this:

using UI = unsigned int;

UI typically means "user interface". Of all the things to alias unsigned int to, why would you choose that? I mean, if anything, why not uint? Or even better, why not just leave it as unsigned int... which is clear, exact, and universally understood.

Or this:

template<typename T>
using vec = std::vector<T>;

Who... does... this... HELP? Whose attention span is so short that std::vector<int> is too much for them to handle, while raw::vec<int> is just dandy? Everyone who writes C++ knows what std::vector<T> is; nobody knows what vec<T> is; so everyone that uses or reads this "helper" has to do more work to figure it out... which really doesn’t seem to "help" much, does it?

(To make things even worse, this code base has other types named vec, from the glm library, that are completely unrelated.)

No, throw them all away; every "helper" in your code. Do not obfuscate the obvious. It just infuriates and hampers the experts trying to review, maintain, extend, and use your library... which is not wise if you’re expecting them to be able to understand code well enough to avoid missing or introducing bugs. Obfuscating doesn’t help novices either.

Code review

This is the entirety of the code in ctx_sdl.h:

namespace raw {
class ctx_sdl {
public:
 ctx_sdl();
 virtual ~ctx_sdl() noexcept;
 ctx_sdl(const ctx_sdl&) = delete;
 ctx_sdl& operator=(const ctx_sdl&) = delete;
 ctx_sdl(ctx_sdl&&) = delete;
 ctx_sdl& operator=(ctx_sdl&&) = delete;
};
} // namespace raw

Yet this is the list of headers that precede it:

#include <SDL3/SDL.h>
#include <algorithm>
#include <cstddef>
#include <exception>
#include <glm/glm.hpp>
#include <string>
#include <utility>

Not a single one of those headers is actually used in the header file... and half of them aren’t even needed in ctx_sdl.cpp. Ironically, <stdexcept> is needed in ctx_sdl.cpp, because of runtime_error, yet it is nowhere to be seen.

namespace raw {

This is really not a good top-level namespace, not least because it makes no sense in context. What about any of this library is "raw"?

A better top-level namespace would be your name, and then maybe just SDL if this is meant to be a game-agnostic SDL library. Or if it is meant to be game-specific, then the name of the game. So namespace nekon::sdl or namespace nekon::space_explorer, or something like that. Length is not an issue; it can always be shortened with an alias.

ctx_sdl::ctx_sdl() {
 if (!SDL_Init(SDL_INIT_VIDEO)) {
 std::cerr << "SDL could not initialize! SDL_Error: " << SDL_GetError() << std::endl;
 throw std::runtime_error(
 "SDL could not initialize! SDL_Error: " + std::string(SDL_GetError()) + " in " +
 __FILE__ + " on " + std::to_string(__LINE__));
 }
}

If you really want to print the file and line number in the error message, the modern way to do it is with std::source_location. But, frankly, the file and line number is probably less useful than the function name. The line number at least changes frequently as the code gets refactored, and the file can sometimes change, too. The function name is much less likely to change, but even if it does, assuming your function names are descriptive, it will at least still be somewhat obvious where the problem occurred. And it will always be much easier to search a code base’s history for the cause of a problem using a function name than it would be using a specific file and line number.

You should also look into the modern formatting library, because constructing the string the way you are is wildly inefficient. Granted, you’re crashing anyway at that point, so efficiency is hardly a concern, but on the other hand, you are crashing, because something went terribly wrong, and maybe allocating memory over and over at that point should be minimized.

It is probably also not wise to use std::cerr to print an error message. SDL can run on systems where you don’t have easy access to a console. You might as well use SDL’s built-in logging functionality, and/or message box display.

(And you definitely shouldn’t use std::endl. std::endl is always wrong.)

So:

ctx_sdl::ctx_sdl(std::source_location src_loc = std::source_location::current())
{
 if (not ::SDL_Init(SDL_INIT_VIDEO))
 {
 auto message = std::format("SDL could not initialize! SDL_Error: {}", ::SDL_GetError());
 if (not ::SDL_ShowSimpleMessageBox(SDL_MESSAGEBOX_ERROR, "SDL Error", message.c_str(), nullptr))
 std::cerr << message << '\n';
 throw std::runtime_error{std::format("{} in {}", message, src_loc.function_name())};
 }
}

Now, I wonder if it makes sense to hard-code the initialization of the video subsystem and only the video subsystem. What about audio? What about gamepad support? Are you making a game with no sound and no input? So, basically, the world’s most over-engineered animated GIF?

Another thing to mention: making a type (non-copyable and) non-movable is almost always a sign of bad design. Virtually nothing in reality—no physical object and no concept—is truly immobile. Just about the only types in C++ that are justifiably non-movable are things like synchronization primitives—like std::mutex for example—because the operating system requires them to be in a fixed location in memory.

When someone makes a type non-movable, that’s usually a sign they are doing something conceptually wrong, like using the singleton pattern.

In this case, if ctx_sdl represents "the SDL library instance", then there is no reason it couldn’t be movable. (It still wouldn’t make sense to make it copyable, because you don’t want to enable copying the SDL instance, just moving it around to where it’s needed.)

In this case, all you would need is a flag to mark a given ctx_sdl instance as an owning instance. Then when you move it, you move the flag (unsetting it in the moved-from instance), and you only call SDL_Quit() on an instance where the flag is set.

SDL technically allows SDL_Init() and SDL_Quit() to be called multiple times, and uses reference-counting to know when to actually clean things up. You could rely on that, or guard to make sure that only a single instance is created, or use your own reference counting. Either way, the type could be movable, and should be.

On to helper_functions.h:

#ifndef SPACE_EXPLORER_HELPER_FUNCTIONS_H
#define SPACE_EXPLORER_HELPER_FUNCTIONS_H
#include <glad/glad.h>
#include <SDL3/SDL.h>
#include <iostream>
namespace raw {
void init_glad() {
 if (!gladLoadGLLoader((GLADloadproc)SDL_GL_GetProcAddress)) {
 std::cerr << "Failed to initialize GLAD" << std::endl;
 throw std::runtime_error("Failed to initialize GLAD: " + std::string(__FILE__) + " - " +
 std::to_string(__LINE__));
 }
}
} // namespace raw
#endif // SPACE_EXPLORER_HELPER_FUNCTIONS_H

What is the point of this file? Setting aside the issues with missing includes, the one and only thing in this header is the init_glad() function, and this function is defined... not declared, but defined... in the header, which is a big no-no. The only reason this project compiles at all is because this header is only used in one place. If you included it in multiple translation units, your build would break.

But back to the question... what is the point of it? Are there multiple modules that need to know about the init_glad() function? I couldn’t even tell you myself, because you have a mess of responsibilities here, where the OpenGL window... manager(?)... class gl_window is actually acting like a window manager and not a window. More or less.

It seems to me this function is an implementation detail. So I’m not sure why it’s in a header.
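
If other translation units really did need it, the conventional fix is to declare it in the header and define it in exactly one source file (a sketch, keeping the original namespace and include guard; alternatively, mark the definition inline and accept the header bloat):

// helper_functions.h -- declaration only
#ifndef SPACE_EXPLORER_HELPER_FUNCTIONS_H
#define SPACE_EXPLORER_HELPER_FUNCTIONS_H
namespace raw {
void init_glad(); // defined in helper_functions.cpp
} // namespace raw
#endif // SPACE_EXPLORER_HELPER_FUNCTIONS_H

// helper_functions.cpp -- the single definition
#include "helper_functions.h"
#include <glad/glad.h>
#include <SDL3/SDL.h>
#include <stdexcept>
namespace raw {
void init_glad() {
 if (!gladLoadGLLoader((GLADloadproc)SDL_GL_GetProcAddress))
 throw std::runtime_error("Failed to initialize GLAD");
}
} // namespace raw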

Now in window_manager.h, there is this:

namespace gl {
// add more if you need
PASSIVE_VALUE& ATTR = SDL_GL_SetAttribute;
PASSIVE_VALUE& RULE = glEnable;
PASSIVE_VALUE& VIEW = glViewport;
PASSIVE_VALUE& MOUSE_GRAB = SDL_SetWindowMouseGrab;
PASSIVE_VALUE& RELATIVE_MOUSE_MODE = SDL_SetWindowRelativeMouseMode;
PASSIVE_VALUE& CLEAR_COLOR = glClearColor;
} // namespace gl

Now I already noted in the design section what a silly idea this is... although I didn’t cover everything, like the fact that you would have to account for every possible OpenGL and SDL configuration function, now and in the future (where is glDepthFunc? where is SDL_SetWindowSurfaceVSync?)... but aside from the design issues, there are some practical problems, too.

For starters, PASSIVE_VALUE expands to static inline constexpr auto, so, the real definitions of these variables are more like:

static inline constexpr auto& ATTR = SDL_GL_SetAttribute;

Okay, now that we have un-obfuscated the code... what do you think the above line does?

Focus specifically on the static inline part. What do you think those keywords do?

Very briefly, static, when applied to variables at namespace level, means: "this variable is private, available in this translation unit only". (Although, this usage of static is archaic and ill-advised.)

Meanwhile, inline in the same context means: "all translation units should share a single copy of this variable".

So what do you think it means to have a variable that is private to a translation unit, yet shared universally across all translation units?

Don’t just blindly throw keywords at a problem; understand what they mean, and what you intend.

On to the next problem: the ampersand.

Why are these references? Each of them is a function pointer. Why do you want a reference to a function pointer? Why not just the function pointer?

Finally, SCREAMING_SNAKE_CASE is conventionally reserved for macros. (And PASSIVE_VALUE is a terrible macro name; you should always use a (hopefully) distinct prefix when you use macros at all... which should be almost never.)
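
For comparison, if you insisted on keeping named constants for a few of these (and, to be clear, I don’t think you should), the un-obfuscated spelling would be a plain constexpr function pointer: lowercase, no macro, no static, no reference. A sketch, shown only for the SDL functions (glad’s "functions" are themselves runtime-loaded function pointers and would need different treatment):

namespace raw::gl {
inline constexpr auto set_attribute = SDL_GL_SetAttribute;
inline constexpr auto set_mouse_grab = SDL_SetWindowMouseGrab;
} // namespace raw::gl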

class window_manager {
protected:
 SDL_Window* window = nullptr;
 ctx_sdl sdl_ctx;

Protected data members are almost always a bad idea.

But there is a much more subtle problem here. Class data members are constructed in the order they appear in the class, and—more importantly—destroyed in reverse order. That means window is constructed before sdl_ctx. And sdl_ctx is destroyed before window.

Right now, you’re safe, because window is just a pointer; the actual window is destroyed in the window_manager destructor, and then sdl_ctx is destroyed just after, and then the pointer (not the thing it was pointing to, the actual pointer) is destroyed... which is harmless.

But if someone decided to improve the code by using a smart pointer instead of a naked pointer (which would normally be a good refactor):

class window_manager
{
 class sdl_window_deleter
 {
 public:
 auto operator()(::SDL_Window* p) const noexcept
 {
 if (p)
 ::SDL_DestroyWindow(p);
 }
 };
 std::unique_ptr<SDL_Window, sdl_window_deleter> window;
 ctx_sdl sdl_ctx;
public:
 // ...
 virtual ~window_manager() = default; // don't need a destructor anymore
};

Now you have a problem. Because SDL is being shut down before the window is being destroyed.

The root cause of this problem is the poor modelling, but the direct issue is that you should avoid class data members having dependencies on each other. And you should certainly avoid requirements that they be constructed/destructed in a specific order. That is not always possible, but it is still important to strive for it. (It’s certainly possible in this case, because sdl_ctx shouldn’t be a member of the class to begin with.)

 window_manager() = delete;
 window_manager(const std::string& window_name);
 // ...
 virtual ~window_manager() noexcept;

There is no need to delete the default constructor. It is automatically suppressed once you declare any other constructor.

You probably want to make the other constructor explicit though. I doubt you want strings implicitly convertible to window_manager.

There is also no need to mark the destructor as noexcept; that is the default anyway. You shouldn’t repeat defaults; that just creates confusion and noise.

Like ctx_sdl, this type could be movable (but not copyable), and it probably should be. If sdl_ctx were removed from the class (or if ctx_sdl were made movable), and the raw pointer replaced with a smart pointer, then this type would be automatically movable, but non-copyable, so you wouldn’t need to write any move or copy ops, or the destructor. That is the way of good C++ code.
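
To illustrate, here is a rule-of-zero sketch (includes omitted), assuming the SDL library object lives outside the class as argued above, and reusing the deleter idea from the earlier snippet:

class window_manager
{
 struct sdl_window_deleter
 {
 void operator()(::SDL_Window* p) const noexcept { if (p) ::SDL_DestroyWindow(p); }
 };
 std::unique_ptr<::SDL_Window, sdl_window_deleter> window;
public:
 explicit window_manager(char const* window_name)
 : window{::SDL_CreateWindow(window_name, 2560, 1440, SDL_WINDOW_OPENGL | SDL_WINDOW_FULLSCREEN)}
 {
 if (not window)
 throw std::runtime_error{std::string{"Window could not be created! SDL_Error: "} + ::SDL_GetError()};
 }
 // No destructor and no copy/move operations: the class is automatically
 // movable and non-copyable because std::unique_ptr is. (Add the virtual
 // destructor back only if you really do keep this as a base class.)
};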

Now I know I’ve already said quite a bit about .set_state(), but...:

 template<typename F, typename... Ts>
 void set_state(F&& func, Ts... values) {
 std::forward<F>(func)(std::forward<Ts>(values)...);
 }

If you’re going to use perfect forwarding for this, you might as well do it right. The Ts... values needs to use forwarding references: Ts&&... values. Right now the std::forward<Ts>(values)... does effectively nothing; you could just as well write values....
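
So, if you keep it at all, a corrected sketch would be:

 template<typename F, typename... Ts>
 void set_state(F&& func, Ts&&... values)
 {
 // Forwarding references all the way through, so lvalues stay lvalues
 // and rvalues stay rvalues.
 std::forward<F>(func)(std::forward<Ts>(values)...);
 }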

On to gl_window:

namespace raw {
class gl_window final : public window_manager {

final is an anti-pattern. If it made sense to extend "window manager" to "OpenGL window", then it would also make sense to extend "OpenGL window" to some more specialized form of an OpenGL window. There is no good reason to disallow that.

(And yes, I am aware that there may be de-virtualization opportunities available when the compiler knows you are using a gl_window. But 1) those situations are often not that common, because the point of a polymorphic type is to use polymorphism; and 2) when they do come up, in many (most?) cases the compiler can figure out that de-virtualization is safe without the need for final.)

And this final:

 ~gl_window() noexcept final;

... is completely unnecessary, and outright silly. If the class is final, obviously every virtual function (including the destructor) will be final as well.

As mentioned earlier, the noexcept is specious, too.

 gl_window(std::string window_name);

You’re taking a string by value here. I question whether it is a good idea to take a string at all. SDL is not using strings internally, just plain old NUL-terminated character arrays. If the window name is given as a plain old NUL-terminated character array, this will force creating a string for no reason. Ideally what you want is something like the proposed zstring_view, which is why so many code bases have their own version of it.
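
A sketch of the simplest alternative, taking the NUL-terminated title directly (the signature is mine, not SDL’s):

// String literals, std::string::c_str(), zstring_view-alikes: all still work,
// and no std::string is constructed along the way.
explicit gl_window(char const* window_name);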

gl_window::gl_window(std::string window_name) : window_manager(window_name) {
 ctx = SDL_GL_CreateContext(window);
 // ... [snip] ...
 init_glad();
}
gl_window::~gl_window() noexcept {
 SDL_GL_DestroyContext(ctx);
}

Ignoring the inheritance, it seems pretty clear that this class is not a gl_window, but rather a gl_context. (Although, I don’t think it is the responsibility of either an OpenGL window or an OpenGL context to initialize glad.)
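
If you did split it out, a standalone RAII wrapper for the context might look roughly like this (a sketch; includes omitted, and the class name and exception choice are mine):

class gl_context
{
 ::SDL_GLContext _ctx = nullptr;
public:
 explicit gl_context(::SDL_Window* window)
 : _ctx{::SDL_GL_CreateContext(window)}
 {
 if (not _ctx)
 throw std::runtime_error{std::string{"SDL_GL_CreateContext failed: "} + ::SDL_GetError()};
 }
 ~gl_context() { if (_ctx) ::SDL_GL_DestroyContext(_ctx); }
 gl_context(gl_context&& other) noexcept { std::swap(_ctx, other._ctx); }
 auto operator=(gl_context&& other) noexcept -> gl_context& { std::swap(_ctx, other._ctx); return *this; }
 gl_context(gl_context const&) = delete;
 auto operator=(gl_context const&) -> gl_context& = delete;
 auto get() const noexcept { return _ctx; }
};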

I think that’s everything.

The main problem here seems to be confusion about the problem being modelled: a window does not contain an entire media library, nor is a window a more specialized case of a window manager, and so on. But underlying that seems to be a fixation on using "modern" C++ features and/or specific software design principles. Don’t focus on the trees so much that you fail to see the forest. In other words, don’t fixate on things like "composition versus inheritance"; just model the problem as carefully and correctly as you can (as close to the real structure of what you’re modelling as possible), and the vast majority of the time the "composition versus inheritance" dilemma will solve itself.

Also, don’t use flashy C++ features just for the sake of using them, and don’t use features you don’t understand. Like, don’t stick static on declarations just because that’s what all the old-school C hackers did. Understand why they used static, and what exactly it does. (And then don’t use it, because it’s archaic and there are better tools now.)

Extension from comments

Modelling OpenGL windows

There is no one "truly correct" way to model a problem. The right solution is very situation-specific, and depends not just on the problem you are modelling, and not just on the thing you are trying to accomplish, because there are often many different ways to look at both of those things. But just because there is no single right answer, that doesn’t mean there are no wrong answers.

There are two types of windows in your model: OpenGL-capable windows, and non-OpenGL-capable windows. There are many correct ways to model this, but direct inheritance would always be wrong. Why? Well, it would be more obvious if you renamed the types to opengl_capable_window and non_opengl_capable_window. Now it’s pretty clear that "opengl_capable_window IS-A non_opengl_capable_window" can’t be true.

(There is another universe where an OpenGL-capable window is just an ordinary window with OpenGL capability added to it after-the-fact, and I think that’s probably what SDL was striving for. In that universe, you would create a window, then set a flag or call a function or something to "turn on" OpenGL capability in that window. In that universe, then yes, an OpenGL window would be a plain window, and inheritance would be correct. But in this universe, an OpenGL window is a different beast altogether, that just happens to be (usually!) created with roughly the same function calls as an ordinary window... albeit with special parameters, and with extra function calls before and after.)

The model I suggested above is to say that an OpenGL window and a non-OpenGL window are different things... but both are windows. There is an abstract idea of a window, with a set of properties that both OpenGL windows and non-OpenGL windows have. Both types of windows probably have sizes, and positions, and states (maximized, fullscreen, etc.), and maybe other stuff like a handle or ID.

The way to model that is with an abstract base class. Maybe something like this:

class window
{
 SDL_Window* _window = nullptr;
protected:
 explicit window(SDL_Window* w)
 : _window{w}
 {
 if (not _window)
 throw window_create_error{};
 }
public:
 virtual ~window()
 {
 if (_window)
 ::SDL_DestroyWindow(_window);
 }
 window(window&& w) noexcept
 {
 std::ranges::swap(_window, w._window);
 }
 auto operator=(window&& w) noexcept -> window&
 {
 std::ranges::swap(_window, w._window);
 return *this;
 }
 // Non-copyable:
 window(window const&) = delete;
 auto operator=(window const&) -> window& = delete;
 auto get() const noexcept { return _window; }
 // Universal window interface:
 // Some functions can have a sensible default (but you can still
 // override them if you need to):
 virtual auto dimensions() const -> rect<int>
 {
 auto dims = rect<int>{};
 if (not ::SDL_GetWindowSize(_window, &dims.w, &dims.h))
 throw window_info_error{};
 if (not ::SDL_GetWindowPosition(_window, &dims.x, &dims.y))
 throw window_info_error{};
 return dims;
 }
 // Others need to be abstract:
 virtual auto get_surface() -> surface_ptr = 0;
};

Now both simple windows and OpenGL windows can be inherited from that base type:

class basic_window : public window
{
public:
 basic_window(char const* title, int width, int height, std::uint_fast32_t flags)
 : window{::SDL_CreateWindow(title, width, height, flags)}
 {}
 // ... anything else you need ...
};

But remember that OpenGL windows require that OpenGL first be set up.

class opengl
{
 // ...
};
class gl_window : public window
{
 gl_context_ptr _gl_context = nullptr;
public:
 gl_window(opengl const&, char const* title, int width, int height, std::uint_fast32_t flags)
 : window{::SDL_CreateWindow(title, width, height, SDL_WINDOW_OPENGL | flags)}
 {
 if (auto p_context = ::SDL_GL_CreateContext(get()); p_context)
 _gl_context = p_context;
 else
 throw ...;
 }
 // ... anything else you need ...
};
// Example usage:
auto ogl = opengl{version, profile, etc. ...};
auto w = gl_window{ogl, ...};

But again, there is no one "truly correct" way to model a problem. There might be another way to look at the system, and come up with a different kind of model that does have an OpenGL window inherit from a non-OpenGL window. Maybe you define a window class that constructs a basic window, but it has a protected constructor that allows derived classes to change the construction method. Like so:

class window
{
public:
 window() { /* construct normal window */ }
 virtual ~window() { /* destroy window */ }
 // ... etc. ...
protected:
 explicit window(::SDL_Window* p)
 {
 // Take ownership of window constructed by derived class.
 //
 // NOTE: You will have to violate the strong exception guarantee
 // and destroy the window *even on failure*. Otherwise it will
 // leak.
 }
};
class gl_window : public window
{
 static auto _init() -> ::SDL_Window* { /* construct OpenGL window */ }
public:
 gl_window()
 : window{_init()}
 { /* any other gl-specific window set up (getting context, for example) */ }
};

I don’t think that’s a good design, but I am not the god of good design. You may have reasons to prefer that design. Indeed, SDL itself seems to lean toward that structure.

But you can see how circuitous it is, and how it intertwines responsibilities. If you have reasons to prefer this design, fine... but they’d better be good reasons.

Initializing SDL

I’m going to share with you the basic gist of a really well-designed SDL library I once saw. I can’t share the exact code, because it’s not mine (and I don’t have it), and I’m re-engineering it from admittedly fuzzy memory, but hopefully it should give you some ideas.

But before I do that, I have to correct some really, really bad ideas I saw about SDL initialization in @user673679’s review.

  1. "It might be reasonable to make this class a singleton." It is not. Singleton is an anti-pattern, for very good reasons.

    One thing it is important to understand is that "singleton" does not mean "there can be only one". The singleton pattern requires two things: 1) "there can be only one"; AND 2) there is a global way to access it. That’s (one of) the reason(s) the singleton pattern is so bad: it’s just an over-engineered global variable, with all the problems that global variables bring with them.

  2. "... just stick a big comment in saying to only ever create one of it." 🤦🏼 Good grief, no. If there is only supposed to be one of a thing, only let there be one of the thing. Honestly, code reviews are supposed to be for offering good suggestions, not "fuck it, just be lazy". Minimizing effort is one thing; encouraging objectively bad practices is something else entirely.

  3. "Or... just put two functions - sdl_init and sdl_quit - in main.cpp and call them from main, and never worry about it again..." This is an incredibly terrible, incredibly dumb idea.

First of all, what would even be the point? You’re just rewriting the C interface.

There is a reason we use RAII in C++, and it is not just because we like to use the coolest features to impress the chicks (or boys, or both, as you please). RAII is vital for correctness.

"Just call sdl_quit." Yeah, sure, okay... only it’s not that simple. I’ll bet you a million dollars—because I am absolutely certain to win—that someone will eventually write a program where sdl_quit() fails to be called. The most likely cause will be because someone failed to take exceptions into account, but it’s also inevitable that someone will just fuck up the logic and have a non-exceptional return path that misses the call. It’s been a long time since I looked up SDL tutorials, but I recall that they were generally riddled with code that didn’t clean up properly.

But there is an even more important reason why non-RAII "clean-up" functions like sdl_quit() are a terrible idea... a much more subtle, and much more dangerous reason.

Can you see what is wrong with the following simple program? Assume no exceptions, and assume that window is a type pretty much like your window_manager or gl_window.

auto main() -> int
{
 sdl_init();
 auto w = window{};
 w.clear(colour::blue);
 std::this_thread::sleep_for(5s);
 sdl_quit();
}

That’s just five lines, and two of them are not even necessary for the problem. Yet there is a critical bug. Can you spot it?

Remember that window destroys the window it represents in the destructor. But the destructor is only called when the object is destroyed... which happens at the end of the scope. So the code really looks more like this:

auto main() -> int
{
 sdl_init();
 auto w = window{};
 // ...
 sdl_quit();
 // Implicit:
 w.~window();
}

Now is the problem obvious?

Yup, SDL is being cleaned up while the window still exists. And then the window is being destroyed after SDL has already been cleaned up.

This is why manual resource management is such a terrible idea, and why RAII is The Way™ to do things in C++.

It’s not just that mixing RAII and manual management is dangerous. Manual management is dangerous on its own. Because if you have to manually release both SDL and the window, you have to be careful to do it in the right order. As things get more complicated, that gets harder and harder. But RAII always works correctly. (Unless you consciously do something weird to circumvent it, of course... but then that’s your problem; C++ just does the right thing by default, but it assumes you know what you’re doing and gives you the power to break things if you think you know better. So don’t, unless you really do know better.)

Even if this were just a one-off thing that you didn’t want to make a library for, localized just to the main.cpp file, you should still make a RAII class for it. It can be a sloppy, slapped together class, and still do the job. And it takes barely two or three more lines than the old-school manual init/quit code... and ultimately much less if you do things properly.

Instead of:

auto sdl_init() { ... }
auto sdl_quit() { ... }
auto main() -> int
{
 sdl_init();
 // BE VERY CAREFUL NOT TO USE ANY RAII TYPES HERE THAT DEPEND ON SDL!!!
 //
 // ALSO, DON'T DO ANYTHING THAT MIGHT THROW!
 try
 {
 // DON'T RETURN FROM WITHIN THIS BLOCK!
 // ... etc. ...
 }
 catch (...)
 {
 // DON'T RETURN (OR THROW) HERE!
 }
 // AGAIN, DON'T DO ANYTHING THAT MIGHT THROW!
 //
 // AND NO RAII TYPES THAT DEPEND ON SDL!
 sdl_quit();
}

... compare this:

struct sdl
{
 sdl() { ... }
 ~sdl() { ... }
};
auto main() -> int
{
 auto _ = sdl{};
 // ... etc. ...
}
// Note: There is no mess of warning comments in main(), because there
// is nothing to warn about. Everything just works. Correctly. Safely.

That’s not just simpler. That’s not just shorter. That is correct; robust and safe.

Now, since you’re going to want to make a RAII type to manage SDL anyway, you might as well include it in your SDL library. And you might as well use it to enforce other levels of correctness, like that no SDL windows can be created unless SDL is initialized first.

So let’s do it right.

The SDL wrapper library I am basing this design on had a design principle to look as much like "vanilla" SDL as possible. Generally, you were supposed to be able to take any SDL_DoMeow() function, change it to sdl::do_meow(), and it would basically work the same. It even included functions like sdl::destroy_window() that took an sdl::window... and did effectively nothing (because sdl::window was a RAII type that would be destroyed at the end of the scope anyway). (I think in reality, the function did reset the smart pointer, for technical reasons relating to the fact that you were supposed to be able to interleave "vanilla" SDL.) The theory was that you could do an automatic, machine translation from "pure" SDL code to the C++-wrapped version.

I don’t really agree with that design principle—at least, not to the extreme they went to—but there was an interesting idea there I’ll be bringing up shortly.

So, let’s start with a basic SDL wrapper class:

namespace nekon::sdl {
class sdl
{
public:
 sdl();
 ~sdl();
};
// The actual implementations should be in the source file, not the header.
//
// I’m just putting them here for brevity.
sdl::sdl() { ::SDL_Init(0); }
sdl::~sdl() { ::SDL_Quit(); }
} // namespace nekon::sdl

This is already easy to use:

auto main() -> int
{
 try
 {
 auto sdl = nekon::sdl::sdl{};
 // ... rest of game goes here ...
 }
 catch (...)
 {
 return handle_error(); // Lippincott function to handle errors.
 }
}
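
A "Lippincott function" is just a centralized catch-and-translate helper. A rough sketch of what handle_error() could look like (it must only be called from inside a catch block, because of the bare throw):

auto handle_error() -> int
{
 try
 {
 throw; // rethrow the exception currently being handled
 }
 catch (std::exception const& e)
 {
 std::cerr << "fatal: " << e.what() << '\n';
 }
 catch (...)
 {
 std::cerr << "fatal: unknown error\n";
 }
 return EXIT_FAILURE;
}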

Now, the SDL library object should not be copyable... but it could, and should, be movable:

class sdl
{
public:
 sdl();
 ~sdl();
 sdl(sdl&&) noexcept = default;
 auto operator=(sdl&&) noexcept -> sdl& = default;
 // automatically non-copyable because we specified the move ops
};
sdl::sdl() { ::SDL_Init(0); }
sdl::~sdl() { ::SDL_Quit(); }

Except there’s a problem. If we do:

auto s1 = sdl{};
auto s2 = std::move(s1);

Now SDL_Quit() will be called twice: once when s1 is destroyed, even though it is moved from, and once when s2 is destroyed.

There are a number of ways to fix this, but the easiest is a boolean flag:

class sdl
{
 bool _owner = false;
public:
 sdl();
 ~sdl();
 sdl(sdl&&) noexcept;
 auto operator=(sdl&&) noexcept -> sdl&;
};
sdl::sdl()
 : _owner{true}
{
 ::SDL_Init(0);
}
sdl::~sdl()
{
 if (_owner)
 ::SDL_Quit();
}
sdl::sdl(sdl&& s) noexcept
{
 std::ranges::swap(_owner, s._owner);
}
auto sdl::operator=(sdl&& s) noexcept -> sdl&
{
 std::ranges::swap(_owner, s._owner);
 return *this;
}

Now this code will work just fine:

auto s1 = sdl{};
auto s2 = std::move(s1);

The next problem is multiple initialization. Now, SDL itself allows you to repeatedly initialize it, so long as you de-initialize it the same number of times (it is reference-counted internally). Personally, I think that is a terrible design. I can’t think of a non-mistake reason for initializing a library repeatedly. Especially if you are initializing it differently each time. But if you really want to do it, you can do the same thing SDL does; simply use a reference counter, don’t call SDL_Init() if it’s not zero, and only call SDL_Quit() when it becomes zero again.
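
If you did want to allow that kind of nesting, a reference-counted sketch (an atomic counter plus the _owner flag from above; this glosses over the race between the first SDL_Init() call and a concurrent second construction, and over instances that want different flags) is only a few lines more:

namespace {
auto sdl_ref_count = std::atomic<int>{0};
} // anonymous namespace
sdl::sdl()
 : _owner{true}
{
 // Only the first instance actually initializes SDL.
 if (sdl_ref_count.fetch_add(1) == 0 and not ::SDL_Init(0))
 {
 sdl_ref_count.fetch_sub(1);
 throw sdl_initialization_error{};
 }
}
sdl::~sdl()
{
 // Only owning instances decrement; the last one shuts SDL down.
 if (_owner and sdl_ref_count.fetch_sub(1) == 1)
 ::SDL_Quit();
}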

But let’s restrict it to initializing SDL only once at a time. And, for bonus, let’s make it thread-safe:

class sdl
{
 bool _owner = false;
public:
 sdl();
 ~sdl();
 sdl(sdl&&) noexcept;
 auto operator=(sdl&&) noexcept -> sdl&;
};
// In source file:
namespace {
// We could make this a (private) static data member of the class,
// but it's really an implementation detail the rest of the world
// doesn't need to know about. So let's hide it in the source file.
auto sdl_is_initialized = std::atomic_flag{};
} // anonymous namespace
sdl::sdl()
 : _owner{true}
{
 if (sdl_is_initialized.test_and_set())
 throw sdl_multiple_initialization_error{};
 if (not ::SDL_Init(0))
 {
 sdl_is_initialized.clear();
 throw sdl_initialization_error{};
 }
}
sdl::~sdl()
{
 if (_owner)
 {
 ::SDL_Quit();
 sdl_is_initialized.clear();
 }
}

Now, what if we want to pass flags to SDL_Init()? Ignore the subsystems for the moment, and assume there are other flags that SDL_Init() accepts (there used to be!).

Well, we could add a constructor that takes the flags, but instead, let’s use the design principle I mentioned before. Let’s make it so that if old code called SDL_Init(/*flags*/), new code can do sdl::init(/*flags*/):

class sdl
{
 bool _owner = false;
 // Don't make this public, because we generally don't want to
 // encourage passing random integer values.
 //
 // We only allow calling init() with "random" values because we
 // presume it is transitioned from older, working code.
 //
 // For new code, we should take explicit types for any flags we
 // want to support (there are none in SDL3 right now).
 explicit sdl(std::uint_fast32_t flags);
public:
 sdl();
 ~sdl();
 sdl(sdl&&) noexcept;
 auto operator=(sdl&&) noexcept -> sdl&;
 friend auto init(std::uint_fast32_t) -> sdl;
};
auto init(std::uint_fast32_t flags) -> sdl;
auto init() -> sdl; // doesn't need to be a friend
// In source file:
auto init(std::uint_fast32_t flags) -> sdl
{
 return sdl{flags};
}
auto init() -> sdl
{
 return sdl{};
}
sdl::sdl()
 : sdl{std::uint_fast32_t{0}}
{}
sdl::sdl(std::uint_fast32_t flags)
 : _owner{true}
{
 if (sdl_is_initialized.test_and_set())
 throw sdl_multiple_initialization_error{};
 if (not ::SDL_Init(static_cast<::SDL_InitFlags>(flags)))
 {
 sdl_is_initialized.clear();
 throw sdl_initialization_error{};
 }
}

Okay, now up until this point I haven’t mentioned initializing the subsystems. In vanilla SDL, you could pass a set of flags to SDL_Init() to initialize the various subsystems... or you could do it manually after the fact:

// Ignoring error handling
SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_GAMEPAD);
// OR:
SDL_Init(0);
SDL_InitSubSystem(SDL_INIT_VIDEO);
SDL_InitSubSystem(SDL_INIT_AUDIO);
SDL_InitSubSystem(SDL_INIT_GAMEPAD);

C hackers prefer the former way, because doing it the latter way technically requires all kinds of nested-if shenanigans or other nonsense to handle errors correctly. But the latter way is superior because it gives you better control and better diagnostics if one of those systems should fail, and in C++, all of the headaches that the C hackers want to avoid don’t exist.

So what we would want to write is code something like:

auto s = sdl::sdl{}; // initialize SDL
s.initialize_video_subsystem();
s.initialize_audio_subsystem();
s.initialize_gamepad_subsystem();

And error handling should be automatic, as in each subsystem should be cleaned up in the reverse order that they were initialized.

So let’s start with a base class for subsystems:

namespace nekon::sdl {
class subsystem
{
public:
 virtual ~subsystem() = 0;
};
// In source file:
subsystem::~subsystem() = default;
} // namespace nekon::sdl
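
To make the pattern concrete, a hypothetical video subsystem could be as simple as this (video_subsystem and subsystem_initialization_error are names invented for the sketch):

namespace nekon::sdl {
class video_subsystem : public subsystem
{
public:
 video_subsystem()
 {
 if (not ::SDL_InitSubSystem(SDL_INIT_VIDEO))
 throw subsystem_initialization_error{};
 }
 ~video_subsystem() override
 {
 ::SDL_QuitSubSystem(SDL_INIT_VIDEO);
 }
 // plus create_window(...) and whatever else the video subsystem should expose
};
} // namespace nekon::sdl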

Now the main SDL object needs to keep track of which subsystems are initialized.

class sdl
{
 // ...
 // The order of the subsystems in the vector is the order they were
 // initialized.
 std::vector<std::unique_ptr<subsystem>> _subsystems;
 // Also need a mutex to guard the subystems, to prevent race
 // conditions when initializing, getting, quitting, etc.
 //
 // We wrap it in a unique pointer so the class is still movable.
 std::unique_ptr<std::mutex> _subsystems_mutex;
 // ...
};
// In source file:
sdl::sdl(std::uint_fast32_t flags)
 : _owner{true}
{
 if (sdl_is_initialized.test_and_set())
 throw sdl_multiple_initialization_error{};
 _subsystems_mutex = std::make_unique<std::mutex>();
 if (not ::SDL_Init(static_cast<::SDL_InitFlags>(flags)))
 {
 sdl_is_initialized.clear();
 throw sdl_initialization_error{};
 }
}
sdl::~sdl()
{
 if (_owner)
 {
 // Make sure to quit subsystems in reverse order.
 std::ranges::for_each(
 _subsystems | std::views::reverse,
 [] (auto&& s) { s.reset(); }
 );
 ::SDL_Quit();
 sdl_is_initialized.clear();
 }
}

And there need to be functions to initialize, get, and quit subsystems:

class sdl
{
 // ...
 template<std::derived_from<subsystem> Subsystem>
 auto _find() const
 {
 auto const is_subsystem = [] (auto&& s) { return dynamic_cast<Subsystem*>(s.get()) != nullptr; };
 auto const p = std::ranges::find_if(_subsystems, is_subsystem);
 auto const e = std::ranges::end(_subsystems);
 if (p != e)
 {
 if (std::ranges::any_of(std::ranges::subrange{std::ranges::next(p), e}, is_subsystem))
 throw multiple_subsystems_found{};
 }
 return p;
 }
public:
 template<std::derived_from<subsystem> Subsystem, typename... Args>
 requires std::constructible_from<Subsystem, Args...>
 auto initialize(Args&&... args) -> Subsystem&
 {
 auto _ = std::scoped_lock{*_subsystems_mutex};
 if (auto p = _find<Subsystem>(); p != std::ranges::end(_subsystems))
 throw subsystem_already_initialized{};
 _subsystems.emplace_back(std::make_unique<Subsystem>(std::forward<Args>(args)...));
 return static_cast<Subsystem&>(*_subsystems.back());
 }
 template<std::derived_from<subsystem> Subsystem>
 auto quit()
 {
 auto _ = std::scoped_lock{*_subsystems_mutex};
 if (auto p = _find<Subsystem>(); p != std::ranges::end(_subsystems))
 _subsystems.erase(p);
 else
 throw subsystem_not_found{};
 }
 template<std::derived_from<subsystem> Subsystem>
 auto find() const -> Subsystem*
 {
 auto _ = std::scoped_lock{*_subsystems_mutex};
 if (auto p = _find<Subsystem>(); p != std::ranges::end(_subsystems))
 return static_cast<Subsystem*>(p->get()); // _find already verified the dynamic type
 else
 return nullptr;
 }
 template<std::derived_from<subsystem> Subsystem>
 auto get() const -> Subsystem&
 {
 if (auto s = find<Subsystem>(); s)
 return *s;
 throw subsystem_not_found{};
 }
 // ...
};
// Usage:
auto&& sdl = nekon::sdl::sdl{};
auto&& video = sdl.initialize<nekon::sdl::video_subsystem>();
auto&& audio = sdl.initialize<nekon::sdl::audio_subsystem>();
auto&& gamepad = sdl.initialize<nekon::sdl::gamepad_subsystem>();
// If the subsystem has already been initialized:
auto&& video = sdl.get<nekon::sdl::video_subsystem>();
// Or you could do it like this:
// Set everything up...
auto&& sdl = nekon::sdl::sdl{};
sdl.initialize<nekon::sdl::video_subsystem>();
sdl.initialize<nekon::sdl::audio_subsystem>();
sdl.initialize<nekon::sdl::gamepad_subsystem>();
// Now actually do stuff...
auto&& window = sdl.get<nekon::sdl::video_subsystem>().create_window(...);
// Or get the subsystem handle one time, and keep using it:
auto&& video = sdl.get<nekon::sdl::video_subsystem>();
auto&& window = video.create_window(...);
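
And because the subsystem objects are owned by the main sdl object, a failure partway through setup unwinds cleanly with no manual error handling. A sketch, assuming the nekon::sdl wrapper above is available:

#include <exception>
#include <iostream>
int main()
{
 try
 {
  auto sdl = nekon::sdl::sdl{};
  sdl.initialize<nekon::sdl::video_subsystem>();
  sdl.initialize<nekon::sdl::audio_subsystem>();   // suppose this throws...
  sdl.initialize<nekon::sdl::gamepad_subsystem>(); // ...then this never runs.
 }
 catch (std::exception const& e)
 {
  // By the time we get here, sdl's destructor has already quit the video
  // subsystem and called SDL_Quit() during stack unwinding.
  std::cerr << e.what() << '\n';
  return 1;
 }
 return 0;
}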

Just a few notes:

  • The original library I’m cribbing from didn’t use dynamic_cast, because they didn’t enable RTTI. Instead they used a different technique; I believe each subsystem was simply given an identifier, probably set to the same value as its SDL_INIT_??? flag.
  • The original library used shared pointers, not unique pointers, and didn’t return references but rather copies of the shared pointers held internally. The logic was that when you were quitting a subsystem—either manually or in the destructor—you could do a check to make sure the shared pointer was unique, and if it wasn’t, you would know someone was still holding on to a subsystem that was being destroyed. I don’t think that’s a good idea, because I don’t think the uniqueness check can be reliable. But maybe making some kind of handle type for the subsystems isn’t a bad idea. (Can’t be std::shared_ptr because of the existence of std::weak_ptr, but the idea might still be feasible.)
  • The original library also took into account that some subsystems are prerequisites of other subsystems. For example, the gamepad subsystem depends on the joystick subsystem which depends on the events subsystem. And a couple of subsystems are just initialized by default. When you tried to initialize a subsystem, it checked whether that subsystem was already initialized, but it also checked whether the prerequisite subsystems were initialized. And if you tried to quit a subsystem, it made sure no other subsystem depended on it.
  • The original library was designed many years ago. If it were being designed today, or if I were designing it, I don’t think I would use runtime polymorphism. The set of subsystems is small and fixed; if/when SDL ever adds a new subsystem, any game that wants to use it would need to be recompiled anyway, along with the SDL wrapper library. This is probably better done with static polymorphism. Or perhaps even simpler, because all you would really need in the main SDL class is a mapping from subsystem IDs to flags indicating whether each is initialized: just an array<tuple<id, bool>, number_of_subsystems>. Or array<tuple<id, use_count>, number_of_subsystems>, so you know whether a subsystem is in use (and therefore that it, and anything that depends on it, can’t be quit). A rough sketch of that idea follows this list.
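
Here is that last idea sketched out (class and member names are assumptions, std::pair stands in for the tuple, and prerequisite handling is only hinted at in a comment):

#include <SDL3/SDL.h>
#include <array>
#include <stdexcept>
#include <utility>
class sdl
{
 // One entry per SDL subsystem; extend as needed.
 std::array<std::pair<SDL_InitFlags, int>, 5> _use_counts{{
  {SDL_INIT_EVENTS, 0}, {SDL_INIT_VIDEO, 0}, {SDL_INIT_AUDIO, 0},
  {SDL_INIT_JOYSTICK, 0}, {SDL_INIT_GAMEPAD, 0},
 }};
 auto& _count_for(SDL_InitFlags id)
 {
  for (auto& [flag, count] : _use_counts)
   if (flag == id)
    return count;
  throw std::invalid_argument{"unknown SDL subsystem"};
 }
public:
 void acquire(SDL_InitFlags id)
 {
  // A fuller version would also acquire prerequisites here
  // (e.g. gamepad -> joystick -> events).
  auto& count = _count_for(id);
  if (count == 0 and not SDL_InitSubSystem(id))
   throw std::runtime_error{SDL_GetError()};
  ++count;
 }
 void release(SDL_InitFlags id)
 {
  auto& count = _count_for(id);
  if (count > 0 and --count == 0)
   SDL_QuitSubSystem(id);
 }
};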

At the risk of repeating myself: this is not an ultimate, perfect, or "the only correct" solution. I’ve even mentioned a couple of different ways of doing the above. You may find a different model that works better for you. The model I described was designed for a general-purpose SDL wrapper library, used to make multiple games along with a bunch of supporting utilities (map editors, etc.). If your goal is only a single game, it may be overkill, especially all the subsystem-management stuff.

answered Jul 1 at 21:01
\$\endgroup\$
  • \$\begingroup\$ Well, first of all thanks for such a detailed review, I really appreciate it. I now have a lot of answers to my questions, as well as new questions. For example, if you say I shouldn't inherit gl_window from window_manager (which I agree is basically a window and should be renamed), what is the truly correct way to do that? And about gl_window: should it not be inherited from window_manager? How should I implement it then? I don't quite understand. \$\endgroup\$ Commented Jul 2 at 7:48
  • \$\begingroup\$ And also about SDL initialization: since I am quite a newbie with it, I didn't know what SDL_Init is all about; I've only really used it for video so far. But if I needed sound and/or a gamepad, I think I would eventually have come to renaming that sdl_ctx to something more concrete, for example sdl_video. But that's yet another problem that needs to be fixed now \$\endgroup\$ Commented Jul 2 at 7:49
  • \$\begingroup\$ Oh, and about my preference for the name of my namespace, raw: one day I wrote my own vector built on raw pointers (with C-like memory management), then decided to put it into the namespace raw for that reason, and since then I've basically used raw for all of my projects \$\endgroup\$ Commented Jul 2 at 16:16
  • \$\begingroup\$ I’m extending the review with answers to the questions in the above comments, because answering them properly just won’t fit in a comment. \$\endgroup\$ Commented Jul 4 at 18:50
  • \$\begingroup\$ That's a lot of information. But since my goal is just a game, I don't really know yet whether I'll do that complicated stuff with SDL subsystems or not; still, your ideas are really good, thanks! \$\endgroup\$ Commented Jul 6 at 14:03
2
\$\begingroup\$

The code is quite neat and presumably functional. But I think the design is a bit too complicated for this purpose:

I'm working on a C++ graphics app and have put together a set of RAII classes to manage the initialization boilerplate for SDL3 and OpenGL.


ctx_sdl

There's nothing wrong with creating an SDL context class to automate calling SDL_Quit. But treating it as an ordinary "object" or tying it to the window class is probably a mistake. You might want to open multiple windows, but they share the global SDL state. With your current design, if you close one window, SDL_Quit will be called, killing both.

It would be nice to be able to supply different arguments to SDL_Init (or call init again for different subsystems).

This class shouldn't be inherited from (it doesn't really make sense), so should not have a virtual destructor.

It might be reasonable to make this class a singleton. (Or just stick a big comment in saying to only ever create one of it).

Or... just put two functions - sdl_init and sdl_quit - in main.cpp and call them from main, and never worry about it again... It's not RAII, but you only do it once in the whole app.
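
For instance, a minimal sketch of that option (using SDL3's bool-returning SDL_Init):

// main.cpp -- sketch only
#include <SDL3/SDL.h>
#include <iostream>
static bool sdl_init()
{
 if (!SDL_Init(SDL_INIT_VIDEO))
 {
  std::cerr << "SDL_Init failed: " << SDL_GetError() << '\n';
  return false;
 }
 return true;
}
static void sdl_quit()
{
 SDL_Quit();
}
int main()
{
 if (!sdl_init())
  return 1;
 // ... create window(s), run the app ...
 sdl_quit();
 return 0;
}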


window stuff

I don't think it makes sense for a window to derive from a window_manager. And as mentioned, it certainly shouldn't contain the SDL context.

I know the docs say that SDL_GL_SetAttribute should be called before window creation, but I've always found they work fine afterwards too (before SDL_GL_CreateContext). So I'd put that stuff in gl_window, instead of the base class.

Consider having a single window class that does SDL and GL setup in one place. Unless we have a single app that needs lots of windows, some of which need OpenGL, and some don't... I think we could merge them into one class.
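
For example, a merged class might look roughly like this sketch (error messages kept minimal, SDL_GL_SetAttribute calls and window flags omitted):

#include <SDL3/SDL.h>
#include <stdexcept>
#include <string>
class gl_window
{
 SDL_Window* _window = nullptr;
 SDL_GLContext _context = nullptr;
public:
 gl_window(std::string const& title, int w, int h)
 {
  // SDL_GL_SetAttribute() calls would go here (see the discussion above about ordering).
  _window = SDL_CreateWindow(title.c_str(), w, h, SDL_WINDOW_OPENGL);
  if (!_window)
   throw std::runtime_error{SDL_GetError()};
  _context = SDL_GL_CreateContext(_window);
  if (!_context)
  {
   SDL_DestroyWindow(_window);
   throw std::runtime_error{SDL_GetError()};
  }
 }
 ~gl_window()
 {
  SDL_GL_DestroyContext(_context);
  SDL_DestroyWindow(_window);
 }
 gl_window(gl_window const&) = delete;
 gl_window& operator=(gl_window const&) = delete;
 SDL_Window* window() const { return _window; }
 SDL_GLContext context() const { return _context; }
};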

SDL_PollEvent isn't window specific, so it's probably a mistake to associate it with a single window.


gl state

I used to worry about this quite a lot. Nowadays I just create a boring gl_renderer class, with a whole bunch of functions that wrap the raw opengl calls:

 void set_viewport(glm::ivec2 position, glm::uvec2 size);
 void set_framebuffer(framebuffer const& fb);
 void clear_framebuffer();
 enum class framebuffer_color_encoding { RGB, SRGB };
 void set_framebuffer_color_encoding(framebuffer_color_encoding mode);
 void clear_color_buffers(glm::f32vec4 color = { 0.f, 0.f, 0.f, 0.f });
 void clear_depth_buffers(float depth = 1.f);
 
 ...

which works fine for what I need ¯\_(ツ)_/¯
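
For example, a couple of those wrappers might just be thin pass-throughs to the corresponding GL calls; a sketch, with the class trimmed down and the framebuffer-related functions left out:

// gl_renderer.h (trimmed to two functions)
#include <glad/glad.h>
#include <glm/glm.hpp>
#include <glm/gtc/type_precision.hpp> // for glm::f32vec4
class gl_renderer
{
public:
 void set_viewport(glm::ivec2 position, glm::uvec2 size);
 void clear_color_buffers(glm::f32vec4 color = {0.f, 0.f, 0.f, 0.f});
};
// gl_renderer.cpp (trimmed)
void gl_renderer::set_viewport(glm::ivec2 position, glm::uvec2 size)
{
 glViewport(position.x, position.y,
  static_cast<GLsizei>(size.x), static_cast<GLsizei>(size.y));
}
void gl_renderer::clear_color_buffers(glm::f32vec4 color)
{
 glClearColor(color.r, color.g, color.b, color.a);
 glClear(GL_COLOR_BUFFER_BIT);
}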


passive value stuff

Eh. Seems to be adding complexity for a particular coding "aesthetic". I think plain function calls are clearer and more readable.

answered Jul 1 at 19:19
\$\endgroup\$
  • \$\begingroup\$ Thanks for the answer! I do understand all your suggestions, but not that one - "merge them into one class", is it really worth doing? Won't that kinda destroy, the single responsibility principle? Yea it seems pretty easy to maintain the class that way, but idk, maybe there should be more OOP-like way to do it \$\endgroup\$ Commented Jul 2 at 8:11
  • \$\begingroup\$ A single gl_window class that contains both the SDL_Window* and the SDL_GLContext would be fine. There's a one-to-one relationship there, so they can be thought of together as just "an OpenGL window". The "SRP" guideline is usually very badly worded and too vague to be helpful. But if you look at the Wikipedia article you'll see it paraphrased as "Gather together the things that change for the same reasons. Separate those things that change for different reasons." I think a single gl_window class fulfils the first part of this quite nicely. \$\endgroup\$ Commented Jul 2 at 9:29
