Perform image format conversion for VAAPI on the GPU

This commit is contained in:
loki
2021-06-04 21:12:06 +02:00
parent ff1ea1a63e
commit bdb9ed9001
29 changed files with 8524 additions and 217 deletions
+34
View File
@@ -5,6 +5,7 @@
#include "process.h"
#include <csignal>
#include <filesystem>
#include <iostream>
#include <thread>
@@ -72,6 +73,9 @@ int main(int argc, char *argv[]) {
if(config::sunshine.min_log_level >= 2) {
av_log_set_level(AV_LOG_QUIET);
}
else {
av_log_set_level(AV_LOG_DEBUG);
}
sink = boost::make_shared<text_sink>();
@@ -157,3 +161,33 @@ int main(int argc, char *argv[]) {
return 0;
}
// Read the entire file at 'path' into a string, appending '\n' after each line.
// Returns an empty string when the file does not exist or cannot be opened.
std::string read_file(const char *path) {
  if(!std::filesystem::exists(path)) {
    return {};
  }

  std::ifstream in(path);
  if(!in.is_open()) {
    // Existence check above isn't enough: opening can still fail (permissions).
    return {};
  }

  std::string input;
  std::string base64_cert;

  // Loop on getline() itself. Testing eof() before reading spins forever once
  // the stream enters a fail state, and appends a spurious trailing blank line
  // for newline-terminated files.
  while(std::getline(in, input)) {
    base64_cert += input + '\n';
  }

  return base64_cert;
}
// Write 'contents' to the file at 'path', truncating any existing content.
// Returns 0 on success, -1 when the file could not be opened for writing.
int write_file(const char *path, const std::string_view &contents) {
  std::ofstream out { path };

  if(!out.is_open()) {
    return -1;
  }

  out << contents;
  return 0;
}
+3
View File
@@ -19,4 +19,7 @@ extern boost::log::sources::severity_logger<int> error;
extern boost::log::sources::severity_logger<int> fatal;
void log_flush();
std::string read_file(const char *path);
int write_file(const char *path, const std::string_view &contents);
#endif //SUNSHINE_MAIN_H
-30
View File
@@ -40,9 +40,6 @@ constexpr auto GFE_VERSION = "3.12.0.1";
namespace fs = std::filesystem;
namespace pt = boost::property_tree;
std::string read_file(const char *path);
int write_file(const char *path, const std::string_view &contents);
using https_server_t = SimpleWeb::Server<SimpleWeb::HTTPS>;
using http_server_t = SimpleWeb::Server<SimpleWeb::HTTP>;
@@ -931,31 +928,4 @@ void start(std::shared_ptr<safe::signal_t> shutdown_event) {
ssl.join();
tcp.join();
}
// Persist 'contents' at 'path'; 0 on success, -1 if the file won't open.
int write_file(const char *path, const std::string_view &contents) {
  std::ofstream file_out(path);

  if(file_out.is_open()) {
    file_out << contents;
    return 0;
  }

  return -1;
}
// Read the file at 'path' into a string, one line at a time, re-adding '\n'.
// Returns an empty string when the file cannot be opened.
std::string read_file(const char *path) {
  std::ifstream in(path);
  if(!in.is_open()) {
    // Fixes the FIXME: an unopened stream never reaches eof(), so the old
    // eof()-guarded loop spun forever.
    return {};
  }

  std::string input;
  std::string base64_cert;

  // getline() as the loop condition terminates on both EOF and stream failure.
  while(std::getline(in, input)) {
    base64_cert += input + '\n';
  }

  return base64_cert;
}
} // namespace nvhttp
+12 -1
View File
@@ -11,6 +11,8 @@
#include <string>
struct sockaddr;
struct AVFrame;
namespace platf {
constexpr auto MAX_GAMEPADS = 32;
@@ -69,6 +71,7 @@ constexpr std::uint8_t map_surround71[] {
enum class mem_type_e {
system,
vaapi,
dxgi,
unknown
};
@@ -155,12 +158,20 @@ struct sink_t {
struct hwdevice_t {
void *data {};
void *img {};
AVFrame *frame {};
virtual int convert(platf::img_t &img) {
return -1;
}
/**
* implementations must take ownership of 'frame'
*/
virtual int set_frame(AVFrame *frame) {
std::abort(); // ^ This function must never be called
return -1;
};
virtual void set_colorspace(std::uint32_t colorspace, std::uint32_t color_range) {};
virtual ~hwdevice_t() = default;
+1 -5
View File
@@ -283,7 +283,7 @@ public:
sink_t sink;
// If hardware sink with more channels found, set that as host
int channels = 0;
int channels = 0;
// Count of all virtual sinks that are created by us
int nullcount = 0;
@@ -434,8 +434,4 @@ std::unique_ptr<audio_control_t> audio_control() {
return audio;
}
std::unique_ptr<deinit_t> init() {
return std::make_unique<deinit_t>();
}
} // namespace platf
+18 -5
View File
@@ -23,6 +23,8 @@
#include "sunshine/main.h"
#include "sunshine/task_pool.h"
#include "vaapi.h"
namespace platf {
using namespace std::literals;
@@ -136,18 +138,21 @@ void blend_cursor(Display *display, img_t &img, int offsetX, int offsetY) {
});
}
}
struct x11_attr_t : public display_t {
xdisplay_t xdisplay;
Window xwindow;
XWindowAttributes xattr;
mem_type_e mem_type;
/*
* Last X (NOT the streamed monitor!) size.
* This way we can trigger reinitialization if the dimensions changed while streaming
*/
int lastWidth, lastHeight;
x11_attr_t() : xdisplay { XOpenDisplay(nullptr) }, xwindow {}, xattr {} {
x11_attr_t(mem_type_e mem_type) : xdisplay { XOpenDisplay(nullptr) }, xwindow {}, xattr {}, mem_type { mem_type } {
XInitThreads();
}
@@ -238,6 +243,14 @@ struct x11_attr_t : public display_t {
return std::make_shared<x11_img_t>();
}
std::shared_ptr<hwdevice_t> make_hwdevice(int width, int height, pix_fmt_e pix_fmt) override {
if(mem_type == mem_type_e::vaapi) {
return egl::make_hwdevice();
}
return std::make_shared<hwdevice_t>();
}
int dummy_img(img_t *img) override {
snapshot(img, 0s, true);
return 0;
@@ -262,7 +275,7 @@ struct shm_attr_t : public x11_attr_t {
refresh_task_id = task_pool.pushDelayed(&shm_attr_t::delayed_refresh, 2s, this).task_id;
}
shm_attr_t() : x11_attr_t(), shm_xdisplay { XOpenDisplay(nullptr) } {
shm_attr_t(mem_type_e mem_type) : x11_attr_t(mem_type), shm_xdisplay { XOpenDisplay(nullptr) } {
refresh_task_id = task_pool.pushDelayed(&shm_attr_t::delayed_refresh, 2s, this).task_id;
}
@@ -356,13 +369,13 @@ struct shm_attr_t : public x11_attr_t {
};
std::shared_ptr<display_t> display(platf::mem_type_e hwdevice_type) {
if(hwdevice_type != platf::mem_type_e::system) {
if(hwdevice_type != platf::mem_type_e::system && hwdevice_type != platf::mem_type_e::vaapi) {
BOOST_LOG(error) << "Could not initialize display with the given hw device type."sv;
return nullptr;
}
// Attempt to use shared memory X11 to avoid copying the frame
auto shm_disp = std::make_shared<shm_attr_t>();
auto shm_disp = std::make_shared<shm_attr_t>(hwdevice_type);
auto status = shm_disp->init();
if(status > 0) {
@@ -375,7 +388,7 @@ std::shared_ptr<display_t> display(platf::mem_type_e hwdevice_type) {
}
// Fallback
auto x11_disp = std::make_shared<x11_attr_t>();
auto x11_disp = std::make_shared<x11_attr_t>(hwdevice_type);
if(x11_disp->init()) {
return nullptr;
}
+851
View File
@@ -0,0 +1,851 @@
#include <string>
#include <glad/egl.h>
#include <glad/gl.h>
#include <fcntl.h>
#include <gbm.h>
#include <va/va.h>
#include <va/va_drm.h>
#include <va/va_drmcommon.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext_vaapi.h>
}
#include "sunshine/main.h"
#include "sunshine/platform/common.h"
#include "sunshine/utility.h"
#include "sunshine/video.h"
// I want to have as little build dependencies as possible
// There aren't that many DRM_FORMAT I need to use, so define them here
//
// They aren't likely to change any time soon.
#define fourcc_code(a, b, c, d) ((std::uint32_t)(a) | ((std::uint32_t)(b) << 8) | \
((std::uint32_t)(c) << 16) | ((std::uint32_t)(d) << 24))
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
#define SUNSHINE_SHADERS_DIR SUNSHINE_ASSETS_DIR "/shaders/opengl"
#define STRINGIFY(x) #x
#define gl_drain_errors_helper(x) gl::drain_errors("line " STRINGIFY(x))
#define gl_drain_errors gl_drain_errors_helper(__LINE__)
using namespace std::literals;
namespace va {
using display_t = util::safe_ptr_v2<void, VAStatus, vaTerminate>;
}
namespace gl {
static GladGLContext ctx;
void drain_errors(const std::string_view &prefix) {
GLenum err;
while((err = ctx.GetError()) != GL_NO_ERROR) {
BOOST_LOG(error) << "GL: "sv << prefix << ": ["sv << util::hex(err).to_string_view() << ']';
}
}
// RAII wrapper around a batch of OpenGL texture names.
class tex_t : public util::buffer_t<GLuint> {
  using util::buffer_t<GLuint>::buffer_t;

public:
  tex_t(tex_t &&) = default;
  tex_t &operator=(tex_t &&) = default;

  ~tex_t() {
    // Was `if(!size() == 0)`, which is only correct by precedence accident
    // ((!size()) == 0  <=>  size() != 0). Spell out the intent.
    if(size() != 0) {
      ctx.DeleteTextures(size(), begin());
    }
  }

  // Generate 'count' 2D textures configured for sampling video frames:
  // edge-clamped in both axes, linear min/mag filtering, opaque black border.
  static tex_t make(std::size_t count) {
    tex_t textures { count };

    ctx.GenTextures(textures.size(), textures.begin());

    float color[] = { 0.0f, 0.0f, 0.0f, 1.0f };

    for(auto tex : textures) {
      gl::ctx.BindTexture(GL_TEXTURE_2D, tex);
      gl::ctx.TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); // x
      gl::ctx.TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); // y
      gl::ctx.TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
      gl::ctx.TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
      gl::ctx.TexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
      gl::ctx.TexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, color);
    }

    return textures;
  }
};
// RAII wrapper around a batch of OpenGL framebuffer objects.
class frame_buf_t : public util::buffer_t<GLuint> {
  using util::buffer_t<GLuint>::buffer_t;

public:
  frame_buf_t(frame_buf_t &&) = default;
  frame_buf_t &operator=(frame_buf_t &&) = default;

  ~frame_buf_t() {
    if(begin()) {
      ctx.DeleteFramebuffers(size(), begin());
    }
  }

  static frame_buf_t make(std::size_t count) {
    frame_buf_t frame_buf { count };

    ctx.GenFramebuffers(frame_buf.size(), frame_buf.begin());

    return frame_buf;
  }

  // Attach each texture in [it_begin, it_end) to its own framebuffer, as
  // color attachment 0 + its index. There must be at least as many
  // framebuffers as textures.
  template<class It>
  void bind(It it_begin, It it_end) {
    // Cast to avoid a signed/unsigned comparison: std::distance is signed,
    // size() is not, and the distance is non-negative here.
    if((std::size_t)std::distance(it_begin, it_end) > size()) {
      BOOST_LOG(warning) << "Too many elements to bind"sv;
      return;
    }

    int x = 0;
    std::for_each(it_begin, it_end, [&](auto tex) {
      ctx.BindFramebuffer(GL_FRAMEBUFFER, (*this)[x]);
      ctx.BindTexture(GL_TEXTURE_2D, tex);

      ctx.FramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + x, tex, 0);
      ++x;
    });
  }
};
// RAII wrapper around a compiled OpenGL shader object.
class shader_t {
  KITTY_USING_MOVE_T(shader_internal_t, GLuint, std::numeric_limits<GLuint>::max(), {
    if(el != std::numeric_limits<GLuint>::max()) {
      ctx.DeleteShader(el);
    }
  });

public:
  // Return the shader's info log (compile diagnostics), or "" when empty.
  std::string err_str() {
    int length = 0;
    ctx.GetShaderiv(handle(), GL_INFO_LOG_LENGTH, &length);

    if(length <= 0) {
      // No log: the old unconditional resize(length - 1) underflowed here.
      return {};
    }

    std::string string;
    string.resize(length);

    // The length written back excludes the null terminator, so it is exactly
    // the string size; the old `length - 1` dropped the final character.
    ctx.GetShaderInfoLog(handle(), length, &length, string.data());
    string.resize(length > 0 ? length : 0);

    return string;
  }

  // Compile 'source' as a shader of the given 'type'; on failure, return the
  // compiler's error log instead.
  static util::Either<shader_t, std::string> compile(const std::string_view &source, GLenum type) {
    shader_t shader;

    auto data     = source.data();
    GLint length  = source.length();

    shader._shader.el = ctx.CreateShader(type);
    ctx.ShaderSource(shader.handle(), 1, &data, &length);
    ctx.CompileShader(shader.handle());

    int status = 0;
    ctx.GetShaderiv(shader.handle(), GL_COMPILE_STATUS, &status);

    if(!status) {
      return shader.err_str();
    }

    return shader;
  }

  GLuint handle() const {
    return _shader.el;
  }

private:
  shader_internal_t _shader;
};
// RAII wrapper around a GL uniform buffer object (UBO), remembering the
// uniform-block name and the byte offset of each member so individual
// members can be re-uploaded later.
class buffer_t {
  KITTY_USING_MOVE_T(buffer_internal_t, GLuint, std::numeric_limits<GLuint>::max(), {
    if(el != std::numeric_limits<GLuint>::max()) {
      ctx.DeleteBuffers(1, &el);
    }
  });

public:
  // Create a UBO for uniform block 'block', initialized from 'data'.
  // 'offsets' are the GL-queried byte offsets of each member inside the block
  // (see program_t::uniform, which produces both).
  static buffer_t make(util::buffer_t<GLint> &&offsets, const char *block, const std::string_view &data) {
    buffer_t buffer;
    buffer._block = block;
    buffer._size = data.size();
    buffer._offsets = std::move(offsets);

    ctx.GenBuffers(1, &buffer._buffer.el);
    ctx.BindBuffer(GL_UNIFORM_BUFFER, buffer.handle());
    ctx.BufferData(GL_UNIFORM_BUFFER, data.size(), (const std::uint8_t *)data.data(), GL_DYNAMIC_DRAW);

    return buffer;
  }

  GLuint handle() const {
    return _buffer.el;
  }

  const char *block() const {
    return _block;
  }

  // Upload raw bytes into the UBO starting at 'offset'.
  void update(const std::string_view &view, std::size_t offset = 0) {
    ctx.BindBuffer(GL_UNIFORM_BUFFER, handle());
    ctx.BufferSubData(GL_UNIFORM_BUFFER, offset, view.size(), (const void *)view.data());
  }

  // Upload 'count' member values, each placed at its recorded offset.
  // 'members' must be in the same order as the offsets passed to make().
  // NOTE(review): bytes between members in the staging buffer are uploaded
  // uninitialized — presumably only padding; confirm nothing reads them.
  void update(std::string_view *members, std::size_t count, std::size_t offset = 0) {
    util::buffer_t<std::uint8_t> buffer { _size };

    for(int x = 0; x < count; ++x) {
      auto val = members[x];

      std::copy_n((const std::uint8_t *)val.data(), val.size(), &buffer[_offsets[x]]);
    }

    update(util::view(buffer.begin(), buffer.end()), offset);
  }

private:
  const char *_block; // uniform block name (not owned; expected to be a literal)
  std::size_t _size;  // total size of the uniform block in bytes

  util::buffer_t<GLint> _offsets; // byte offset of each member within the block

  buffer_internal_t _buffer; // the GL buffer name
};
class program_t {
KITTY_USING_MOVE_T(program_internal_t, GLuint, std::numeric_limits<GLuint>::max(), {
if(el != std::numeric_limits<GLuint>::max()) {
ctx.DeleteProgram(el);
}
});
public:
std::string err_str() {
int length;
ctx.GetProgramiv(handle(), GL_INFO_LOG_LENGTH, &length);
std::string string;
string.resize(length);
ctx.GetShaderInfoLog(handle(), length, &length, string.data());
string.resize(length - 1);
return string;
}
static util::Either<program_t, std::string> link(const shader_t &vert, const shader_t &frag) {
program_t program;
program._program.el = ctx.CreateProgram();
ctx.AttachShader(program.handle(), vert.handle());
ctx.AttachShader(program.handle(), frag.handle());
// p_handle stores a copy of the program handle, since program will be moved before
// the fail guard funcion is called.
auto fg = util::fail_guard([p_handle = program.handle(), &vert, &frag]() {
ctx.DetachShader(p_handle, vert.handle());
ctx.DetachShader(p_handle, frag.handle());
});
ctx.LinkProgram(program.handle());
int status = 0;
ctx.GetProgramiv(program.handle(), GL_LINK_STATUS, &status);
if(!status) {
return program.err_str();
}
return program;
}
void bind(const buffer_t &buffer) {
ctx.UseProgram(handle());
auto i = ctx.GetUniformBlockIndex(handle(), buffer.block());
ctx.BindBufferBase(GL_UNIFORM_BUFFER, i, buffer.handle());
}
std::optional<buffer_t> uniform(const char *block, std::pair<const char *, std::string_view> *members, std::size_t count) {
auto i = ctx.GetUniformBlockIndex(handle(), block);
if(i == GL_INVALID_INDEX) {
BOOST_LOG(error) << "Couldn't find index of ["sv << block << ']';
return std::nullopt;
}
int size;
ctx.GetActiveUniformBlockiv(handle(), i, GL_UNIFORM_BLOCK_DATA_SIZE, &size);
bool error_flag = false;
util::buffer_t<GLint> offsets { count };
auto indices = (std::uint32_t *)alloca(count * sizeof(std::uint32_t));
auto names = (const char **)alloca(count * sizeof(const char *));
auto names_p = names;
std::for_each_n(members, count, [names_p](auto &member) mutable {
*names_p++ = std::get<0>(member);
});
std::fill_n(indices, count, GL_INVALID_INDEX);
ctx.GetUniformIndices(handle(), count, names, indices);
for(int x = 0; x < count; ++x) {
if(indices[x] == GL_INVALID_INDEX) {
error_flag = true;
BOOST_LOG(error) << "Couldn't find ["sv << block << '.' << members[x].first << ']';
}
}
if(error_flag) {
return std::nullopt;
}
ctx.GetActiveUniformsiv(handle(), count, indices, GL_UNIFORM_OFFSET, offsets.begin());
util::buffer_t<std::uint8_t> buffer { (std::size_t)size };
for(int x = 0; x < count; ++x) {
auto val = std::get<1>(members[x]);
std::copy_n((const std::uint8_t *)val.data(), val.size(), &buffer[offsets[x]]);
}
return buffer_t::make(std::move(offsets), block, std::string_view { (char *)buffer.begin(), buffer.size() });
}
GLuint handle() const {
return _program.el;
}
private:
program_internal_t _program;
};
} // namespace gl
namespace platf {
namespace egl {
auto constexpr render_device = "/dev/dri/renderD129";
constexpr auto EGL_LINUX_DMA_BUF_EXT = 0x3270;
constexpr auto EGL_LINUX_DRM_FOURCC_EXT = 0x3271;
constexpr auto EGL_DMA_BUF_PLANE0_FD_EXT = 0x3272;
constexpr auto EGL_DMA_BUF_PLANE0_OFFSET_EXT = 0x3273;
constexpr auto EGL_DMA_BUF_PLANE0_PITCH_EXT = 0x3274;
using display_t = util::dyn_safe_ptr_v2<void, EGLBoolean, &eglTerminate>;
using gbm_t = util::safe_ptr<gbm_device, gbm_device_destroy>;
int vaapi_make_hwdevice_ctx(platf::hwdevice_t *base, AVBufferRef **hw_device_buf);
KITTY_USING_MOVE_T(file_t, int, -1, {
if(el >= 0) {
close(el);
}
});
struct nv12_img_t {
display_t::pointer display;
EGLImage r8;
EGLImage bg88;
gl::tex_t tex;
gl::frame_buf_t buf;
};
KITTY_USING_MOVE_T(nv12_t, nv12_img_t, , {
if(el.r8) {
eglDestroyImageKHR(el.display, el.r8);
}
if(el.bg88) {
eglDestroyImageKHR(el.display, el.bg88);
}
});
KITTY_USING_MOVE_T(ctx_t, (std::tuple<display_t::pointer, EGLContext>), , {
TUPLE_2D_REF(disp, ctx, el);
if(ctx) {
if(ctx == eglGetCurrentContext()) {
eglMakeCurrent(disp, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
}
eglDestroyContext(disp, ctx);
}
});
bool fail() {
return eglGetError() != EGL_SUCCESS;
}
// hwdevice_t implementation performing BGRA -> NV12 conversion on the GPU:
// an EGL/OpenGL context is created on a DRM render node via GBM, the encoder's
// VAAPI surface is imported as EGL images, and two shader passes render the
// Y and UV planes directly into that surface.
class egl_t : public platf::hwdevice_t {
public:
  // Import 'surface' as two EGL images (R8 luma plane + GR88 chroma plane)
  // and wire them to textures and framebuffers so the shaders can render
  // into the surface. Returns std::nullopt (after logging) on failure.
  std::optional<nv12_t> import(VASurfaceID surface) {
    // No deallocation necessary
    VADRMPRIMESurfaceDescriptor prime;
    auto status = vaExportSurfaceHandle(
      va_display,
      surface,
      VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
      VA_EXPORT_SURFACE_WRITE_ONLY | VA_EXPORT_SURFACE_COMPOSED_LAYERS,
      &prime);
    if(status) {
      BOOST_LOG(error) << "Couldn't export va surface handle: "sv << vaErrorStr(status);
      return std::nullopt;
    }

    // COMPOSED_LAYERS --> both NV12 planes live in layer 0. The chroma plane
    // is subsampled, hence width/2 x height/2.
    int img_attr_planes[2][13] {
      { EGL_LINUX_DRM_FOURCC_EXT, DRM_FORMAT_R8,
        EGL_WIDTH, (int)prime.width,
        EGL_HEIGHT, (int)prime.height,
        EGL_DMA_BUF_PLANE0_FD_EXT, prime.objects[prime.layers[0].object_index[0]].fd,
        EGL_DMA_BUF_PLANE0_OFFSET_EXT, (int)prime.layers[0].offset[0],
        EGL_DMA_BUF_PLANE0_PITCH_EXT, (int)prime.layers[0].pitch[0],
        EGL_NONE },

      { EGL_LINUX_DRM_FOURCC_EXT, DRM_FORMAT_GR88,
        EGL_WIDTH, (int)prime.width / 2,
        EGL_HEIGHT, (int)prime.height / 2,
        EGL_DMA_BUF_PLANE0_FD_EXT, prime.objects[prime.layers[0].object_index[1]].fd,
        EGL_DMA_BUF_PLANE0_OFFSET_EXT, (int)prime.layers[0].offset[1],
        EGL_DMA_BUF_PLANE0_PITCH_EXT, (int)prime.layers[0].pitch[1],
        EGL_NONE },
    };

    nv12_t nv12 {
      display.get(),
      eglCreateImageKHR(display.get(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, nullptr, img_attr_planes[0]),
      eglCreateImageKHR(display.get(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, nullptr, img_attr_planes[1]),
      gl::tex_t::make(2),
      gl::frame_buf_t::make(2)
    };

    if(!nv12->r8 || !nv12->bg88) {
      BOOST_LOG(error) << "Couldn't create KHR Image"sv;
      return std::nullopt;
    }

    // Attach each EGL image to its texture, then each texture to its own
    // framebuffer (tex[0]/buf[0] = Y, tex[1]/buf[1] = UV).
    gl::ctx.BindTexture(GL_TEXTURE_2D, nv12->tex[0]);
    gl::ctx.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, nv12->r8);

    gl::ctx.BindTexture(GL_TEXTURE_2D, nv12->tex[1]);
    gl::ctx.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, nv12->bg88);

    nv12->buf.bind(std::begin(nv12->tex), std::end(nv12->tex));

    gl_drain_errors;

    return nv12;
  }

  // Select the YUV conversion matrix for the given swscale colorspace constant
  // and upload it to the shader's uniform buffer. color_range > 1 selects the
  // full-range variant, which is stored directly after the limited-range one
  // in video::colors.
  void set_colorspace(std::uint32_t colorspace, std::uint32_t color_range) override {
    video::color_t *color_p;
    switch(colorspace) {
    case 5: // SWS_CS_SMPTE170M
      color_p = &video::colors[0];
      break;
    case 1: // SWS_CS_ITU709
      color_p = &video::colors[2];
      break;
    case 9: // SWS_CS_BT2020
      // BT.2020 not implemented: deliberately falls through to the default.
    default:
      BOOST_LOG(warning) << "Colorspace: ["sv << colorspace << "] not yet supported: switching to default"sv;
      color_p = &video::colors[0];
    };

    if(color_range > 1) {
      // Full range
      ++color_p;
    }

    std::string_view members[] {
      util::view(color_p->color_vec_y),
      util::view(color_p->color_vec_u),
      util::view(color_p->color_vec_v),
      util::view(color_p->range_y),
      util::view(color_p->range_uv),
    };

    color_matrix.update(members, sizeof(members) / sizeof(decltype(members[0])));
  }

  // One-time setup: open the render node, create a GBM device, EGL display
  // and context, load GL, compile/link the conversion shaders, and create the
  // color-matrix uniform buffer. Returns 0 on success, -1 on any failure.
  int init(const char *render_device) {
    file.el = open(render_device, O_RDWR);
    if(file.el < 0) {
      char error_buf[1024];
      // NOTE(review): streaming strerror_r's result assumes the GNU (char *)
      // variant; the XSI variant returns int — confirm for non-glibc builds.
      BOOST_LOG(error) << "Couldn't open ["sv << render_device << "]: "sv << strerror_r(errno, error_buf, sizeof(error_buf));
      return -1;
    }

    gbm.reset(gbm_create_device(file.el));
    if(!gbm) {
      BOOST_LOG(error) << "Couldn't create GBM device: ["sv << util::hex(eglGetError()).to_string_view() << ']';
      return -1;
    }

    // EGL_PLATFORM_GBM_MESA is not in the glad headers used here.
    constexpr auto EGL_PLATFORM_GBM_MESA = 0x31D7;
    display.reset(eglGetPlatformDisplay(EGL_PLATFORM_GBM_MESA, gbm.get(), nullptr));
    if(fail()) {
      BOOST_LOG(error) << "Couldn't open EGL display: ["sv << util::hex(eglGetError()).to_string_view() << ']';
      return -1;
    }

    int major, minor;
    if(!eglInitialize(display.get(), &major, &minor)) {
      BOOST_LOG(error) << "Couldn't initialize EGL display: ["sv << util::hex(eglGetError()).to_string_view() << ']';
      return -1;
    }

    const char *extension_st = eglQueryString(display.get(), EGL_EXTENSIONS);
    const char *version      = eglQueryString(display.get(), EGL_VERSION);
    const char *vendor       = eglQueryString(display.get(), EGL_VENDOR);
    const char *apis         = eglQueryString(display.get(), EGL_CLIENT_APIS);

    BOOST_LOG(debug) << "EGL: ["sv << vendor << "]: version ["sv << version << ']';
    BOOST_LOG(debug) << "API's supported: ["sv << apis << ']';

    // All of these are required for dma-buf import and surfaceless rendering.
    const char *extensions[] {
      "EGL_KHR_create_context",
      "EGL_KHR_surfaceless_context",
      "EGL_EXT_image_dma_buf_import",
      "EGL_KHR_image_pixmap"
    };

    for(auto ext : extensions) {
      if(!std::strstr(extension_st, ext)) {
        BOOST_LOG(error) << "Missing extension: ["sv << ext << ']';
        return -1;
      }
    }

    constexpr int conf_attr[] {
      EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, EGL_NONE
    };

    int count;
    EGLConfig conf;
    if(!eglChooseConfig(display.get(), conf_attr, &conf, 1, &count)) {
      BOOST_LOG(error) << "Couldn't set config attributes: ["sv << util::hex(eglGetError()).to_string_view() << ']';
      return -1;
    }

    if(!eglBindAPI(EGL_OPENGL_API)) {
      BOOST_LOG(error) << "Couldn't bind API: ["sv << util::hex(eglGetError()).to_string_view() << ']';
      return -1;
    }

    constexpr int attr[] {
      EGL_CONTEXT_CLIENT_VERSION, 3, EGL_NONE
    };

    // Surfaceless context: all rendering goes into imported dma-buf images.
    ctx.el = { display.get(), eglCreateContext(display.get(), conf, EGL_NO_CONTEXT, attr) };
    if(fail()) {
      BOOST_LOG(error) << "Couldn't create EGL context: ["sv << util::hex(eglGetError()).to_string_view() << ']';
      return -1;
    }

    TUPLE_EL_REF(ctx_p, 1, ctx.el);
    if(!eglMakeCurrent(display.get(), EGL_NO_SURFACE, EGL_NO_SURFACE, ctx_p)) {
      BOOST_LOG(error) << "Couldn't make current display"sv;
      return -1;
    }

    if(!gladLoadGLContext(&gl::ctx, eglGetProcAddress)) {
      BOOST_LOG(error) << "Couldn't load OpenGL library"sv;
      return -1;
    }

    BOOST_LOG(debug) << "GL: vendor: "sv << gl::ctx.GetString(GL_VENDOR);
    BOOST_LOG(debug) << "GL: renderer: "sv << gl::ctx.GetString(GL_RENDERER);
    BOOST_LOG(debug) << "GL: version: "sv << gl::ctx.GetString(GL_VERSION);
    BOOST_LOG(debug) << "GL: shader: "sv << gl::ctx.GetString(GL_SHADING_LANGUAGE_VERSION);

    gl::ctx.PixelStorei(GL_UNPACK_ALIGNMENT, 1);

    {
      // Shader sources are paired frag/vert on disk; index parity below
      // (x % 2) relies on this exact ordering.
      const char *sources[] {
        SUNSHINE_SHADERS_DIR "/ConvertUV.frag",
        SUNSHINE_SHADERS_DIR "/ConvertUV.vert",
        SUNSHINE_SHADERS_DIR "/ConvertY.frag",
        SUNSHINE_SHADERS_DIR "/Scene.vert",
        SUNSHINE_SHADERS_DIR "/Scene.frag",
      };

      GLenum shader_type[2] {
        GL_FRAGMENT_SHADER,
        GL_VERTEX_SHADER,
      };

      constexpr auto count = sizeof(sources) / sizeof(const char *);

      util::Either<gl::shader_t, std::string> compiled_sources[count];

      bool error_flag = false;
      for(int x = 0; x < count; ++x) {
        auto &compiled_source = compiled_sources[x];

        compiled_source = gl::shader_t::compile(read_file(sources[x]), shader_type[x % 2]);
        gl_drain_errors;

        if(compiled_source.has_right()) {
          BOOST_LOG(error) << sources[x] << ": "sv << compiled_source.right();
          error_flag = true;
        }
      }

      if(error_flag) {
        return -1;
      }

      auto program = gl::program_t::link(compiled_sources[1].left(), compiled_sources[0].left());
      if(program.has_right()) {
        BOOST_LOG(error) << "GL linker: "sv << program.right();
        return -1;
      }

      // UV - shader
      this->program[1] = std::move(program.left());

      program = gl::program_t::link(compiled_sources[3].left(), compiled_sources[2].left());
      if(program.has_right()) {
        BOOST_LOG(error) << "GL linker: "sv << program.right();
        return -1;
      }

      // Y - shader
      this->program[0] = std::move(program.left());
    }

    // Default to the first (BT601 MPEG) color matrix until set_colorspace()
    // is called with the encoder's actual settings.
    auto color_p = &video::colors[0];
    std::pair<const char *, std::string_view> members[] {
      std::make_pair("color_vec_y", util::view(color_p->color_vec_y)),
      std::make_pair("color_vec_u", util::view(color_p->color_vec_u)),
      std::make_pair("color_vec_v", util::view(color_p->color_vec_v)),
      std::make_pair("range_y", util::view(color_p->range_y)),
      std::make_pair("range_uv", util::view(color_p->range_uv)),
    };

    auto color_matrix = program[0].uniform("ColorMatrix", members, sizeof(members) / sizeof(decltype(members[0])));
    if(!color_matrix) {
      return -1;
    }

    this->color_matrix = std::move(*color_matrix);

    tex_in = gl::tex_t::make(1);

    // hwdevice_t::data is the hook video.cc uses to build the FFmpeg
    // hardware device context for this device.
    data = (void *)vaapi_make_hwdevice_ctx;

    gl_drain_errors;
    return 0;
  }

  // Upload the captured BGRA image and run both shader passes (x == 0: Y
  // plane at full size, x == 1: UV plane at half size) into the imported
  // VAAPI surface. Returns 0 on success.
  int convert(platf::img_t &img) override {
    auto tex = tex_in[0];

    gl::ctx.ActiveTexture(GL_TEXTURE0);
    gl::ctx.BindTexture(GL_TEXTURE_2D, tex);
    gl::ctx.TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, out_width, out_height, GL_BGRA, GL_UNSIGNED_BYTE, img.data);

    GLenum attachments[] {
      GL_COLOR_ATTACHMENT0,
      GL_COLOR_ATTACHMENT1
    };

    for(int x = 0; x < sizeof(attachments) / sizeof(decltype(attachments[0])); ++x) {
      gl::ctx.BindFramebuffer(GL_FRAMEBUFFER, nv12->buf[x]);
      gl::ctx.DrawBuffers(1, &attachments[x]);

      auto status = gl::ctx.CheckFramebufferStatus(GL_FRAMEBUFFER);
      if(status != GL_FRAMEBUFFER_COMPLETE) {
        BOOST_LOG(error) << "Pass "sv << x << ": CheckFramebufferStatus() --> [0x"sv << util::hex(status).to_string_view() << ']';
        return -1;
      }

      gl::ctx.BindTexture(GL_TEXTURE_2D, tex);

      gl::ctx.UseProgram(program[x].handle());
      program[x].bind(color_matrix);

      // Single full-screen triangle; the UV pass renders at half resolution.
      gl::ctx.Viewport(0, 0, out_width / (x + 1), out_height / (x + 1));
      gl::ctx.DrawArrays(GL_TRIANGLES, 0, 3);
    }

    return 0;
  }

  // Overrides hwdevice_t::set_frame ('override' not spelled out here).
  // Takes ownership of 'frame' per the base-class contract, allocates its
  // VAAPI surface, imports it for rendering, and sizes the input texture.
  int set_frame(AVFrame *frame) {
    this->frame = frame;

    if(av_hwframe_get_buffer(frame->hw_frames_ctx, frame, 0)) {
      BOOST_LOG(error) << "Couldn't get hwframe for VAAPI"sv;
      return -1;
    }

    // For VAAPI frames, data[3] carries the VASurfaceID.
    VASurfaceID surface = (std::uintptr_t)frame->data[3];

    auto nv12_opt = import(surface);
    if(!nv12_opt) {
      return -1;
    }

    nv12 = std::move(*nv12_opt);

    out_width  = frame->width;
    out_height = frame->height;

    // Allocate immutable storage for the BGRA input texture at output size.
    auto tex = tex_in[0];
    // gl::ctx.ActiveTexture(GL_TEXTURE0);
    gl::ctx.BindTexture(GL_TEXTURE_2D, tex);
    // gl::ctx.TexImage2D(GL_TEXTURE_2D, 0, 4, out_width, out_height, 0, GL_BGRA, GL_UNSIGNED_BYTE, (void *)dummy_img.begin());
    gl::ctx.TexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8, out_width, out_height);

    // The UV shader needs 1/width to sample between horizontal pixel pairs.
    auto loc_width_i = gl::ctx.GetUniformLocation(program[1].handle(), "width_i");
    if(loc_width_i < 0) {
      BOOST_LOG(error) << "Couldn't find uniform [width_i]"sv;
      return -1;
    }

    auto width_i = 1.0f / out_width;
    gl::ctx.UseProgram(program[1].handle());
    gl::ctx.Uniform1fv(loc_width_i, 1, &width_i);

    gl_drain_errors;
    return 0;
  }

  ~egl_t() override {
    // GetError is only non-null after gladLoadGLContext succeeded in init().
    if(gl::ctx.GetError) {
      gl_drain_errors;
    }
  }

  std::uint32_t out_width, out_height; // output (encoder) dimensions

  va::display_t::pointer va_display; // non-owning; owned by the FFmpeg device ctx

  // Declaration order doubles as destruction order (reverse): GL objects must
  // go before the context, the context before the display, etc.
  file_t file;
  gbm_t gbm;
  display_t display;
  ctx_t ctx;

  gl::tex_t tex_in; // BGRA input texture
  nv12_t nv12;      // imported output surface
  gl::program_t program[2]; // [0] = Y pass, [1] = UV pass

  gl::buffer_t color_matrix; // UBO holding the active conversion matrix
};
/**
* This is a private structure of FFmpeg, I need this to manually create
* a VAAPI hardware context
*
* xdisplay will not be used internally by FFmpeg
*/
typedef struct VAAPIDevicePriv {
  // Must mirror FFmpeg's private struct layout exactly.
  union {
    void *xdisplay; // unused here (would hold the X11 display)
    int fd;         // DRM file descriptor
  } drm;
  int drm_fd;
} VAAPIDevicePriv;
// Adapter for libva's vaSet*Callback hooks: forwards a libva message to the
// boost severity logger passed through the opaque 'level' pointer.
// NOTE(review): '__log' is a reserved identifier (leading double underscore);
// consider renaming together with its use site.
static void __log(void *level, const char *msg) {
  BOOST_LOG(*(boost::log::sources::severity_logger<int> *)level) << msg;
}
// Build an FFmpeg VAAPI hardware device context on top of the DRM fd that
// 'base' (an egl_t) already has open. On success, *hw_device_buf owns the
// context and the VADisplay; returns 0, or a negative value on failure.
int vaapi_make_hwdevice_ctx(platf::hwdevice_t *base, AVBufferRef **hw_device_buf) {
  auto *priv = (VAAPIDevicePriv *)av_mallocz(sizeof(VAAPIDevicePriv));
  priv->drm_fd = -1;
  priv->drm.fd = -1;

  // NOTE(review): 'priv' is never attached to the FFmpeg context; after
  // fg.disable() below it is leaked on the success path — confirm whether it
  // was meant to be stored in the device context's user_opaque.
  auto fg = util::fail_guard([priv]() {
    av_free(priv);
  });

  auto egl = (platf::egl::egl_t *)base;

  va::display_t display { vaGetDisplayDRM(egl->file.el) };
  if(!display) {
    BOOST_LOG(error) << "Couldn't open a va display from DRM with device: "sv << platf::egl::render_device;
    return -1;
  }

  egl->va_display = display.get();

  vaSetErrorCallback(display.get(), __log, &error);
  // Bug fix: the second callback registration routed *info* messages through
  // vaSetErrorCallback; it must use vaSetInfoCallback.
  vaSetInfoCallback(display.get(), __log, &info);

  int major, minor;
  auto status = vaInitialize(display.get(), &major, &minor);
  if(status) {
    BOOST_LOG(error) << "Couldn't initialize va display: "sv << vaErrorStr(status);
    return -1;
  }

  BOOST_LOG(debug) << "vaapi vendor: "sv << vaQueryVendorString(display.get());

  *hw_device_buf = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VAAPI);
  if(!*hw_device_buf) {
    // Allocation failure would otherwise dereference a null buffer below.
    BOOST_LOG(error) << "Couldn't allocate FFmpeg VAAPI device context"sv;
    return -1;
  }

  auto ctx     = (AVVAAPIDeviceContext *)((AVHWDeviceContext *)(*hw_device_buf)->data)->hwctx;
  ctx->display = display.release(); // ownership moves into the FFmpeg context

  fg.disable();

  auto err = av_hwdevice_ctx_init(*hw_device_buf);
  if(err) {
    char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
    BOOST_LOG(error) << "Failed to create FFMpeg hardware device context: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);

    // The caller must not be handed a half-initialized context.
    av_buffer_unref(hw_device_buf);
    return err;
  }

  return 0;
}
std::shared_ptr<platf::hwdevice_t> make_hwdevice() {
auto egl = std::make_shared<egl_t>();
if(egl->init(render_device)) {
return nullptr;
}
return egl;
}
} // namespace egl
// Platform-level one-time initialization: the EGL loader must succeed and
// expose eglGetPlatformDisplay before any display work can happen.
std::unique_ptr<deinit_t> init() {
  bool egl_ok = gladLoaderLoadEGL(EGL_NO_DISPLAY) && eglGetPlatformDisplay;

  if(!egl_ok) {
    BOOST_LOG(error) << "Couldn't load EGL library"sv;
    return nullptr;
  }

  return std::make_unique<deinit_t>();
}
} // namespace platf
+8
View File
@@ -0,0 +1,8 @@
#ifndef SUNSHINE_DISPLAY_H
#define SUNSHINE_DISPLAY_H
#include "sunshine/platform/common.h"
namespace platf::egl {
std::shared_ptr<hwdevice_t> make_hwdevice();
} // namespace platf::egl
#endif
+7 -43
View File
@@ -7,8 +7,10 @@
#include "display.h"
#include "sunshine/main.h"
#include "sunshine/video.h"
#define SUNSHINE_SHADERS_DIR SUNSHINE_ASSETS_DIR "/shaders"
#define SUNSHINE_SHADERS_DIR SUNSHINE_ASSETS_DIR "/shaders/directx"
namespace platf {
using namespace std::literals;
}
@@ -27,44 +29,6 @@ using blob_t = util::safe_ptr<ID3DBlob, Release<ID3DBlob>>;
using depth_stencil_state_t = util::safe_ptr<ID3D11DepthStencilState, Release<ID3D11DepthStencilState>>;
using depth_stencil_view_t = util::safe_ptr<ID3D11DepthStencilView, Release<ID3D11DepthStencilView>>;
using float4 = DirectX::XMFLOAT4;
using float3 = DirectX::XMFLOAT3;
using float2 = DirectX::XMFLOAT2;
struct __attribute__((__aligned__(16))) color_t {
float4 color_vec_y;
float4 color_vec_u;
float4 color_vec_v;
float2 range_y;
float2 range_uv;
};
// Build an RGB -> YUV conversion matrix from the standard's luma coefficients
// (Cr, Cb), chroma extrema (U_max, V_max), channel offsets, and the
// quantization ranges for Y and UV (e.g. { 16, 235 } for MPEG/limited range).
color_t make_color_matrix(float Cr, float Cb, float U_max, float V_max, float add_Y, float add_UV, float2 range_Y, float2 range_UV) {
  float Cg = 1.0f - Cr - Cb; // the three luma coefficients sum to 1

  float Cr_i = 1.0f - Cr;
  float Cb_i = 1.0f - Cb;

  // Normalize the 8-bit quantization ranges to [0, 1) offsets and scales.
  float shift_y  = range_Y.x / 256.0f;
  float shift_uv = range_UV.x / 256.0f;

  float scale_y  = (range_Y.y - range_Y.x) / 256.0f;
  float scale_uv = (range_UV.y - range_UV.x) / 256.0f;
  return {
    { Cr, Cg, Cb, add_Y },
    { -(Cr * U_max / Cb_i), -(Cg * U_max / Cb_i), U_max, add_UV },
    { V_max, -(Cg * V_max / Cr_i), -(Cb * V_max / Cr_i), add_UV },
    { scale_y, shift_y },
    { scale_uv, shift_uv },
  };
}
color_t colors[] {
make_color_matrix(0.299f, 0.114f, 0.436f, 0.615f, 0.0625, 0.5f, { 16.0f, 235.0f }, { 16.0f, 240.0f }), // BT601 MPEG
make_color_matrix(0.299f, 0.114f, 0.5f, 0.5f, 0.0f, 0.5f, { 0.0f, 255.0f }, { 0.0f, 255.0f }), // BT601 JPEG
make_color_matrix(0.2126f, 0.0722f, 0.436f, 0.615f, 0.0625, 0.5f, { 16.0f, 235.0f }, { 16.0f, 240.0f }), //BT701 MPEG
make_color_matrix(0.2126f, 0.0722f, 0.5f, 0.5f, 0.0f, 0.5f, { 0.0f, 255.0f }, { 0.0f, 255.0f }), //BT701 JPEG
};
template<class T>
buf_t make_buffer(device_t::pointer device, const T &t) {
static_assert(sizeof(T) % 16 == 0, "Buffer needs to be aligned on a 16-byte alignment");
@@ -362,15 +326,15 @@ public:
void set_colorspace(std::uint32_t colorspace, std::uint32_t color_range) override {
switch(colorspace) {
case 5: // SWS_CS_SMPTE170M
color_p = &colors[0];
color_p = &video::colors[0];
break;
case 1: // SWS_CS_ITU709
color_p = &colors[2];
color_p = &video::colors[2];
break;
case 9: // SWS_CS_BT2020
default:
BOOST_LOG(warning) << "Colorspace: ["sv << colorspace << "] not yet supported: switching to default"sv;
color_p = &colors[0];
color_p = &video::colors[0];
};
if(color_range > 1) {
@@ -461,7 +425,7 @@ public:
return -1;
}
color_matrix = make_buffer(device_p, colors[0]);
color_matrix = make_buffer(device_p, video::colors[0]);
if(!color_matrix) {
BOOST_LOG(error) << "Failed to create color matrix buffer"sv;
return -1;
+68 -12
View File
@@ -16,6 +16,41 @@
x; \
while(y) z \
}
// argument_type<void(T)>::type unwraps T from a function type. This lets the
// KITTY_USING_MOVE_T macro accept types containing commas by wrapping them in
// parentheses at the call site.
template<typename T>
struct argument_type;

template<typename T, typename U>
struct argument_type<T(U)> { typedef U type; };

// KITTY_USING_MOVE_T(move_t, t, init_val, z) declares a move-only wrapper
// class 'move_t' around a single element of type 't'. 'init_val' is the
// sentinel "empty" value a moved-from wrapper is reset to, and 'z' is the
// destructor body (typically releasing the resource when el != init_val).
// NOTE: no comments inside the macro body — '//' would swallow the '\' line
// continuations.
// NOTE(review): the move constructor is declared 'explicit', which is
// unusual and prevents implicit move-construction — confirm it's intended.
#define KITTY_USING_MOVE_T(move_t, t, init_val, z) \
  class move_t { \
  public: \
    using element_type = typename argument_type<void(t)>::type; \
    \
    move_t() : el { init_val } {} \
    template<class... Args> \
    move_t(Args &&...args) : el { std::forward<Args>(args)... } {} \
    move_t(const move_t &) = delete; \
    \
    explicit move_t(move_t &&other) : el { std::move(other.el) } { \
      other.el = element_type { init_val }; \
    } \
    \
    move_t &operator=(const move_t &) = delete; \
    \
    move_t &operator=(move_t &&other) { \
      std::swap(el, other.el); \
      return *this; \
    } \
    element_type *operator->() { return &el; } \
    const element_type *operator->() const { return &el; } \
    \
    ~move_t() z \
    \
    element_type el; \
  }
#define KITTY_DECL_CONSTR(x) \
x(x &&) noexcept = default; \
x &operator=(x &&) noexcept = default; \
@@ -57,6 +92,9 @@
decltype(expr) a##_ = expr; \
auto &a = std::get<b>(a##_)
#define TUPLE_EL_REF(a, b, expr) \
auto &a = std::get<b>(expr)
namespace util {
template<template<typename...> class X, class... Y>
@@ -621,6 +659,13 @@ private:
pointer _p;
};
template<class T>
constexpr bool is_pointer_v =
instantiation_of_v<std::unique_ptr, T> ||
instantiation_of_v<std::shared_ptr, T> ||
instantiation_of_v<uniq_ptr, T> ||
std::is_pointer_v<T>;
template<class T, class V = void>
struct __false_v;
@@ -630,12 +675,7 @@ struct __false_v<T, std::enable_if_t<instantiation_of_v<std::optional, T>>> {
};
template<class T>
struct __false_v<T, std::enable_if_t<
(
std::is_pointer_v<T> ||
instantiation_of_v<std::unique_ptr, T> ||
instantiation_of_v<std::shared_ptr, T> ||
instantiation_of_v<uniq_ptr, T>)>> {
struct __false_v<T, std::enable_if_t<is_pointer_v<T>>> {
static constexpr std::nullptr_t value = nullptr;
};
@@ -649,11 +689,7 @@ static constexpr auto false_v = __false_v<T>::value;
template<class T>
using optional_t = either_t<
(std::is_same_v<T, bool> ||
instantiation_of_v<std::unique_ptr, T> ||
instantiation_of_v<std::shared_ptr, T> ||
instantiation_of_v<uniq_ptr, T> ||
std::is_pointer_v<T>),
(std::is_same_v<T, bool> || is_pointer_v<T>),
T, std::optional<T>>;
template<class T>
@@ -705,7 +741,6 @@ private:
std::unique_ptr<T[]> _buf;
};
template<class T>
T either(std::optional<T> &&l, T &&r) {
if(l) {
@@ -741,9 +776,30 @@ void c_free(T *p) {
free(p);
}
template<class T, class ReturnType, ReturnType (**function)(T *)>
void dynamic(T *p) {
(*function)(p);
}
template<class T, void (**function)(T *)>
using dyn_safe_ptr = safe_ptr<T, dynamic<T, void, function>>;
template<class T, class ReturnType, ReturnType (**function)(T *)>
using dyn_safe_ptr_v2 = safe_ptr<T, dynamic<T, ReturnType, function>>;
template<class T>
using c_ptr = safe_ptr<T, c_free<T>>;
template<class It>
std::string_view view(It begin, It end) {
return std::string_view { (const char *)begin, (std::size_t)(end - begin) };
}
template<class T>
std::string_view view(const T &data) {
return std::string_view((const char *)&data, sizeof(T));
}
namespace endian {
template<class T = void>
struct endianness {
+163 -119
View File
@@ -38,22 +38,6 @@ void free_buffer(AVBufferRef *ref) {
av_buffer_unref(&ref);
}
namespace nv {
enum class profile_h264_e : int {
baseline,
main,
high,
high_444p,
};
enum class profile_hevc_e : int {
main,
main_10,
rext,
};
} // namespace nv
using ctx_t = util::safe_ptr<AVCodecContext, free_ctx>;
using frame_t = util::safe_ptr<AVFrame, free_frame>;
using buffer_t = util::safe_ptr<AVBufferRef, free_buffer>;
@@ -63,9 +47,6 @@ using img_event_t = std::shared_ptr<safe::event_t<std::shared_ptr<platf::img_t>>
platf::mem_type_e map_dev_type(AVHWDeviceType type);
platf::pix_fmt_e map_pix_fmt(AVPixelFormat fmt);
int sw_img_to_frame(const void *img, frame_t &frame);
int vaapi_img_to_frame(const void *img, frame_t &frame);
int dxgi_img_to_frame(const void *img, frame_t &frame);
util::Either<buffer_t, int> dxgi_make_hwdevice_ctx(platf::hwdevice_t *hwdevice_ctx);
util::Either<buffer_t, int> vaapi_make_hwdevice_ctx(platf::hwdevice_t *hwdevice_ctx);
@@ -74,39 +55,58 @@ int hwframe_ctx(ctx_t &ctx, buffer_t &hwdevice, AVPixelFormat format);
class swdevice_t : public platf::hwdevice_t {
public:
int convert(platf::img_t &img) override {
auto frame = (AVFrame *)this->img;
av_frame_make_writable(frame);
av_frame_make_writable(sw_frame.get());
const int linesizes[2] {
img.row_pitch, 0
};
std::uint8_t *data[4];
data[0] = frame->data[0] + offset;
if(frame->format == AV_PIX_FMT_NV12) {
data[1] = frame->data[1] + offset;
data[0] = sw_frame->data[0] + offset;
if(sw_frame->format == AV_PIX_FMT_NV12) {
data[1] = sw_frame->data[1] + offset;
data[2] = nullptr;
}
else {
data[1] = frame->data[1] + offset / 2;
data[2] = frame->data[2] + offset / 2;
data[1] = sw_frame->data[1] + offset / 2;
data[2] = sw_frame->data[2] + offset / 2;
data[3] = nullptr;
}
int ret = sws_scale(sws.get(), (std::uint8_t *const *)&img.data, linesizes, 0, img.height, data, frame->linesize);
int ret = sws_scale(sws.get(), (std::uint8_t *const *)&img.data, linesizes, 0, img.height, data, sw_frame->linesize);
if(ret <= 0) {
BOOST_LOG(error) << "Couldn't convert image to required format and/or size"sv;
return -1;
}
// If frame is not a software frame, it means we still need to transfer from main memory
// to vram memory
if(frame->hw_frames_ctx) {
auto status = av_hwframe_transfer_data(frame, sw_frame.get(), 0);
if(status < 0) {
char string[AV_ERROR_MAX_STRING_SIZE];
BOOST_LOG(error) << "Failed to transfer image data to hardware frame: "sv << av_make_error_string(string, AV_ERROR_MAX_STRING_SIZE, status);
return -1;
}
}
return 0;
}
virtual void set_colorspace(std::uint32_t colorspace, std::uint32_t color_range) {
int set_frame(AVFrame *frame) {
this->frame = frame;
// If it's a hwframe, allocate buffers for hardware
if(frame->hw_frames_ctx && av_hwframe_get_buffer(frame->hw_frames_ctx, frame, 0)) {
return -1;
}
return 0;
}
void set_colorspace(std::uint32_t colorspace, std::uint32_t color_range) override {
sws_setColorspaceDetails(sws.get(),
sws_getCoefficients(SWS_CS_DEFAULT), 0,
sws_getCoefficients(colorspace), color_range - 1,
@@ -117,15 +117,13 @@ public:
* When preserving aspect ratio, ensure that padding is black
*/
int prefill() {
auto frame = (frame_t::pointer)img;
auto width = frame->width;
auto height = frame->height;
auto width = sw_frame->width;
auto height = sw_frame->height;
sws_t sws {
sws_getContext(
width, height, AV_PIX_FMT_BGR0,
width, height, (AVPixelFormat)frame->format,
width, height, (AVPixelFormat)sw_frame->format,
SWS_LANCZOS | SWS_ACCURATE_RND,
nullptr, nullptr, nullptr)
};
@@ -141,10 +139,10 @@ public:
width, 0
};
av_frame_make_writable(frame);
av_frame_make_writable(sw_frame.get());
auto data = img.begin();
int ret = sws_scale(sws.get(), (std::uint8_t *const *)&data, linesizes, 0, height, frame->data, frame->linesize);
int ret = sws_scale(sws.get(), (std::uint8_t *const *)&data, linesizes, 0, height, sw_frame->data, sw_frame->linesize);
if(ret <= 0) {
BOOST_LOG(error) << "Couldn't convert image to required format and/or size"sv;
@@ -155,25 +153,21 @@ public:
}
int init(int in_width, int in_height, AVFrame *frame, AVPixelFormat format) {
sw_frame.reset(av_frame_alloc());
// If the device used is hardware, yet the image resides on main memory
if(frame->hw_frames_ctx) {
if(av_hwframe_get_buffer(frame->hw_frames_ctx, frame, 0)) {
return -1;
}
img = av_frame_alloc();
auto sw_frame = (frame_t::pointer)img;
sw_frame->width = frame->width;
sw_frame->height = frame->height;
sw_frame->format = format;
av_frame_get_buffer(sw_frame.get(), 0);
}
else {
img = frame;
}
av_frame_get_buffer(frame, 0);
av_frame_get_buffer((frame_t::pointer)img, 0);
av_frame_ref(sw_frame.get(), frame);
}
if(prefill()) {
return -1;
@@ -202,11 +196,12 @@ public:
}
~swdevice_t() override {
if(img) {
av_frame_unref((frame_t::pointer)img);
if(frame) {
av_frame_unref(frame);
}
}
frame_t sw_frame;
sws_t sws;
// offset of input image to output frame in pixels
@@ -227,10 +222,27 @@ struct encoder_t {
REF_FRAMES_RESTRICT, // Set maximum reference frames
REF_FRAMES_AUTOSELECT, // Allow encoder to select maximum reference frames (If !REF_FRAMES_RESTRICT --> REF_FRAMES_AUTOSELECT)
SLICE, // Allow frame to be partitioned into multiple slices
DYNAMIC_RANGE,
DYNAMIC_RANGE, // hdr
MAX_FLAGS
};
static std::string_view from_flag(flag_e flag) {
#define _CONVERT(x) \
case flag_e::x: \
return #x##sv
switch(flag) {
_CONVERT(PASSED);
_CONVERT(REF_FRAMES_RESTRICT);
_CONVERT(REF_FRAMES_AUTOSELECT);
_CONVERT(SLICE);
_CONVERT(DYNAMIC_RANGE);
_CONVERT(MAX_FLAGS);
}
#undef _CONVERT
return "unknown"sv;
}
struct option_t {
KITTY_DEFAULT_CONSTR(option_t)
option_t(const option_t &) = default;
@@ -271,28 +283,25 @@ struct encoder_t {
int flags;
std::function<int(const void *, frame_t &)> img_to_frame;
std::function<util::Either<buffer_t, int>(platf::hwdevice_t *hwdevice)> make_hwdevice_ctx;
};
class session_t {
public:
session_t() = default;
session_t(ctx_t &&ctx, frame_t &&frame, util::wrap_ptr<platf::hwdevice_t> &&device) : ctx { std::move(ctx) }, frame { std::move(frame) }, device { std::move(device) } {}
session_t(ctx_t &&ctx, util::wrap_ptr<platf::hwdevice_t> &&device) : ctx { std::move(ctx) }, device { std::move(device) } {}
session_t(session_t &&other) noexcept : ctx { std::move(other.ctx) }, frame { std::move(other.frame) }, device { std::move(other.device) } {}
session_t(session_t &&other) noexcept : ctx { std::move(other.ctx) }, device { std::move(other.device) } {}
// Ensure objects are destroyed in the correct order
session_t &operator=(session_t &&other) {
device = std::move(other.device);
frame = std::move(other.frame);
ctx = std::move(other.ctx);
return *this;
}
ctx_t ctx;
frame_t frame;
util::wrap_ptr<platf::hwdevice_t> device;
};
@@ -413,9 +422,8 @@ static encoder_t amdvce {
std::make_optional<encoder_t::option_t>({ "qp"s, &config::video.qp }),
"h264_amf"s,
},
DEFAULT
DEFAULT,
dxgi_img_to_frame,
dxgi_make_hwdevice_ctx
};
#endif
@@ -452,7 +460,6 @@ static encoder_t software {
},
H264_ONLY | SYSTEM_MEMORY,
sw_img_to_frame,
nullptr
};
@@ -462,8 +469,7 @@ static encoder_t vaapi {
{ FF_PROFILE_H264_HIGH, FF_PROFILE_HEVC_MAIN, FF_PROFILE_HEVC_MAIN_10 },
AV_HWDEVICE_TYPE_VAAPI,
AV_PIX_FMT_VAAPI,
AV_PIX_FMT_NV12,
AV_PIX_FMT_YUV420P10,
AV_PIX_FMT_NV12, AV_PIX_FMT_YUV420P10,
{
{
{ "sei"s, 0 },
@@ -477,7 +483,6 @@ static encoder_t vaapi {
{
{ "sei"s, 0 },
{ "idr_interval"s, std::numeric_limits<int>::max() },
// { "quality"s, 10 },
},
std::nullopt,
std::nullopt,
@@ -485,7 +490,6 @@ static encoder_t vaapi {
},
LIMITED_GOP_SIZE | SYSTEM_MEMORY,
vaapi_img_to_frame,
vaapi_make_hwdevice_ctx
};
#endif
@@ -650,11 +654,11 @@ void captureThread(
}
}
int encode(int64_t frame_nr, ctx_t &ctx, frame_t &frame, packet_queue_t &packets, void *channel_data) {
int encode(int64_t frame_nr, ctx_t &ctx, frame_t::pointer frame, packet_queue_t &packets, void *channel_data) {
frame->pts = frame_nr;
/* send the frame to the encoder */
auto ret = avcodec_send_frame(ctx.get(), frame.get());
auto ret = avcodec_send_frame(ctx.get(), frame);
if(ret < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Could not send a frame for encoding: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, ret);
@@ -881,10 +885,13 @@ std::optional<session_t> make_session(const encoder_t &encoder, const config_t &
device = hwdevice;
}
if(device->set_frame(frame.release())) {
return std::nullopt;
}
device->set_colorspace(sws_color_space, ctx->color_range);
return std::make_optional(session_t {
std::move(ctx),
std::move(frame),
std::move(device),
});
}
@@ -910,15 +917,18 @@ void encode_run(
auto delay = std::chrono::floor<std::chrono::nanoseconds>(1s) / config.framerate;
auto next_frame = std::chrono::steady_clock::now();
auto frame = session->device->frame;
while(true) {
if(shutdown_event->peek() || reinit_event.peek() || !images->running()) {
break;
}
if(idr_events->peek()) {
session->frame->pict_type = AV_PICTURE_TYPE_I;
session->frame->key_frame = 1;
auto event = idr_events->pop();
frame->pict_type = AV_PICTURE_TYPE_I;
frame->key_frame = 1;
auto event = idr_events->pop();
if(!event) {
return;
}
@@ -928,8 +938,10 @@ void encode_run(
key_frame_nr = end + config.framerate;
}
else if(frame_nr == key_frame_nr) {
session->frame->pict_type = AV_PICTURE_TYPE_I;
session->frame->key_frame = 1;
auto frame = session->device->frame;
frame->pict_type = AV_PICTURE_TYPE_I;
frame->key_frame = 1;
}
std::this_thread::sleep_until(next_frame);
@@ -939,10 +951,6 @@ void encode_run(
if(frame_nr > (key_frame_nr + config.framerate) || images->peek()) {
if(auto img = images->pop(delay)) {
session->device->convert(*img);
if(encoder.img_to_frame(session->device->img, session->frame)) {
return;
}
}
else if(images->running()) {
continue;
@@ -952,13 +960,13 @@ void encode_run(
}
}
if(encode(frame_nr++, session->ctx, session->frame, packets, channel_data)) {
if(encode(frame_nr++, session->ctx, frame, packets, channel_data)) {
BOOST_LOG(error) << "Could not encode video packet"sv;
return;
}
session->frame->pict_type = AV_PICTURE_TYPE_NONE;
session->frame->key_frame = 0;
frame->pict_type = AV_PICTURE_TYPE_NONE;
frame->key_frame = 0;
}
}
@@ -1064,7 +1072,8 @@ encode_e encode_run_sync(std::vector<std::unique_ptr<sync_session_ctx_t>> &synce
next_frame = now + 1s;
KITTY_WHILE_LOOP(auto pos = std::begin(synced_sessions), pos != std::end(synced_sessions), {
auto ctx = pos->ctx;
auto frame = pos->session.device->frame;
auto ctx = pos->ctx;
if(ctx->shutdown_event->peek()) {
// Let waiting thread know it can delete shutdown_event
ctx->join_event->raise(true);
@@ -1082,8 +1091,8 @@ encode_e encode_run_sync(std::vector<std::unique_ptr<sync_session_ctx_t>> &synce
}
if(ctx->idr_events->peek()) {
pos->session.frame->pict_type = AV_PICTURE_TYPE_I;
pos->session.frame->key_frame = 1;
frame->pict_type = AV_PICTURE_TYPE_I;
frame->key_frame = 1;
auto event = ctx->idr_events->pop();
auto end = event->second;
@@ -1092,8 +1101,8 @@ encode_e encode_run_sync(std::vector<std::unique_ptr<sync_session_ctx_t>> &synce
ctx->key_frame_nr = end + ctx->config.framerate;
}
else if(ctx->frame_nr == ctx->key_frame_nr) {
pos->session.frame->pict_type = AV_PICTURE_TYPE_I;
pos->session.frame->key_frame = 1;
frame->pict_type = AV_PICTURE_TYPE_I;
frame->key_frame = 1;
}
if(img_tmp) {
@@ -1120,21 +1129,17 @@ encode_e encode_run_sync(std::vector<std::unique_ptr<sync_session_ctx_t>> &synce
continue;
}
pos->img_tmp = nullptr;
if(encoder.img_to_frame(pos->hwdevice->img, pos->session.frame)) {
return encode_e::error;
}
}
if(encode(ctx->frame_nr++, pos->session.ctx, pos->session.frame, ctx->packets, ctx->channel_data)) {
if(encode(ctx->frame_nr++, pos->session.ctx, frame, ctx->packets, ctx->channel_data)) {
BOOST_LOG(error) << "Could not encode video packet"sv;
ctx->shutdown_event->raise(true);
continue;
}
pos->session.frame->pict_type = AV_PICTURE_TYPE_NONE;
pos->session.frame->key_frame = 0;
frame->pict_type = AV_PICTURE_TYPE_NONE;
frame->key_frame = 0;
++pos;
})
@@ -1288,14 +1293,12 @@ bool validate_config(std::shared_ptr<platf::display_t> &disp, const encoder_t &e
return false;
}
if(encoder.img_to_frame(session->device->img, session->frame)) {
return false;
}
auto frame = session->device->frame;
session->frame->pict_type = AV_PICTURE_TYPE_I;
frame->pict_type = AV_PICTURE_TYPE_I;
auto packets = std::make_shared<packet_queue_t::element_type>(30);
if(encode(1, session->ctx, session->frame, packets, nullptr)) {
if(encode(1, session->ctx, frame, packets, nullptr)) {
return false;
}
@@ -1397,6 +1400,14 @@ int init() {
break;
})
BOOST_LOG(info);
BOOST_LOG(info) << "//////////////////////////////////////////////////////////////"sv;
BOOST_LOG(info) << "// //"sv;
BOOST_LOG(info) << "// Ignore any errors mentioned above, they are not relevant //"sv;
BOOST_LOG(info) << "// //"sv;
BOOST_LOG(info) << "//////////////////////////////////////////////////////////////"sv;
BOOST_LOG(info);
if(encoders.empty()) {
if(config::video.encoder.empty()) {
BOOST_LOG(fatal) << "Couldn't find any encoder"sv;
@@ -1408,16 +1419,23 @@ int init() {
return -1;
}
BOOST_LOG(info);
BOOST_LOG(info) << "//////////////////////////////////////////////////////////////"sv;
BOOST_LOG(info) << "// //"sv;
BOOST_LOG(info) << "// Ignore any errors mentioned above, they are not relevant //"sv;
BOOST_LOG(info) << "// //"sv;
BOOST_LOG(info) << "//////////////////////////////////////////////////////////////"sv;
BOOST_LOG(info);
auto &encoder = encoders.front();
BOOST_LOG(debug) << "------ h264 ------"sv;
for(int x = 0; x < encoder_t::MAX_FLAGS; ++x) {
auto flag = (encoder_t::flag_e)x;
BOOST_LOG(debug) << encoder_t::from_flag(flag) << (encoder.h264[flag] ? ": supported"sv : ": unsupported"sv);
}
BOOST_LOG(debug) << "-------------------"sv;
if(encoder.hevc[encoder_t::PASSED]) {
BOOST_LOG(debug) << "------ hevc ------"sv;
for(int x = 0; x < encoder_t::MAX_FLAGS; ++x) {
auto flag = (encoder_t::flag_e)x;
BOOST_LOG(debug) << encoder_t::from_flag(flag) << (encoder.hevc[flag] ? ": supported"sv : ": unsupported"sv);
}
BOOST_LOG(debug) << "-------------------"sv;
BOOST_LOG(info) << "Found encoder "sv << encoder.name << ": ["sv << encoder.h264.name << ", "sv << encoder.hevc.name << ']';
}
else {
@@ -1439,7 +1457,7 @@ int hwframe_ctx(ctx_t &ctx, buffer_t &hwdevice, AVPixelFormat format) {
frame_ctx->sw_format = format;
frame_ctx->height = ctx->height;
frame_ctx->width = ctx->width;
frame_ctx->initial_pool_size = 20;
frame_ctx->initial_pool_size = 0;
if(auto err = av_hwframe_ctx_init(frame_ref.get()); err < 0) {
return err;
@@ -1450,23 +1468,21 @@ int hwframe_ctx(ctx_t &ctx, buffer_t &hwdevice, AVPixelFormat format) {
return 0;
}
int sw_img_to_frame(const void *img, frame_t &frame) {
return 0;
}
// Linux only declaration
typedef int (*vaapi_make_hwdevice_ctx_fn)(platf::hwdevice_t *base, AVBufferRef **hw_device_buf);
int vaapi_img_to_frame(const void *img, frame_t &frame) {
auto status = av_hwframe_transfer_data(frame.get(), (frame_t::pointer)img, 0);
if(status < 0) {
char string[AV_ERROR_MAX_STRING_SIZE];
BOOST_LOG(error) << "Failed to transfer image data to hardware frame: "sv << av_make_error_string(string, AV_ERROR_MAX_STRING_SIZE, status);
return -1;
util::Either<buffer_t, int> vaapi_make_hwdevice_ctx(platf::hwdevice_t *base) {
buffer_t hw_device_buf;
// If an egl hwdevice
if(base->data) {
if(((vaapi_make_hwdevice_ctx_fn)base->data)(base, &hw_device_buf)) {
return -1;
}
return hw_device_buf;
}
return 0;
}
util::Either<buffer_t, int> vaapi_make_hwdevice_ctx(platf::hwdevice_t *) {
buffer_t hw_device_buf;
auto status = av_hwdevice_ctx_create(&hw_device_buf, AV_HWDEVICE_TYPE_VAAPI, "/dev/dri/renderD129", nullptr, 0);
if(status < 0) {
char string[AV_ERROR_MAX_STRING_SIZE];
@@ -1569,8 +1585,9 @@ platf::mem_type_e map_dev_type(AVHWDeviceType type) {
switch(type) {
case AV_HWDEVICE_TYPE_D3D11VA:
return platf::mem_type_e::dxgi;
case AV_PICTURE_TYPE_NONE:
case AV_HWDEVICE_TYPE_VAAPI:
return platf::mem_type_e::vaapi;
case AV_PICTURE_TYPE_NONE:
return platf::mem_type_e::system;
default:
return platf::mem_type_e::unknown;
@@ -1595,4 +1612,31 @@ platf::pix_fmt_e map_pix_fmt(AVPixelFormat fmt) {
return platf::pix_fmt_e::unknown;
}
color_t make_color_matrix(float Cr, float Cb, float U_max, float V_max, float add_Y, float add_UV, const float2 &range_Y, const float2 &range_UV) {
float Cg = 1.0f - Cr - Cb;
float Cr_i = 1.0f - Cr;
float Cb_i = 1.0f - Cb;
float shift_y = range_Y[0] / 256.0f;
float shift_uv = range_UV[0] / 256.0f;
float scale_y = (range_Y[1] - range_Y[0]) / 256.0f;
float scale_uv = (range_UV[1] - range_UV[0]) / 256.0f;
return {
{ Cr, Cg, Cb, add_Y },
{ -(Cr * U_max / Cb_i), -(Cg * U_max / Cb_i), U_max, add_UV },
{ V_max, -(Cg * V_max / Cr_i), -(Cb * V_max / Cr_i), add_UV },
{ scale_y, shift_y },
{ scale_uv, shift_uv },
};
}
color_t colors[] {
make_color_matrix(0.299f, 0.114f, 0.436f, 0.615f, 0.0625, 0.5f, { 16.0f, 235.0f }, { 16.0f, 240.0f }), // BT601 MPEG
make_color_matrix(0.299f, 0.114f, 0.5f, 0.5f, 0.0f, 0.5f, { 0.0f, 255.0f }, { 0.0f, 255.0f }), // BT601 JPEG
make_color_matrix(0.2126f, 0.0722f, 0.436f, 0.615f, 0.0625, 0.5f, { 16.0f, 235.0f }, { 16.0f, 240.0f }), // BT701 MPEG
make_color_matrix(0.2126f, 0.0722f, 0.5f, 0.5f, 0.0f, 0.5f, { 0.0f, 255.0f }, { 0.0f, 255.0f }), // BT701 JPEG
};
}
+14
View File
@@ -62,6 +62,20 @@ struct config_t {
int dynamicRange;
};
using float4 = float[4];
using float3 = float[3];
using float2 = float[2];
struct __attribute__((__aligned__(16))) color_t {
float4 color_vec_y;
float4 color_vec_u;
float4 color_vec_v;
float2 range_y;
float2 range_uv;
};
extern color_t colors[4];
void capture(
safe::signal_t *shutdown_event,
packet_queue_t packets,