[libav] More audio work
jcelerier committed Jan 8, 2024
1 parent fcfdc6d commit 966ee77
Showing 6 changed files with 289 additions and 172 deletions.
1 change: 1 addition & 0 deletions src/plugins/score-plugin-gfx/CMakeLists.txt
@@ -240,6 +240,7 @@ target_include_directories(${PROJECT_NAME}
target_link_libraries(${PROJECT_NAME} PUBLIC
score_lib_base score_lib_localtree score_lib_process score_plugin_dataflow score_plugin_engine
${QT_PREFIX}::ShaderTools ${QT_PREFIX}::ShaderToolsPrivate ${QT_PREFIX}::GuiPrivate
"$<BUILD_INTERFACE:r8brain>"
)

# for HAP
4 changes: 2 additions & 2 deletions src/plugins/score-plugin-gfx/Gfx/Kinect2Device.cpp
@@ -336,7 +336,7 @@ void kinect2_camera::processDepth(libfreenect2::Frame* depthFrame)
}
}

class kinect2_parameter : public ossia::gfx::texture_input_parameter
class kinect2_parameter : public ossia::gfx::texture_parameter
{
GfxExecutionAction* context{};

@@ -348,7 +348,7 @@ class kinect2_parameter : public ossia::gfx::texture_input_parameter
kinect2_parameter(
const std::shared_ptr<kinect2_decoder>& dec, ossia::net::node_base& n,
GfxExecutionAction& ctx)
: ossia::gfx::texture_input_parameter{n}
: ossia::gfx::texture_parameter{n}
, context{&ctx}
, decoder{dec}
, node{new score::gfx::CameraNode(decoder, dec->filter)}
24 changes: 21 additions & 3 deletions src/plugins/score-plugin-gfx/Gfx/Libav/LibavEncoder.cpp
@@ -110,21 +110,35 @@ int LibavEncoder::start()

int LibavEncoder::add_frame(tcb::span<ossia::float_vector> vec)
{
if(!m_formatContext)
return 1;
auto& stream = streams[audio_stream_index];
auto next_frame = stream.get_audio_frame();
AVFrame* next_frame = stream.get_audio_frame();

next_frame->format = AV_SAMPLE_FMT_FLT;
next_frame->sample_rate = SAMPLE_RATE_TEST;
next_frame->format = SAMPLE_FORMAT_TEST;
next_frame->nb_samples = vec[0].size();
next_frame->ch_layout.nb_channels = vec.size();
next_frame->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
// next_frame->data[0] = (unsigned char*)data;

std::vector<int16_t> data;
data.reserve(1024);
for(int i = 0; i < BUFFER_SIZE_TEST; i++)
for(int c = 0; c < CHANNELS_TEST; c++)
data.push_back(vec[c][i] * 32768.f);

next_frame->data[0] = (uint8_t*)data.data();
next_frame->data[1] = nullptr;
#if 0
{
auto& frame = next_frame;
int channels = vec.size();
if(channels <= AV_NUM_DATA_POINTERS)
{
for(int i = 0; i < channels; ++i)
{
frame->data[i] = reinterpret_cast<uint8_t*>(vec[i].data());
}
}
else
{
@@ -140,12 +154,16 @@ int LibavEncoder::add_frame(tcb::span<ossia::float_vector> vec)
frame->extended_data[i] = reinterpret_cast<uint8_t*>(vec[i].data());
}
}
#endif
return stream.write_audio_frame(m_formatContext, next_frame);
}

int LibavEncoder::add_frame(
const unsigned char* data, AVPixelFormat fmt, int width, int height)
{
if(!m_formatContext)
return 1;

auto& stream = streams[video_stream_index];
auto next_frame = stream.get_video_frame();
next_frame->format = fmt;
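Note on the audio path above: the new code interleaves the planar float input into a temporary int16_t vector and points next_frame->data[0] at it for the duration of write_audio_frame. Below is a self-contained sketch of the same idea that lets the frame own its buffer instead, assuming FFmpeg >= 5.1 (AVChannelLayout API); it is illustrative, not the plugin's actual code.

extern "C" {
#include <libavutil/channel_layout.h>
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
}
#include <algorithm>
#include <cstdint>
#include <vector>

// Build an interleaved S16 audio AVFrame from planar float channels.
static AVFrame* make_s16_frame(const std::vector<std::vector<float>>& planar, int rate)
{
  const int channels = static_cast<int>(planar.size());
  const int nb_samples = channels > 0 ? static_cast<int>(planar[0].size()) : 0;

  AVFrame* frame = av_frame_alloc();
  frame->format = AV_SAMPLE_FMT_S16; // interleaved signed 16-bit
  frame->sample_rate = rate;
  frame->nb_samples = nb_samples;
  av_channel_layout_default(&frame->ch_layout, channels);

  if(av_frame_get_buffer(frame, 0) < 0) // the frame owns its data buffers
  {
    av_frame_free(&frame);
    return nullptr;
  }

  auto* out = reinterpret_cast<int16_t*>(frame->data[0]);
  for(int i = 0; i < nb_samples; i++)
    for(int c = 0; c < channels; c++)
      *out++ = static_cast<int16_t>(
          std::clamp(planar[c][i] * 32767.f, -32768.f, 32767.f));
  return frame;
}

Letting av_frame_get_buffer allocate the storage also sidesteps the lifetime question of pointing data[0] at a stack-local vector.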
65 changes: 48 additions & 17 deletions src/plugins/score-plugin-gfx/Gfx/Libav/LibavOutputDevice.cpp
@@ -1,5 +1,7 @@
#include "LibavOutputDevice.hpp"

#include "Gfx/Libav/LibavOutputStream.hpp"

#if SCORE_HAS_LIBAV
#include <State/MessageListSerialization.hpp>
#include <State/Widgets/AddressFragmentLineEdit.hpp>
@@ -90,9 +92,14 @@ class record_audio_parameter final : public ossia::audio_parameter

void push_value(const ossia::audio_port& mixed) noexcept override
{
// ossia::virtual_audio_parameter::push_value(mixed);
m_audio_data.resize(mixed.channels());
for(std::size_t i = 0; i < mixed.channels(); i++)
{
auto& chan = mixed.channel(i);
m_audio_data[i].assign(chan.begin(), chan.end());
}

m_encoder.add_frame(this->m_audio_data);
m_encoder.add_frame(m_audio_data);
}

void set_buffer_size(int bs)
@@ -144,7 +151,8 @@ class libav_output_device : public ossia::net::device_base
auto audio = root.add_child(
std::make_unique<ossia::net::generic_node>("Audio", *this, root));
SCORE_ASSERT(audio);
audio->set_parameter(std::make_unique<record_audio_parameter>(enc, 2, 512, *audio));
audio->set_parameter(std::make_unique<record_audio_parameter>(
enc, CHANNELS_TEST, BUFFER_SIZE_TEST, *audio));
}

const ossia::net::generic_node& get_root_node() const override { return root; }
@@ -202,7 +210,7 @@ static const std::map<QString, LibavOutputSettings> libav_preset_list{
.rate = 30,
},
.video_encoder_short = "mjpeg",
.video_input_pixfmt = "rgba",
.video_render_pixfmt = "rgba",
.video_converted_pixfmt = "yuv420p",
.muxer = "mjpeg",
.options = {{"fflags", "+nobuffer+genpts"}, {"flags", "+low_delay"}}}},
@@ -216,7 +224,7 @@ static const std::map<QString, LibavOutputSettings> libav_preset_list{
.rate = 30,
},
.video_encoder_short = "libx265",
.video_input_pixfmt = "rgba",
.video_render_pixfmt = "rgba",
.video_converted_pixfmt = "yuv420p",
.muxer = "matroska"}},

@@ -228,8 +236,8 @@ static const std::map<QString, LibavOutputSettings> libav_preset_list{
.height = 720,
.rate = 30,
},
.video_encoder_short = "foo",
.video_encoder_short = "bar",
.video_encoder_short = "",
.video_encoder_short = "",
.audio_encoder_short = "pcm_s16le",
.audio_encoder_long = "",
.muxer = "wav",
@@ -521,19 +529,22 @@ Device::DeviceSettings LibavOutputSettingsWidget::getSettings() const
specif.muxer = muxer->name;
if(muxer->long_name)
specif.muxer_long = muxer->long_name;

if(acodec)
{
specif.audio_encoder_short = acodec->name;
if(acodec->long_name)
specif.audio_encoder_long = acodec->long_name;
specif.audio_converted_smpfmt = this->m_smpfmt->currentText();
}

if(vcodec)
{
specif.video_encoder_short = vcodec->name;
if(vcodec->long_name)
specif.video_encoder_long = vcodec->long_name;

specif.video_input_pixfmt = av_pix_fmt_desc_get(AV_PIX_FMT_RGBA)->name;
specif.video_render_pixfmt = av_pix_fmt_desc_get(AV_PIX_FMT_RGBA)->name;
specif.video_converted_pixfmt = this->m_pixfmt->currentText();
}
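getSettings() above stores format choices as strings (the canonical name of AV_PIX_FMT_RGBA via av_pix_fmt_desc_get, plus the combo-box texts). A minimal sketch of the string <-> enum round-trip with stock libavutil calls, for reference only:

extern "C" {
#include <libavutil/pixdesc.h>
#include <libavutil/samplefmt.h>
}
#include <cstdio>

int main()
{
  // enum -> canonical name, as done above for the render pixel format
  const char* rgba = av_pix_fmt_desc_get(AV_PIX_FMT_RGBA)->name; // "rgba"

  // name -> enum, as the encoder setup needs when reading the settings back
  const AVPixelFormat pix = av_get_pix_fmt("yuv420p"); // AV_PIX_FMT_YUV420P
  const AVSampleFormat smp = av_get_sample_fmt("s16"); // AV_SAMPLE_FMT_S16

  std::printf("%s %d %d\n", rgba, pix, smp);
  return (pix != AV_PIX_FMT_NONE && smp != AV_SAMPLE_FMT_NONE) ? 0 : 1;
}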

@@ -636,9 +647,10 @@ void DataStreamReader::read(const Gfx::LibavOutputSettings& n)
read((const Gfx::SharedOutputSettings&)n);

m_stream << n.hardwareAcceleration;
m_stream << n.audio_encoder_short << n.audio_encoder_long;
m_stream << n.audio_encoder_short << n.audio_encoder_long << n.audio_converted_smpfmt
<< n.audio_sample_rate << n.audio_channels;
m_stream << n.video_encoder_short << n.video_encoder_long;
m_stream << n.video_input_pixfmt;
m_stream << n.video_render_pixfmt;
m_stream << n.video_converted_pixfmt;
m_stream << n.muxer << n.muxer_long;
m_stream << n.options;
@@ -652,11 +664,16 @@ void DataStreamWriter::write(Gfx::LibavOutputSettings& n)
write((Gfx::SharedOutputSettings&)n);

m_stream >> n.hardwareAcceleration;
m_stream >> n.audio_encoder_short >> n.audio_encoder_long;

m_stream >> n.audio_encoder_short >> n.audio_encoder_long >> n.audio_converted_smpfmt
>> n.audio_sample_rate >> n.audio_channels;

m_stream >> n.video_encoder_short >> n.video_encoder_long;
m_stream >> n.video_input_pixfmt;
m_stream >> n.video_render_pixfmt;
m_stream >> n.video_converted_pixfmt;

m_stream >> n.muxer >> n.muxer_long;

m_stream >> n.options;
m_stream >> n.threads;
checkDelimiter();
@@ -667,14 +684,21 @@ void JSONReader::read(const Gfx::LibavOutputSettings& n)
{
read((const Gfx::SharedOutputSettings&)n);
obj["HWAccel"] = n.hardwareAcceleration;

obj["AudioEncoderShort"] = n.audio_encoder_short;
obj["AudioEncoderLong"] = n.audio_encoder_long;
obj["AudioConvSmpFmt"] = n.audio_converted_smpfmt;
obj["AudioRate"] = n.audio_sample_rate;
obj["AudioChannels"] = n.audio_channels;

obj["VideoEncoderShort"] = n.video_encoder_short;
obj["VideoEncoderLong"] = n.video_encoder_long;
obj["InputPixFmt"] = n.video_input_pixfmt;
obj["ConvPixFmt"] = n.video_converted_pixfmt;
obj["VideoRenderPixFmt"] = n.video_render_pixfmt;
obj["VideoConvPixFmt"] = n.video_converted_pixfmt;

obj["MuxerShort"] = n.muxer;
obj["MuxerLong"] = n.muxer_long;

obj["Options"] = n.options;
obj["Threads"] = n.threads;
}
@@ -684,14 +708,21 @@ void JSONWriter::write(Gfx::LibavOutputSettings& n)
{
write((Gfx::SharedOutputSettings&)n);
n.hardwareAcceleration <<= obj["HWAccel"];

n.audio_encoder_short <<= obj["AudioEncoderShort"];
n.audio_encoder_long <<= obj["AudioEncoderLong"];
n.audio_converted_smpfmt <<= obj["AudioConvSmpFmt"];
n.audio_sample_rate <<= obj["AudioRate"];
n.audio_channels <<= obj["AudioChannels"];

n.video_encoder_short <<= obj["VideoEncoderShort"];
n.video_encoder_long <<= obj["VideoEncoderLong"];
n.video_input_pixfmt <<= obj["InputPixFmt"];
n.video_converted_pixfmt <<= obj["ConvPixFmt"];
n.video_render_pixfmt <<= obj["VideoRenderPixFmt"];
n.video_converted_pixfmt <<= obj["VideoConvPixFmt"];

n.muxer <<= obj["MuxerShort"];
n.muxer_long <<= obj["MuxerLong"];

n.options <<= obj["Options"];
n.threads <<= obj["Threads"];
}
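The DataStream reader/writer pair above must stream the new fields in exactly the same order on both sides, or every later field deserializes garbage. A toy illustration of that invariant with plain QDataStream (the names here are made up, not score's serialization framework):

#include <QByteArray>
#include <QDataStream>
#include <QIODevice>
#include <QString>

struct ToySettings
{
  QString codec;
  double rate{};
  int channels{};
};

static QByteArray save(const ToySettings& s)
{
  QByteArray buf;
  QDataStream out{&buf, QIODevice::WriteOnly};
  out << s.codec << s.rate << s.channels; // write order...
  return buf;
}

static ToySettings load(const QByteArray& buf)
{
  ToySettings s;
  QDataStream in{buf};
  in >> s.codec >> s.rate >> s.channels; // ...must match the read order exactly
  return s;
}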
@@ -16,8 +16,11 @@ struct LibavOutputSettings : SharedOutputSettings
{
AVPixelFormat hardwareAcceleration{AV_PIX_FMT_NONE};
QString audio_encoder_short, audio_encoder_long;
QString audio_converted_smpfmt;
double audio_sample_rate{44100.};
int audio_channels{2};
QString video_encoder_short, video_encoder_long;
QString video_input_pixfmt;
QString video_render_pixfmt;
QString video_converted_pixfmt;
QString muxer, muxer_long;
ossia::hash_map<QString, QString> options;
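The header now carries the audio sample format, rate and channel count alongside the renamed video_render_pixfmt. For orientation, a hedged sketch of how string-based settings like these typically get applied to an encoder context, using standard libavcodec calls rather than LibavOutputStream's actual code:

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
}
#include <string>

// Map string-based audio settings onto an AVCodecContext (illustrative).
static AVCodecContext* open_audio_encoder(
    const std::string& encoder_name,    // cf. audio_encoder_short
    const std::string& sample_fmt_name, // cf. audio_converted_smpfmt
    int sample_rate, int channels)
{
  const AVCodec* codec = avcodec_find_encoder_by_name(encoder_name.c_str());
  if(!codec)
    return nullptr;

  AVCodecContext* ctx = avcodec_alloc_context3(codec);
  ctx->sample_fmt = av_get_sample_fmt(sample_fmt_name.c_str());
  ctx->sample_rate = sample_rate;
  av_channel_layout_default(&ctx->ch_layout, channels);

  if(avcodec_open2(ctx, codec, nullptr) < 0)
  {
    avcodec_free_context(&ctx);
    return nullptr;
  }
  return ctx;
}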
