diff --git a/Dockerfile b/Dockerfile
index 70a979f..6c393ba 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -40,7 +40,7 @@ COPY --from=x264-builder $INSTALL_DIR $INSTALL_DIR
# Build libav
FROM libav-base AS libav-builder
-RUN emconfigure ./configure \
+RUN make distclean; emconfigure ./configure \
--target-os=none \
--arch=x86_32 \
--enable-cross-compile \
@@ -54,6 +54,22 @@ RUN emconfigure ./configure \
--disable-pthreads \
--disable-w32threads \
--disable-os2threads \
+ --disable-swscale-alpha \
+ --disable-swresample \
+ --disable-swscale \
+ --disable-postproc \
+ --disable-avfilter \
+ --disable-avdevice \
+ --disable-network \
+ --disable-dct \
+ --disable-dwt \
+ --disable-lsp \
+ --disable-mdct \
+ --disable-rdft \
+ --disable-fft \
+ --disable-faan \
+ --disable-pixelutils \
+ --enable-small \
--extra-cflags="$CFLAGS" \
--extra-cxxflags="$CFLAGS" \
--nm="llvm-nm" \
@@ -64,7 +80,17 @@ RUN emconfigure ./configure \
--objcc=emcc \
--dep-cc=emcc \
--enable-gpl \
- --enable-libx264 \
+ --disable-encoders \
+ --disable-decoders \
+ --enable-decoder=h264 \
+ --disable-hwaccels \
+ --disable-muxers \
+ --disable-parsers \
+ --disable-bsfs \
+ --disable-indevs \
+ --disable-outdevs \
+ --disable-devices \
+ --disable-filters \
&& \
emmake make -j
@@ -78,28 +104,18 @@ RUN emcc \
-I$INSTALL_DIR/include \
-L$INSTALL_DIR/lib \
-Llibavcodec \
- -Llibavdevice \
- -Llibavfilter \
-Llibavformat \
-Llibavutil \
- -Llibpostproc \
- -Llibswresample \
- -Llibswscale \
-lavcodec \
- -lavdevice \
- -lavfilter \
-lavformat \
-lavutil \
- -lpostproc \
- -lswresample \
- -lswscale \
- -lx264 \
-Wno-deprecated-declarations \
$LDFLAGS \
-sMODULARIZE \
-sALLOW_MEMORY_GROWTH \
-sEXPORTED_FUNCTIONS=$(node src/bind/export.js) \
-sEXPORTED_RUNTIME_METHODS=$(node src/bind/export-runtime.js) \
+ -sENVIRONMENT="worker" \
--pre-js src/bind/bind.js \
-o dist/libav-core.js \
src/bind/**/*.c
diff --git a/apps/web/index.html b/apps/web/index.html
new file mode 100644
index 0000000..a7540e0
--- /dev/null
+++ b/apps/web/index.html
@@ -0,0 +1,28 @@
+
+
+
+
+
+
+ Document
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/apps/web/package.json b/apps/web/package.json
new file mode 100644
index 0000000..fe52488
--- /dev/null
+++ b/apps/web/package.json
@@ -0,0 +1,33 @@
+{
+ "name": "web",
+ "version": "0.0.0",
+ "description": "libav.wasm web demo apps",
+ "main": "dist/index.js",
+ "module": "dist/index.js",
+ "scripts": {
+ "dev": "concurrently --names \"Watch,Http\" -c \"bgBlue.bold,bgGreen.bold\" \"rollup -c rollup.config.mjs -w -m inline\" \"servez -p 8280\"",
+ "build": "rollup -c rollup.config.mjs",
+ "test": "echo \"Error: no test specified\" && exit 1"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "dependencies": {
+ "@ffmpeg/libav": "^0.0.1-alpha.0",
+ "@ffmpeg/libav-core": "^0.0.1-alpha.1",
+ "@ffmpeg/libav-core-mt": "^0.0.1-alpha.1",
+ "@types/ffmpeg__libav-core": "^0.0.0",
+ "yuv-buffer": "^1.0.0",
+ "yuv-canvas": "^1.2.11"
+ },
+ "devDependencies": {
+ "@rollup/plugin-commonjs": "^24.0.1",
+ "@rollup/plugin-node-resolve": "^15.0.1",
+ "concurrently": "^7.6.0",
+ "rollup": "^3.12.1",
+ "rollup-plugin-copy": "^3.4.0",
+ "rollup-plugin-esbuild": "^5.0.0",
+ "rollup-plugin-node-externals": "^5.1.2",
+ "servez": "^1.14.1"
+ }
+}
diff --git a/apps/web/rollup.config.mjs b/apps/web/rollup.config.mjs
new file mode 100644
index 0000000..ffcb9f2
--- /dev/null
+++ b/apps/web/rollup.config.mjs
@@ -0,0 +1,36 @@
+import { nodeResolve } from '@rollup/plugin-node-resolve';
+import commonjs from '@rollup/plugin-commonjs';
+import esbuild from 'rollup-plugin-esbuild';
+import externals from 'rollup-plugin-node-externals'
+import copy from 'rollup-plugin-copy'
+
+const plugins = [
+ nodeResolve(),
+ commonjs(),
+ esbuild({
+ target: 'es2017',
+ minify: false,
+ define: {
+ },
+ }),
+];
+
+export default [
+ {
+ input: 'src/index.js',
+ output: {
+ file: 'dist/index.js',
+ format: 'esm',
+ },
+ plugins,
+ external:['path','fs','ws']
+ },
+ {
+ input: 'src/index.js',
+ output: {
+ file: 'dist/index.iife.js',
+ format: 'iife',
+ },
+ plugins,
+ }
+];
diff --git a/apps/web/src/index.js b/apps/web/src/index.js
new file mode 100644
index 0000000..06dc974
--- /dev/null
+++ b/apps/web/src/index.js
@@ -0,0 +1,617 @@
+/**
+ * @ref: https://github.com/leandromoreira/ffmpeg-libav-tutorial/blob/46e8aba7bf1bc337d9b665f3541449d45e9d4202/3_transcoding.c
+ */
+
+// @ts-nocheck
+import createLibavCore from "@ffmpeg/libav-core";
+import { openMedia, initLibav } from "@ffmpeg/libav";
+import * as YUVCanvas from "yuv-canvas";
+import * as YUVBuffer from "yuv-buffer";
+
+// load libav module.
+async function init(wasmLocate) {
+ console.time("load-libav");
+ const libavCore = await createLibavCore({ locateFile: (path, scriptDirectory) => { return wasmLocate+'/'+path } });
+ console.timeEnd("load-libav");
+ console.log(libavCore)
+ return libavCore;
+}
+
+export const main = async (url,wasmLocate) => {
+ const libavCore = await init(wasmLocate);
+
+ const {
+ FS: { writeFile, readFile },
+ NULL,
+ ref,
+ deref,
+ stringToPtr,
+ AVPixelFormat,
+ AVIOContext,
+ AVCodec,
+ AVCodecContext,
+ AVDictionary,
+ AVERROR_EAGAIN,
+ AVERROR_EOF,
+ AVFMT_GLOBALHEADER,
+ AVFMT_NOFILE,
+ AVFormatContext,
+ AVFrame,
+ AVIO_FLAG_WRITE,
+ AVMEDIA_TYPE_AUDIO,
+ AVMEDIA_TYPE_VIDEO,
+ AVPacket,
+ AVRational,
+ AVStream,
+ AV_CODEC_FLAG_GLOBAL_HEADER,
+ AV_PICTURE_TYPE_NONE,
+ __av_guess_frame_rate,
+ __av_inv_q,
+ __av_packet_rescale_ts,
+ _av_dict_set,
+ _av_frame_alloc,
+ _av_frame_unref,
+ _av_interleaved_write_frame,
+ _av_opt_set,
+ _av_packet_alloc,
+ _av_packet_free,
+ _av_packet_unref,
+ _av_read_frame,
+ _av_seek_frame,
+ _av_write_trailer,
+ _avcodec_alloc_context3,
+ _avcodec_find_decoder,
+ _avcodec_find_encoder_by_name,
+ _avcodec_flush_buffers,
+ _avcodec_open2,
+ _avcodec_parameters_copy,
+ _avcodec_parameters_from_context,
+ _avcodec_parameters_to_context,
+ _avcodec_receive_frame,
+ _avcodec_receive_packet,
+ _avcodec_send_frame,
+ _avcodec_send_packet,
+ _avformat_alloc_output_context2,
+ _avformat_new_stream,
+ _avformat_write_header,
+ _avio_open,
+ _free,
+ } = libavCore;
+ initLibav(libavCore);
+
+
+
+class StreamingParams {
+ copy_audio = 0;
+ copy_video = 0;
+ output_extension = NULL;
+ muxer_opt_key = NULL;
+ muxer_opt_value = NULL;
+ video_codec = NULL;
+ audio_codec = NULL;
+ codec_priv_key = NULL;
+ codec_priv_value = NULL;
+}
+
+class StreamingContext {
+ avfc = new AVFormatContext(NULL);
+ video_avc = new AVCodec(NULL);
+ audio_avc = new AVCodec(NULL);
+ video_avs = new AVStream(NULL);
+ audio_avs = new AVStream(NULL);
+ video_avcc = new AVCodecContext(NULL);
+ audio_avcc = new AVCodecContext(NULL);
+ video_index = -1;
+ audio_index = -1;
+ filename = NULL;
+}
+
+const fill_stream_info = (avs, avc, avcc) => {
+ avc.ptr = _avcodec_find_decoder(avs.codecpar.codec_id);
+ console.log("find the codec",avs.codecpar.codec_id,avc.ptr);
+ if (!avc.ptr) {
+ console.log("failed to find the codec",avc);
+ return -1;
+ }
+
+ avcc.ptr = _avcodec_alloc_context3(avc.ptr);
+ if (!avcc.ptr) {
+ console.log("failed to alloc memory for codec context");
+ return -1;
+ }
+
+ if (_avcodec_parameters_to_context(avcc.ptr, avs.codecpar.ptr) < 0) {
+ console.log("failed to fill codec context");
+ return -1;
+ }
+
+ if (_avcodec_open2(avcc.ptr, avc.ptr, NULL) < 0) {
+ console.log("failed to open codec");
+ return -1;
+ }
+
+ return 0;
+};
+
+const prepare_decoder = (sc) => {
+ for (let i = 0; i < sc.avfc.nb_streams; i++) {
+ const codec_type = sc.avfc.nth_stream(i).codecpar.codec_type;
+ if (codec_type === AVMEDIA_TYPE_VIDEO) {
+ console.log('video codec')
+ sc.video_avs = sc.avfc.nth_stream(i);
+ sc.video_index = i;
+ if (fill_stream_info(sc.video_avs, sc.video_avc, sc.video_avcc))
+ return -1;
+ } else if (codec_type === AVMEDIA_TYPE_AUDIO) {
+ // console.log('audio codec')
+ // sc.audio_avs = sc.avfc.nth_stream(i);
+ // sc.audio_index = i;
+ // if (fill_stream_info(sc.audio_avs, sc.audio_avc, sc.audio_avcc))
+ // return -1;
+ } else {
+ console.log("skipping streams other than audio and video");
+ }
+ }
+
+ return 0;
+};
+
+
+const remux = (pkt, avfc, decoder_tb, encoder_tb) => {
+ __av_packet_rescale_ts(pkt.ptr, decoder_tb.ptr, encoder_tb.ptr);
+ if (_av_interleaved_write_frame(avfc.ptr, pkt.ptr) < 0) {
+ console.log("error while copying stream packet");
+ return -1;
+ }
+ return 0;
+};
+
+const encode_audio = (decoder, encoder, input_frame) => {
+ const output_packet = new AVPacket(_av_packet_alloc());
+ if (!output_packet) {
+ console.log("could not allocate memory for output packet");
+ return -1;
+ }
+
+ let response = _avcodec_send_frame(encoder.audio_avcc.ptr, input_frame.ptr);
+
+ while (response >= 0) {
+ response = _avcodec_receive_packet(
+ encoder.audio_avcc.ptr,
+ output_packet.ptr
+ );
+
+ if (response === AVERROR_EAGAIN || response === AVERROR_EOF) {
+ break;
+ } else if (response < 0) {
+ console.log("Error while receiving packet from encoder", response);
+ return response;
+ }
+
+ output_packet.stream_index = decoder.audio_index;
+
+ __av_packet_rescale_ts(
+ output_packet.ptr,
+ decoder.audio_avs.time_base.ptr,
+ encoder.audio_avs.time_base.ptr
+ );
+ response = _av_interleaved_write_frame(encoder.avfc.ptr, output_packet.ptr);
+ if (response != 0) {
+ console.log("Error while receiving packet from decoder", response);
+ return -1;
+ }
+ }
+ _av_packet_unref(output_packet.ptr);
+ _av_packet_free(ref(output_packet.ptr));
+ return 0;
+};
+
+const transcode_audio = (decoder, encoder, input_packet, input_frame) => {
+ let response = _avcodec_send_packet(decoder.audio_avcc.ptr, input_packet.ptr);
+ if (response < 0) {
+ console.log("Error while sending packet to decoder", response);
+ return response;
+ }
+
+ while (response >= 0) {
+ response = _avcodec_receive_frame(decoder.audio_avcc.ptr, input_frame.ptr);
+
+ if (response === AVERROR_EAGAIN || response === AVERROR_EOF) {
+ break;
+ } else if (response < 0) {
+ console.log("Error while receiving frame from decoder", response);
+ return response;
+ }
+
+ if (response >= 0) {
+ if (encode_audio(decoder, encoder, input_frame)) return -1;
+ }
+ _av_frame_unref(input_frame.ptr);
+ }
+ return 0;
+};
+ let cur_frame = -1;
+const transcode_video = async (decoder, encoder, input_packet, input_frame) => {
+ let response = _avcodec_send_packet(decoder.video_avcc.ptr, input_packet.ptr);
+ if (response < 0) {
+ console.log("Error while sending packet to decoder", response);
+ return response;
+ }
+
+ while (response >= 0) {
+ response = _avcodec_receive_frame(decoder.video_avcc.ptr, input_frame.ptr);
+ if (response === AVERROR_EAGAIN || response === AVERROR_EOF) {
+ break;
+ } else if (response < 0) {
+ console.log("Error while receiving frame from decoder", response);
+ return response;
+ }
+ const frame_data = _outputVideoFrame(input_frame.copyout_frame(),AVPixelFormat);
+ const raw = frame_data.data;
+ // console.log('input frame', input_frame.ptr, input_frame.width, input_frame.height)
+ await new Promise((resolve, reject) => {
+ setTimeout(resolve, 50);
+ })
+ draw(raw, input_frame.linesize[0], input_frame.height);
+ _av_frame_unref(input_frame.ptr);
+
+
+ cur_frame++;
+ if (cur_frame === video_information.nb_frames-1) {
+ cur_frame = -1;
+ // console.log(decoder.avfc.ptr, decoder.video_index, 0, 1);
+ _avcodec_flush_buffers(decoder.video_avcc.ptr);
+ console.log('seek', _av_seek_frame(decoder.avfc.ptr, -1, 0, 0));
+ }
+ }
+ return 0;
+};
+
+ const iFileName = "test"; //basename(iFilePath);
+ const response = await fetch(url);
+ const arraybuffer = await response.arrayBuffer();
+ const media = new Uint8Array(arraybuffer);
+
+ writeFile(iFileName, media);
+
+ const oFileName = "out"; //basename(oFilePath);
+
+ const sp = new StreamingParams();
+ sp.copy_audio = 1;
+ sp.copy_video = 0;
+ sp.video_codec = stringToPtr("libx264");
+ sp.codec_priv_key = stringToPtr("x264-params");
+ sp.codec_priv_value = stringToPtr(
+ "keyint=60:min-keyint=60:scenecut=0:force-cfr=1"
+ );
+
+ const decoder = new StreamingContext();
+ console.log(decoder)
+ decoder.filename = stringToPtr(iFileName);
+
+ const encoder = new StreamingContext();
+ encoder.filename = stringToPtr(oFileName);
+
+ decoder.avfc = openMedia(iFileName);
+ if (!decoder.avfc) return -1;
+ if (prepare_decoder(decoder)) return -1;
+
+ const video_information = {
+ duration: decoder.avfc.duration,
+ nb_frames: decoder.video_avs.nb_frames,
+ width: decoder.video_avcc.width,
+ height: decoder.video_avcc.height,
+ frame_rate: decoder.video_avs.avg_frame_rate.num / decoder.video_avs.avg_frame_rate.den,
+ }
+
+ const muxer_opts = new AVDictionary(NULL);
+
+ if (sp.muxer_opt_key && sp.muxer_opt_value) {
+ const ptr = ref(muxer_opts.ptr);
+ _av_dict_set(ptr, sp.muxer_opt_key, sp.muxer_opt_value, 0);
+ muxer_opts.ptr = deref(ptr);
+ _free(ptr);
+ }
+
+ let ptr = ref(muxer_opts.ptr);
+ muxer_opts.ptr = deref(ptr);
+ _free(ptr);
+
+ const input_frame = new AVFrame(_av_frame_alloc());
+ console.log('frame',input_frame.ptr)
+ if (!input_frame.ptr) {
+ console.log("failed to allocate memory for AVFrame");
+ return -1;
+ }
+ const input_packet = new AVPacket(_av_packet_alloc());
+ if (!input_packet.ptr) {
+ console.log("failed to allocate memory for AVPacket");
+ return -1;
+ }
+
+ console.log("start to transcode");
+ console.time("transcode");
+ while (_av_read_frame(decoder.avfc.ptr, input_packet.ptr) >= 0) {
+ if (
+ decoder.avfc.nth_stream(input_packet.stream_index).codecpar.codec_type ===
+ AVMEDIA_TYPE_VIDEO
+ ) {
+ if (!sp.copy_video) {
+ if (await transcode_video(decoder, encoder, input_packet, input_frame))
+ return -1;
+ _av_packet_unref(input_packet.ptr);
+ // return 0;
+ } else {
+ if (
+ remux(
+ input_packet,
+ encoder.avfc,
+ decoder.video_avs.time_base,
+ encoder.video_avs.time_base
+ )
+ )
+ return -1;
+ }
+ } else if (
+ decoder.avfc.nth_stream(input_packet.stream_index).codecpar.codec_type ===
+ AVMEDIA_TYPE_AUDIO
+ ) {
+ } else {
+ console.log("ignore all non video or audio packets");
+ }
+ }
+
+ // if (encode_video(decoder, encoder, NULL)) return -1;
+ console.timeEnd("transcode");
+
+
+ // TODO: free resources.
+
+ return 0;
+};
+
+function _outputVideoFrame(frame,AVPixelFormat) {
+
+ // 1. format
+ let format;
+ // console.log("format: " + frame.format)
+ switch (frame.format) {
+ case AVPixelFormat.AV_PIX_FMT_YUV420P:
+ format = "I420";
+ break;
+
+ case AVPixelFormat.AV_PIX_FMT_YUVA420P:
+ format = "I420A";
+ break;
+
+ case AVPixelFormat.AV_PIX_FMT_YUV422P:
+ format = "I422";
+ break;
+
+ case AVPixelFormat.AV_PIX_FMT_YUV444P:
+ format = "I444";
+ break;
+
+ case AVPixelFormat.AV_PIX_FMT_NV12:
+ format = "NV12";
+ break;
+
+ case AVPixelFormat.AV_PIX_FMT_RGBA:
+ format = "RGBA";
+ break;
+
+ case AVPixelFormat.AV_PIX_FMT_BGRA:
+ format = "BGRA";
+ break;
+
+ default:
+ throw new DOMException("Unsupported AVPixelFormat format!", "EncodingError")
+ }
+
+ // 2. width and height
+ const codedWidth = frame.width;
+ const codedHeight = frame.height;
+
+ // Check for non-square pixels
+ let displayWidth = codedWidth;
+ let displayHeight = codedHeight;
+ if (frame.sample_aspect_ratio[0]) {
+ const sar = frame.sample_aspect_ratio;
+ if (sar[0] > sar[1])
+ displayWidth = ~~(codedWidth * sar[0] / sar[1]);
+ else
+ displayHeight = ~~(codedHeight * sar[1] / sar[0]);
+ }
+
+ // 3. timestamp
+ const timestamp = (frame.ptshi * 0x100000000 + frame.pts) * 1000;
+
+ // 4. data
+ let raw;//Uint8Array;
+ {
+ let size = 0;
+ const planes = numPlanes(format);
+ const sbs = [];
+ const hssfs = [];
+ const vssfs = [];
+ for (let i = 0; i < planes; i++) {
+ sbs.push(sampleBytes(format, i));
+ hssfs.push(horizontalSubSamplingFactor(format, i));
+ vssfs.push(verticalSubSamplingFactor(format, i));
+ }
+ for (let i = 0; i < planes; i++) {
+ size += frame.width * frame.height * sbs[i] / hssfs[i]
+ / vssfs[i];
+ }
+ }
+
+ return { data: frame.data, format, codedWidth, codedHeight, displayWidth, displayHeight, timestamp };
+}
+
+function numPlanes(format) {
+ switch (format) {
+ case "I420":
+ case "I422":
+ case "I444":
+ return 3;
+
+ case "I420A":
+ return 4;
+
+ case "NV12":
+ return 2;
+
+ case "RGBA":
+ case "RGBX":
+ case "BGRA":
+ case "BGRX":
+ return 1;
+
+ default:
+ throw new DOMException("Unsupported video pixel format", "NotSupportedError");
+ }
+}
+
+function sampleBytes(format, planeIndex) {
+ switch (format) {
+ case "I420":
+ case "I420A":
+ case "I422":
+ case "I444":
+ return 1;
+
+ case "NV12":
+ if (planeIndex === 1)
+ return 2;
+ else
+ return 1;
+
+ case "RGBA":
+ case "RGBX":
+ case "BGRA":
+ case "BGRX":
+ return 4;
+
+ default:
+ throw new DOMException("Unsupported video pixel format", "NotSupportedError");
+ }
+}
+
+
+/**
+ * Horizontal sub-sampling factor for the given format and plane.
+ * @param format The format
+ * @param planeIndex The plane index
+ */
+function horizontalSubSamplingFactor(
+ format, planeIndex
+) {
+ // First plane (often luma) is always full
+ if (planeIndex === 0)
+ return 1;
+
+ switch (format) {
+ case "I420":
+ case "I422":
+ return 2;
+
+ case "I420A":
+ if (planeIndex === 3)
+ return 1;
+ else
+ return 2;
+
+ case "I444":
+ return 1;
+
+ case "NV12":
+ return 2;
+
+ case "RGBA":
+ case "RGBX":
+ case "BGRA":
+ case "BGRX":
+ return 1;
+
+ default:
+ throw new DOMException("Unsupported video pixel format", "NotSupportedError");
+ }
+}
+
+/**
+ * Vertical sub-sampling factor for the given format and plane.
+ * @param format The format
+ * @param planeIndex The plane index
+ */
+function verticalSubSamplingFactor(
+ format, planeIndex
+) {
+ // First plane (often luma) is always full
+ if (planeIndex === 0)
+ return 1;
+
+ switch (format) {
+ case "I420":
+ return 2;
+
+ case "I420A":
+ if (planeIndex === 3)
+ return 1;
+ else
+ return 2;
+
+ case "I422":
+ case "I444":
+ return 1;
+
+ case "NV12":
+ return 2;
+
+ case "RGBA":
+ case "RGBX":
+ case "BGRA":
+ case "BGRX":
+ return 1;
+
+ default:
+ throw new DOMException("Unsupported video pixel format", "NotSupportedError");
+ }
+}
+
+
+const canvas = document.getElementById('yuv');
+let yuvCanvas = YUVCanvas.attach(canvas),
+ format,
+ frame,
+ sourceData = {},
+ sourceFader = {
+ y: 1,
+ u: 1,
+ v: 1
+ };
+
+function draw(raw, width, height) {
+ format = YUVBuffer.format({
+ width: width,
+ height: height,
+ chromaWidth: width / 2,
+ chromaHeight: height / 2
+ });
+ frame = YUVBuffer.frame(format);
+ sourceData["y"] = raw[0];
+ sourceData["u"] = raw[1];
+ sourceData["v"] = raw[2];
+ frame.y = {
+ bytes: sourceData["y"],
+ stride:width
+ }
+ frame.u = {
+ bytes: sourceData["u"],
+ stride:width/2
+ }
+ frame.v = {
+ bytes: sourceData["v"],
+ stride:width/2
+ }
+ yuvCanvas.drawFrame(frame);
+}
diff --git a/src/bind/bind.js b/src/bind/bind.js
index 20e6b62..3f9438a 100644
--- a/src/bind/bind.js
+++ b/src/bind/bind.js
@@ -109,6 +109,10 @@ class AVStream extends Base {
return new AVRational(Module["__avstream_avg_frame_rate"](this.ptr));
}
+ get nb_frames() {
+ return Module["__avstream_nb_frames"](this.ptr);
+ }
+
set time_base(tb) {
Module["__avstream_set_time_base"](this.ptr, tb.ptr);
}
@@ -212,6 +216,37 @@ class AVFrame extends Base {
set pict_type(t) {
Module["__avframe_pict_type"](this.ptr, t);
}
+
+ get width() {
+ return Module["__avframe_width"](this.ptr);
+ }
+
+ get height() {
+ return Module["__avframe_height"](this.ptr);
+ }
+
+ get format() {
+ return Module["__avframe_format"](this.ptr);
+ }
+
+ get nb_samples() {
+ return Module.__avframe_nb_samples(this.ptr);
+ }
+
+ get linesize() {
+ let ret = [];
+ for (var i = 0; i < 8 /* AV_NUM_DATA_POINTERS */; i++) {
+ var linesize = Module.__avframe_linesize(this.ptr, i);
+ ret.push(linesize);
+ if (!linesize)
+ break;
+ }
+ return ret;
+ }
+
+ copyout_frame() {
+ return _copyout_frame(this.ptr);
+ }
}
class AVPacket extends Base {
@@ -284,3 +319,157 @@ Module["onRuntimeInitialized"] = function () {
Module["FF_COMPLIANCE_EXPERIMENTAL"] =
Module["__ff_compliance_experimental"]();
};
+
+var copyout_u8 = Module.copyout_u8 = function (ptr, len) {
+ return (new Uint8Array(Module.HEAPU8.buffer, ptr, len)).slice(0);
+};
+
+var copyout_s16 = Module.copyout_s16 = function (ptr, len) {
+ return (new Int16Array(Module.HEAPU8.buffer, ptr, len)).slice(0);
+};
+
+var copyout_s32 = Module.copyout_s32 = function (ptr, len) {
+ return (new Int32Array(Module.HEAPU8.buffer, ptr, len)).slice(0);
+};
+
+var copyout_f32 = Module.copyout_f32 = function (ptr, len) {
+ return (new Float32Array(Module.HEAPU8.buffer, ptr, len)).slice(0);
+};
+
+var _copyout_frame = Module._copyout_frame = function(frame) {
+ var nb_samples = Module.__avframe_nb_samples(frame);
+ if (nb_samples === 0) {
+ // Maybe a video frame?
+ var width = Module.__avframe_width(frame);
+ if (width)
+ return _copyout_frame_video(frame, width);
+ }
+ var channels = Module.__avframe_channels(frame);
+ var format = Module.__avframe_format(frame);
+ var outFrame = {
+ data: null,
+ channel_layout: Module.__avframe_channel_layout(frame),
+ channels: channels,
+ format: format,
+ nb_samples: nb_samples,
+ pts: Module.__avframe_pts(frame),
+ ptshi: Module.__avframe_pts_high(frame),
+ sample_rate: Module.__avframe_sample_rate(frame)
+ };
+
+ // FIXME: Need to support *every* format here
+ if (format >= 5 /* U8P */) {
+ // Planar format, multiple data pointers
+ var data = [];
+ for (var ci = 0; ci < channels; ci++) {
+ var inData = Module.__avframe_data(frame, ci);
+ switch (format) {
+ case 5: // U8P
+ data.push(copyout_u8(inData, nb_samples));
+ break;
+
+ case 6: // S16P
+ data.push(copyout_s16(inData, nb_samples));
+ break;
+
+ case 7: // S32P
+ data.push(copyout_s32(inData, nb_samples));
+ break;
+
+ case 8: // FLT
+ data.push(copyout_f32(inData, nb_samples));
+ break;
+ }
+ }
+ outFrame.data = data;
+
+ } else {
+ var ct = channels*nb_samples;
+ var inData = Module.__avframe_data(frame, 0);
+ switch (format) {
+ case 0: // U8
+ outFrame.data = copyout_u8(inData, ct);
+ break;
+
+ case 1: // S16
+ outFrame.data = copyout_s16(inData, ct);
+ break;
+
+ case 2: // S32
+ outFrame.data = copyout_s32(inData, ct);
+ break;
+
+ case 3: // FLT
+ outFrame.data = copyout_f32(inData, ct);
+ break;
+ }
+
+ }
+
+ return outFrame;
+};
+
+var _copyout_frame_video = Module._copyout_frame_video = function(frame, width) {
+ var data = [];
+ var height = Module.__avframe_height(frame);
+ var format = Module.__avframe_format(frame);
+ var desc = Module._av_pix_fmt_desc_get(format);
+ var outFrame = {
+ data: data,
+ width: width,
+ height: height,
+ format: Module.__avframe_format(frame),
+ key_frame: Module.__avframe_key_frame(frame),
+ pict_type: Module.__avframe_pict_type(frame),
+ pts: Module.__avframe_pts(frame),
+ ptshi: Module.__avframe_pts_high(frame),
+ sample_aspect_ratio: [
+ Module.__avframe_sample_aspect_ratio_num(frame),
+ Module.__avframe_sample_aspect_ratio_den(frame)
+ ]
+ };
+
+ for (var i = 0; i < 8 /* AV_NUM_DATA_POINTERS */; i++) {
+ var linesize = Module.__avframe_linesize(frame, i);
+ if (!linesize)
+ break;
+ var inData = Module.__avframe_data(frame, i);
+ var h = height;
+ if (i === 1 || i === 2)
+ h >>= Module.__avpixfmtdescriptor_log2_chroma_h(desc);
+ data.push(copyout_u8(inData, linesize*h));
+ }
+
+ return outFrame;
+};
+
+function enume(vals, first) {
+ let ret = {};
+ var i = first;
+ vals.forEach(function(val) {
+ ret[val] = i++;
+ });
+ return ret;
+}
+
+// AVPixelFormat
+Module['AVPixelFormat'] = enume(["AV_PIX_FMT_NONE", "AV_PIX_FMT_YUV420P",
+"AV_PIX_FMT_YUYV422", "AV_PIX_FMT_RGB24", "AV_PIX_FMT_BGR24",
+"AV_PIX_FMT_YUV422P", "AV_PIX_FMT_YUV444P",
+"AV_PIX_FMT_YUV410P", "AV_PIX_FMT_YUV411P", "AV_PIX_FMT_GRAY8",
+"AV_PIX_FMT_MONOWHITE", "AV_PIX_FMT_MONOBLACK",
+"AV_PIX_FMT_PAL8", "AV_PIX_FMT_YUVJ420P",
+"AV_PIX_FMT_YUVJ422P", "AV_PIX_FMT_YUVJ444P",
+"AV_PIX_FMT_UYVY422", "AV_PIX_FMT_UYYVYY411",
+"AV_PIX_FMT_BGR8", "AV_PIX_FMT_BGR4", "AV_PIX_FMT_BGR4_BYTE",
+"AV_PIX_FMT_RGB8", "AV_PIX_FMT_RGB4", "AV_PIX_FMT_RGB4_BYTE",
+"AV_PIX_FMT_NV12", "AV_PIX_FMT_NV21", "AV_PIX_FMT_ARGB",
+"AV_PIX_FMT_RGBA", "AV_PIX_FMT_ABGR", "AV_PIX_FMT_BGRA",
+"AV_PIX_FMT_GRAY16BE", "AV_PIX_FMT_GRAY16LE",
+"AV_PIX_FMT_YUV440P", "AV_PIX_FMT_YUVJ440P",
+"AV_PIX_FMT_YUVA420P", "AV_PIX_FMT_RGB48BE",
+"AV_PIX_FMT_RGB48LE", "AV_PIX_FMT_RGB565BE",
+"AV_PIX_FMT_RGB565LE", "AV_PIX_FMT_RGB555BE",
+"AV_PIX_FMT_RGB555LE", "AV_PIX_FMT_BGR565BE",
+"AV_PIX_FMT_BGR565LE", "AV_PIX_FMT_BGR555BE",
+"AV_PIX_FMT_BGR555LE"], -1);
diff --git a/src/bind/export.js b/src/bind/export.js
index cb58a98..0ca8d78 100644
--- a/src/bind/export.js
+++ b/src/bind/export.js
@@ -9,10 +9,12 @@ const EXPORTED_FUNCTIONS = [
"_av_packet_free",
"_av_packet_unref",
"_av_read_frame",
+ "_av_seek_frame",
"_av_write_trailer",
"_avcodec_alloc_context3",
"_avcodec_find_decoder",
"_avcodec_find_encoder_by_name",
+ "_avcodec_flush_buffers",
"_avcodec_open2",
"_avcodec_parameters_copy",
"_avcodec_parameters_from_context",
@@ -31,6 +33,7 @@ const EXPORTED_FUNCTIONS = [
"_avio_open",
"_malloc",
"_free",
+ "_av_pix_fmt_desc_get",
];
console.log(EXPORTED_FUNCTIONS.join(","));
diff --git a/src/bind/libavformat/avformat.c b/src/bind/libavformat/avformat.c
index 96b1fa9..f1346ff 100644
--- a/src/bind/libavformat/avformat.c
+++ b/src/bind/libavformat/avformat.c
@@ -103,6 +103,12 @@ void _avstream_set_time_base(AVStream *stream, AVRational *time_base) {
stream->time_base = *time_base;
}
+
+EMSCRIPTEN_KEEPALIVE
+int _avstream_nb_frames(AVStream *stream) {
+ return stream->nb_frames;
+}
+
/**
* Functions
*/
diff --git a/src/bind/libavutil/frame.c b/src/bind/libavutil/frame.c
index b876154..f8bcc1d 100644
--- a/src/bind/libavutil/frame.c
+++ b/src/bind/libavutil/frame.c
@@ -1,11 +1,51 @@
#include <emscripten.h>
#include <libavutil/frame.h>
+#define A(prefix, struc, type, field) \
+ EMSCRIPTEN_KEEPALIVE \
+ type prefix ## _ ## field(struc *a) { return a->field; }
+
+#define AL(prefix, struc, type, field) \
+ EMSCRIPTEN_KEEPALIVE \
+ uint32_t prefix ## _ ## field(struc *a) { return (uint32_t) a->field; } \
+ EMSCRIPTEN_KEEPALIVE \
+ uint32_t prefix ## _ ## field ## _ ## high(struc *a) { return (uint32_t) (a->field >> 32); }
+
+#define AA(prefix, struc, type, field) \
+ EMSCRIPTEN_KEEPALIVE \
+ type prefix ## _ ## field(struc *a, size_t c) { return a->field[c]; }
/**
* struct AVFrame
*/
+#define B(type, field) A(_avframe, AVFrame, type, field)
+#define BL(type, field) AL(_avframe, AVFrame, type, field)
+#define BA(type, field) AA(_avframe, AVFrame, type, field)
+BL(uint64_t, channel_layout)
+B(int, channels)
+BA(uint8_t *, data)
+B(int, format)
+B(int, height)
+B(int, key_frame)
+BA(int, linesize)
+B(int, nb_samples)
+// B(int, pict_type)
+BL(int64_t, pts)
+B(int, sample_rate)
+B(int, width)
+
EMSCRIPTEN_KEEPALIVE
void _avframe_pict_type(AVFrame *f, enum AVPictureType pict_type) {
f->pict_type = pict_type;
}
+
+EMSCRIPTEN_KEEPALIVE
+int _avframe_sample_aspect_ratio_num(AVFrame *a) {
+ return a->sample_aspect_ratio.num;
+}
+
+EMSCRIPTEN_KEEPALIVE
+int _avframe_sample_aspect_ratio_den(AVFrame *a) {
+ return a->sample_aspect_ratio.den;
+}
+
diff --git a/src/bind/libavutil/pixdesc.c b/src/bind/libavutil/pixdesc.c
new file mode 100644
index 0000000..7b6ef0d
--- /dev/null
+++ b/src/bind/libavutil/pixdesc.c
@@ -0,0 +1,6 @@
+#include <emscripten.h>
+#include <libavutil/pixdesc.h>
+
+/* AVPixFmtDescriptor */
+EMSCRIPTEN_KEEPALIVE
+uint8_t _avpixfmtdescriptor_log2_chroma_h(AVPixFmtDescriptor *a) { return a->log2_chroma_h; }