Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions cpp/uvc/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,3 +33,10 @@ Running the example in standalone mode builds and deploys it as an OAK app so th
```

`oakctl` uses the provided `oakapp.toml` to build the C++ project inside the Luxonis base container and deploy it to the device. Configuration tweaks such as changing the camera resolution or registering more topics should be done in `src/uvc_example.cpp`, then re-run `oakctl app run ./cpp/uvc`.

### Video format selection

The example supports two UVC stream formats, controlled by the `UVC_FORMAT` environment variable:

- `nv12` / `uncompressed` (default): Uses DepthAI `NV12` output and exposes UVC uncompressed NV12 format.
- `mjpeg`: Uses `VideoEncoder` and exposes UVC MJPEG format.
4 changes: 2 additions & 2 deletions cpp/uvc/oakapp.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@

# Application metadata
identifier = "com.example.streaming.uvc"
app_version = "3.0.0"
app_version = "3.1.0"

# Command to run when the container starts
entrypoint = ["bash", "-c", "/app/uvc-start.sh start"]
entrypoint = ["bash", "-c", "export UVC_FORMAT=nv12 && /app/uvc-start.sh start"]

# Here is the place where you can install all the dependencies that are needed at run-time
prepare_container = [
Expand Down
144 changes: 94 additions & 50 deletions cpp/uvc/src/uvc_example.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,15 +6,19 @@
*/

#include <atomic>
#include <algorithm>
#include <cstdlib>
#include <csignal>
#include <cctype>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#include "depthai/depthai.hpp"
#include "depthai/pipeline/MessageQueue.hpp"
#include "depthai/pipeline/datatype/Buffer.hpp"
#include "depthai/pipeline/datatype/ImgFrame.hpp"
#include "depthai/pipeline/datatype/MessageGroup.hpp"
#include "uvc_example.hpp"

extern "C" {
Expand All @@ -37,6 +41,28 @@ std::atomic<bool> quitEvent(false);
std::shared_ptr<dai::InputQueue> inputQueue{nullptr};
std::shared_ptr<dai::MessageQueue> outputQueue;

// UVC stream formats this example can expose over the gadget.
enum class StreamFormat {
    MJPEG,
    UNCOMPRESSED,
};

// Selected format for the whole process; set once in main() before streaming starts.
static StreamFormat gStreamFormat = StreamFormat::UNCOMPRESSED;
// Scratch buffer used to repack strided NV12 frames into a compact layout.
static std::vector<uint8_t> gNv12Buffer;

// Reads the UVC_FORMAT environment variable (case-insensitive) and maps it to
// a StreamFormat:
//   unset                  -> UNCOMPRESSED (default)
//   "mjpeg"                -> MJPEG
//   "uncompressed"/"nv12"  -> UNCOMPRESSED
//   anything else          -> warn on stderr, fall back to UNCOMPRESSED
static StreamFormat parseStreamFormat() {
    const char* raw = std::getenv("UVC_FORMAT");
    if(raw == nullptr) {
        return StreamFormat::UNCOMPRESSED;
    }

    std::string value(raw);
    for(auto& ch : value) {
        // Cast through unsigned char: std::tolower on negative char is UB.
        ch = static_cast<char>(std::tolower(static_cast<unsigned char>(ch)));
    }

    if(value == "mjpeg") {
        return StreamFormat::MJPEG;
    }
    if(value == "nv12" || value == "uncompressed") {
        return StreamFormat::UNCOMPRESSED;
    }

    std::cerr << "Unknown UVC_FORMAT=\"" << value << "\", defaulting to uncompressed NV12." << std::endl;
    return StreamFormat::UNCOMPRESSED;
}

/* Necessary for and only used by signal handler. */
static struct events *sigint_events;

Expand All @@ -48,56 +74,65 @@ void signalHandler(int signum) {
events_stop(sigint_events);
}

// Host-side custom node that appends every encoded frame it receives to a
// local file ("video.encoded"). It is a pure sink: nothing is forwarded.
class VideoSaver : public dai::node::CustomNode<VideoSaver> {
   public:
    /// Opens "video.encoded" for binary writing.
    /// @throws std::runtime_error if the file cannot be created.
    VideoSaver() : outFile("video.encoded", std::ios::binary) {
        if(!outFile.is_open()) {
            throw std::runtime_error("Could not open video.encoded for writing");
        }
    }

    /// Flushes and closes the output file (std::ofstream would also close
    /// itself on destruction; the explicit close keeps intent obvious).
    ~VideoSaver() {
        if(outFile.is_open()) {
            outFile.close();
        }
    }

    /// Writes the raw bytes of the group's "data" EncodedFrame to disk.
    /// @return always nullptr — no message is sent downstream.
    std::shared_ptr<dai::Buffer> processGroup(std::shared_ptr<dai::MessageGroup> message) override {
        if(!outFile.is_open()) return nullptr;

        const auto encodedFrame = message->get<dai::EncodedFrame>("data");
        const auto& payload = encodedFrame->getData();
        std::cout << "Storing frame of size: " << payload.size() << std::endl;
        outFile.write(reinterpret_cast<const char*>(payload.data()), payload.size());

        return nullptr;
    }

   private:
    std::ofstream outFile;  // destination for the encoded bitstream
};

extern "C" void depthai_uvc_get_buffer(struct video_source *s, struct video_buffer *buf) {
unsigned int frame_size, size;
uint8_t *f;
const uint8_t *f;

if(quitEvent) {
std::cout << "depthai_uvc_get_buffer(): Stopping capture due to quit event." << std::endl;
return;
}

auto frame = outputQueue->get<dai::ImgFrame>();
if(frame == nullptr) {
std::cerr << "depthai_uvc_get_buffer(): No frame available." << std::endl;
return;
}
if(gStreamFormat == StreamFormat::MJPEG) {
auto frame = outputQueue->get<dai::Buffer>();
if(frame == nullptr || frame->getData().empty()) {
std::cerr << "depthai_uvc_get_buffer(): No MJPEG frame available." << std::endl;
return;
}
f = frame->getData().data();
frame_size = frame->getData().size();
} else {
auto frame = outputQueue->get<dai::ImgFrame>();
if(frame == nullptr) {
std::cerr << "depthai_uvc_get_buffer(): No uncompressed frame available." << std::endl;
return;
}
if(frame->getType() != dai::ImgFrame::Type::NV12) {
std::cerr << "depthai_uvc_get_buffer(): Unexpected frame type for uncompressed mode: " << static_cast<int>(frame->getType()) << std::endl;
return;
}

f = frame->getData().data();
frame_size = frame->getData().size();
const auto width = frame->getWidth();
const auto height = frame->getHeight();
const auto stride = frame->getStride();
const auto uvPlaneOffset = frame->getPlaneStride(0);
const auto compactNv12FrameSize = (width * height * 3) / 2;
const auto expectedSrcBytes = uvPlaneOffset + (stride * (height / 2));
const auto& data = frame->getData();

if(data.size() < expectedSrcBytes) {
std::cerr << "depthai_uvc_get_buffer(): NV12 frame smaller than expected: have "
<< data.size() << " need " << expectedSrcBytes << std::endl;
return;
}

gNv12Buffer.resize(compactNv12FrameSize);
const auto* src = data.data();
auto* dst = gNv12Buffer.data();

for(uint32_t y = 0; y < height; ++y) {
memcpy(dst + (y * width), src + (y * stride), width);
}

const auto* uvSrc = src + uvPlaneOffset;
auto* uvDst = dst + (width * height);
for(uint32_t y = 0; y < height / 2; ++y) {
memcpy(uvDst + (y * width), uvSrc + (y * stride), width);
}

f = gNv12Buffer.data();
frame_size = static_cast<unsigned int>(gNv12Buffer.size());
}

size = std::min(frame_size, buf->size);
memcpy(buf->mem, f, size);
Expand Down Expand Up @@ -125,6 +160,8 @@ int main() {
struct video_source* src;
struct uvc_stream* stream;

gStreamFormat = parseStreamFormat();

depthai_uvc_register_get_buffer(depthai_uvc_get_buffer);

fc = configfs_parse_uvc_function("uvc.0");
Expand Down Expand Up @@ -176,13 +213,20 @@ int main() {
// Create nodes
auto camRgb = pipeline.create<dai::node::Camera>()->build(socket);
inputQueue = camRgb->inputControl.createInputQueue();
auto output = camRgb->requestOutput(std::make_pair(1920, 1080), dai::ImgFrame::Type::NV12);

// Create video encoder node
auto encoded = pipeline.create<dai::node::VideoEncoder>();
encoded->setDefaultProfilePreset(30, dai::VideoEncoderProperties::Profile::MJPEG);
output->link(encoded->input);
outputQueue = encoded->bitstream.createOutputQueue(1, false);
constexpr uint32_t width = 1920;
constexpr uint32_t height = 1080;
auto output = camRgb->requestOutput(std::make_pair(width, height), dai::ImgFrame::Type::NV12);

if(gStreamFormat == StreamFormat::MJPEG) {
auto encoded = pipeline.create<dai::node::VideoEncoder>();
encoded->setDefaultProfilePreset(30, dai::VideoEncoderProperties::Profile::MJPEG);
output->link(encoded->input);
outputQueue = encoded->bitstream.createOutputQueue(1, false);
std::cout << "Configured UVC stream format: MJPEG" << std::endl;
} else {
outputQueue = output->createOutputQueue(1, false);
std::cout << "Configured UVC stream format: uncompressed NV12" << std::endl;
}

// Start pipeline
pipeline.start();
Expand Down
2 changes: 1 addition & 1 deletion cpp/uvc/uvc-gadget
36 changes: 28 additions & 8 deletions cpp/uvc/uvc-start.sh
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,14 @@ fi
MANUF="Luxonis"
PRODUCT="Luxonis UVC Camera"
UDC=$(ls /sys/class/udc | head -n1) # will identify the 'first' UDC
: "${UVC_FORMAT:=uncompressed}"
UVC_FORMAT=$(echo "$UVC_FORMAT" | tr '[:upper:]' '[:lower:]')

log "=== Detecting platform:"
log " product : $PRODUCT"
log " udc : $UDC"
log " serial : $SERIAL"
log " format : $UVC_FORMAT"

remove_uvc_gadget() {
if [ ! -d /sys/kernel/config/usb_gadget/g1/functions/uvc.0 ]; then
Expand Down Expand Up @@ -101,6 +104,20 @@ create_frame() {
EOF
}

# Patch the configfs UVC "uncompressed" format descriptor so the gadget
# advertises NV12.
#   $1 (FUNCTION): gadget function name, e.g. "uvc.0"
#   $2 (NAME):     uncompressed format directory name, e.g. "u"
# NOTE(review): assumes create_frame already created the 1080p frame directory
# under this format — confirm call ordering in create_uvc.
configure_uncompressed_nv12_descriptor() {
FUNCTION=$1
NAME=$2

# configfs paths: FRAME_DIR holds per-resolution attrs, FORMAT_DIR per-format attrs.
FRAME_DIR="functions/$FUNCTION/streaming/uncompressed/$NAME/1080p"
FORMAT_DIR="functions/$FUNCTION/streaming/uncompressed/$NAME"

# NV12 is 12bpp (4:2:0), frame size is width * height * 3 / 2.
echo 12 > "$FORMAT_DIR/bBitsPerPixel"
echo $(( 1920 * 1080 * 3 / 2 )) > "$FRAME_DIR/dwMaxVideoFrameBufferSize"
# UVC GUID for NV12: 4e 56 31 32 00 00 10 00 80 00 00 aa 00 38 9b 71
# (echo -ne writes the GUID as raw bytes; requires bash, not POSIX sh.)
echo -ne '\x4e\x56\x31\x32\x00\x00\x10\x00\x80\x00\x00\xaa\x00\x38\x9b\x71' > "$FORMAT_DIR/guidFormat"
}

create_uvc() {
# Example usage:
# create_uvc <target config> <function name>
Expand All @@ -113,17 +130,20 @@ create_uvc() {
pushd "$GADGET/g1" >/dev/null
mkdir "functions/$FUNCTION"

# create_frame "$FUNCTION" 640 360 uncompressed u
# create_frame "$FUNCTION" 1280 720 uncompressed u
# create_frame "$FUNCTION" 320 180 uncompressed u
create_frame "$FUNCTION" 1920 1080 mjpeg m
# create_frame "$FUNCTION" 640 480 mjpeg m
# create_frame "$FUNCTION" 640 360 mjpeg m
if [ "$UVC_FORMAT" = "mjpeg" ]; then
create_frame "$FUNCTION" 1920 1080 mjpeg m
else
create_frame "$FUNCTION" 1920 1080 uncompressed u
configure_uncompressed_nv12_descriptor "$FUNCTION" "u"
fi

mkdir "functions/$FUNCTION/streaming/header/h"
cd "functions/$FUNCTION/streaming/header/h"
# ln -s ../../uncompressed/u
ln -s ../../mjpeg/m
if [ "$UVC_FORMAT" = "mjpeg" ]; then
ln -s ../../mjpeg/m
else
ln -s ../../uncompressed/u
fi
cd ../../class/fs
ln -s ../../header/h
cd ../../class/hs
Expand Down