21 changes: 21 additions & 0 deletions src/libmongoc/src/mongoc/mongoc-gridfs-bucket.c
@@ -182,6 +182,7 @@ mongoc_gridfs_bucket_open_upload_stream_with_id(mongoc_gridfs_bucket_t *bucket,
file->bucket = bucket;
file->chunk_size = gridfs_opts.chunkSizeBytes;
file->metadata = bson_copy(&gridfs_opts.metadata);
BSON_ASSERT(gridfs_opts.chunkSizeBytes > 0); // Validated in _mongoc_gridfs_bucket_opts_parse.
file->buffer = bson_malloc((size_t)gridfs_opts.chunkSizeBytes);
file->in_buffer = 0;

@@ -347,6 +348,26 @@ mongoc_gridfs_bucket_open_download_stream(mongoc_gridfs_bucket_t *bucket,

bson_destroy(&file_doc);

if (file->chunk_size <= 0) {
_mongoc_set_error(error,
MONGOC_ERROR_GRIDFS,
MONGOC_ERROR_GRIDFS_CORRUPT,
"File document contains invalid chunk size: %" PRId32,
file->chunk_size);
_mongoc_gridfs_bucket_file_destroy(file);
return NULL;
}

if (file->length < 0) {
_mongoc_set_error(error,
MONGOC_ERROR_GRIDFS,
MONGOC_ERROR_GRIDFS_CORRUPT,
"File document contains invalid length: %" PRId64,
file->length);
_mongoc_gridfs_bucket_file_destroy(file);
return NULL;
}

file->file_id = (bson_value_t *)bson_malloc0(sizeof *(file->file_id));
bson_value_copy(file_id, file->file_id);
file->bucket = bucket;
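
Note on the source change above: the new checks surface to callers as a MONGOC_ERROR_GRIDFS / MONGOC_ERROR_GRIDFS_CORRUPT error when opening a download stream for a corrupt files document. The following is a minimal caller-side sketch, not part of this diff; the database name and file id are placeholders, and it assumes only the public API already used in the tests below.

#include <mongoc/mongoc.h>
#include <stdio.h>

// Sketch: how the new validation surfaces to a caller of the download API.
// "example_db" and file_id are placeholders for illustration.
static void
report_download_error (mongoc_client_t *client, const bson_value_t *file_id)
{
   bson_error_t error;
   mongoc_database_t *db = mongoc_client_get_database (client, "example_db");
   mongoc_gridfs_bucket_t *bucket = mongoc_gridfs_bucket_new (db, NULL /* opts */, NULL /* read prefs */, &error);
   if (!bucket) {
      fprintf (stderr, "bucket error: %s\n", error.message);
      mongoc_database_destroy (db);
      return;
   }

   mongoc_stream_t *stream = mongoc_gridfs_bucket_open_download_stream (bucket, file_id, &error);
   if (!stream) {
      // With this change, a files document whose chunkSize is <= 0 or whose
      // length is < 0 fails here with MONGOC_ERROR_GRIDFS /
      // MONGOC_ERROR_GRIDFS_CORRUPT instead of allocating from an invalid size.
      fprintf (stderr, "download error (domain=%u, code=%u): %s\n", error.domain, error.code, error.message);
   } else {
      mongoc_stream_destroy (stream);
   }

   mongoc_gridfs_bucket_destroy (bucket);
   mongoc_database_destroy (db);
}
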
175 changes: 175 additions & 0 deletions src/libmongoc/tests/test-mongoc-gridfs-bucket.c
@@ -1048,6 +1048,180 @@ test_gridfs_bucket_opts(void)
mongoc_client_destroy(client);
}

// Regression test for CDRIVER-6125
static void
test_bad_sizes(void)
{
mongoc_client_t *client = test_framework_new_default_client();
bson_error_t error;

mongoc_database_t *db = mongoc_client_get_database(client, "test_bad_sizes");
mongoc_database_drop(db, NULL);

// Test negative chunkSize:
{
bson_t *doc = tmp_bson(BSON_STR({
"_id" : 0,
"filename" : "foo.txt",
"length" : 1000,
"chunkSize" : -1, // Negative!
"uploadDate" : {"$date" : 1234567890000}
}));

bson_iter_t id_iter;
ASSERT(bson_iter_init_find(&id_iter, doc, "_id"));
const bson_value_t *id_value = bson_iter_value(&id_iter);

// Insert manually:
{
mongoc_collection_t *files = mongoc_database_get_collection(db, "fs.files");
ASSERT_OR_PRINT(mongoc_collection_insert_one(files, doc, NULL, NULL, &error), error);
mongoc_collection_destroy(files);
}

// Try to read:
{
mongoc_gridfs_bucket_t *bucket = mongoc_gridfs_bucket_new(db, NULL, NULL, &error);
ASSERT_OR_PRINT(bucket, error);
mongoc_stream_t *stream = mongoc_gridfs_bucket_open_download_stream(bucket, id_value, &error);
ASSERT(!stream);
ASSERT_ERROR_CONTAINS(error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CORRUPT, "invalid chunk size");
mongoc_gridfs_bucket_destroy(bucket);
}

// Try to write:
{
mongoc_gridfs_bucket_t *bucket = mongoc_gridfs_bucket_new(db, NULL, NULL, &error);
ASSERT_OR_PRINT(bucket, error);
bson_t *opts = tmp_bson(BSON_STR({"chunkSizeBytes" : 0}));
mongoc_stream_t *stream = mongoc_gridfs_bucket_open_upload_stream(bucket, "foo.txt", opts, NULL, &error);
ASSERT(!stream);
ASSERT_ERROR_CONTAINS(
error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "should be greater than 0");
mongoc_gridfs_bucket_destroy(bucket);
}
}

mongoc_database_drop(db, NULL);

// Test zero chunkSize:
{
bson_t *doc = tmp_bson(BSON_STR({
"_id" : 0,
"filename" : "foo.txt",
"length" : 1000,
"chunkSize" : 0, // Zero!
"uploadDate" : {"$date" : 1234567890000}
}));

bson_iter_t id_iter;
ASSERT(bson_iter_init_find(&id_iter, doc, "_id"));
const bson_value_t *id_value = bson_iter_value(&id_iter);

// Insert manually:
{
mongoc_collection_t *files = mongoc_database_get_collection(db, "fs.files");
ASSERT_OR_PRINT(mongoc_collection_insert_one(files, doc, NULL, NULL, &error), error);
mongoc_collection_destroy(files);
}

// Try to read:
{
mongoc_gridfs_bucket_t *bucket = mongoc_gridfs_bucket_new(db, NULL, NULL, &error);
ASSERT_OR_PRINT(bucket, error);
mongoc_stream_t *stream = mongoc_gridfs_bucket_open_download_stream(bucket, id_value, &error);
ASSERT(!stream);
ASSERT_ERROR_CONTAINS(error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CORRUPT, "invalid chunk size");
mongoc_gridfs_bucket_destroy(bucket);
}

// Try to write:
{
mongoc_gridfs_bucket_t *bucket = mongoc_gridfs_bucket_new(db, NULL, NULL, &error);
ASSERT_OR_PRINT(bucket, error);
bson_t *opts = tmp_bson(BSON_STR({"chunkSizeBytes" : -1}));
mongoc_stream_t *stream = mongoc_gridfs_bucket_open_upload_stream(bucket, "foo.txt", opts, NULL, &error);
ASSERT(!stream);
ASSERT_ERROR_CONTAINS(
error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "should be greater than 0");
mongoc_gridfs_bucket_destroy(bucket);
}
}

mongoc_database_drop(db, NULL);

// Test negative length:
{
bson_t *doc = tmp_bson(BSON_STR({
"_id" : 0,
"filename" : "foo.txt",
"length" : -1, // Negative!
"chunkSize" : 10,
"uploadDate" : {"$date" : 1234567890000}
}));

bson_iter_t id_iter;
ASSERT(bson_iter_init_find(&id_iter, doc, "_id"));
const bson_value_t *id_value = bson_iter_value(&id_iter);

// Insert manually:
{
mongoc_collection_t *files = mongoc_database_get_collection(db, "fs.files");
ASSERT_OR_PRINT(mongoc_collection_insert_one(files, doc, NULL, NULL, &error), error);
mongoc_collection_destroy(files);
}

// Try to read:
{
mongoc_gridfs_bucket_t *bucket = mongoc_gridfs_bucket_new(db, NULL, NULL, &error);
ASSERT_OR_PRINT(bucket, error);
mongoc_stream_t *stream = mongoc_gridfs_bucket_open_download_stream(bucket, id_value, &error);
ASSERT(!stream);
ASSERT_ERROR_CONTAINS(error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CORRUPT, "invalid length");
mongoc_gridfs_bucket_destroy(bucket);
}
}

mongoc_database_drop(db, NULL);

// Test a zero length (OK):
{
bson_t *doc = tmp_bson(BSON_STR({
"_id" : 0,
"filename" : "foo.txt",
"length" : 0, // Zero!
"chunkSize" : 10,
"uploadDate" : {"$date" : 1234567890000}
}));

bson_iter_t id_iter;
ASSERT(bson_iter_init_find(&id_iter, doc, "_id"));
const bson_value_t *id_value = bson_iter_value(&id_iter);

// Insert manually:
{
mongoc_collection_t *files = mongoc_database_get_collection(db, "fs.files");
ASSERT_OR_PRINT(mongoc_collection_insert_one(files, doc, NULL, NULL, &error), error);
mongoc_collection_destroy(files);
}

// Try to read:
{
mongoc_gridfs_bucket_t *bucket = mongoc_gridfs_bucket_new(db, NULL, NULL, &error);
ASSERT_OR_PRINT(bucket, error);
mongoc_stream_t *stream = mongoc_gridfs_bucket_open_download_stream(bucket, id_value, &error);
ASSERT_OR_PRINT(stream, error);
// OK. Gets back an empty read.
uint8_t buf[64];
ssize_t r = mongoc_stream_read(stream, buf, sizeof buf, 0, 0);
ASSERT_CMPINT(r, ==, 0);
mongoc_stream_destroy(stream);
mongoc_gridfs_bucket_destroy(bucket);
}
}

mongoc_database_destroy(db);
mongoc_client_destroy(client);
}

void
test_gridfs_bucket_install(TestSuite *suite)
{
@@ -1070,4 +1244,5 @@ test_gridfs_bucket_install(TestSuite *suite)
test_framework_skip_if_no_sessions,
test_framework_skip_if_no_crypto);
TestSuite_AddLive(suite, "/gridfs/options", test_gridfs_bucket_opts);
TestSuite_AddLive(suite, "/gridfs/bad_sizes", test_bad_sizes);
}
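
For completeness, the upload path already rejects a non-positive chunkSizeBytes option before the new BSON_ASSERT is reached; the tests above assert the existing "should be greater than 0" message with MONGOC_ERROR_COMMAND_INVALID_ARG. The following caller-side sketch illustrates that behavior; it is not part of this diff, the database name is a placeholder, and the BCON usage is an assumption about how a caller might build the opts document.

#include <mongoc/mongoc.h>
#include <stdio.h>

// Sketch: a non-positive chunkSizeBytes option is rejected when opening an
// upload stream, so the new assertion in the bucket code never sees a bad
// value. "example_db" is a placeholder for illustration.
static void
report_upload_error (mongoc_client_t *client)
{
   bson_error_t error;
   mongoc_database_t *db = mongoc_client_get_database (client, "example_db");
   mongoc_gridfs_bucket_t *bucket = mongoc_gridfs_bucket_new (db, NULL, NULL, &error);
   BSON_ASSERT (bucket);

   bson_t *opts = BCON_NEW ("chunkSizeBytes", BCON_INT32 (0)); // Invalid: must be > 0.
   mongoc_stream_t *stream = mongoc_gridfs_bucket_open_upload_stream (bucket, "foo.txt", opts, NULL, &error);
   if (!stream) {
      // Expected: MONGOC_ERROR_COMMAND / MONGOC_ERROR_COMMAND_INVALID_ARG,
      // with a message containing "should be greater than 0".
      fprintf (stderr, "upload error: %s\n", error.message);
   } else {
      mongoc_stream_destroy (stream);
   }

   bson_destroy (opts);
   mongoc_gridfs_bucket_destroy (bucket);
   mongoc_database_destroy (db);
}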