Skip to content
This repository was archived by the owner on Dec 4, 2023. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 22 additions & 18 deletions src/main/scala/io/findify/s3mock/provider/FileProvider.scala
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
package io.findify.s3mock.provider

import java.util.UUID
import java.io.{FileInputStream, File => JFile}

Expand All @@ -18,7 +19,7 @@ import scala.util.Random
/**
* Created by shutty on 8/9/16.
*/
class FileProvider(dir:String) extends Provider with LazyLogging {
class FileProvider(dir: String) extends Provider with LazyLogging {
val workDir = File(dir)
if (!workDir.exists) workDir.createDirectories()

Expand All @@ -39,18 +40,19 @@ class FileProvider(dir:String) extends Provider with LazyLogging {
case pos => Some(p + dir.substring(p.length, pos) + d)
}
}

val prefixNoLeadingSlash = prefix.getOrElse("").dropWhile(_ == '/')
val bucketFile = File(s"$dir/$bucket/")
if (!bucketFile.exists) throw NoSuchBucketException(bucket)
val bucketFileString = fromOs(bucketFile.toString)
val bucketFiles = bucketFile.listRecursively.filter(f => {
val fString = fromOs(f.toString).drop(bucketFileString.length).dropWhile(_ == '/')
fString.startsWith(prefixNoLeadingSlash) && !f.isDirectory
})
val fString = fromOs(f.toString).drop(bucketFileString.length).dropWhile(_ == '/')
fString.startsWith(prefixNoLeadingSlash) && !f.isDirectory
})
val files = bucketFiles.map(f => {
val stream = new FileInputStream(f.toJava)
val md5 = DigestUtils.md5Hex(stream)
Content(fromOs(f.toString).drop(bucketFileString.length+1).dropWhile(_ == '/'), DateTime(f.lastModifiedTime.toEpochMilli), md5, f.size, "STANDARD")
Content(fromOs(f.toString).drop(bucketFileString.length + 1).dropWhile(_ == '/'), DateTime(f.lastModifiedTime.toEpochMilli), md5, f.size, "STANDARD")
}).toList
logger.debug(s"listing bucket contents: ${files.map(_.key)}")
val commonPrefixes = delimiter match {
Expand All @@ -60,16 +62,17 @@ class FileProvider(dir:String) extends Provider with LazyLogging {
val filteredFiles = files.filterNot(f => commonPrefixes.exists(p => f.key.startsWith(p)))
val count = maxkeys.getOrElse(Int.MaxValue)
val result = filteredFiles.sortBy(_.key)
ListBucket(bucket, prefix, delimiter, commonPrefixes, result.take(count), isTruncated = result.size>count)
ListBucket(bucket, prefix, delimiter, commonPrefixes, result.take(count), isTruncated = result.size > count)
}

/**
  * Create the directory backing an S3 bucket. Idempotent: an existing
  * bucket directory is left untouched.
  *
  * @param name         bucket name (becomes a directory under `dir`)
  * @param bucketConfig bucket configuration from the request (unused by the file backend)
  * @return the CreateBucket result echoed back to the caller
  */
override def createBucket(name: String, bucketConfig: CreateBucketConfiguration) = {
  val bucketDir = File(s"$dir/$name")
  if (!bucketDir.exists) {
    bucketDir.createDirectory()
  }
  logger.debug(s"creating bucket $name")
  CreateBucket(name)
}
override def putObject(bucket:String, key:String, data:Array[Byte], objectMetadata: ObjectMetadata): Unit = {

override def putObject(bucket: String, key: String, data: Array[Byte], objectMetadata: ObjectMetadata): Unit = {
val bucketFile = File(s"$dir/$bucket")
val file = File(s"$dir/$bucket/$key")
if (!bucketFile.exists) throw NoSuchBucketException(bucket)
Expand All @@ -79,7 +82,8 @@ class FileProvider(dir:String) extends Provider with LazyLogging {
objectMetadata.setLastModified(org.joda.time.DateTime.now().toDate)
metadataStore.put(bucket, key, objectMetadata)
}
override def getObject(bucket:String, key:String): GetObjectData = {

override def getObject(bucket: String, key: String): GetObjectData = {
val bucketFile = File(s"$dir/$bucket")
val file = File(s"$dir/$bucket/$key")
logger.debug(s"reading object for s://$bucket/$key")
Expand All @@ -90,7 +94,7 @@ class FileProvider(dir:String) extends Provider with LazyLogging {
GetObjectData(file.byteArray, meta)
}

override def putObjectMultipartStart(bucket:String, key:String, metadata: ObjectMetadata):InitiateMultipartUploadResult = {
override def putObjectMultipartStart(bucket: String, key: String, metadata: ObjectMetadata): InitiateMultipartUploadResult = {
val id = Math.abs(Random.nextLong()).toString
val bucketFile = File(s"$dir/$bucket")
if (!bucketFile.exists) throw NoSuchBucketException(bucket)
Expand All @@ -99,15 +103,16 @@ class FileProvider(dir:String) extends Provider with LazyLogging {
logger.debug(s"starting multipart upload for s3://$bucket/$key")
InitiateMultipartUploadResult(bucket, key, id)
}
/**
  * Store one chunk of an in-flight multipart upload.
  *
  * The chunk is staged under the hidden `.mp` tree, keyed by upload id and
  * part number, until `putObjectMultipartComplete` assembles the parts.
  *
  * @param bucket     destination bucket; must already exist
  * @param key        destination object key
  * @param partNumber 1-based part index within the upload
  * @param uploadId   identifier returned by `putObjectMultipartStart`
  * @param data       raw bytes of this part
  * @throws NoSuchBucketException if the bucket directory is missing
  */
override def putObjectMultipartPart(bucket: String, key: String, partNumber: Int, uploadId: String, data: Array[Byte]) = {
  if (!File(s"$dir/$bucket").exists) throw NoSuchBucketException(bucket)
  val partFile = File(s"$dir/.mp/$bucket/$key/$uploadId/$partNumber")
  logger.debug(s"uploading multipart chunk $partNumber for s3://$bucket/$key")
  partFile.writeByteArray(data)(OpenOptions.default)
}

override def putObjectMultipartComplete(bucket:String, key:String, uploadId:String, request:CompleteMultipartUpload): CompleteMultipartUploadResult = {
override def putObjectMultipartComplete(bucket: String, key: String, uploadId: String, request: CompleteMultipartUpload): CompleteMultipartUploadResult = {
val bucketFile = File(s"$dir/$bucket")
if (!bucketFile.exists) throw NoSuchBucketException(bucket)
val files = request.parts.map(part => File(s"$dir/.mp/$bucket/$key/$uploadId/${part.partNumber}"))
Expand All @@ -118,7 +123,7 @@ class FileProvider(dir:String) extends Provider with LazyLogging {
file.writeBytes(data.toIterator)
File(s"$dir/.mp/$bucket/$key").delete()
val hash = file.md5
metadataStore.get(bucket, key).foreach {m =>
metadataStore.get(bucket, key).foreach { m =>
m.setContentMD5(hash)
m.setLastModified(org.joda.time.DateTime.now().toDate)
}
Expand All @@ -142,23 +147,22 @@ class FileProvider(dir:String) extends Provider with LazyLogging {
}


/**
  * Server-side copy of a byte range from an existing object into one part
  * of an ongoing multipart upload (the UploadPartCopy operation).
  *
  * @param fromByte first byte of the source range (inclusive)
  * @param toByte   last byte of the source range (inclusive, hence the +1 below)
  * @param newMeta  optional replacement metadata (ignored by the file backend)
  * @return copy result carrying the timestamp and MD5 of the copied range
  */
override def copyObjectMultipart(sourceBucket: String, sourceKey: String, destBucket: String, destKey: String, part: Int, uploadId: String, fromByte: Int, toByte: Int, newMeta: Option[ObjectMetadata] = None): CopyObjectResult = {
  val source = getObject(sourceBucket, sourceKey)
  // slice's end index is exclusive, while toByte is inclusive
  val range = source.bytes.slice(fromByte, toByte + 1)
  putObjectMultipartPart(destBucket, destKey, part, uploadId, range)
  new CopyObjectResult(DateTime.now, DigestUtils.md5Hex(range))
}

/**
  * Delete a single object from a bucket.
  *
  * Matches real S3 semantics (DELETE is idempotent) and this PR's
  * InMemoryProvider behavior: deleting a key that does not exist is a
  * silent no-op rather than a NoSuchKeyException — the route no longer
  * maps that exception to a 404. A missing bucket is still an error,
  * mirroring InMemoryProvider.
  *
  * @param bucket bucket name; must exist
  * @param key    object key; may be absent (no-op)
  * @throws NoSuchBucketException if the bucket directory is missing
  */
override def deleteObject(bucket: String, key: String): Unit = {
  val bucketFile = File(s"$dir/$bucket")
  if (!bucketFile.exists) throw NoSuchBucketException(bucket)
  val file = File(s"$dir/$bucket/$key")
  logger.debug(s"deleting object s3://$bucket/$key")
  // Directories represent key prefixes, not objects: never delete them here.
  if (file.exists && !file.isDirectory) {
    file.delete()
    metadataStore.delete(bucket, key)
  }
}

override def deleteBucket(bucket:String): Unit = {
override def deleteBucket(bucket: String): Unit = {
val bucketFile = File(s"$dir/$bucket")
logger.debug(s"deleting bucket s://$bucket")
if (!bucketFile.exists) throw NoSuchBucketException(bucket)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -169,8 +169,7 @@ class InMemoryProvider extends Provider with LazyLogging {
logger.debug(s"recursive delete by prefix is not supported by S3")
Unit
case None =>
logger.warn(s"key does not exist")
throw NoSuchKeyException(bucket, key)
Unit
}
}
case None => throw NoSuchBucketException(bucket)
Expand Down
3 changes: 0 additions & 3 deletions src/main/scala/io/findify/s3mock/route/DeleteObject.scala
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,6 @@ case class DeleteObject(implicit provider: Provider) extends LazyLogging {
case Success(_) =>
logger.info(s"deleted object $bucket/$path")
HttpResponse(StatusCodes.NoContent)
case Failure(NoSuchKeyException(_, _)) =>
logger.info(s"cannot delete object $bucket/$path: no such key")
HttpResponse(StatusCodes.NotFound)
case Failure(ex) =>
logger.error(s"cannot delete object $bucket/$path", ex)
HttpResponse(StatusCodes.NotFound)
Expand Down
5 changes: 3 additions & 2 deletions src/test/scala/io/findify/s3mock/DeleteTest.scala
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,9 @@ class DeleteTest extends S3MockTest {
s3.listObjects("delobj", "somefile").getObjectSummaries.asScala.exists(_.getKey == "somefile") shouldBe false
}

it should "return 404 for non-existent keys when deleting" in {
Try(s3.deleteObject("nodel", "xxx")).isFailure shouldBe true
it should "do nothing for non-existing keys when deleting" in {
s3.createBucket("nodel")
Try(s3.deleteObject("nodel", "xxx")).isSuccess shouldBe true
}

it should "produce NoSuchBucket if bucket does not exist when deleting" in {
Expand Down