diff --git a/.gitignore b/.gitignore
index 3016facee0..cb622078a3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,6 +14,7 @@
*/**/*tx_database*
*/**/*dapps*
build/_vendor/pkg
+vendor/*
#*
.#*
diff --git a/go.mod b/go.mod
index a64125cf9a..ddfb89c3e7 100644
--- a/go.mod
+++ b/go.mod
@@ -4,6 +4,7 @@ go 1.13
require (
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898
+ github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876 // indirect
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
@@ -13,6 +14,8 @@ require (
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containerd/containerd v1.2.7 // indirect
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
+ github.com/dgraph-io/badger v1.6.0
+ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/docker v0.7.3-0.20190806133308-ecdb0b22393b
github.com/docker/go-connections v0.4.0 // indirect
@@ -24,7 +27,7 @@ require (
github.com/go-kit/kit v0.9.0 // indirect
github.com/go-logfmt/logfmt v0.4.0 // indirect
github.com/gogo/protobuf v1.2.1 // indirect
- github.com/golang/protobuf v1.3.2 // indirect
+ github.com/golang/protobuf v1.3.5 // indirect
github.com/googleapis/gnostic v0.0.0-20190624222214-25d8b0b66985 // indirect
github.com/gorilla/mux v1.7.3 // indirect
github.com/hashicorp/golang-lru v0.5.3
@@ -42,6 +45,7 @@ require (
github.com/opentracing/opentracing-go v1.1.0
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222
github.com/peterh/liner v0.0.0-20190123174540-a2c9a5303de7 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/rnsdomains/rns-go-lib v0.0.0-20191114120302-3505575b0b8f
github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d // indirect
github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00
@@ -53,9 +57,10 @@ require (
github.com/vbauerster/mpb v3.4.0+incompatible
go.uber.org/atomic v1.4.0 // indirect
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
- golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
+ golang.org/x/net v0.0.0-20200320220750-118fecf932d8
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
golang.org/x/sync v0.0.0-20190423024810-112230192c58
+ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd // indirect
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
google.golang.org/appengine v1.6.1 // indirect
google.golang.org/grpc v1.22.1 // indirect
diff --git a/go.sum b/go.sum
index 3f65912b67..85c618569f 100644
--- a/go.sum
+++ b/go.sum
@@ -2,6 +2,10 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898 h1:SC+c6A1qTFstO9qmB86mPV2IpYm
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=
+github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
+github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
+github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876 h1:3c3mGlhASTJh6H6Ba9EHv2FDSmEUyJuJHR6UD7b+YuE=
github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e h1:Ix5oKbq0MlolI+T4EPCL9sddfEw6LgRMpC+qx0Kz5/E=
@@ -27,6 +31,7 @@ github.com/apilayer/freegeoip v0.0.0-20180702111401-3f942d1392f6 h1:9uC+gZZ11spz
github.com/apilayer/freegeoip v0.0.0-20180702111401-3f942d1392f6/go.mod h1:CUfFqErhFhXneJendyQ/rRcuA8kH8JxHvYnbOozmlCU=
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 h1:rtI0fD4oG/8eVokGVPYJEW1F88p1ZNgXiEIs9thEE4A=
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 h1:Eey/GGQ/E5Xp1P2Lyx1qj007hLZfbi0+CoVeJruGCtI=
@@ -43,12 +48,22 @@ github.com/containerd/containerd v1.2.7 h1:8lqLbl7u1j3MmiL9cJ/O275crSq7bfwUayvva
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
+github.com/dgraph-io/badger v1.6.0 h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo=
+github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -59,6 +74,8 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c h1:JHHhtb9XWJrGNMcrVP6vyzO4dusgi/HnceHTgxSejUM=
github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/gosigar v0.0.0-20180330100440-37f05ff46ffa h1:o8OuEkracbk3qH6GvlI6XpEN1HTSxkzOG42xZpfDv/s=
@@ -102,6 +119,8 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -126,6 +145,7 @@ github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCO
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607 h1:+7wvV++11s0Okyl1dekihkIiCIYDz+Qk2LvxAShINU4=
github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607/go.mod h1:41HzSPxBGeFRQKEEwgh49TRw/nKBsYZ2cF1OzPjSJsA=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
@@ -134,6 +154,7 @@ github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3 h1:DqD8eigqlUm0+znmx7z
github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb v0.0.0-20180221223340-01288bdb0883 h1:HsZXaxH4mZRDDcxGk5m1+o3R/ofaT5YrMG+aR0altIw=
github.com/influxdata/influxdb v0.0.0-20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
github.com/jackpal/go-nat-pmp v0.0.0-20160603034137-1fa385a6f458 h1:LPECOO5LcZx5tvkxraIptrg6AiAUf+28rFV9+noSZFA=
@@ -161,6 +182,7 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
@@ -170,6 +192,8 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.6 h1:V2iyH+aX9C5fsYCpK60U8BYIvmhqxuOL3JZcqc1NB7k=
github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -208,6 +232,7 @@ github.com/oschwald/maxminddb-golang v0.0.0-20180819230143-277d39ecb83e h1:omG1V
github.com/oschwald/maxminddb-golang v0.0.0-20180819230143-277d39ecb83e/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY=
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 h1:goeTyGkArOZIVOMA0dQbyuPWGNQJZGPwPu/QS9GlpnA=
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/peterh/liner v0.0.0-20190123174540-a2c9a5303de7 h1:Imx0QZXGB4siHjlmDJ/kx/bU+D36ytDj5dgy/TkIQ+A=
github.com/peterh/liner v0.0.0-20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
@@ -215,6 +240,8 @@ github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -238,14 +265,20 @@ github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 h1:3hxavr+IHMsQBrYUPQM
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
github.com/rs/xhandler v0.0.0-20170707052532-1eb70cf1520d h1:8Tt7DYYdFqLlOIuyiE0RluKem4T+048AUafnIjH80wg=
github.com/rs/xhandler v0.0.0-20170707052532-1eb70cf1520d/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
@@ -269,13 +302,16 @@ github.com/uber/jaeger-client-go v0.0.0-20180607151842-f7e0d4744fa6 h1:x2aYRH9ay
github.com/uber/jaeger-client-go v0.0.0-20180607151842-f7e0d4744fa6/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v0.0.0-20180615202729-a51202d6f4a7 h1:0M6xAhuJ/tVOsrSyesayxF8bqlfHjmUsXPrN4JAtJtI=
github.com/uber/jaeger-lib v0.0.0-20180615202729-a51202d6f4a7/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/vbauerster/mpb v3.4.0+incompatible h1:mfiiYw87ARaeRW6x5gWwYRUawxaW1tLAD8IceomUCNw=
github.com/vbauerster/mpb v3.4.0+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU=
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk=
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -292,8 +328,11 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200320220750-118fecf932d8 h1:1+zQlQqEEhUeStBTi653GZAnAuivZq/2hz+Iz+OP7rg=
+golang.org/x/net v0.0.0-20200320220750-118fecf932d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@@ -306,6 +345,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -314,8 +354,11 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M=
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 44799fcfbe..10af6ff5cc 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -17,18 +17,7 @@
package fcds
import (
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "sort"
- "sync"
- "time"
-
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethersphere/swarm/log"
-
+ "github.com/dgraph-io/badger"
"github.com/ethersphere/swarm/chunk"
)
@@ -38,334 +27,133 @@ import (
type Storer interface {
Get(addr chunk.Address) (ch chunk.Chunk, err error)
Has(addr chunk.Address) (yes bool, err error)
- Put(ch chunk.Chunk) (shard uint8, err error)
+ Put(ch chunk.Chunk) (err error)
Delete(addr chunk.Address) (err error)
- ShardSize() (slots []ShardInfo, err error)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
Close() (err error)
}
-var _ Storer = new(Store)
-
-// Number of files that store chunk data.
-var ShardCount = uint8(32)
-
-// ErrStoreClosed is returned if store is already closed.
-var ErrStoreClosed = errors.New("closed store")
+var _ Storer = new(BadgerStore)
-// Store is the main FCDS implementation. It stores chunk data into
-// a number of files partitioned by the last byte of the chunk address.
-type Store struct {
- shards []shard // relations with shard id and a shard file and their mutexes
- meta MetaStore // stores chunk offsets
- wg sync.WaitGroup // blocks Close until all other method calls are done
- maxChunkSize int // maximal chunk data size
- quit chan struct{} // quit disables all operations after Close is called
- quitOnce sync.Once // protects quit channel from multiple Close calls
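+// BadgerStore stores chunk data in a single BadgerDB database,
+// keyed by chunk address.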
+type BadgerStore struct {
+ db *badger.DB
}
-// Option is an optional argument passed to New.
-type Option func(*Store)
-
-// New constructs a new Store with files at path, with specified max chunk size.
-func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s *Store, err error) {
- s = &Store{
- shards: make([]shard, ShardCount),
- meta: metaStore,
- maxChunkSize: maxChunkSize,
- quit: make(chan struct{}),
- }
- for _, o := range opts {
- o(s)
- }
- if err := os.MkdirAll(path, 0777); err != nil {
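+// New opens a BadgerDB-backed chunk store at path, creating it if
+// it does not exist.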
+func New(path string) (s *BadgerStore, err error) {
+ o := badger.DefaultOptions(path)
+	o.SyncWrites = false         // favor write throughput; do not fsync on every commit
+	o.ValueLogMaxEntries = 50000 // cap the number of entries per value log file
+	o.NumMemtables = 10          // buffer more writes in memory before flushing
+	//o.MaxCacheSize = o.MaxCacheSize * 4
+	//o.EventLogging = false
+	o.Logger = nil // disable badger's internal logging
+ db, err := badger.Open(o)
+ if err != nil {
return nil, err
}
- for i := byte(0); i < ShardCount; i++ {
- s.shards[i].f, err = os.OpenFile(filepath.Join(path, fmt.Sprintf("chunks-%v.db", i)), os.O_CREATE|os.O_RDWR, 0666)
- if err != nil {
- return nil, err
- }
- s.shards[i].mu = new(sync.Mutex)
- }
- return s, nil
+ return &BadgerStore{
+ db: db,
+ }, nil
}
-func (s *Store) ShardSize() (slots []ShardInfo, err error) {
- slots = make([]ShardInfo, len(s.shards))
- for i, sh := range s.shards {
- sh.mu.Lock()
- fs, err := sh.f.Stat()
- sh.mu.Unlock()
+func (s *BadgerStore) Get(addr chunk.Address) (c chunk.Chunk, err error) {
+ err = s.db.View(func(txn *badger.Txn) (err error) {
+ item, err := txn.Get(addr)
if err != nil {
- return nil, err
+			if err == badger.ErrKeyNotFound {
+				return chunk.ErrChunkNotFound
+			}
+			// do not fall through to a nil item on other errors
+			return err
}
- slots[i] = ShardInfo{Shard: uint8(i), Val: fs.Size()}
- }
-
- return slots, nil
-}
-
-// Get returns a chunk with data.
-func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
- if err := s.protect(); err != nil {
- return nil, err
- }
- defer s.unprotect()
-
- m, err := s.getMeta(addr)
- if err != nil {
- return nil, err
- }
-
- sh := s.shards[m.Shard]
- sh.mu.Lock()
-
- data := make([]byte, m.Size)
- n, err := sh.f.ReadAt(data, m.Offset)
- if err != nil && err != io.EOF {
- metrics.GetOrRegisterCounter("fcds/get/error", nil).Inc(1)
-
- sh.mu.Unlock()
- return nil, err
- }
- if n != int(m.Size) {
- return nil, fmt.Errorf("incomplete chunk data, read %v of %v", n, m.Size)
- }
- sh.mu.Unlock()
-
- metrics.GetOrRegisterCounter("fcds/get/ok", nil).Inc(1)
-
- return chunk.NewChunk(addr, data), nil
+		// ValueCopy is used because the slice passed to item.Value
+		// is only valid while the transaction is open
+		data, err := item.ValueCopy(nil)
+		if err != nil {
+			return err
+		}
+		c = chunk.NewChunk(addr, data)
+		return nil
+ })
+ return c, err
}
-// Has returns true if chunk is stored.
-func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
- if err := s.protect(); err != nil {
- return false, err
- }
- defer s.unprotect()
-
- _, err = s.getMeta(addr)
- if err != nil {
- if err == chunk.ErrChunkNotFound {
- metrics.GetOrRegisterCounter("fcds/has/no", nil).Inc(1)
- return false, nil
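+// Has returns true if a chunk with addr is stored.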
+func (s *BadgerStore) Has(addr chunk.Address) (yes bool, err error) {
+ yes = false
+ err = s.db.View(func(txn *badger.Txn) (err error) {
+		_, err = txn.Get(addr)
+		if err != nil {
+			if err == badger.ErrKeyNotFound {
+				// a missing chunk is not an error for Has
+				return nil
+			}
+			return err
+		}
- metrics.GetOrRegisterCounter("fcds/has/err", nil).Inc(1)
- return false, err
- }
- metrics.GetOrRegisterCounter("fcds/has/ok", nil).Inc(1)
-
- return true, nil
-}
-
-// Put stores chunk data.
-// Returns the shard number into which the chunk was added.
-func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
- if err := s.protect(); err != nil {
- return 0, err
- }
- defer s.unprotect()
- m, err := s.getMeta(ch.Address())
- if err == nil {
- return m.Shard, nil
- }
- addr := ch.Address()
- data := ch.Data()
-
- size := len(data)
- if size > s.maxChunkSize {
- return 0, fmt.Errorf("chunk data size %v exceeds %v bytes", size, s.maxChunkSize)
- }
-
- section := make([]byte, s.maxChunkSize)
- copy(section, data)
-
- shardId, offset, reclaimed, cancel, err := s.getOffset()
- if err != nil {
- return 0, err
- }
-
- sh := s.shards[shardId]
- sh.mu.Lock()
- defer sh.mu.Unlock()
-
- if reclaimed {
- metrics.GetOrRegisterCounter("fcds/put/reclaimed", nil).Inc(1)
- }
-
- if offset < 0 {
- metrics.GetOrRegisterCounter("fcds/put/append", nil).Inc(1)
- // no free offsets found,
- // append the chunk data by
- // seeking to the end of the file
- offset, err = sh.f.Seek(0, io.SeekEnd)
- } else {
- metrics.GetOrRegisterCounter("fcds/put/offset", nil).Inc(1)
- // seek to the offset position
- // to replace the chunk data at that position
- _, err = sh.f.Seek(offset, io.SeekStart)
- }
- if err != nil {
- cancel()
- return 0, err
- }
-
- if _, err = sh.f.Write(section); err != nil {
- cancel()
- return 0, err
- }
-
- err = s.meta.Set(addr, shardId, reclaimed, &Meta{
- Size: uint16(size),
- Offset: offset,
- Shard: shardId,
+		// Get succeeded, so the chunk exists; the value is not needed
+		yes = true
+		return nil
})
- if err != nil {
- cancel()
- }
-
- return shardId, err
-}
-
-// getOffset returns an offset on a shard where chunk data can be written to
-// and a flag if the offset is reclaimed from a previously removed chunk.
-// If offset is less then 0, no free offsets are available.
-func (s *Store) getOffset() (shard uint8, offset int64, reclaimed bool, cancel func(), err error) {
- cancel = func() {}
- shard, offset, cancel = s.meta.FreeOffset()
- if offset >= 0 {
- return shard, offset, true, cancel, nil
- }
-
- // each element Val is the shard size in bytes
- shardSizes, err := s.ShardSize()
- if err != nil {
- return 0, 0, false, cancel, err
- }
-
- // sorting them will make the first element the largest shard and the last
- // element the smallest shard; pick the smallest
- sort.Sort(byVal(shardSizes))
-
- return shardSizes[len(shardSizes)-1].Shard, -1, false, cancel, nil
-
+ return yes, err
}
-// Delete makes the chunk unavailable.
-func (s *Store) Delete(addr chunk.Address) (err error) {
- if err := s.protect(); err != nil {
- return err
- }
- defer s.unprotect()
-
- m, err := s.getMeta(addr)
- if err != nil {
- return err
- }
-
- mu := s.shards[m.Shard].mu
- mu.Lock()
- defer mu.Unlock()
-
- err = s.meta.Remove(addr, m.Shard)
- if err != nil {
- metrics.GetOrRegisterCounter("fcds/delete/fail", nil).Inc(1)
+func (s *BadgerStore) Put(ch chunk.Chunk) (err error) {
+ return s.db.Update(func(txn *badger.Txn) (err error) {
+ err = txn.Set(ch.Address(), ch.Data())
return err
- }
-
- metrics.GetOrRegisterCounter("fcds/delete/ok", nil).Inc(1)
- return nil
-}
-
-// Count returns a number of stored chunks.
-func (s *Store) Count() (count int, err error) {
- return s.meta.Count()
+ })
}
-// Iterate iterates over stored chunks in no particular order.
-func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error) {
- if err := s.protect(); err != nil {
- return err
- }
- defer s.unprotect()
- for _, sh := range s.shards {
- sh.mu.Lock()
- }
- defer func() {
- for _, sh := range s.shards {
- sh.mu.Unlock()
- }
- }()
-
- return s.meta.Iterate(func(addr chunk.Address, m *Meta) (stop bool, err error) {
- data := make([]byte, m.Size)
- _, err = s.shards[m.Shard].f.ReadAt(data, m.Offset)
- if err != nil {
- return true, err
- }
- return fn(chunk.NewChunk(addr, data))
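+// Delete removes the chunk stored at addr.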
+func (s *BadgerStore) Delete(addr chunk.Address) (err error) {
+ return s.db.Update(func(txn *badger.Txn) (err error) {
+ return txn.Delete(addr)
})
}
-// Close disables of further operations on the Store.
-// Every call to its methods will return ErrStoreClosed error.
-// Close will wait for all running operations to finish before
-// closing its MetaStore and returning.
-func (s *Store) Close() (err error) {
- s.quitOnce.Do(func() {
- close(s.quit)
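+// Count returns the number of stored chunks. It iterates over all
+// keys and is slow for large stores.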
+func (s *BadgerStore) Count() (count int, err error) {
+ err = s.db.View(func(txn *badger.Txn) (err error) {
+ o := badger.DefaultIteratorOptions
+ o.PrefetchValues = false
+ i := txn.NewIterator(o)
+ defer i.Close()
+ for i.Rewind(); i.Valid(); i.Next() {
+ item := i.Item()
+ k := item.KeySize()
+ if k < 1 {
+ continue
+ }
+ count++
+ }
+ return nil
})
-
- timeout := 15 * time.Second
- done := make(chan struct{})
- go func() {
- s.wg.Wait()
- close(done)
- }()
- select {
- case <-done:
- case <-time.After(timeout):
- log.Debug("timeout on waiting chunk store parallel operations to finish", "timeout", timeout)
- }
-
- for _, sh := range s.shards {
- if err := sh.f.Close(); err != nil {
- return err
+ return count, err
+}
+
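+// Iterate calls fn on stored chunks, in key order, until fn returns
+// stop or an error.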
+func (s *BadgerStore) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error) {
+ return s.db.View(func(txn *badger.Txn) (err error) {
+ o := badger.DefaultIteratorOptions
+ o.PrefetchValues = true
+ o.PrefetchSize = 1024
+ i := txn.NewIterator(o)
+ defer i.Close()
+ for i.Rewind(); i.Valid(); i.Next() {
+ item := i.Item()
+			// KeyCopy is used because the slice returned by Key
+			// is only valid until Next is called
+			k := item.KeyCopy(nil)
+ if len(k) < 1 {
+ continue
+ }
+ v, err := item.ValueCopy(nil)
+ if err != nil {
+ return err
+ }
+ if len(v) == 0 {
+ continue
+ }
+ stop, err := fn(chunk.NewChunk(k, v))
+ if err != nil {
+ return err
+ }
+ if stop {
+ return nil
+ }
}
- }
- return s.meta.Close()
-}
-
-// protect protects Store from executing operations
-// after the Close method is called and makes sure
-// that Close method will wait for all ongoing operations
-// to finish before returning. Method unprotect done
-// must be closed to unblock the Close method call.
-func (s *Store) protect() (err error) {
- select {
- case <-s.quit:
- return ErrStoreClosed
- default:
- }
- s.wg.Add(1)
- return nil
-}
-
-// unprotect removes a protection set by the protect method
-// allowing the Close method to unblock.
-func (s *Store) unprotect() {
- s.wg.Done()
-}
-
-// getMeta returns Meta information from MetaStore.
-func (s *Store) getMeta(addr chunk.Address) (m *Meta, err error) {
- return s.meta.Get(addr)
+ return nil
+ })
}
-type shard struct {
- f *os.File
- mu *sync.Mutex
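+// Close closes the underlying BadgerDB database.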
+func (s *BadgerStore) Close() error {
+ return s.db.Close()
}
diff --git a/storage/fcds/fcds_test.go b/storage/fcds/fcds_test.go
new file mode 100644
index 0000000000..482f306b48
--- /dev/null
+++ b/storage/fcds/fcds_test.go
@@ -0,0 +1,304 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package fcds
+
+import (
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethersphere/swarm/chunk"
+)
+
+const (
+ ConcurrentThreads = 128
+)
+
+func newDB(b *testing.B) (db Storer, clean func()) {
+ b.Helper()
+
+	// use the system temp dir; a fixed parent such as /tmp/swarm may not exist
+	path, err := ioutil.TempDir("", "swarm-shed")
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ db, err = New(path)
+ if err != nil {
+ os.RemoveAll(path)
+ b.Fatal(err)
+ }
+ return db, func() {
+ db.Close()
+ os.RemoveAll(path)
+ }
+}
+
+func getChunks(count int, chunkCache []chunk.Chunk) []chunk.Chunk {
+ l := len(chunkCache)
+ if l == 0 {
+ chunkCache = make([]chunk.Chunk, count)
+ for i := 0; i < count; i++ {
+ chunkCache[i] = GenerateTestRandomChunk()
+ }
+ return chunkCache
+ }
+ if l < count {
+ for i := 0; i < count-l; i++ {
+ chunkCache = append(chunkCache, GenerateTestRandomChunk())
+ }
+ return chunkCache
+ }
+ return chunkCache[:count]
+}
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
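+// GenerateTestRandomChunk returns a chunk with a random address and
+// random data of the default chunk size.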
+func GenerateTestRandomChunk() chunk.Chunk {
+ data := make([]byte, chunk.DefaultSize)
+ rand.Read(data)
+ key := make([]byte, 32)
+ rand.Read(key)
+ return chunk.NewChunk(key, data)
+}
+
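+// createBenchBaseline opens a fresh store and concurrently fills it
+// with baseChunksCount random chunks, returning the store, a cleanup
+// function and the inserted chunks in shuffled order.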
+func createBenchBaseline(b *testing.B, baseChunksCount int) (db Storer, clean func(), baseChunks []chunk.Chunk) {
+ db, clean = newDB(b)
+
+ if baseChunksCount > 0 {
+ baseChunks = getChunks(baseChunksCount, baseChunks)
+ //start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(baseChunksCount)
+ for i, ch := range baseChunks {
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if err := db.Put(ch); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ //elapsed := time.Since(start)
+ //fmt.Println("-- adding base chunks took, ", elapsed)
+ }
+
+ rand.Shuffle(baseChunksCount, func(i, j int) {
+ baseChunks[i], baseChunks[j] = baseChunks[j], baseChunks[i]
+ })
+
+ return db, clean, baseChunks
+}
+
+// Benchmarks.
+
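+// runBenchmark concurrently writes writeChunksCount new chunks, and
+// reads readChunksCount and deletes deleteChunksCount chunks from
+// basechunks, timing each class of operation separately.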
+func runBenchmark(b *testing.B, db Storer, basechunks []chunk.Chunk, baseChunksCount int, writeChunksCount int, readChunksCount int, deleteChunksCount int) {
+ var writeElapsed time.Duration
+ var readElapsed time.Duration
+ var deleteElapsed time.Duration
+
+ var writeChunks []chunk.Chunk
+ writeChunks = getChunks(writeChunksCount, writeChunks)
+ b.StartTimer()
+
+ var jobWg sync.WaitGroup
+ if writeChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(writeChunksCount)
+ for i, ch := range writeChunks {
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if err := db.Put(ch); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ //fmt.Println("-- writing chunks took , ", elapsed)
+ writeElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ if readChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+			var errCount int64
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads*4)
+ var wg sync.WaitGroup
+ wg.Add(readChunksCount)
+ for i, ch := range basechunks {
+ if i >= readChunksCount {
+ break
+ }
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ _, err := db.Get(ch.Address())
+ if err != nil {
+					// this runs on many goroutines; count
+					// failed reads without a data race
+					atomic.AddInt64(&errCount, 1)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ //fmt.Println("-- reading chunks took , ", elapsed)
+ readElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ if deleteChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(deleteChunksCount)
+ for i, ch := range basechunks {
+ if i >= deleteChunksCount {
+ break
+ }
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if err := db.Delete(ch.Address()); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ //fmt.Println("-- deleting chunks took , ", elapsed)
+ deleteElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ jobWg.Wait()
+
+ //if writeElapsed > 0 {
+ // fmt.Println("- Average write time : ", writeElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+ //if readElapsed > 0 {
+ // //fmt.Println("- Average read time : ", readElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+ //if deleteElapsed > 0 {
+ // //fmt.Println("- Average delete time : ", deleteElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+}
+
+func BenchmarkWrite(b *testing.B) {
+ for i := 10000; i <= 1000000; i *= 10 {
+ b.Run(fmt.Sprintf("baseline_%d", i), func(b *testing.B) {
+ // for each baseline, insert 10k, 20k, 50k, 100k
+ for _, k := range []int{10000, 20000, 50000, 100000} {
+ b.Run(fmt.Sprintf("add_%d", k), func(b *testing.B) {
+ for j := 0; j < b.N; j++ {
+ b.StopTimer()
+ db, clean, baseChunks := createBenchBaseline(b, i)
+ b.StartTimer()
+
+ runBenchmark(b, db, baseChunks, 0, k, 0, 0)
+ b.StopTimer()
+ clean()
+ b.StartTimer()
+ }
+ })
+ }
+ })
+ }
+}
+
+func BenchmarkRead(b *testing.B) {
+ for i := 10000; i <= 1000000; i *= 10 {
+ b.Run(fmt.Sprintf("baseline_%d", i), func(b *testing.B) {
+ b.StopTimer()
+ db, clean, baseChunks := createBenchBaseline(b, i)
+ b.StartTimer()
+
+ for k := 10000; k <= i; k *= 10 {
+ b.Run(fmt.Sprintf("read_%d", k), func(b *testing.B) {
+ for j := 0; j < b.N; j++ {
+ runBenchmark(b, db, baseChunks, 0, 0, k, 0)
+ }
+ })
+ }
+ b.StopTimer()
+ clean()
+ b.StartTimer()
+ })
+ }
+}
+
+//func BenchmarkWriteOverClean_100000(t *testing.B) { runBenchmark(t, 0, 100000, 0, 0, 6) }
+//func BenchmarkWriteOverClean_1000000(t *testing.B) { runBenchmark(t, 0, 1000000, 0, 0, 4) }
+
+//func BenchmarkWriteOver1Million_10000(t *testing.B) {
+//for i := 0; i < t.N; i++ {
+//runBenchmark(t, 1000000, 10000, 0, 0, 8)
+//}
+
+//}
+
+//func BenchmarkWriteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 0, 0,6) }
+//func BenchmarkWriteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 0, 0, 4) }
+//func BenchmarkWriteOver1Million_5000000(t *testing.B) { runBenchmark(t, 5000000, 1000000, 0, 0, 4) }
+//func BenchmarkWriteOver1Million_10000000(t *testing.B) { runBenchmark(t, 10000000, 1000000, 0, 0, 4) }
+
+//func BenchmarkReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 10000, 0,8) }
+//func BenchmarkReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 100000, 0, 6) }
+//func BenchmarkReadOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 1000000, 0, 4) }
+
+//func BenchmarkDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 10000,8) }
+//func BenchmarkDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 100000,6) }
+//func BenchmarkDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 1000000, 4) }
+
+//func BenchmarkWriteReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 0,8) }
+//func BenchmarkWriteReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 0,6) }
+//func BenchmarkWriteReadOver1Million_1000000(t *testing.B) {runBenchmark(t, 1000000, 1000000, 1000000, 0, 4)}
+
+//func BenchmarkWriteReadDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 10000,8) }
+//func BenchmarkWriteReadDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 100000,6) }
+//func BenchmarkWriteReadDeleteOver1Million_1000000(t *testing.B) {runBenchmark(t, 1000000, 1000000, 1000000, 1000000, 4)}
diff --git a/storage/fcds/leveldb/fcds_test.go b/storage/fcds/leveldb/fcds_test.go
new file mode 100644
index 0000000000..3d73f75e09
--- /dev/null
+++ b/storage/fcds/leveldb/fcds_test.go
@@ -0,0 +1,306 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package leveldb_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethersphere/swarm/chunk"
+ "github.com/ethersphere/swarm/storage/fcds"
+ "github.com/ethersphere/swarm/storage/fcds/leveldb"
+)
+
+const (
+ ConcurrentThreads = 128
+)
+
+func newDB(b *testing.B) (db fcds.Storer, clean func()) {
+ b.Helper()
+
+	// use the system temp dir; a fixed parent such as /tmp/swarm may not exist
+	path, err := ioutil.TempDir("", "swarm-shed")
+ if err != nil {
+ b.Fatal(err)
+ }
+ metaStore, err := leveldb.NewMetaStore(filepath.Join(path, "meta"))
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ db, err = fcds.New(path, 4096, metaStore)
+ if err != nil {
+ os.RemoveAll(path)
+ b.Fatal(err)
+ }
+ return db, func() {
+ db.Close()
+ os.RemoveAll(path)
+ }
+}
+
+func getChunks(count int, chunkCache []chunk.Chunk) []chunk.Chunk {
+ l := len(chunkCache)
+ if l == 0 {
+ chunkCache = make([]chunk.Chunk, count)
+ for i := 0; i < count; i++ {
+ chunkCache[i] = GenerateTestRandomChunk()
+ }
+ return chunkCache
+ }
+ if l < count {
+ for i := 0; i < count-l; i++ {
+ chunkCache = append(chunkCache, GenerateTestRandomChunk())
+ }
+ return chunkCache
+ }
+ return chunkCache[:count]
+}
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
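+// GenerateTestRandomChunk returns a chunk with a random address and
+// random data of the default chunk size.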
+func GenerateTestRandomChunk() chunk.Chunk {
+ data := make([]byte, chunk.DefaultSize)
+ rand.Read(data)
+ key := make([]byte, 32)
+ rand.Read(key)
+ return chunk.NewChunk(key, data)
+}
+
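+// createBenchBaseline opens a fresh store and concurrently fills it
+// with baseChunksCount random chunks, returning the store, a cleanup
+// function and the inserted chunks in shuffled order.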
+func createBenchBaseline(b *testing.B, baseChunksCount int) (db fcds.Storer, clean func(), baseChunks []chunk.Chunk) {
+ db, clean = newDB(b)
+
+ if baseChunksCount > 0 {
+ baseChunks = getChunks(baseChunksCount, baseChunks)
+ //start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(baseChunksCount)
+ for i, ch := range baseChunks {
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if _, err := db.Put(ch); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ //elapsed := time.Since(start)
+ //fmt.Println("-- adding base chunks took, ", elapsed)
+ }
+
+ rand.Shuffle(baseChunksCount, func(i, j int) {
+ baseChunks[i], baseChunks[j] = baseChunks[j], baseChunks[i]
+ })
+
+ return db, clean, baseChunks
+}
+
+// Benchmarks.
+
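+// runBenchmark concurrently writes writeChunksCount new chunks, and
+// reads readChunksCount and deletes deleteChunksCount chunks from
+// basechunks, timing each class of operation separately.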
+func runBenchmark(b *testing.B, db fcds.Storer, basechunks []chunk.Chunk, baseChunksCount int, writeChunksCount int, readChunksCount int, deleteChunksCount int) {
+ var writeElapsed time.Duration
+ var readElapsed time.Duration
+ var deleteElapsed time.Duration
+
+ var writeChunks []chunk.Chunk
+ writeChunks = getChunks(writeChunksCount, writeChunks)
+ b.StartTimer()
+
+ var jobWg sync.WaitGroup
+ if writeChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(writeChunksCount)
+ for i, ch := range writeChunks {
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if _, err := db.Put(ch); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ //fmt.Println("-- writing chunks took , ", elapsed)
+ writeElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ if readChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+			var errCount int64
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads*4)
+ var wg sync.WaitGroup
+ wg.Add(readChunksCount)
+ for i, ch := range basechunks {
+ if i >= readChunksCount {
+ break
+ }
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ _, err := db.Get(ch.Address())
+ if err != nil {
+					// this runs on many goroutines; count
+					// failed reads without a data race
+					atomic.AddInt64(&errCount, 1)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ //fmt.Println("-- reading chunks took , ", elapsed)
+ readElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ if deleteChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(deleteChunksCount)
+ for i, ch := range basechunks {
+ if i >= deleteChunksCount {
+ break
+ }
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if err := db.Delete(ch.Address()); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ //fmt.Println("-- deleting chunks took , ", elapsed)
+ deleteElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ jobWg.Wait()
+
+ //if writeElapsed > 0 {
+ // fmt.Println("- Average write time : ", writeElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+ //if readElapsed > 0 {
+ // //fmt.Println("- Average read time : ", readElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+ //if deleteElapsed > 0 {
+ // //fmt.Println("- Average delete time : ", deleteElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+}
+
+func BenchmarkWrite_Add10K(b *testing.B) {
+ for i := 30000; i <= 3000000; i *= 10 {
+ b.Run(fmt.Sprintf("baseline_%d", i), func(b *testing.B) {
+ for j := 0; j < b.N; j++ {
+ b.StopTimer()
+ db, clean, baseChunks := createBenchBaseline(b, i)
+ b.StartTimer()
+
+ runBenchmark(b, db, baseChunks, 0, 10000, 0, 0)
+ b.StopTimer()
+ clean()
+ b.StartTimer()
+ }
+ })
+ }
+}
+
+func BenchmarkReadOverClean(b *testing.B) {
+ for i := 30000; i <= 3000000; i *= 10 {
+ b.Run(fmt.Sprintf("baseline_%d", i), func(b *testing.B) {
+ b.StopTimer()
+ db, clean, baseChunks := createBenchBaseline(b, i)
+ b.StartTimer()
+
+ for k := 30000; k <= i; k *= 10 {
+ b.Run(fmt.Sprintf("read_%d", k), func(b *testing.B) {
+ for j := 0; j < b.N; j++ {
+ runBenchmark(b, db, baseChunks, 0, 0, k, 0)
+ }
+ })
+ }
+ b.StopTimer()
+ clean()
+ b.StartTimer()
+ })
+ }
+}
+
+//func BenchmarkWriteOverClean_100000(t *testing.B) { runBenchmark(t, 0, 100000, 0, 0, 6) }
+//func BenchmarkWriteOverClean_1000000(t *testing.B) { runBenchmark(t, 0, 1000000, 0, 0, 4) }
+
+//func BenchmarkWriteOver1Million_10000(t *testing.B) {
+//for i := 0; i < t.N; i++ {
+//runBenchmark(t, 1000000, 10000, 0, 0, 8)
+//}
+
+//}
+
+//func BenchmarkWriteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 0, 0,6) }
+//func BenchmarkWriteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 0, 0, 4) }
+//func BenchmarkWriteOver1Million_5000000(t *testing.B) { runBenchmark(t, 5000000, 1000000, 0, 0, 4) }
+//func BenchmarkWriteOver1Million_10000000(t *testing.B) { runBenchmark(t, 10000000, 1000000, 0, 0, 4) }
+
+//func BenchmarkReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 10000, 0,8) }
+//func BenchmarkReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 100000, 0, 6) }
+//func BenchmarkReadOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 1000000, 0, 4) }
+
+//func BenchmarkDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 10000,8) }
+//func BenchmarkDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 100000,6) }
+//func BenchmarkDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 1000000, 4) }
+
+//func BenchmarkWriteReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 0,8) }
+//func BenchmarkWriteReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 0,6) }
+//func BenchmarkWriteReadOver1Million_1000000(t *testing.B) {runBenchmark(t, 1000000, 1000000, 1000000, 0, 4)}
+
+//func BenchmarkWriteReadDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 10000,8) }
+//func BenchmarkWriteReadDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 100000,6) }
+//func BenchmarkWriteReadDeleteOver1Million_1000000(t *testing.B) {runBenchmark(t, 1000000, 1000000, 1000000, 1000000, 4)}
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
deleted file mode 100644
index 8d44cbbb52..0000000000
--- a/storage/fcds/leveldb/leveldb.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package leveldb
-
-import (
- "encoding/binary"
- "sync"
-
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/storage/fcds"
- "github.com/syndtr/goleveldb/leveldb"
- "github.com/syndtr/goleveldb/leveldb/opt"
-)
-
-var _ fcds.MetaStore = new(MetaStore)
-
-// MetaStore implements FCDS MetaStore with LevelDB
-// for persistence.
-type MetaStore struct {
- db *leveldb.DB
- free map[uint8]map[int64]struct{} // free slots map. root map key is shard id
- mtx sync.Mutex // synchronise free slots
-}
-
-// NewMetaStore returns new MetaStore at path.
-func NewMetaStore(path string) (s *MetaStore, err error) {
- db, err := leveldb.OpenFile(path, &opt.Options{})
- if err != nil {
- return nil, err
- }
-
- ms := &MetaStore{
- db: db,
- free: make(map[uint8]map[int64]struct{}),
- }
-
- for i := uint8(0); i < fcds.ShardCount; i++ {
- ms.free[i] = make(map[int64]struct{})
- }
-
- // caution - this _will_ break if we one day decide to
- // decrease the shard count
- ms.IterateFree(func(shard uint8, offset int64) {
- ms.free[shard][offset] = struct{}{}
- })
-
- return ms, nil
-}
-
-// Get returns chunk meta information.
-func (s *MetaStore) Get(addr chunk.Address) (m *fcds.Meta, err error) {
- data, err := s.db.Get(chunkKey(addr), nil)
- if err != nil {
- if err == leveldb.ErrNotFound {
- return nil, chunk.ErrChunkNotFound
- }
- return nil, err
- }
- m = new(fcds.Meta)
- if err := m.UnmarshalBinary(data); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// Has returns true if chunk has meta information stored.
-func (s *MetaStore) Has(addr chunk.Address) (yes bool, err error) {
- if _, err = s.db.Get(chunkKey(addr), nil); err != nil {
- if err == leveldb.ErrNotFound {
- return false, nil
- }
- return false, err
- }
- return true, nil
-}
-
-// Set adds a new chunk meta information for a shard.
-// Reclaimed flag denotes that the chunk is at the place of
-// already deleted chunk, not appended to the end of the file.
-// Caller expected to hold the shard lock.
-func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds.Meta) (err error) {
- batch := new(leveldb.Batch)
- if reclaimed {
- batch.Delete(freeKey(shard, m.Offset))
- }
- meta, err := m.MarshalBinary()
- if err != nil {
- return err
- }
- batch.Put(chunkKey(addr), meta)
- return s.db.Write(batch, nil)
-}
-
-// Remove removes chunk meta information from the shard.
-func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
- m, err := s.Get(addr)
- if err != nil {
- return err
- }
- batch := new(leveldb.Batch)
- batch.Put(freeKey(shard, m.Offset), nil)
- batch.Delete(chunkKey(addr))
-
- err = s.db.Write(batch, nil)
- if err != nil {
- return err
- }
-
- s.mtx.Lock()
- s.free[shard][m.Offset] = struct{}{}
- s.mtx.Unlock()
-
- return nil
-}
-
-// FreeOffset returns an offset that can be reclaimed by
-// another chunk. If the returned value is less then 0
-// there are no free offsets on any shards and the chunk must be
-// appended to the shortest shard
-func (s *MetaStore) FreeOffset() (shard uint8, offset int64, cancel func()) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
- for shard, offsets := range s.free {
- for offset, _ = range offsets {
- delete(offsets, offset)
- return shard, offset, func() {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- s.free[shard][offset] = struct{}{}
- }
- }
- }
-
- return 0, -1, func() {}
-}
-
-// Count returns a number of chunks in MetaStore.
-// This operation is slow for larger numbers of chunks.
-func (s *MetaStore) Count() (count int, err error) {
- it := s.db.NewIterator(nil, nil)
- defer it.Release()
-
- for ok := it.First(); ok; ok = it.Next() {
- value := it.Value()
- if len(value) == 0 {
- continue
- }
- key := it.Key()
- if len(key) < 1 {
- continue
- }
- count++
- }
- return count, it.Error()
-}
-
-// Iterate iterates over all chunk meta information.
-func (s *MetaStore) Iterate(fn func(chunk.Address, *fcds.Meta) (stop bool, err error)) (err error) {
- it := s.db.NewIterator(nil, nil)
- defer it.Release()
-
- for ok := it.First(); ok; ok = it.Next() {
- value := it.Value()
- if len(value) == 0 {
- continue
- }
- key := it.Key()
- if len(key) < 1 {
- continue
- }
- m := new(fcds.Meta)
- if err := m.UnmarshalBinary(value); err != nil {
- return err
- }
- b := make([]byte, len(key)-1)
- copy(b, key[1:])
- stop, err := fn(chunk.Address(b), m)
- if err != nil {
- return err
- }
- if stop {
- return nil
- }
- }
- return it.Error()
-}
-
-// IterateFree iterates over all free slot entries in leveldb
-// and calls the defined callback function on each entry found.
-func (s *MetaStore) IterateFree(fn func(shard uint8, offset int64)) {
- i := s.db.NewIterator(nil, nil)
- defer i.Release()
-
- for ok := i.Seek([]byte{freePrefix}); ok; ok = i.Next() {
- key := i.Key()
- if key == nil || key[0] != freePrefix {
- return
- }
- shard := uint8(key[1])
- offset := int64(binary.BigEndian.Uint64(key[2:10]))
- fn(shard, offset)
- }
-}
-
-// Close closes the underlaying LevelDB instance.
-func (s *MetaStore) Close() (err error) {
- return s.db.Close()
-}
-
-const (
- chunkPrefix = 0
- freePrefix = 1
-)
-
-func chunkKey(addr chunk.Address) (key []byte) {
- return append([]byte{chunkPrefix}, addr...)
-}
-
-func freeKey(shard uint8, offset int64) (key []byte) {
- key = make([]byte, 10)
- key[0] = freePrefix
- key[1] = shard
- binary.BigEndian.PutUint64(key[2:10], uint64(offset))
- return key
-}
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
deleted file mode 100644
index c421594832..0000000000
--- a/storage/fcds/leveldb/leveldb_test.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package leveldb_test
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
-
- "github.com/ethersphere/swarm/chunk"
- chunktesting "github.com/ethersphere/swarm/chunk/testing"
- "github.com/ethersphere/swarm/storage/fcds"
- "github.com/ethersphere/swarm/storage/fcds/leveldb"
- "github.com/ethersphere/swarm/storage/fcds/test"
-)
-
-// TestFCDS runs a standard series of tests on the main Store
-// implementation with the LevelDB meta store.
-func TestFCDS(t *testing.T) {
- test.RunAll(t, func(t *testing.T) (fcds.Storer, func()) {
- path, err := ioutil.TempDir("", "swarm-fcds-")
- if err != nil {
- t.Fatal(err)
- }
-
- metaStore, err := leveldb.NewMetaStore(filepath.Join(path, "meta"))
- if err != nil {
- t.Fatal(err)
- }
-
- return test.NewFCDSStore(t, path, metaStore)
- })
-}
-
-// TestFreeSlotCounter tests that the free slot counter gets persisted
-// and properly loaded when an existing store is restarted.
-func TestFreeSlotCounter(t *testing.T) {
- path, err := ioutil.TempDir("", "swarm-fcds-")
- if err != nil {
- t.Fatal(err)
- }
-
- metaPath := filepath.Join(path, "meta")
-
- metaStore, err := leveldb.NewMetaStore(metaPath)
- if err != nil {
- t.Fatal(err)
- }
-
- store, err := fcds.New(path, chunk.DefaultSize, metaStore)
- if err != nil {
- os.RemoveAll(path)
- t.Fatal(err)
- }
-
- defer func() {
- store.Close()
- os.RemoveAll(path)
- }()
-
- // Put some chunks and delete a few to create free slots,
- // then close the store, initialize a new one on the same
- // directory, and check that the free slot count is preserved.
- numChunks := 100
- deleteChunks := 10
- chunks := make([]chunk.Chunk, numChunks)
-
- for i := 0; i < numChunks; i++ {
- chunks[i] = chunktesting.GenerateTestRandomChunk()
- _, err := store.Put(chunks[i])
- if err != nil {
- t.Fatal(err)
- }
- }
-
- for i := 0; i < deleteChunks; i++ {
- err := store.Delete(chunks[i].Address())
- if err != nil {
- t.Fatal(err)
- }
- }
-
- // verify free slots
- cnt := 0
-
- metaStore.IterateFree(func(uint8, int64) {
- cnt++
- })
-
- if cnt != deleteChunks {
- t.Fatalf("expected %d free slots but got %d", deleteChunks, cnt)
- }
-
- store.Close()
- metaStore.Close()
-
- metaStore2, err := leveldb.NewMetaStore(metaPath)
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- metaStore2.Close()
- os.RemoveAll(metaPath)
- }()
-
- cnt = 0
-
- metaStore2.IterateFree(func(_ uint8, _ int64) {
- cnt++
- })
-
- if cnt != deleteChunks {
- t.Fatalf("expected %d free slots but got %d", deleteChunks, cnt)
- }
-}
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
deleted file mode 100644
index ba0b1f9214..0000000000
--- a/storage/fcds/mem/mem.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package mem
-
-import (
- "sync"
-
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/storage/fcds"
-)
-
-var _ fcds.MetaStore = new(MetaStore)
-
-// MetaStore is the simplest in-memory implementation of FCDS MetaStore.
-// It is meant to be used as the reference implementation.
-type MetaStore struct {
- meta map[string]*fcds.Meta
- free map[uint8]map[int64]struct{}
- mtx sync.RWMutex
-}
-
-// NewMetaStore constructs a new MetaStore.
-func NewMetaStore() (s *MetaStore) {
- free := make(map[uint8]map[int64]struct{})
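- // Preallocate a free-slot set for every shard; the < 255 bound
- // keeps the uint8 loop variable from overflowing.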
- for shard := uint8(0); shard < 255; shard++ {
- free[shard] = make(map[int64]struct{})
- }
- return &MetaStore{
- meta: make(map[string]*fcds.Meta),
- free: free,
- }
-}
-
-// Get returns chunk meta information.
-func (s *MetaStore) Get(addr chunk.Address) (m *fcds.Meta, err error) {
- s.mtx.RLock()
- m = s.meta[string(addr)]
- s.mtx.RUnlock()
- if m == nil {
- return nil, chunk.ErrChunkNotFound
- }
- return m, nil
-}
-
-// Has returns true if meta information is stored.
-func (s *MetaStore) Has(addr chunk.Address) (yes bool, err error) {
- s.mtx.RLock()
- _, yes = s.meta[string(addr)]
- s.mtx.RUnlock()
- return yes, nil
-}
-
-// Set adds new chunk meta information for a shard.
-// The reclaimed flag denotes that the chunk is put at the place
-// of an already deleted chunk, not appended to the end of the file.
-func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds.Meta) (err error) {
- s.mtx.Lock()
-
- if reclaimed {
- delete(s.free[shard], m.Offset)
- }
- s.meta[string(addr)] = m
- s.mtx.Unlock()
- return nil
-}
-
-// Remove removes chunk meta information from the shard.
-func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- key := string(addr)
- m := s.meta[key]
- if m == nil {
- return chunk.ErrChunkNotFound
- }
- s.free[shard][m.Offset] = struct{}{}
-
- delete(s.meta, key)
- return nil
-}
-
-// FreeOffset returns an offset that can be reclaimed by
-// another chunk. If the returned offset is less than 0,
-// there are no free offsets on any shard and the chunk
-// must be appended to the shortest shard.
-func (s *MetaStore) FreeOffset() (shard uint8, offset int64, cancel func()) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
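- // Ranging over the free map picks an arbitrary shard and
- // offset, since Go randomizes map iteration order; cancel
- // restores the offset if it ends up unused.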
- for shard, offsets := range s.free {
- for offset = range offsets {
- delete(offsets, offset)
- return shard, offset, func() {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- s.free[shard][offset] = struct{}{}
- }
- }
- }
-
- return 0, -1, func() {}
-}
-
-// Count returns the number of chunks in the MetaStore.
-func (s *MetaStore) Count() (count int, err error) {
- s.mtx.RLock()
- count = len(s.meta)
- s.mtx.RUnlock()
- return count, nil
-}
-
-// Iterate iterates over all chunk meta information.
-func (s *MetaStore) Iterate(fn func(chunk.Address, *fcds.Meta) (stop bool, err error)) (err error) {
- s.mtx.RLock()
- defer s.mtx.RUnlock()
- for a, m := range s.meta {
- stop, err := fn(chunk.Address(a), m)
- if err != nil {
- return err
- }
- if stop {
- return nil
- }
- }
- return nil
-}
-
-// IterateFree iterates over all free slot entries
-// and calls the given callback function on each entry found.
-func (s *MetaStore) IterateFree(fn func(shard uint8, offset int64)) {
- s.mtx.RLock()
- defer s.mtx.RUnlock()
-
- for shard, offsets := range s.free {
- for offset := range offsets {
- fn(shard, offset)
- }
- }
-}
-
-// Close doesn't do anything.
-// It exists to implement the fcds.MetaStore interface.
-func (s *MetaStore) Close() (err error) {
- return nil
-}
diff --git a/storage/fcds/mem/mem_test.go b/storage/fcds/mem/mem_test.go
deleted file mode 100644
index 288ab47157..0000000000
--- a/storage/fcds/mem/mem_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package mem_test
-
-import (
- "io/ioutil"
- "testing"
-
- "github.com/ethersphere/swarm/storage/fcds"
- "github.com/ethersphere/swarm/storage/fcds/mem"
- "github.com/ethersphere/swarm/storage/fcds/test"
-)
-
-// TestFCDS runs a standard series of tests on the main Store
-// implementation with the in-memory meta store.
-func TestFCDS(t *testing.T) {
- test.RunAll(t, func(t *testing.T) (fcds.Storer, func()) {
- path, err := ioutil.TempDir("", "swarm-fcds-")
- if err != nil {
- t.Fatal(err)
- }
-
- return test.NewFCDSStore(t, path, mem.NewMetaStore())
- })
-}
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
deleted file mode 100644
index 4832366ab4..0000000000
--- a/storage/fcds/meta.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package fcds
-
-import (
- "encoding/binary"
- "fmt"
-
- "github.com/ethersphere/swarm/chunk"
-)
-
-// MetaStore defines methods to store and manage
-// chunk meta information in Store FCDS implementation.
-type MetaStore interface {
- Get(addr chunk.Address) (*Meta, error)
- Has(addr chunk.Address) (bool, error)
- Set(addr chunk.Address, shard uint8, reclaimed bool, m *Meta) error
- Remove(addr chunk.Address, shard uint8) error
- Count() (int, error)
- Iterate(func(chunk.Address, *Meta) (stop bool, err error)) error
- IterateFree(func(shard uint8, offset int64))
- FreeOffset() (shard uint8, offset int64, cancel func())
- Close() error
-}
-
-// Meta stores chunk data size, its offset in a shard file, and the shard id.
-type Meta struct {
- Size uint16
- Offset int64
- Shard uint8
-}
-
-// MarshalBinary returns binary encoded value of meta chunk information.
-func (m *Meta) MarshalBinary() (data []byte, err error) {
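- // 12-byte layout: offset (8 bytes) | size (2 bytes) | shard (2 bytes, widened to uint16).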
- data = make([]byte, 12)
- binary.BigEndian.PutUint64(data[:8], uint64(m.Offset))
- binary.BigEndian.PutUint16(data[8:10], m.Size)
- binary.BigEndian.PutUint16(data[10:12], uint16(m.Shard))
- return data, nil
-}
-
-// UnmarshalBinary sets meta chunk information from encoded data.
-func (m *Meta) UnmarshalBinary(data []byte) error {
- m.Offset = int64(binary.BigEndian.Uint64(data[:8]))
- m.Size = binary.BigEndian.Uint16(data[8:10])
- m.Shard = uint8(binary.BigEndian.Uint16(data[10:12]))
- return nil
-}
-
-func (m *Meta) String() (s string) {
- if m == nil {
- return ""
- }
- return fmt.Sprintf("{Size: %v, Offset %v}", m.Size, m.Offset)
-}
-
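-// byVal sorts ShardInfo values in descending order of Val
-// (shards with the largest Val come first).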
-type byVal []ShardInfo
-
-func (a byVal) Len() int { return len(a) }
-func (a byVal) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byVal) Less(i, j int) bool { return a[j].Val < a[i].Val }
-
-// ShardInfo contains data about an arbitrary shard,
-// where Val could represent any scalar quantity
-// pertaining to the shard (number of free slots,
-// size in bytes, number of occupied slots, etc.).
-type ShardInfo struct {
- Shard uint8
- Val int64
-}
diff --git a/storage/fcds/meta_test.go b/storage/fcds/meta_test.go
deleted file mode 100644
index 95dbc71688..0000000000
--- a/storage/fcds/meta_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2020 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package fcds
-
-import (
- "sort"
- "testing"
-)
-
-// TestShardSlotSort is a unit test to ensure correct sorting of a slice of ShardInfo
-func TestShardSlotSort(t *testing.T) {
-
- for _, tc := range []struct {
- freeSlots []int // how many free slots in which shard (slice index denotes shard id, value denotes number of free slots).
- expectOrder []int // the order of shards expected to show up (slice value denotes shard id).
- }{
- {
- freeSlots: []int{0, 0, 0, 0},
- expectOrder: []int{0, 1, 2, 3},
- },
- {
- freeSlots: []int{0, 1, 0, 0},
- expectOrder: []int{1, 0, 2, 3},
- },
- {
- freeSlots: []int{0, 0, 2, 0},
- expectOrder: []int{2, 0, 1, 3},
- },
- {
- freeSlots: []int{0, 0, 0, 1},
- expectOrder: []int{3, 0, 1, 2},
- },
- {
- freeSlots: []int{1, 1, 0, 0},
- expectOrder: []int{0, 1, 2, 3},
- },
- {
- freeSlots: []int{1, 0, 0, 1},
- expectOrder: []int{0, 3, 1, 2},
- },
- {
- freeSlots: []int{1, 2, 0, 0},
- expectOrder: []int{1, 0, 2, 3},
- },
- {
- freeSlots: []int{0, 3, 2, 1},
- expectOrder: []int{1, 2, 3, 0},
- },
- } {
- s := make([]ShardInfo, len(tc.freeSlots))
-
- for i, v := range tc.freeSlots {
- s[i] = ShardInfo{Shard: uint8(i), Val: int64(v)}
- }
- sort.Sort(byVal(s))
-
- for i, v := range s {
- if v.Shard != uint8(tc.expectOrder[i]) {
- t.Fatalf("expected shard index %d to be %d but got %d", i, tc.expectOrder[i], v.Shard)
- }
- }
- }
-}
diff --git a/storage/fcds/mock/mock.go b/storage/fcds/mock/mock.go
deleted file mode 100644
index fc73dcd35a..0000000000
--- a/storage/fcds/mock/mock.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package mock
-
-import (
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/storage/fcds"
- "github.com/ethersphere/swarm/storage/mock"
-)
-
-var _ fcds.Storer = new(Store)
-
-// Store implements the FCDS Storer interface by using a mock
-// store for persistence.
-type Store struct {
- m *mock.NodeStore
-}
-
-// New returns a new store with mock NodeStore
-// for storing Chunk data.
-func New(m *mock.NodeStore) (s *Store) {
- return &Store{
- m: m,
- }
-}
-
-// Get returns a chunk with data.
-func (s *Store) Get(addr chunk.Address) (c chunk.Chunk, err error) {
- data, err := s.m.Get(addr)
- if err != nil {
- if err == mock.ErrNotFound {
- return nil, chunk.ErrChunkNotFound
- }
- return nil, err
- }
- return chunk.NewChunk(addr, data), nil
-}
-
-// Has returns true if chunk is stored.
-func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
- _, err = s.m.Get(addr)
- if err != nil {
- if err == mock.ErrNotFound {
- return false, nil
- }
- return false, err
- }
- return true, nil
-}
-
-// Put stores chunk data.
-func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
- err = s.m.Put(ch.Address(), ch.Data())
- return 0, err
-}
-
-// Delete removes chunk data.
-func (s *Store) Delete(addr chunk.Address) (err error) {
- return s.m.Delete(addr)
-}
-
-// Count returns a number of stored chunks.
-func (s *Store) Count() (count int, err error) {
- var startKey []byte
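- // Page through the mock store keys in batches; keys.Next
- // holds the start key of the next batch, or nil when done.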
- for {
- keys, err := s.m.Keys(startKey, 0)
- if err != nil {
- return 0, err
- }
- count += len(keys.Keys)
- if keys.Next == nil {
- break
- }
- startKey = keys.Next
- }
- return count, nil
-}
-
-// Iterate iterates over stored chunks in no particular order.
-func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error) {
- var startKey []byte
- for {
- keys, err := s.m.Keys(startKey, 0)
- if err != nil {
- return err
- }
- for _, addr := range keys.Keys {
- data, err := s.m.Get(addr)
- if err != nil {
- return err
- }
- stop, err := fn(chunk.NewChunk(addr, data))
- if err != nil {
- return err
- }
- if stop {
- return nil
- }
- }
- if keys.Next == nil {
- break
- }
- startKey = keys.Next
- }
- return nil
-}
-
-// ShardSize reports the mock store as a single shard whose
-// Val is the total number of stored chunks.
-func (s *Store) ShardSize() (slots []fcds.ShardInfo, err error) {
- i, err := s.Count()
- return []fcds.ShardInfo{{Shard: 0, Val: int64(i)}}, err
-}
-
-// Close doesn't do anything.
-// It exists to implement the fcds.Storer interface.
-func (s *Store) Close() error {
- return nil
-}
diff --git a/storage/fcds/mock/mock_test.go b/storage/fcds/mock/mock_test.go
deleted file mode 100644
index 8d8adcf0b2..0000000000
--- a/storage/fcds/mock/mock_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package mock_test
-
-import (
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethersphere/swarm/storage/fcds"
- "github.com/ethersphere/swarm/storage/fcds/mock"
- "github.com/ethersphere/swarm/storage/fcds/test"
- "github.com/ethersphere/swarm/storage/mock/mem"
-)
-
-// TestFCDS runs a standard series of tests on mock Store implementation.
-func TestFCDS(t *testing.T) {
- test.RunStd(t, func(t *testing.T) (fcds.Storer, func()) {
- return mock.New(
- mem.NewGlobalStore().NewNodeStore(
- common.BytesToAddress(make([]byte, 20)),
- ),
- ), func() {}
- })
-}
diff --git a/storage/fcds/offsetcache.go b/storage/fcds/offsetcache.go
deleted file mode 100644
index 66311fdbc1..0000000000
--- a/storage/fcds/offsetcache.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package fcds
-
-import "sync"
-
-// offsetCache is a simple cache of free offsets
-// keyed by shard.
-type offsetCache struct {
- m map[uint8]map[int64]struct{}
- mu sync.RWMutex
-}
-
-// newOffsetCache constructs offsetCache for a fixed number of shards.
-func newOffsetCache(shardCount uint8) (c *offsetCache) {
- m := make(map[uint8]map[int64]struct{})
- for i := uint8(0); i < shardCount; i++ {
- m[i] = make(map[int64]struct{})
- }
- return &offsetCache{
- m: m,
- }
-}
-
-// get returns a free offset in a shard. If the returned
-// value is less than 0, there are no free offsets in that
-// shard.
-func (c *offsetCache) get(shard uint8) (offset int64) {
- c.mu.RLock()
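- // Map iteration order is unspecified, so this returns an
- // arbitrary free offset from the shard, if one exists.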
- for o := range c.m[shard] {
- c.mu.RUnlock()
- return o
- }
- c.mu.RUnlock()
- return -1
-}
-
-// set sets a free offset for a shard file.
-func (c *offsetCache) set(shard uint8, offset int64) {
- c.mu.Lock()
- c.m[shard][offset] = struct{}{}
- c.mu.Unlock()
-}
-
-// remove removes a free offset for a shard file.
-func (c *offsetCache) remove(shard uint8, offset int64) {
- c.mu.Lock()
- delete(c.m[shard], offset)
- c.mu.Unlock()
-}
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
deleted file mode 100644
index 6329338dc1..0000000000
--- a/storage/fcds/test/store.go
+++ /dev/null
@@ -1,467 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package test
-
-import (
- "bytes"
- "encoding/hex"
- "flag"
- "fmt"
- "math/rand"
- "os"
- "sync"
- "testing"
-
- "github.com/ethersphere/swarm/chunk"
- chunktesting "github.com/ethersphere/swarm/chunk/testing"
- "github.com/ethersphere/swarm/storage/fcds"
-)
-
-var (
- chunksFlag = flag.Int("chunks", 100, "Number of chunks to use in tests.")
- concurrencyFlag = flag.Int("concurrency", 8, "Maximum number of parallel operations.")
-)
-
-// Main parses custom CLI flags automatically on test runs.
-func Main(m *testing.M) {
- flag.Parse()
- os.Exit(m.Run())
-}
-
-// RunAll runs all available tests for a Store implementation.
-func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
- RunStd(t, newStoreFunc)
-
- t.Run("no grow", func(t *testing.T) {
- runNoGrow(t, newStoreFunc)
- })
-}
-
-// RunStd runs all standard tests that are agnostic to specific Store implementation details.
-func RunStd(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
- t.Run("empty", func(t *testing.T) {
- RunStore(t, &RunStoreOptions{
- ChunkCount: *chunksFlag,
- NewStoreFunc: newStoreFunc,
- })
- })
-
- t.Run("cleaned", func(t *testing.T) {
- RunStore(t, &RunStoreOptions{
- ChunkCount: *chunksFlag,
- NewStoreFunc: newStoreFunc,
- Cleaned: true,
- })
- })
-
- for _, tc := range []struct {
- name string
- deleteSplit int
- }{
- {
- name: "delete-all",
- deleteSplit: 1,
- },
- {
- name: "delete-half",
- deleteSplit: 2,
- },
- {
- name: "delete-fifth",
- deleteSplit: 5,
- },
- {
- name: "delete-tenth",
- deleteSplit: 10,
- },
- {
- name: "delete-percent",
- deleteSplit: 100,
- },
- {
- name: "delete-permill",
- deleteSplit: 1000,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- RunStore(t, &RunStoreOptions{
- ChunkCount: *chunksFlag,
- DeleteSplit: tc.deleteSplit,
- NewStoreFunc: newStoreFunc,
- })
- })
- }
-
- t.Run("iterator", func(t *testing.T) {
- RunIterator(t, newStoreFunc)
- })
-
-}
-
-func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
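- // Temporarily shrink the global shard count for this test and
- // restore the previous value when it finishes.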
- defer func(s uint8) {
- fcds.ShardCount = s
- }(fcds.ShardCount)
-
- fcds.ShardCount = 4
-
- db, clean := newStoreFunc(t)
-
- defer clean()
-
- chunkCount := 1000
- chunks := getChunks(chunkCount)
-
- chunkShards := make(map[string]uint8)
-
- for _, ch := range chunks {
- if shard, err := db.Put(ch); err != nil {
- t.Fatal(err)
- } else {
- chunkShards[ch.Address().String()] = shard
- }
- }
-
- // delete 4,3,2,1 chunks from shards 0,1,2,3
- del := 4
- deleteChunks := []string{}
-
- for i := uint8(0); i < fcds.ShardCount; i++ {
- d := del
- for addr, storedOn := range chunkShards {
- if storedOn == i {
-
- // delete the chunk to make a free slot on the shard
- c := unmarshalAddressString(t, addr)
-
- if err := db.Delete(*c); err != nil {
- t.Fatal(err)
- }
- deleteChunks = append(deleteChunks, addr)
- if len(deleteChunks) == d {
- break
- }
- }
- }
- for _, v := range deleteChunks {
- delete(chunkShards, v)
- }
- deleteChunks = []string{}
-
- del--
- }
-
- ins := 4 + 3 + 2 + 1
-
- freeSlots := []int{4, 3, 2, 1}
-
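- // Putting exactly as many chunks as there are free slots must
- // reuse the freed offsets instead of growing any shard.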
- for i := 0; i < ins; i++ {
- cc := chunktesting.GenerateTestRandomChunk()
- if shard, err := db.Put(cc); err != nil {
- t.Fatal(err)
- } else {
- freeSlots[shard]--
- if freeSlots[shard] < 0 {
- t.Fatalf("shard %d slots went negative", shard)
- }
- chunkShards[cc.Address().String()] = shard
- }
- }
-
- slots, err := db.ShardSize()
- if err != nil {
- t.Fatal(err)
- }
-
- sum := 0
- for _, v := range slots {
- sum += int(v.Val)
- }
-
- if sum != 4096*1000 {
- t.Fatal(sum)
- }
-
- // Now, before each new chunk is put, check all shard sizes
- // and locate the smallest shard; each Put is then expected
- // to land on that shard.
-
- insNew := 10000
- for i := 0; i < insNew; i++ {
- slots, err := db.ShardSize()
- if err != nil {
- t.Fatal(err)
- }
-
- minSize, minSlot := slots[0].Val, uint8(0)
- for i, v := range slots {
- // take the _last_ minimum
- if v.Val <= minSize {
- minSize = v.Val
- minSlot = uint8(i)
- }
- }
- cc := chunktesting.GenerateTestRandomChunk()
- if shard, err := db.Put(cc); err != nil {
- t.Fatal(err)
- } else {
- if shard != minSlot {
- t.Fatalf("next slot expected to be %d but got %d. chunk number %d", minSlot, shard, i)
- }
- }
- }
-}
-
-// RunStoreOptions define parameters for Store test function.
-type RunStoreOptions struct {
- NewStoreFunc func(t *testing.T) (fcds.Storer, func())
- ChunkCount int
- DeleteSplit int
- Cleaned bool
-}
-
-// RunStore tests a single Store implementation for its general functionality.
-// Subtests are deliberately separated into sections so that timings
-// can be printed for each of them on test runs.
-func RunStore(t *testing.T, o *RunStoreOptions) {
- db, clean := o.NewStoreFunc(t)
- defer clean()
-
- chunks := getChunks(o.ChunkCount)
-
- if o.Cleaned {
- t.Run("clean", func(t *testing.T) {
- sem := make(chan struct{}, *concurrencyFlag)
- var wg sync.WaitGroup
-
- wg.Add(o.ChunkCount)
- for _, ch := range chunks {
- sem <- struct{}{}
-
- go func(ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
-
- if _, err := db.Put(ch); err != nil {
- panic(err)
- }
- }(ch)
- }
- wg.Wait()
-
- wg = sync.WaitGroup{}
-
- wg.Add(o.ChunkCount)
- for _, ch := range chunks {
- sem <- struct{}{}
-
- go func(ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
-
- if err := db.Delete(ch.Address()); err != nil {
- panic(err)
- }
- }(ch)
- }
- wg.Wait()
- })
- }
-
- rand.Shuffle(o.ChunkCount, func(i, j int) {
- chunks[i], chunks[j] = chunks[j], chunks[i]
- })
-
- var deletedChunks sync.Map
-
- t.Run("write", func(t *testing.T) {
- sem := make(chan struct{}, *concurrencyFlag)
- var wg sync.WaitGroup
- var wantCount int
- var wantCountMu sync.Mutex
- wg.Add(o.ChunkCount)
- for i, ch := range chunks {
- sem <- struct{}{}
-
- go func(i int, ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
-
- if _, err := db.Put(ch); err != nil {
- panic(err)
- }
- if o.DeleteSplit > 0 && i%o.DeleteSplit == 0 {
- if err := db.Delete(ch.Address()); err != nil {
- panic(err)
- }
- deletedChunks.Store(string(ch.Address()), nil)
- } else {
- wantCountMu.Lock()
- wantCount++
- wantCountMu.Unlock()
- }
- }(i, ch)
- }
- wg.Wait()
- })
-
- rand.Shuffle(o.ChunkCount, func(i, j int) {
- chunks[i], chunks[j] = chunks[j], chunks[i]
- })
-
- t.Run("read", func(t *testing.T) {
- sem := make(chan struct{}, *concurrencyFlag)
- var wg sync.WaitGroup
-
- wg.Add(o.ChunkCount)
- for i, ch := range chunks {
- sem <- struct{}{}
-
- go func(i int, ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
-
- got, err := db.Get(ch.Address())
-
- if _, ok := deletedChunks.Load(string(ch.Address())); ok {
- if err != chunk.ErrChunkNotFound {
- panic(fmt.Errorf("got error %v, want %v", err, chunk.ErrChunkNotFound))
- }
- } else {
- if err != nil {
- panic(fmt.Errorf("chunk %v %s: %v", i, ch.Address().Hex(), err))
- }
- if !bytes.Equal(got.Address(), ch.Address()) {
- panic(fmt.Errorf("got chunk %v address %x, want %x", i, got.Address(), ch.Address()))
- }
- if !bytes.Equal(got.Data(), ch.Data()) {
- panic(fmt.Errorf("got chunk %v data %x, want %x", i, got.Data(), ch.Data()))
- }
- }
- }(i, ch)
- }
- wg.Wait()
- })
-}
-
-// RunIterator validates behaviour of Iterate and Count methods on a Store.
-func RunIterator(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
- chunkCount := 1000
-
- db, clean := newStoreFunc(t)
- defer clean()
-
- chunks := getChunks(chunkCount)
-
- for _, ch := range chunks {
- if _, err := db.Put(ch); err != nil {
- t.Fatal(err)
- }
- }
-
- gotCount, err := db.Count()
- if err != nil {
- t.Fatal(err)
- }
- if gotCount != chunkCount {
- t.Fatalf("got %v count, want %v", gotCount, chunkCount)
- }
-
- var iteratedCount int
- if err := db.Iterate(func(ch chunk.Chunk) (stop bool, err error) {
- for _, c := range chunks {
- if bytes.Equal(c.Address(), ch.Address()) {
- if !bytes.Equal(c.Data(), ch.Data()) {
- t.Fatalf("invalid data in iterator for key %s", c.Address())
- }
- iteratedCount++
- return false, nil
- }
- }
- return false, nil
- }); err != nil {
- t.Fatal(err)
- }
- if iteratedCount != chunkCount {
- t.Fatalf("iterated on %v chunks, want %v", iteratedCount, chunkCount)
- }
-}
-
-// NewFCDSStore is a test helper function that constructs a new Store
-// for testing purposes, into which a specific MetaStore can be injected.
-func NewFCDSStore(t *testing.T, path string, metaStore fcds.MetaStore) (s *fcds.Store, clean func()) {
- t.Helper()
-
- s, err := fcds.New(path, chunk.DefaultSize, metaStore)
- if err != nil {
- os.RemoveAll(path)
- t.Fatal(err)
- }
- return s, func() {
- s.Close()
- os.RemoveAll(path)
- }
-}
-
-// chunkCache reduces the work done by the getChunks function
-// when generating random chunks, by storing them for future reuse.
-var chunkCache []chunk.Chunk
-
-// getChunks returns a number of chunks with random data for testing purposes.
-// When called multiple times, it returns the same chunks from the cache.
-func getChunks(count int) []chunk.Chunk {
- l := len(chunkCache)
- if l == 0 {
- chunkCache = make([]chunk.Chunk, count)
- for i := 0; i < count; i++ {
- chunkCache[i] = chunktesting.GenerateTestRandomChunk()
- }
- return chunkCache
- }
- if l < count {
- for i := 0; i < count-l; i++ {
- chunkCache = append(chunkCache, chunktesting.GenerateTestRandomChunk())
- }
- return chunkCache
- }
- return chunkCache[:count]
-}
-
-func unmarshalAddressString(t *testing.T, s string) *chunk.Address {
- t.Helper()
- v, err := hex.DecodeString(s)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(v) != 32 {
- t.Fatalf("address length mistmatch. got %d bytes but expected %d", len(v), 32)
- }
- a := new(chunk.Address)
- *a = make([]byte, 32)
- copy(*a, v)
-
- return a
-}
diff --git a/storage/localstore/localstore.go b/storage/localstore/localstore.go
index 7be6d923dd..4b38b02421 100644
--- a/storage/localstore/localstore.go
+++ b/storage/localstore/localstore.go
@@ -20,7 +20,6 @@ import (
"encoding/binary"
"errors"
"os"
- "path/filepath"
"runtime/pprof"
"sync"
"time"
@@ -30,8 +29,6 @@ import (
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/shed"
"github.com/ethersphere/swarm/storage/fcds"
- fcdsleveldb "github.com/ethersphere/swarm/storage/fcds/leveldb"
- fcdsmock "github.com/ethersphere/swarm/storage/fcds/mock"
"github.com/ethersphere/swarm/storage/mock"
)
@@ -221,23 +218,59 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
return nil, err
}
- if o.MockStore == nil {
- metaStore, err := fcdsleveldb.NewMetaStore(filepath.Join(path, "meta"))
- if err != nil {
- return nil, err
+ // Functions for retrieval data index.
+ var (
+ encodeValueFunc func(fields shed.Item) (value []byte, err error)
+ decodeValueFunc func(keyItem shed.Item, value []byte) (e shed.Item, err error)
+ )
+ if o.MockStore != nil {
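+ // With a mock store, the index value holds only the bin id and
+ // store timestamp (16 bytes); the chunk data itself is kept in
+ // the mock store.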
+ encodeValueFunc = func(fields shed.Item) (value []byte, err error) {
+ b := make([]byte, 16)
+ binary.BigEndian.PutUint64(b[:8], fields.BinID)
+ binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
+ err = o.MockStore.Put(fields.Address, fields.Data)
+ if err != nil {
+ return nil, err
+ }
+ return b, nil
}
- db.data, err = fcds.New(
- filepath.Join(path, "data"),
- chunk.DefaultSize+8, // chunk data has additional 8 bytes prepended
- metaStore,
- )
- if err != nil {
- return nil, err
+ decodeValueFunc = func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
+ e.BinID = binary.BigEndian.Uint64(value[:8])
+ e.Data, err = o.MockStore.Get(keyItem.Address)
+ return e, err
}
} else {
- // Mock store is provided, use mock FCDS.
- db.data = fcdsmock.New(o.MockStore)
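+ // Without a mock store, the chunk data is stored inline in the
+ // index value, appended to the 16-byte binID|storeTimestamp header.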
+ encodeValueFunc = func(fields shed.Item) (value []byte, err error) {
+ b := make([]byte, 16)
+ binary.BigEndian.PutUint64(b[:8], fields.BinID)
+ binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
+ value = append(b, fields.Data...)
+ return value, nil
+ }
+ decodeValueFunc = func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
+ e.BinID = binary.BigEndian.Uint64(value[:8])
+ e.Data = value[16:]
+ return e, nil
+ }
+ }
+ // Index storing actual chunk address, data, bin id and store timestamp.
+ db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|Data", shed.IndexFuncs{
+ EncodeKey: func(fields shed.Item) (key []byte, err error) {
+ return fields.Address, nil
+ },
+ DecodeKey: func(key []byte) (e shed.Item, err error) {
+ e.Address = key
+ return e, nil
+ },
+ EncodeValue: encodeValueFunc,
+ DecodeValue: decodeValueFunc,
+ })
+ if err != nil {
+ return nil, err
}
+
// Index storing bin id, store and access timestamp for a particular address.
// It is needed in order to update gc index keys for iteration order.
db.metaIndex, err = db.shed.NewIndex("Address->BinID|StoreTimestamp|AccessTimestamp", shed.IndexFuncs{
diff --git a/storage/localstore/mode_put.go b/storage/localstore/mode_put.go
index 9a6b34ca73..739955b6dd 100644
--- a/storage/localstore/mode_put.go
+++ b/storage/localstore/mode_put.go
@@ -143,7 +143,7 @@ func (db *DB) put(mode chunk.ModePut, chs ...chunk.Chunk) (exist []bool, err err
for i, ch := range chs {
if !exist[i] {
- if _, err := db.data.Put(ch); err != nil {
+ if err := db.data.Put(ch); err != nil {
return nil, err
}
}