Skip to content

Commit

Permalink
Fix WebDAV spelling, remove some inconsistencies (#143)
Browse files Browse the repository at this point in the history
* Simplify logging, fix WebDAV spelling

* Define options types per package

* Move util functions that are not used cross package

* Add per file license headers

* Rename config type
  • Loading branch information
m90 committed Aug 18, 2022
1 parent 279844c commit b60c747
Show file tree
Hide file tree
Showing 10 changed files with 307 additions and 234 deletions.
26 changes: 7 additions & 19 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -1,19 +1,7 @@
# Ignore everything
*

# Exceptions:
# Note: Wildcards for directories like * or ** don't work (yet) with exclamation marks!

!cmd/backup/*.go
!cmd/backup/*.tmpl

!internal/storage/*.go
!internal/storage/local/*.go
!internal/storage/s3/*.go
!internal/storage/ssh/*.go
!internal/storage/webdav/*.go
!internal/utilities/*.go

!Dockerfile
!entrypoint.sh
!go.*
test
.github
.circleci
docs
.editorconfig
LICENSE
README.md
5 changes: 2 additions & 3 deletions cmd/backup/lock.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ import (
"time"

"github.com/gofrs/flock"
"github.com/offen/docker-volume-backup/internal/utilities"
)

// lock opens a lockfile at the given location, keeping it locked until the
Expand All @@ -32,7 +31,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
for {
acquired, err := fileLock.TryLock()
if err != nil {
return utilities.Noop, fmt.Errorf("lock: error trying lock: %w", err)
return noop, fmt.Errorf("lock: error trying lock: %w", err)
}
if acquired {
if s.encounteredLock {
Expand All @@ -53,7 +52,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
case <-retry.C:
continue
case <-deadline.C:
return utilities.Noop, errors.New("lock: timed out waiting for lockfile to become available")
return noop, errors.New("lock: timed out waiting for lockfile to become available")
}
}
}
108 changes: 65 additions & 43 deletions cmd/backup/script.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ type script struct {
// reading from env vars or other configuration sources is expected to happen
// in this method.
func newScript() (*script, error) {
stdOut, logBuffer := utilities.Buffer(os.Stdout)
stdOut, logBuffer := buffer(os.Stdout)
s := &script{
c: &Config{},
logger: &logrus.Logger{
Expand All @@ -72,7 +72,7 @@ func newScript() (*script, error) {
LogOutput: logBuffer,
Storages: map[string]StorageStats{
"S3": {},
"WebDav": {},
"WebDAV": {},
"SSH": {},
"Local": {},
},
Expand Down Expand Up @@ -107,55 +107,77 @@ func newScript() (*script, error) {
s.cli = cli
}

logFunc := func(logType storage.LogType, context string, msg string, params ...interface{}) error {
var allParams []interface{}
allParams = append(allParams, context)
allParams = append(allParams, params...)

logFunc := func(logType storage.LogLevel, context string, msg string, params ...interface{}) {
switch logType {
case storage.INFO:
s.logger.Infof("[%s] "+msg, allParams...)
return nil
case storage.WARNING:
s.logger.Warnf("[%s] "+msg, allParams...)
return nil
case storage.ERROR:
return fmt.Errorf("[%s] "+msg, allParams...)
case storage.LogLevelWarning:
s.logger.Warnf("["+context+"] "+msg, params...)
case storage.LogLevelError:
s.logger.Errorf("["+context+"] "+msg, params...)
case storage.LogLevelInfo:
default:
s.logger.Warnf("[%s] "+msg, allParams...)
return nil
s.logger.Infof("["+context+"] "+msg, params...)
}
}

if s.c.AwsS3BucketName != "" {
if s3Backend, err := s3.NewStorageBackend(s.c.AwsEndpoint, s.c.AwsAccessKeyID, s.c.AwsSecretAccessKey, s.c.AwsIamRoleEndpoint,
s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc); err != nil {
s3Config := s3.Config{
Endpoint: s.c.AwsEndpoint,
AccessKeyID: s.c.AwsAccessKeyID,
SecretAccessKey: s.c.AwsSecretAccessKey,
IamRoleEndpoint: s.c.AwsIamRoleEndpoint,
EndpointProto: s.c.AwsEndpointProto,
EndpointInsecure: s.c.AwsEndpointInsecure,
RemotePath: s.c.AwsS3Path,
BucketName: s.c.AwsS3BucketName,
StorageClass: s.c.AwsStorageClass,
}
if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, s3Backend)
}
}

if s.c.WebdavUrl != "" {
if webdavBackend, err := webdav.NewStorageBackend(s.c.WebdavUrl, s.c.WebdavPath, s.c.WebdavUsername, s.c.WebdavPassword,
s.c.WebdavUrlInsecure, logFunc); err != nil {
webDavConfig := webdav.Config{
URL: s.c.WebdavUrl,
URLInsecure: s.c.WebdavUrlInsecure,
Username: s.c.WebdavUsername,
Password: s.c.WebdavPassword,
RemotePath: s.c.WebdavPath,
}
if webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, webdavBackend)
}
}

if s.c.SSHHostName != "" {
if sshBackend, err := ssh.NewStorageBackend(s.c.SSHHostName, s.c.SSHPort, s.c.SSHUser, s.c.SSHPassword, s.c.SSHIdentityFile,
s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc); err != nil {
sshConfig := ssh.Config{
HostName: s.c.SSHHostName,
Port: s.c.SSHPort,
User: s.c.SSHUser,
Password: s.c.SSHPassword,
IdentityFile: s.c.SSHIdentityFile,
IdentityPassphrase: s.c.SSHIdentityPassphrase,
RemotePath: s.c.SSHRemotePath,
}
if sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, sshBackend)
}
}

localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc)
s.storages = append(s.storages, localBackend)
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
localConfig := local.Config{
ArchivePath: s.c.BackupArchive,
LatestSymlink: s.c.BackupLatestSymlink,
}
localBackend := local.NewStorageBackend(localConfig, logFunc)
s.storages = append(s.storages, localBackend)
}

if s.c.EmailNotificationRecipient != "" {
emailURL := fmt.Sprintf(
Expand Down Expand Up @@ -228,14 +250,14 @@ func newScript() (*script, error) {
// restart everything that has been stopped.
func (s *script) stopContainers() (func() error, error) {
if s.cli == nil {
return utilities.Noop, nil
return noop, nil
}

allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
})
if err != nil {
return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
}

containerLabel := fmt.Sprintf(
Expand All @@ -251,11 +273,11 @@ func (s *script) stopContainers() (func() error, error) {
})

if err != nil {
return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
}

if len(containersToStop) == 0 {
return utilities.Noop, nil
return noop, nil
}

s.logger.Infof(
Expand Down Expand Up @@ -357,7 +379,7 @@ func (s *script) createArchive() error {
backupSources = filepath.Join("/tmp", s.c.BackupSources)
// copy before compressing to guard against a situation where the backup folder's contents are still growing.
s.registerHook(hookLevelPlumbing, func(error) error {
if err := utilities.Remove(backupSources); err != nil {
if err := remove(backupSources); err != nil {
return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
}
s.logger.Infof("Removed snapshot `%s`.", backupSources)
Expand All @@ -367,23 +389,23 @@ func (s *script) createArchive() error {
PreserveTimes: true,
PreserveOwner: true,
}); err != nil {
return fmt.Errorf("takeBackup: error creating snapshot: %w", err)
return fmt.Errorf("createArchive: error creating snapshot: %w", err)
}
s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
}

tarFile := s.file
s.registerHook(hookLevelPlumbing, func(error) error {
if err := utilities.Remove(tarFile); err != nil {
return fmt.Errorf("takeBackup: error removing tar file: %w", err)
if err := remove(tarFile); err != nil {
return fmt.Errorf("createArchive: error removing tar file: %w", err)
}
s.logger.Infof("Removed tar file `%s`.", tarFile)
return nil
})

backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
if err != nil {
return fmt.Errorf("takeBackup: error getting absolute path: %w", err)
return fmt.Errorf("createArchive: error getting absolute path: %w", err)
}

var filesEligibleForBackup []string
Expand All @@ -398,11 +420,11 @@ func (s *script) createArchive() error {
filesEligibleForBackup = append(filesEligibleForBackup, path)
return nil
}); err != nil {
return fmt.Errorf("compress: error walking filesystem tree: %w", err)
return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
}

if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
}

s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
Expand All @@ -419,16 +441,16 @@ func (s *script) encryptArchive() error {

gpgFile := fmt.Sprintf("%s.gpg", s.file)
s.registerHook(hookLevelPlumbing, func(error) error {
if err := utilities.Remove(gpgFile); err != nil {
return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
if err := remove(gpgFile); err != nil {
return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
}
s.logger.Infof("Removed GPG file `%s`.", gpgFile)
return nil
})

outFile, err := os.Create(gpgFile)
if err != nil {
return fmt.Errorf("encryptBackup: error opening out file: %w", err)
return fmt.Errorf("encryptArchive: error opening out file: %w", err)
}
defer outFile.Close()

Expand All @@ -438,17 +460,17 @@ func (s *script) encryptArchive() error {
FileName: name,
}, nil)
if err != nil {
return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err)
}
defer dst.Close()

src, err := os.Open(s.file)
if err != nil {
return fmt.Errorf("encryptBackup: error opening backup file `%s`: %w", s.file, err)
return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err)
}

if _, err := io.Copy(dst, src); err != nil {
return fmt.Errorf("encryptBackup: error writing ciphertext to file: %w", err)
return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err)
}

s.file = gpgFile
Expand All @@ -461,7 +483,7 @@ func (s *script) encryptArchive() error {
func (s *script) copyArchive() error {
_, name := path.Split(s.file)
if stat, err := os.Stat(s.file); err != nil {
return fmt.Errorf("copyBackup: unable to stat backup file: %w", err)
return fmt.Errorf("copyArchive: unable to stat backup file: %w", err)
} else {
size := stat.Size()
s.stats.BackupFile = BackupFileStats{
Expand Down
52 changes: 52 additions & 0 deletions cmd/backup/util.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
// Copyright 2022 - Offen Authors <[email protected]>
// SPDX-License-Identifier: MPL-2.0

package main

import (
"bytes"
"fmt"
"io"
"os"
)

// noop is a no-op cleanup function, returned by helpers that must hand
// back a release callback even when there is nothing to release.
var noop = func() error {
	return nil
}

// remove removes the given file or directory from disk.
func remove(location string) error {
fi, err := os.Lstat(location)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
}
if fi.IsDir() {
err = os.RemoveAll(location)
} else {
err = os.Remove(location)
}
if err != nil {
return fmt.Errorf("remove: error removing `%s`: %w", location, err)
}
return nil
}

// buffer takes an io.Writer and returns a wrapped version of the
// writer that writes to both the original target as well as the returned buffer
func buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
return buffering, &buffering.buf
}

type bufferingWriter struct {
buf bytes.Buffer
writer io.Writer
}

func (b *bufferingWriter) Write(p []byte) (n int, err error) {
if n, err := b.buf.Write(p); err != nil {
return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
}
return b.writer.Write(p)
}
Loading

0 comments on commit b60c747

Please sign in to comment.