Skip to content

Commit

Permalink
Defined centralized HandleRecovery method with unit tests
Browse files Browse the repository at this point in the history
  • Loading branch information
charliecon committed Jan 31, 2025
1 parent de3261b commit f83cd5d
Show file tree
Hide file tree
Showing 6 changed files with 175 additions and 93 deletions.
67 changes: 10 additions & 57 deletions genesyscloud/provider/sdk_client_pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"log"
"sync"
resourceExporter "terraform-provider-genesyscloud/genesyscloud/resource_exporter"
"terraform-provider-genesyscloud/genesyscloud/util/constants"
prl "terraform-provider-genesyscloud/genesyscloud/util/panic_recovery_logger"

"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
Expand Down Expand Up @@ -100,30 +101,26 @@ type GetAllConfigFunc func(context.Context, *platformclientv2.Configuration) (re
type GetCustomConfigFunc func(context.Context, *platformclientv2.Configuration) (resourceExporter.ResourceIDMetaMap, *resourceExporter.DependencyResource, diag.Diagnostics)

// CreateWithPooledClient wraps a resource create function with panic recovery
// (see wrapWithRecover) and injects a pooled SDK client connection into its
// meta argument (see runWithPooledClient).
func CreateWithPooledClient(method resContextFunc) schema.CreateContextFunc {
	methodWrappedWithRecover := wrapWithRecover(method, constants.Create)
	return schema.CreateContextFunc(runWithPooledClient(methodWrappedWithRecover))
}

// ReadWithPooledClient wraps a resource read function with panic recovery
// (see wrapWithRecover) and injects a pooled SDK client connection into its
// meta argument (see runWithPooledClient).
func ReadWithPooledClient(method resContextFunc) schema.ReadContextFunc {
	methodWrappedWithRecover := wrapWithRecover(method, constants.Read)
	return schema.ReadContextFunc(runWithPooledClient(methodWrappedWithRecover))
}

// UpdateWithPooledClient wraps a resource update function with panic recovery
// (see wrapWithRecover) and injects a pooled SDK client connection into its
// meta argument (see runWithPooledClient).
func UpdateWithPooledClient(method resContextFunc) schema.UpdateContextFunc {
	methodWrappedWithRecover := wrapWithRecover(method, constants.Update)
	return schema.UpdateContextFunc(runWithPooledClient(methodWrappedWithRecover))
}

// DeleteWithPooledClient wraps a resource delete function with panic recovery
// (see wrapWithRecover) and injects a pooled SDK client connection into its
// meta argument (see runWithPooledClient).
func DeleteWithPooledClient(method resContextFunc) schema.DeleteContextFunc {
	methodWrappedWithRecover := wrapWithRecover(method, constants.Delete)
	return schema.DeleteContextFunc(runWithPooledClient(methodWrappedWithRecover))
}

// wrapWithRecover will wrap the resource context function with a recover if the panic recovery logger is enabled
// In the case of a Create - Fail, provide the stack trace info, and warn of dangling resource.
// For all other operations - Write the stack trace info to a log file and complete the TF operation.
// If the file writing is unsuccessful, we will fail to avoid the loss of data.
func wrapWithRecover(method resContextFunc, operation Operation) resContextFunc {
func wrapWithRecover(method resContextFunc, operation constants.CRUDOperation) resContextFunc {
return func(ctx context.Context, r *schema.ResourceData, meta any) (diagErr diag.Diagnostics) {
panicRecoverLogger := prl.GetPanicRecoveryLoggerInstance()
if !panicRecoverLogger.LoggerEnabled {
Expand All @@ -132,37 +129,17 @@ func wrapWithRecover(method resContextFunc, operation Operation) resContextFunc

defer func() {
if r := recover(); r != nil {
diagErr = handleRecover(r, operation)
err := panicRecoverLogger.HandleRecovery(r, operation)
if err != nil {
diagErr = diag.FromErr(err)
}
}
}()

return method(ctx, r, meta)
}
}

// handleRecover converts a recovered panic value into diagnostics.
// For Create operations the diagnostics carry the recovered value and warn
// that a dangling resource may have been left in the org. In every case the
// stack trace is written to the panic recovery log file; if that write fails,
// the write error is folded into the returned diagnostics so the loss of
// stack-trace data is not silent.
func handleRecover(r any, operation Operation) (diagErr diag.Diagnostics) {
	panicRecoverLogger := prl.GetPanicRecoveryLoggerInstance()

	if operation == Create {
		// Fixed typo: "becasue" -> "because".
		diagErr = diag.Errorf("creation failed because of stack trace: %s. There may be dangling resource left in your org", r)
	}

	log.Println("Writing stack traces to file")
	err := panicRecoverLogger.WriteStackTracesToFile(r)
	if err == nil {
		return
	}

	// WriteStackTracesToFile failed - return error info in diagErr object
	if diagErr != nil {
		diagErr = diag.Errorf("%v.\n%v", diagErr, err)
	} else {
		diagErr = diag.FromErr(err)
	}

	return diagErr
}

// Inject a pooled SDK client connection into a resource method's meta argument
// and automatically return it to the Pool on completion
func runWithPooledClient(method resContextFunc) resContextFunc {
Expand Down Expand Up @@ -216,27 +193,3 @@ func GetAllWithPooledClientCustom(method GetCustomConfigFunc) resourceExporter.G
return method(ctx, clientConfig)
}
}

// Operation identifies which CRUD action a wrapped resource context function
// is performing.
type Operation int

const (
	Create Operation = iota
	Read
	Update
	Delete
)

// operationNames maps each defined Operation to its display name.
var operationNames = [...]string{
	Create: "Create",
	Read:   "Read",
	Update: "Update",
	Delete: "Delete",
}

// String returns the human-readable name of the operation, or "Unknown" for
// any value outside the defined set.
func (o Operation) String() string {
	if o < 0 || int(o) >= len(operationNames) {
		return "Unknown"
	}
	return operationNames[o]
}
26 changes: 1 addition & 25 deletions genesyscloud/tfexporter/genesyscloud_resource_exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import (
featureToggles "terraform-provider-genesyscloud/genesyscloud/util/feature_toggles"
"terraform-provider-genesyscloud/genesyscloud/util/files"
"terraform-provider-genesyscloud/genesyscloud/util/lists"
prl "terraform-provider-genesyscloud/genesyscloud/util/panic_recovery_logger"
"terraform-provider-genesyscloud/genesyscloud/util/stringmap"
"time"

Expand Down Expand Up @@ -1164,8 +1163,7 @@ func getResourceState(ctx context.Context, resource *schema.Resource, resID stri
}
}

refreshWithoutUpgrade := wrapRefreshWithoutUpgradeWithRecover(resource.RefreshWithoutUpgrade)
state, err := refreshWithoutUpgrade(ctx, instanceState, meta)
state, err := resource.RefreshWithoutUpgrade(ctx, instanceState, meta)
if err != nil {
if strings.Contains(fmt.Sprintf("%v", err), "API Error: 404") ||
strings.Contains(fmt.Sprintf("%v", err), "API Error: 410") {
Expand All @@ -1183,28 +1181,6 @@ func getResourceState(ctx context.Context, resource *schema.Resource, resID stri
return state, nil
}

// refreshWithoutUpgradeFunc matches the signature of schema.Resource.RefreshWithoutUpgrade.
type refreshWithoutUpgradeFunc func(context.Context, *terraform.InstanceState, any) (*terraform.InstanceState, diag.Diagnostics)

// wrapRefreshWithoutUpgradeWithRecover decorates RefreshWithoutUpgrade so that,
// when the panic recovery logger is enabled, a panic raised while refreshing
// resource state is recovered and its stack trace is appended to the log file.
// When the logger is disabled, any panic propagates unchanged.
func wrapRefreshWithoutUpgradeWithRecover(method refreshWithoutUpgradeFunc) refreshWithoutUpgradeFunc {
	return func(ctx context.Context, state *terraform.InstanceState, meta any) (*terraform.InstanceState, diag.Diagnostics) {
		defer func() {
			recoveryLogger := prl.GetPanicRecoveryLoggerInstance()
			if !recoveryLogger.LoggerEnabled {
				// Logger disabled: do not intercept the panic.
				return
			}
			r := recover()
			if r == nil {
				return
			}
			log.Printf("wrapRefreshWithoutUpgradeWithRecover: Recovered from panic while refreshing resource state: %v", r)
			// A failed write is logged but not fatal here - the export continues.
			if writeErr := recoveryLogger.WriteStackTracesToFile(r); writeErr != nil {
				writeErr = fmt.Errorf("failed to write stack traces to file during export: %w", writeErr)
				log.Println(writeErr)
			}
		}()
		return method(ctx, state, meta)
	}
}

func correctCustomFunctions(config string) string {
config = correctInterpolatedFileShaFunctions(config)
return correctDependsOn(config, true)
Expand Down
24 changes: 24 additions & 0 deletions genesyscloud/util/constants/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,27 @@ func ConsistencyChecks() int {

return defaultChecks
}

// CRUDOperation identifies one of the four Terraform CRUD actions.
type CRUDOperation int

const (
	Create CRUDOperation = iota
	Read
	Update
	Delete
)

// String returns the operation's display name, falling back to "Unknown" for
// any value outside the defined range.
func (o CRUDOperation) String() string {
	names := []string{"Create", "Read", "Update", "Delete"}
	if o < 0 || int(o) >= len(names) {
		return "Unknown"
	}
	return names[o]
}
52 changes: 47 additions & 5 deletions genesyscloud/util/panic_recovery_logger/panic_recovery_logger.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,16 @@ import (
"log"
"os"
"runtime/debug"
tfExporterState "terraform-provider-genesyscloud/genesyscloud/tfexporter_state"
"terraform-provider-genesyscloud/genesyscloud/util/constants"
)

// PanicRecoveryLogger controls whether recovered panics have their stack
// traces written to a log file, and where that file lives.
type PanicRecoveryLogger struct {
	// LoggerEnabled reports whether panic recovery logging is turned on.
	LoggerEnabled bool
	// FilePath is the destination file for recovered stack traces.
	FilePath string

	// Injectable function attributes so unit tests can stub out file
	// writing and exporter-state lookups.
	writeStackTracesToFileAttr func(*PanicRecoveryLogger, any) error
	isExporterActiveAttr func() bool
}

var panicRecoverLogger *PanicRecoveryLogger
Expand All @@ -21,6 +26,9 @@ func InitPanicRecoveryLoggerInstance(enabled bool, filepath string) {
panicRecoverLogger = &PanicRecoveryLogger{
LoggerEnabled: enabled,
FilePath: filepath,

writeStackTracesToFileAttr: writeStackTracesToFileFn,
isExporterActiveAttr: isExporterActiveFn,
}
}

Expand All @@ -33,12 +41,34 @@ func GetPanicRecoveryLoggerInstance() *PanicRecoveryLogger {
return panicRecoverLogger
}

func (p *PanicRecoveryLogger) WriteStackTracesToFile(r any) error {
tracesToWrite := fmt.Sprintf("\nStacktrace recovered: %v. %s", r, string(debug.Stack()))
if err := appendToFile(p.FilePath, []byte(tracesToWrite)); err != nil {
return fmt.Errorf("WriteStackTracesToFile: failed to write to %s: %w", p.FilePath, err)
// HandleRecovery — In the case of a Create: return an error object with stack trace info and warn of potential dangling resources.
// In the case of any export, return an error to avoid exporting an invalid configuration.
// Next and in any case, write the stack trace info to the log file. If the file writing is unsuccessful, we will fail to avoid the loss of data.
func (p *PanicRecoveryLogger) HandleRecovery(r any, operation constants.CRUDOperation) (err error) {
if operation == constants.Create {
err = fmt.Errorf("creation failed becasue of stack trace: %s. There may be dangling resource left in your org", r)
} else if operation == constants.Read && p.isExporterActiveAttr() {
err = fmt.Errorf("failed to export resource because of stack trace: %s", r)
}
return nil

log.Printf("Writing stack traces to file %s", p.FilePath)
writeErr := p.WriteStackTracesToFile(r)
if writeErr == nil {
return
}

// WriteStackTracesToFile failed - append error info
if err != nil {
err = fmt.Errorf("%w.\n%w", err, writeErr)
} else {
err = writeErr
}

return err
}

// WriteStackTracesToFile appends the recovered panic value and its stack
// trace to the configured log file. It delegates to the injectable
// writeStackTracesToFileAttr so unit tests can stub out the file write.
func (p *PanicRecoveryLogger) WriteStackTracesToFile(r any) error {
	return p.writeStackTracesToFileAttr(p, r)
}

// appendToFile appends data to a file. If the file does not exist, it will be created.
Expand Down Expand Up @@ -93,3 +123,15 @@ func deleteFileIfExists(filepath string) (err error) {

return err
}

// writeStackTracesToFileFn is the default writeStackTracesToFileAttr
// implementation: it appends the recovered value and the current goroutine's
// stack trace to the logger's file.
func writeStackTracesToFileFn(p *PanicRecoveryLogger, r any) error {
	payload := fmt.Sprintf("\nStacktrace recovered: %v. %s", r, string(debug.Stack()))
	err := appendToFile(p.FilePath, []byte(payload))
	if err != nil {
		return fmt.Errorf("WriteStackTracesToFile: failed to write to %s: %w", p.FilePath, err)
	}
	return nil
}

// isExporterActiveFn is the default isExporterActiveAttr implementation,
// delegating to the tfexporter_state package.
func isExporterActiveFn() bool {
	return tfExporterState.IsExporterActive()
}
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@ package panic_recovery_logger

import (
"fmt"
"strings"
"terraform-provider-genesyscloud/genesyscloud/util/constants"
"testing"
)

Expand All @@ -10,8 +12,6 @@ func TestUnitGetPanicRecoveryLoggerInstance(t *testing.T) {
panicRecoverLoggerCopy := panicRecoverLogger
defer func() {
panicRecoverLogger = panicRecoverLoggerCopy

fmt.Println()
}()

// LoggerEnabled should return false when instance has not been initialised
Expand All @@ -35,3 +35,91 @@ func TestUnitGetPanicRecoveryLoggerInstance(t *testing.T) {
t.Error("Expected LoggerEnabled to be false, but got true")
}
}

// TestUnitHandleRecovery exercises PanicRecoveryLogger.HandleRecovery across
// all combinations of operation (Create vs Read), exporter state, and file
// write success, stubbing the injectable function attributes on the package
// singleton.
func TestUnitHandleRecovery(t *testing.T) {
	// restore after all
	panicRecoverLoggerCopy := panicRecoverLogger
	defer func() {
		panicRecoverLogger = panicRecoverLoggerCopy
	}()

	const mockWriteErrorMessage = "mock error"

	InitPanicRecoveryLoggerInstance(true, "example/path.log")

	// 1. returns error if operation is create
	// (stub a successful file write and an inactive exporter)
	panicRecoverLogger.writeStackTracesToFileAttr = func(logger *PanicRecoveryLogger, a any) error {
		return nil
	}
	panicRecoverLogger.isExporterActiveAttr = func() bool {
		return false
	}

	err := panicRecoverLogger.HandleRecovery(nil, constants.Create)
	if err == nil {
		t.Error("Expected error, but got nil")
	}

	// 2. returns error if exporter is active
	panicRecoverLogger.isExporterActiveAttr = func() bool {
		return true
	}

	err = panicRecoverLogger.HandleRecovery(nil, constants.Read)
	if err == nil {
		t.Error("Expected error, but got nil")
	}

	// 3. returns nil if operation is not create, exporter not active, and file write successful
	panicRecoverLogger.isExporterActiveAttr = func() bool {
		return false
	}
	err = panicRecoverLogger.HandleRecovery(nil, constants.Read)
	if err != nil {
		t.Errorf("Expected nil error, got '%s'", err.Error())
	}

	// 4. returns error if operation is not create, exporter not active, but file write unsuccessful
	panicRecoverLogger.writeStackTracesToFileAttr = func(logger *PanicRecoveryLogger, a any) error {
		return fmt.Errorf(mockWriteErrorMessage)
	}

	err = panicRecoverLogger.HandleRecovery(nil, constants.Read)
	if err == nil {
		t.Errorf("Expected error '%s', got nil", mockWriteErrorMessage)
	} else if err.Error() != mockWriteErrorMessage {
		t.Errorf("Expected error '%s', got '%s'", mockWriteErrorMessage, err.Error())
	}

	// 5. returns error if #1 is true and file write unsuccessful
	err = panicRecoverLogger.HandleRecovery(nil, constants.Create)
	if err == nil {
		t.Errorf("Expected error, got nil")
	}

	// verify that details of create issue and write are both in the error message
	if err != nil {
		const snippetOfCreateErrorMessage = "creation failed"
		if !strings.Contains(err.Error(), mockWriteErrorMessage) || !strings.Contains(err.Error(), snippetOfCreateErrorMessage) {
			t.Errorf("Expected error '%s' to contain '%s' and '%s'", err.Error(), mockWriteErrorMessage, snippetOfCreateErrorMessage)
		}
	}

	// 6. returns error if #2 is true and file write unsuccessful
	panicRecoverLogger.isExporterActiveAttr = func() bool {
		return true
	}

	err = panicRecoverLogger.HandleRecovery(nil, constants.Read)
	if err == nil {
		t.Errorf("Expected error, got nil")
	}

	// verify that details of export issue and write are both in the error message
	// (comment fixed: this case checks the export error, not the create error)
	if err != nil {
		const snippetOfExportErrorMessage = "failed to export resource"
		if !strings.Contains(err.Error(), mockWriteErrorMessage) || !strings.Contains(err.Error(), snippetOfExportErrorMessage) {
			t.Errorf("Expected error '%s' to contain '%s' and '%s'", err.Error(), mockWriteErrorMessage, snippetOfExportErrorMessage)
		}
	}
}
Loading

0 comments on commit f83cd5d

Please sign in to comment.