Commit c57d6db

style: project not formatted with either gofmt or golangci-lint (#11)
1 parent 52c77b3 commit c57d6db
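The change is mechanical: gofmt rewrites whitespace so that struct fields, const blocks, and composite literals line up, and golangci-lint flags files that diverge from that output. As a rough standalone illustration (a minimal sketch, not code from this repo), the standard go/format package performs the same check programmatically:

package main

import (
	"bytes"
	"fmt"
	"go/format"
)

func main() {
	// Unaligned struct fields, as in the pre-commit code below.
	src := []byte("package demo\n\ntype Config struct {\n\tBaseURL string\n\tAPIKey string\n}\n")

	// format.Source returns src rewritten exactly as gofmt would print it.
	formatted, err := format.Source(src)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}

	if bytes.Equal(src, formatted) {
		fmt.Println("already gofmt-formatted")
	} else {
		fmt.Printf("needs formatting; gofmt output:\n%s", formatted)
	}
}

Running gofmt -l . or golangci-lint run at the repo root reports the same kind of deviations this commit fixes.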

File tree

11 files changed (+153 −155 lines)


Diff for: sample-app/main.go

+9 −10

@@ -15,7 +15,7 @@ func workflow_example() {

 	traceloop, err := tlp.NewClient(ctx, tlp.Config{
 		BaseURL: "api-staging.traceloop.com",
-		APIKey: os.Getenv("TRACELOOP_API_KEY"),
+		APIKey:  os.Getenv("TRACELOOP_API_KEY"),
 	})
 	defer func() { traceloop.Shutdown(ctx) }()

@@ -24,7 +24,7 @@ func workflow_example() {
 		return
 	}

-	request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt", map[string]interface{}{ "date": time.Now().Format("01/02") })
+	request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt", map[string]interface{}{"date": time.Now().Format("01/02")})
 	if err != nil {
 		fmt.Printf("GetOpenAIChatCompletionRequest error: %v\n", err)
 		return
@@ -40,11 +40,11 @@ func workflow_example() {
 	}

 	llmSpan, err := traceloop.LogPrompt(
-	    ctx,
+		ctx,
 		tlp.Prompt{
-			Vendor: "openai",
-			Mode: "chat",
-			Model: request.Model,
+			Vendor:   "openai",
+			Mode:     "chat",
+			Model:    request.Model,
 			Messages: promptMsgs,
 		},
 		tlp.WorkflowAttributes{
@@ -79,11 +79,10 @@ func workflow_example() {
 		Model:    resp.Model,
 		Messages: completionMsgs,
 	}, tlp.Usage{
-		TotalTokens: resp.Usage.TotalTokens,
-		CompletionTokens: resp.Usage.CompletionTokens,
-		PromptTokens: resp.Usage.PromptTokens,
+		TotalTokens:      resp.Usage.TotalTokens,
+		CompletionTokens: resp.Usage.CompletionTokens,
+		PromptTokens:     resp.Usage.PromptTokens,
 	})

-
 	fmt.Println(resp.Choices[0].Message.Content)
 }

Diff for: sample-app/workflow_example.go

+12 −12

@@ -15,7 +15,7 @@ func main() {

 	traceloop, err := tlp.NewClient(ctx, tlp.Config{
 		BaseURL: "api-staging.traceloop.com",
-		APIKey: os.Getenv("TRACELOOP_API_KEY"),
+		APIKey:  os.Getenv("TRACELOOP_API_KEY"),
 	})
 	defer func() { traceloop.Shutdown(ctx) }()

@@ -32,7 +32,7 @@ func main() {
 	factGenTask := wf.NewTask("current_date_fact_generation")
 	defer factGenTask.End()

-	request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt", map[string]interface{}{ "date": time.Now().Format("01/02") })
+	request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt", map[string]interface{}{"date": time.Now().Format("01/02")})
 	if err != nil {
 		fmt.Printf("GetOpenAIChatCompletionRequest error: %v\n", err)
 		return
@@ -49,9 +49,9 @@ func main() {

 	llmSpan, err := factGenTask.LogPrompt(
 		tlp.Prompt{
-			Vendor: "openai",
-			Mode: "chat",
-			Model: request.Model,
+			Vendor:   "openai",
+			Mode:     "chat",
+			Model:    request.Model,
 			Messages: promptMsgs,
 		},
 	)
@@ -83,9 +83,9 @@ func main() {
 		Model:    resp.Model,
 		Messages: completionMsgs,
 	}, tlp.Usage{
-		TotalTokens: resp.Usage.TotalTokens,
-		CompletionTokens: resp.Usage.CompletionTokens,
-		PromptTokens: resp.Usage.PromptTokens,
+		TotalTokens:      resp.Usage.TotalTokens,
+		CompletionTokens: resp.Usage.CompletionTokens,
+		PromptTokens:     resp.Usage.PromptTokens,
 	})

 	someOtherTask := wf.NewTask("some_other_task")
@@ -94,7 +94,7 @@ func main() {
 	otherPrompt, _ := someOtherTask.LogPrompt(tlp.Prompt{
 		Vendor:   "openai",
 		Mode:     "chat",
-		Model: request.Model,
+		Model:    request.Model,
 		Messages: []tlp.Message{
 			{
 				Index: 0,
@@ -108,9 +108,9 @@ func main() {
 		Model:    resp.Model,
 		Messages: completionMsgs,
 	}, tlp.Usage{
-		TotalTokens: resp.Usage.TotalTokens,
-		CompletionTokens: resp.Usage.CompletionTokens,
-		PromptTokens: resp.Usage.PromptTokens,
+		TotalTokens:      resp.Usage.TotalTokens,
+		CompletionTokens: resp.Usage.CompletionTokens,
+		PromptTokens:     resp.Usage.PromptTokens,
 	})

 	fmt.Println(resp.Choices[0].Message.Content)

Diff for: semconv-ai/attributes.go

+28 −28

@@ -4,33 +4,33 @@ import "go.opentelemetry.io/otel/attribute"

 const (
 	// LLM
-	LLMVendor = attribute.Key("llm.vendor")
-	LLMRequestType = attribute.Key("llm.request.type")
-	LLMRequestModel = attribute.Key("llm.request.model")
-	LLMResponseModel = attribute.Key("llm.response.model")
-	LLMRequestMaxTokens = attribute.Key("llm.request.max_tokens")
-	LLMUsageTotalTokens = attribute.Key("llm.usage.total_tokens")
-	LLMUsageCompletionTokens = attribute.Key("llm.usage.completion_tokens")
-	LLMUsagePromptTokens = attribute.Key("llm.usage.prompt_tokens")
-	LLMTemperature = attribute.Key("llm.temperature")
-	LLMUser = attribute.Key("llm.user")
-	LLMHeaders = attribute.Key("llm.headers")
-	LLMTopP = attribute.Key("llm.top_p")
-	LLMTopK = attribute.Key("llm.top_k")
-	LLMFrequencyPenalty = attribute.Key("llm.frequency_penalty")
-	LLMPresencePenalty = attribute.Key("llm.presence_penalty")
-	LLMPrompts = attribute.Key("llm.prompts")
-	LLMCompletions = attribute.Key("llm.completions")
-	LLMChatStopSequence = attribute.Key("llm.chat.stop_sequences")
-	LLMRequestFunctions = attribute.Key("llm.request.functions")
+	LLMVendor                = attribute.Key("llm.vendor")
+	LLMRequestType           = attribute.Key("llm.request.type")
+	LLMRequestModel          = attribute.Key("llm.request.model")
+	LLMResponseModel         = attribute.Key("llm.response.model")
+	LLMRequestMaxTokens      = attribute.Key("llm.request.max_tokens")
+	LLMUsageTotalTokens      = attribute.Key("llm.usage.total_tokens")
+	LLMUsageCompletionTokens = attribute.Key("llm.usage.completion_tokens")
+	LLMUsagePromptTokens     = attribute.Key("llm.usage.prompt_tokens")
+	LLMTemperature           = attribute.Key("llm.temperature")
+	LLMUser                  = attribute.Key("llm.user")
+	LLMHeaders               = attribute.Key("llm.headers")
+	LLMTopP                  = attribute.Key("llm.top_p")
+	LLMTopK                  = attribute.Key("llm.top_k")
+	LLMFrequencyPenalty      = attribute.Key("llm.frequency_penalty")
+	LLMPresencePenalty       = attribute.Key("llm.presence_penalty")
+	LLMPrompts               = attribute.Key("llm.prompts")
+	LLMCompletions           = attribute.Key("llm.completions")
+	LLMChatStopSequence      = attribute.Key("llm.chat.stop_sequences")
+	LLMRequestFunctions      = attribute.Key("llm.request.functions")

-	// Vector DB
-	VectorDBVendor = attribute.Key("vector_db.vendor")
-	VectorDBQueryTopK = attribute.Key("vector_db.query.top_k")
+	// Vector DB
+	VectorDBVendor    = attribute.Key("vector_db.vendor")
+	VectorDBQueryTopK = attribute.Key("vector_db.query.top_k")

-	// LLM Workflows
-	TraceloopSpanKind = attribute.Key("traceloop.span.kind")
-	TraceloopWorkflowName = attribute.Key("traceloop.workflow.name")
-	TraceloopEntityName = attribute.Key("traceloop.entity.name")
-	TraceloopAssociationProperties = attribute.Key("traceloop.association.properties")
-)
+	// LLM Workflows
+	TraceloopSpanKind              = attribute.Key("traceloop.span.kind")
+	TraceloopWorkflowName          = attribute.Key("traceloop.workflow.name")
+	TraceloopEntityName            = attribute.Key("traceloop.entity.name")
+	TraceloopAssociationProperties = attribute.Key("traceloop.association.properties")
+)
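Each of these is an OpenTelemetry attribute.Key, so it provides typed builders (String, Int, and so on) that yield KeyValue pairs for a span. A minimal sketch of attaching them, assuming the semconvai alias used in traceloop-sdk/sdk.go below and a go-openllmetry module path (both assumptions; values are illustrative):

package example

import (
	semconvai "github.com/traceloop/go-openllmetry/semconv-ai" // import path assumed
	apitrace "go.opentelemetry.io/otel/trace"
)

// annotateLLMSpan is a hypothetical helper; the span can come from any
// OpenTelemetry tracer, as in traceloop-sdk/sdk.go below.
func annotateLLMSpan(span apitrace.Span) {
	span.SetAttributes(
		semconvai.LLMVendor.String("openai"),
		semconvai.LLMRequestModel.String("gpt-3.5-turbo"),
		semconvai.LLMUsageTotalTokens.Int(42),
	)
}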

Diff for: traceloop-sdk/config.go

+7 −7

@@ -3,14 +3,14 @@ package traceloop
 import "time"

 type BackoffConfig struct {
-	MaxRetries uint64
+	MaxRetries uint64
 }

 type Config struct {
-	BaseURL string
-	APIKey string
-	TracerName string
-	ServiceName string
-	PollingInterval time.Duration
-	BackoffConfig BackoffConfig
+	BaseURL         string
+	APIKey          string
+	TracerName      string
+	ServiceName     string
+	PollingInterval time.Duration
+	BackoffConfig   BackoffConfig
 }
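For reference, sdk.go below falls back to defaults when fields are left empty: BaseURL becomes TRACELOOP_BASE_URL or api.traceloop.com, and TracerName becomes traceloop.tracer. A sketch of a fully populated Config using the field set from this diff (values illustrative, module path assumed, tlp aliased as in the sample apps):

package sample

import (
	"os"
	"time"

	tlp "github.com/traceloop/go-openllmetry/traceloop-sdk" // import path assumed
)

// exampleConfig shows every field; the values are illustrative only.
func exampleConfig() tlp.Config {
	return tlp.Config{
		BaseURL:         "api-staging.traceloop.com",
		APIKey:          os.Getenv("TRACELOOP_API_KEY"),
		TracerName:      "traceloop.tracer",
		ServiceName:     "sample-app",
		PollingInterval: 5 * time.Minute,
		BackoffConfig:   tlp.BackoffConfig{MaxRetries: 3},
	}
}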

Diff for: traceloop-sdk/model/prompt_registry.go

+31 −31

@@ -3,48 +3,48 @@ package model
 import "time"

 type ModelConfig struct {
-	Mode string `json:"mode"`
-	Model string `json:"model"`
-	Temperature float32 `json:"temperature"`
-	TopP float32 `json:"top_p"`
-	Stop []string `json:"stop"`
-	FrequencyPenalty float32 `json:"frequency_penalty"`
-	PresencePenalty float32 `json:"presence_penalty"`
+	Mode             string   `json:"mode"`
+	Model            string   `json:"model"`
+	Temperature      float32  `json:"temperature"`
+	TopP             float32  `json:"top_p"`
+	Stop             []string `json:"stop"`
+	FrequencyPenalty float32  `json:"frequency_penalty"`
+	PresencePenalty  float32  `json:"presence_penalty"`
 }

 type Message struct {
-	Index int `json:"index"`
-	Role string `json:"role"`
-	Template string `json:"template"`
-	Variables []string `json:"variables"`
+	Index     int      `json:"index"`
+	Role      string   `json:"role"`
+	Template  string   `json:"template"`
+	Variables []string `json:"variables"`
 }

 type PromptVersion struct {
-	Id string `json:"id"`
-	Hash string `json:"hash"`
-	Version uint `json:"version"`
-	Name string `json:"name"`
-	CreatedAt time.Time `json:"created_at"`
-	Provider string `json:"provider"`
-	TemplatingEngine string `json:"templating_engine"`
-	Messages []Message `json:"messages"`
-	LlmConfig ModelConfig `json:"llm_config"`
+	Id               string      `json:"id"`
+	Hash             string      `json:"hash"`
+	Version          uint        `json:"version"`
+	Name             string      `json:"name"`
+	CreatedAt        time.Time   `json:"created_at"`
+	Provider         string      `json:"provider"`
+	TemplatingEngine string      `json:"templating_engine"`
+	Messages         []Message   `json:"messages"`
+	LlmConfig        ModelConfig `json:"llm_config"`
 }

 type Target struct {
-	Id string `json:"id"`
-	PromptId string `json:"prompt_id"`
-	Version string `json:"version"`
-	UpdatedAt time.Time `json:"updated_at"`
+	Id        string    `json:"id"`
+	PromptId  string    `json:"prompt_id"`
+	Version   string    `json:"version"`
+	UpdatedAt time.Time `json:"updated_at"`
 }

 type Prompt struct {
-	Id string `json:"id"`
-	Versions []PromptVersion `json:"versions"`
-	Target Target `json:"target"`
-	Key string `json:"key"`
-	CreatedAt time.Time `json:"created_at"`
-	UpdatedAt time.Time `json:"updated_at"`
+	Id        string          `json:"id"`
+	Versions  []PromptVersion `json:"versions"`
+	Target    Target          `json:"target"`
+	Key       string          `json:"key"`
+	CreatedAt time.Time       `json:"created_at"`
+	UpdatedAt time.Time       `json:"updated_at"`
 }

-type PromptRegistry map[string]*Prompt
+type PromptRegistry map[string]*Prompt
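The json tags on these structs define the wire format of the prompt-registry payload, so decoding is plain encoding/json. A small sketch with a hypothetical payload fragment (module path assumed):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/traceloop/go-openllmetry/traceloop-sdk/model" // import path assumed
)

func main() {
	// Hypothetical fragment of a registry response; omitted fields decode
	// to their zero values.
	raw := []byte(`{"id":"p1","key":"example-prompt","target":{"id":"t1","prompt_id":"p1","version":"v1"}}`)

	var prompt model.Prompt
	if err := json.Unmarshal(raw, &prompt); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(prompt.Key, prompt.Target.Version) // example-prompt v1
}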

Diff for: traceloop-sdk/prompt_registry.go

+10 −10

@@ -11,8 +11,8 @@ import (
 )

 type PromptsResponse struct {
-	Prompts []model.Prompt `json:"prompts"`
-	Environment string `json:"environment"`
+	Prompts     []model.Prompt `json:"prompts"`
+	Environment string         `json:"environment"`
 }

 func (instance *Traceloop) populatePromptRegistry() {
@@ -38,7 +38,7 @@ func (instance *Traceloop) populatePromptRegistry() {

 func (instance *Traceloop) pollPrompts() {
 	prompts := make(chan []model.Prompt)
-	errs    := make(chan error)
+	errs := make(chan error)

 	instance.populatePromptRegistry()

@@ -103,12 +103,12 @@ func (instance *Traceloop) GetOpenAIChatCompletionRequest(key string, variables
 	}

 	return &openai.ChatCompletionRequest{
-		Model: promptVersion.LlmConfig.Model,
-		Temperature: promptVersion.LlmConfig.Temperature,
-		TopP: promptVersion.LlmConfig.TopP,
-		Stop: promptVersion.LlmConfig.Stop,
+		Model:            promptVersion.LlmConfig.Model,
+		Temperature:      promptVersion.LlmConfig.Temperature,
+		TopP:             promptVersion.LlmConfig.TopP,
+		Stop:             promptVersion.LlmConfig.Stop,
 		FrequencyPenalty: promptVersion.LlmConfig.FrequencyPenalty,
-		PresencePenalty: promptVersion.LlmConfig.PresencePenalty,
-		Messages: messages,
+		PresencePenalty:  promptVersion.LlmConfig.PresencePenalty,
+		Messages:         messages,
 	}, nil
-}
+}
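The sample apps above consume this method as follows; the returned *openai.ChatCompletionRequest carries the model, sampling parameters, and rendered messages from the registered prompt version, ready to hand to the go-openai client (this fragment reproduces the samples' usage):

request, err := traceloop.GetOpenAIChatCompletionRequest(
	"example-prompt",
	map[string]interface{}{"date": time.Now().Format("01/02")},
)
if err != nil {
	fmt.Printf("GetOpenAIChatCompletionRequest error: %v\n", err)
	return
}
fmt.Println(request.Model) // populated from the prompt version's LlmConfig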

Diff for: traceloop-sdk/sdk.go

+12 −12

@@ -19,10 +19,10 @@ import (
 const PromptsPath = "/v1/traceloop/prompts"

 type Traceloop struct {
-	config Config
-	promptRegistry model.PromptRegistry
-	tracerProvider *trace.TracerProvider
-	http.Client
+	config         Config
+	promptRegistry model.PromptRegistry
+	tracerProvider *trace.TracerProvider
+	http.Client
 }

 type LLMSpan struct {
@@ -47,7 +47,7 @@ func NewClient(ctx context.Context, config Config) (*Traceloop, error) {
 func (instance *Traceloop) initialize(ctx context.Context) error {
 	if instance.config.BaseURL == "" {
 		baseUrl := os.Getenv("TRACELOOP_BASE_URL")
-	    if baseUrl == "" {
+		if baseUrl == "" {
 			instance.config.BaseURL = "api.traceloop.com"
 		} else {
 			instance.config.BaseURL = baseUrl
@@ -78,8 +78,8 @@ func setMessagesAttribute(span apitrace.Span, prefix string, messages []Message)
 	for _, message := range messages {
 		attrsPrefix := fmt.Sprintf("%s.%d", prefix, message.Index)
 		span.SetAttributes(
-			attribute.String(attrsPrefix + ".content", message.Content),
-			attribute.String(attrsPrefix + ".role", message.Role),
+			attribute.String(attrsPrefix+".content", message.Content),
+			attribute.String(attrsPrefix+".role", message.Role),
 		)
 	}
 }
@@ -89,7 +89,7 @@ func (instance *Traceloop) tracerName() string {
 		return instance.config.TracerName
 	} else {
 		return "traceloop.tracer"
-    }
+	}
 }

 func (instance *Traceloop) getTracer() apitrace.Tracer {
@@ -99,7 +99,7 @@ func (instance *Traceloop) getTracer() apitrace.Tracer {
 func (instance *Traceloop) LogPrompt(ctx context.Context, prompt Prompt, workflowAttrs WorkflowAttributes) (LLMSpan, error) {
 	spanName := fmt.Sprintf("%s.%s", prompt.Vendor, prompt.Mode)
 	_, span := instance.getTracer().Start(ctx, spanName)
-	
+
 	span.SetAttributes(
 		semconvai.LLMVendor.String(prompt.Vendor),
 		semconvai.LLMRequestModel.String(prompt.Model),
@@ -109,7 +109,7 @@ func (instance *Traceloop) LogPrompt(ctx context.Context, prompt Prompt, workflo

 	setMessagesAttribute(span, "llm.prompts", prompt.Messages)

-    return LLMSpan{
+	return LLMSpan{
 		span: span,
 	}, nil
 }
@@ -130,7 +130,7 @@ func (llmSpan *LLMSpan) LogCompletion(ctx context.Context, completion Completion
 }

 func (instance *Traceloop) Shutdown(ctx context.Context) {
-	if instance.tracerProvider != nil{
-	    instance.tracerProvider.Shutdown(ctx)
+	if instance.tracerProvider != nil {
+		instance.tracerProvider.Shutdown(ctx)
 	}
 }
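Taken together, the client lifecycle exercised by the sample apps is: NewClient, LogPrompt (returning an LLMSpan), LogCompletion on that span, then Shutdown. A condensed sketch using only the signatures visible in this commit (module path assumed; WorkflowAttributes fields omitted for brevity; the samples pass workflow metadata there):

package main

import (
	"context"
	"fmt"
	"os"

	tlp "github.com/traceloop/go-openllmetry/traceloop-sdk" // import path assumed
)

func main() {
	ctx := context.Background()

	traceloop, err := tlp.NewClient(ctx, tlp.Config{
		APIKey: os.Getenv("TRACELOOP_API_KEY"), // BaseURL defaults to api.traceloop.com
	})
	if err != nil {
		fmt.Printf("NewClient error: %v\n", err)
		return
	}
	defer func() { traceloop.Shutdown(ctx) }()

	llmSpan, err := traceloop.LogPrompt(ctx,
		tlp.Prompt{Vendor: "openai", Mode: "chat", Model: "gpt-3.5-turbo"},
		tlp.WorkflowAttributes{},
	)
	if err != nil {
		fmt.Printf("LogPrompt error: %v\n", err)
		return
	}

	// After the LLM call returns, record the completion and token usage.
	llmSpan.LogCompletion(ctx, tlp.Completion{Model: "gpt-3.5-turbo"}, tlp.Usage{TotalTokens: 42})
}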
