package modelconfig

import (
	"context"
	"fmt"

	"github.com/sourcegraph/log"

	"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend"
	"github.com/sourcegraph/sourcegraph/internal/conf"
	modelconfigSDK "github.com/sourcegraph/sourcegraph/internal/modelconfig/types"
	"github.com/sourcegraph/sourcegraph/lib/errors"
	"github.com/sourcegraph/sourcegraph/schema"
)

type modelconfigResolver struct {
	logger log.Logger
}

func newResolver(logger log.Logger) graphqlbackend.ModelconfigResolver {
	return &modelconfigResolver{logger: logger}
}

// Compile-time check that *codyLLMConfigurationResolver satisfies the
// graphqlbackend.CodyLLMConfigurationResolver interface.
var _ graphqlbackend.CodyLLMConfigurationResolver = (*codyLLMConfigurationResolver)(nil)

func (r *modelconfigResolver) CodyLLMConfiguration(ctx context.Context) (graphqlbackend.CodyLLMConfigurationResolver, error) {
	siteConfig := conf.Get().SiteConfig()

	modelCfgSvc := Get()
	modelconfig, err := modelCfgSvc.Get()
	if err != nil {
		r.logger.Warn("error obtaining model configuration data", log.Error(err))
		return nil, errors.New("error fetching model configuration data")
	}

	// Create a new instance of the codyLLMConfigurationResolver per-request, so that
	// we always pick up the latest site config, rather than using a stale version from
	// when the Sourcegraph instance was initialized.
	resolver := &codyLLMConfigurationResolver{
		modelconfig:               modelconfig,
		doNotUseCompletionsConfig: siteConfig.Completions,
	}

	return resolver, nil
}

type codyLLMConfigurationResolver struct {
	// modelconfig is the LLM model configuration data for this Sourcegraph instance.
	// This is the source of truth and accurately reflects the site configuration.
	modelconfig *modelconfigSDK.ModelConfiguration

	// doNotUseCompletionsConfig is the older-style configuration data for Cody
	// Enterprise, and is only passed along for backwards compatibility.
	//
	// DO NOT USE IT.
	//
	// The information it returns only reflects the "completions" site config data,
	// which may not even be provided. Only read from this value if you really
	// know what you are doing.
	doNotUseCompletionsConfig *schema.Completions
}

// toLegacyModelIdentifier converts the "new style" model identity into the old style
// expected by Cody clients.
//
// This is dangerous, as it will only work if this Sourcegraph backend AND Cody Gateway
// can correctly map the legacy identifier back onto the correct ModelRef.
//
// Once Cody clients are capable of natively using the ModelRef format, we should remove
// this function and have all of our GraphQL APIs refer to models only by ModelRef.
func toLegacyModelIdentifier(mref modelconfigSDK.ModelRef) string {
	return fmt.Sprintf("%s/%s", mref.ProviderID(), mref.ModelID())
}
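
// Illustrative example (hypothetical values, not taken from any real site
// configuration): assuming the ModelRef string form "provider::api-version::model",
// a ref like "anthropic::2024-02-01::claude-3-sonnet" would be reported to clients
// as the legacy identifier "anthropic/claude-3-sonnet".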

func (r *codyLLMConfigurationResolver) ChatModel() (string, error) {
	defaultChatModelRef := r.modelconfig.DefaultModels.Chat
	model := r.modelconfig.GetModelByMRef(defaultChatModelRef)
	if model == nil {
		return "", errors.Errorf("default chat model %q not found", defaultChatModelRef)
	}
	return toLegacyModelIdentifier(model.ModelRef), nil
}

func (r *codyLLMConfigurationResolver) ChatModelMaxTokens() (*int32, error) {
	defaultChatModelRef := r.modelconfig.DefaultModels.Chat
	model := r.modelconfig.GetModelByMRef(defaultChatModelRef)
	if model == nil {
		return nil, errors.Errorf("default chat model %q not found", defaultChatModelRef)
	}
	maxTokens := int32(model.ContextWindow.MaxInputTokens)
	return &maxTokens, nil
}

func (r *codyLLMConfigurationResolver) SmartContextWindow() string {
	if r.doNotUseCompletionsConfig != nil {
		if r.doNotUseCompletionsConfig.SmartContextWindow == "disabled" {
			return "disabled"
		}
		return "enabled"
	}

	// If the admin has explicitly provided the newer "modelConfiguration" site config
	// data, disable SmartContextWindow. We may want to re-enable this capability, but
	// in some other way. (e.g. passing this flag on a per-model basis, or just having
	// a more nuanced view of a model's specific context window.)
	return "disabled"
}

func (r *codyLLMConfigurationResolver) DisableClientConfigAPI() bool {
	if r.doNotUseCompletionsConfig != nil {
		if val := r.doNotUseCompletionsConfig.DisableClientConfigAPI; val != nil {
			return *val
		}
	}
	return false
}

func (r *codyLLMConfigurationResolver) FastChatModel() (string, error) {
	defaultFastChatModelRef := r.modelconfig.DefaultModels.FastChat
	model := r.modelconfig.GetModelByMRef(defaultFastChatModelRef)
	if model == nil {
		return "", errors.Errorf("default fast chat model %q not found", defaultFastChatModelRef)
	}
	return toLegacyModelIdentifier(model.ModelRef), nil
}

func (r *codyLLMConfigurationResolver) FastChatModelMaxTokens() (*int32, error) {
	defaultFastChatModelRef := r.modelconfig.DefaultModels.FastChat
	model := r.modelconfig.GetModelByMRef(defaultFastChatModelRef)
	if model == nil {
		return nil, errors.Errorf("default fast chat model %q not found", defaultFastChatModelRef)
	}
	maxTokens := int32(model.ContextWindow.MaxInputTokens)
	return &maxTokens, nil
}

// Here Be Dragons (written July 30 2024)
//
// Cody clients currently rely on CodyLLMConfiguration. They shouldn't: they should
// use the information provided by the ModelsService and the /.api/client-config
// endpoint, both of which supersede this information. However, they use it today.
//
// Clients currently rely on the CodyLLMConfiguration.provider field ONLY to
// determine which **autocomplete provider implementation** to use, i.e. to
// control context limits, prompting behavior, and other aspects of autocomplete.
// This is not a great way to handle this (provider- and model-specific behavior
// being fundamentally tied together), but again, it's how it works today.
//
// The 'autocomplete provider name string' is determined by the following logic:
// 1) If the server has the new `modelConfiguration` in its site config, then the
//    client will use the autocomplete `Model.provider` (provider ID, not name) field
//    as the string.
// 2) Else, if `authStatus.configOverwrites?.provider` is set -- i.e. the string
//    returned by this function -- it will be used.
// 3) Else, the default string 'anthropic' will be used.
//
// The 'autocomplete provider name string' is then entered into a switch statement
// (see create-provider.ts, `createProviderConfig`), which can be summarized as:
//
// * 'openai', 'azure-openai' => createUnstableOpenAIProviderConfig
// * 'fireworks' => createFireworksProviderConfig
// * 'aws-bedrock', 'anthropic' => createAnthropicProviderConfig
// * 'google' => createAnthropicProviderConfig or createGeminiProviderConfig, depending on the model
//
// Note that all other cases are irrelevant:
//
// * 'experimental-openaicompatible' => deprecated, client-side-only option; does not need to be returned by this function.
// * 'openaicompatible' => does not need to be returned by this function (uses the new Models service instead of CodyLLMConfiguration.provider)
// * Ollama and other options => client-side only
//
// Finally, it is worth noting that Sourcegraph instances on versions prior to Aug 7th 2024
// using Cody Gateway would return 'sourcegraph' as the provider name here, which would
// hit a default 'fireworks' case in the client. Today that logic no longer exists, but
// some remnants of it remain in the client codebase.
//
// Lastly, remember that we only ever use the default autocomplete model. There is
// currently no UI in the product for a Cody user to choose the autocomplete model.
// However, when we add that feature, note that this return value is NOT used when
// `modelConfiguration` is present in the site config; the client uses `Model.provider` instead.
//
// TL;DR: this function is currently expected to return strings that satisfy these conditions:
//
// * 'openai', 'azure-openai' => createUnstableOpenAIProviderConfig
// * 'fireworks' => createFireworksProviderConfig
// * 'aws-bedrock', 'anthropic' => createAnthropicProviderConfig
// * 'google' => createAnthropicProviderConfig or createGeminiProviderConfig, depending on the model
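//
// Illustrative example (hypothetical configuration, not a real default): if the
// default code completion model were a ModelRef such as
// "fireworks::v1::starcoder-16b", this function would return "fireworks", and the
// client switch summarized above would select the Fireworks autocomplete provider
// implementation.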
func (r *codyLLMConfigurationResolver) Provider() string {
	// The provider ID is the closest thing we have to the strings listed above, and
	// should be correct in the cases noted there.
	return string(r.modelconfig.DefaultModels.CodeCompletion.ProviderID())
}

func (r *codyLLMConfigurationResolver) CompletionModel() (string, error) {
	defaultCompletionModel := r.modelconfig.DefaultModels.CodeCompletion
	model := r.modelconfig.GetModelByMRef(defaultCompletionModel)
	if model == nil {
		return "", errors.Errorf("default code completion model %q not found", defaultCompletionModel)
	}
	return string(model.ModelRef.ModelID()), nil
}

func (r *codyLLMConfigurationResolver) CompletionModelMaxTokens() (*int32, error) {
	defaultCompletionModel := r.modelconfig.DefaultModels.CodeCompletion
	model := r.modelconfig.GetModelByMRef(defaultCompletionModel)
	if model == nil {
		return nil, errors.Errorf("default code completion model %q not found", defaultCompletionModel)
	}
	maxTokens := int32(model.ContextWindow.MaxInputTokens)
	return &maxTokens, nil
}
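
// Illustrative usage sketch (the query path and field names below are assumed to
// follow the usual graphqlbackend camelCase mapping of the resolver methods above;
// verify against the GraphQL schema before relying on them). A client could read
// this configuration with a query along the lines of:
//
//	query {
//	  site {
//	    codyLLMConfiguration {
//	      provider
//	      chatModel
//	      chatModelMaxTokens
//	      completionModel
//	      completionModelMaxTokens
//	      smartContextWindow
//	    }
//	  }
//	}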