
Commit e2de81c

revert formating
1 parent e9e3384 commit e2de81c

2 files changed: +10 -40 lines changed


appender.go

2 additions & 6 deletions

@@ -113,7 +113,6 @@ func New(client elastictransport.Interface, cfg Config) (*Appender, error) {
 	if err := BulkIndexerConfigFrom(client, cfg).Validate(); err != nil {
 		return nil, fmt.Errorf("error creating bulk indexer: %w", err)
 	}
-
 	indexer := &Appender{
 		pool:   cfg.BulkIndexerPool,
 		config: cfg,
@@ -209,11 +208,8 @@ func (a *Appender) Add(ctx context.Context, index string, document io.WriterTo)
 		Body: document,
 	}
 	if len(a.bulkItems) == cap(a.bulkItems) {
-		a.metrics.blockedAdd.Add(
-			context.Background(),
-			1,
-			metric.WithAttributeSet(a.config.MetricAttributes),
-		)
+		attrs := metric.WithAttributeSet(a.config.MetricAttributes)
+		a.metrics.blockedAdd.Add(context.Background(), 1, attrs)
 	}

 	select {
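
The Add hunk above rewrites how the blocked-add counter is incremented: the attribute option is built once into attrs and then passed to a single-line Add call. For reference, a self-contained sketch of the same OpenTelemetry calls (attribute.NewSet, metric.WithAttributeSet, Int64Counter.Add); the meter and instrument names here are illustrative assumptions, not taken from the repository.

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Manual reader + meter provider, the same wiring the tests below use via
	// sdkmetric.NewMeterProvider(sdkmetric.WithReader(rdr)).
	rdr := sdkmetric.NewManualReader()
	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(rdr))

	// Illustrative counter standing in for the appender's blockedAdd metric.
	blockedAdd, err := mp.Meter("sketch").Int64Counter("docappender.sketch.blocked_add")
	if err != nil {
		panic(err)
	}

	// Build the attribute option once, then pass it to Add, mirroring the
	// two-line form the hunk above reverts to.
	attrs := metric.WithAttributeSet(attribute.NewSet(
		attribute.String("a", "b"), attribute.String("c", "d"),
	))
	blockedAdd.Add(context.Background(), 1, attrs)
	fmt.Println("recorded 1 blocked add")
}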

appender_test.go

8 additions & 34 deletions

@@ -97,8 +97,7 @@ func TestAppender(t *testing.T) {
 	))

 	indexerAttrs := attribute.NewSet(
-		attribute.String("a", "b"),
-		attribute.String("c", "d"),
+		attribute.String("a", "b"), attribute.String("c", "d"),
 	)

 	indexer, err := docappender.New(client, docappender.Config{
@@ -228,7 +227,6 @@ func TestAppenderRetry(t *testing.T) {
 	var bytesTotal int64
 	var bytesUncompressed int64
 	var first atomic.Bool
-
 	client := docappendertest.NewMockElasticsearchClient(t, func(w http.ResponseWriter, r *http.Request) {
 		bytesTotal += r.ContentLength
 		_, result, stat := docappendertest.DecodeBulkRequestWithStats(r)
@@ -265,23 +263,20 @@ func TestAppenderRetry(t *testing.T) {
 	))

 	indexerAttrs := attribute.NewSet(
-		attribute.String("a", "b"),
-		attribute.String("c", "d"),
+		attribute.String("a", "b"), attribute.String("c", "d"),
 	)

 	indexer, err := docappender.New(client, docappender.Config{
-		FlushInterval: 2 * time.Minute,
+		FlushInterval: time.Minute,
 		FlushBytes: 800, // this is enough to flush after 9 documents
 		MaxRequests: 1, // to ensure the test is stable
 		MaxDocumentRetries: 1, // to test the document retry logic
-		MeterProvider: sdkmetric.NewMeterProvider(
-			sdkmetric.WithReader(rdr),
-		),
-		MetricAttributes: indexerAttrs,
+		MeterProvider: sdkmetric.NewMeterProvider(sdkmetric.WithReader(rdr)),
+		MetricAttributes: indexerAttrs,
 	})

 	require.NoError(t, err)
-	// defer indexer.Close(context.Background())
+	defer indexer.Close(context.Background())

 	const N = 10
 	for i := 0; i < N; i++ {
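
The TestAppenderRetry hunk restores the deferred Close and shortens FlushInterval. A condensed sketch of that construct/Add/Close lifecycle follows; the empty mock handler and the document body are placeholder assumptions, and the imports are assumed to match the surrounding test file.

func TestAppenderLifecycleSketch(t *testing.T) {
	// Mock Elasticsearch client, as in the tests above; a real test decodes
	// the bulk request and writes a bulk response inside this handler.
	client := docappendertest.NewMockElasticsearchClient(t, func(w http.ResponseWriter, r *http.Request) {})

	indexer, err := docappender.New(client, docappender.Config{
		FlushInterval:      time.Minute,
		FlushBytes:         800, // enough to flush after 9 documents
		MaxRequests:        1,
		MaxDocumentRetries: 1,
	})
	require.NoError(t, err)
	// Closing the indexer flushes any documents still enqueued.
	defer indexer.Close(context.Background())

	for i := 0; i < 10; i++ {
		// *strings.Reader satisfies io.WriterTo, which Add expects.
		err := indexer.Add(context.Background(), "logs-foo-testing", strings.NewReader(`{"message":"sketch"}`))
		require.NoError(t, err)
	}
}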
@@ -454,7 +449,6 @@ func TestAppenderAvailableAppenders(t *testing.T) {
 	for i := 0; i < N; i++ {
 		addMinimalDoc(t, indexer, "logs-foo-testing")
 	}
-
 	timeout := time.NewTimer(2 * time.Second)
 	defer timeout.Stop()
 	for i := 0; i < N; i++ {
@@ -878,7 +872,6 @@ func TestAppenderFlushRequestError(t *testing.T) {

 	// Closing the indexer flushes enqueued documents.
 	err = indexer.Close(context.Background())
-
 	switch includeSource {
 	case docappender.False, docappender.True:
 		// include_source=false is implemented in ES so we just assert we're not
@@ -1774,7 +1767,6 @@ func TestAppenderScaling(t *testing.T) {
 		t.Cleanup(func() { indexer.Close(context.Background()) })
 		return indexer
 	}
-
 	sendDocuments := func(t *testing.T, indexer *docappender.Appender, docs int) {
 		for i := 0; i < docs; i++ {
 			err := indexer.Add(context.Background(), "logs-foo-testing", newJSONReader(map[string]any{
@@ -1866,7 +1858,6 @@ func TestAppenderScaling(t *testing.T) {
 			}
 		}
 	}
-
 	t.Run("DownscaleIdle", func(t *testing.T) {
 		rdr := sdkmetric.NewManualReader(sdkmetric.WithTemporalitySelector(
 			func(ik sdkmetric.InstrumentKind) metricdata.Temporality {
@@ -1893,7 +1884,6 @@ func TestAppenderScaling(t *testing.T) {
 		})
 		docs := int64(20)
 		sendDocuments(t, indexer, int(docs))
-
 		waitForScaleUp(t, indexer, 3)
 		waitForScaleDown(t, indexer, rdr, 1)

@@ -1954,7 +1944,6 @@ func TestAppenderScaling(t *testing.T) {
 			}
 		})
 	})
-
 	t.Run("DownscaleActiveLimit", func(t *testing.T) {
 		rdr := sdkmetric.NewManualReader(sdkmetric.WithTemporalitySelector(
 			func(ik sdkmetric.InstrumentKind) metricdata.Temporality {
@@ -1982,11 +1971,9 @@ func TestAppenderScaling(t *testing.T) {
 		})
 		docs := int64(14)
 		sendDocuments(t, indexer, int(docs))
-
 		waitForScaleUp(t, indexer, 3)
 		// Set the gomaxprocs to 4, which should result in an activeLimit of 1.
 		setGOMAXPROCS(t, 4)
-
 		// Wait for the indexers to scale down from 3 to 1. The downscale cool
 		// down of `1m` isn't respected, since the active limit is breached with
 		// the gomaxprocs change.
@@ -2051,7 +2038,6 @@ func TestAppenderScaling(t *testing.T) {
 			}
 		})
 	})
-
 	t.Run("UpscaleCooldown", func(t *testing.T) {
 		rdr := sdkmetric.NewManualReader(sdkmetric.WithTemporalitySelector(
 			func(ik sdkmetric.InstrumentKind) metricdata.Temporality {
@@ -2079,10 +2065,8 @@ func TestAppenderScaling(t *testing.T) {
 		})
 		docs := int64(50)
 		sendDocuments(t, indexer, int(docs))
-
 		waitForScaleUp(t, indexer, 2)
 		waitForBulkRequests(t, indexer, rdr, docs)
-
 		assert.Equal(t, int64(2), indexer.IndexersActive())

 		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
@@ -2144,7 +2128,6 @@ func TestAppenderScaling(t *testing.T) {
 			}
 		})
 	})
-
 	t.Run("Downscale429Rate", func(t *testing.T) {
 		rdr := sdkmetric.NewManualReader(sdkmetric.WithTemporalitySelector(
 			func(ik sdkmetric.InstrumentKind) metricdata.Temporality {
@@ -2177,23 +2160,19 @@ func TestAppenderScaling(t *testing.T) {
 			FlushBytes: 1,
 			Scaling: docappender.ScalingConfig{
 				ScaleUp: docappender.ScaleActionConfig{
-					Threshold: 5,
-					CoolDown: 1,
+					Threshold: 5, CoolDown: 1,
 				},
 				ScaleDown: docappender.ScaleActionConfig{
-					Threshold: 100,
-					CoolDown: 100 * time.Millisecond,
+					Threshold: 100, CoolDown: 100 * time.Millisecond,
 				},
 				IdleInterval: 100 * time.Millisecond,
 			},
 			MeterProvider: sdkmetric.NewMeterProvider(sdkmetric.WithReader(rdr)),
 		})
 		require.NoError(t, err)
 		t.Cleanup(func() { indexer.Close(context.Background()) })
-
 		docs := int64(20)
 		sendDocuments(t, indexer, int(docs))
-
 		waitForScaleUp(t, indexer, 3)
 		waitForBulkRequests(t, indexer, rdr, docs)

@@ -2204,7 +2183,6 @@ func TestAppenderScaling(t *testing.T) {
 		mu.Unlock()
 		docs += 5
 		sendDocuments(t, indexer, 5)
-
 		waitForScaleDown(t, indexer, rdr, 1)
 		waitForBulkRequests(t, indexer, rdr, docs)

@@ -2215,15 +2193,11 @@ func TestAppenderScaling(t *testing.T) {
 		mu.Unlock()
 		docs += 600
 		sendDocuments(t, indexer, 600)
-
 		waitForScaleUp(t, indexer, 3)
 		waitForBulkRequests(t, indexer, rdr, docs)
-
 		assert.Equal(t, int64(3), indexer.IndexersActive())
-
 		var rm metricdata.ResourceMetrics
 		assert.NoError(t, rdr.Collect(context.Background(), &rm))
-
 		docappendertest.AssertOTelMetrics(t, rm.ScopeMetrics[0].Metrics, func(m metricdata.Metrics) {
 			switch n := m.Name; n {
 			case "elasticsearch.indexer.created":
