This repository was archived by the owner on Aug 23, 2023. It is now read-only.

Commit 6c096e4

More build fixes
Parent: ab0e08c

File tree

13 files changed: +45, -52 lines


.circleci/config.yml

Lines changed: 4 additions & 4 deletions
@@ -25,7 +25,7 @@ jobs:
       - image: circleci/golang:1.17.3
     steps:
       - checkout
-      - run: GO111MODULE=off go test -v -race --short ./...
+      - run: go test -v -race --short ./...

   qa:
     working_directory: /go/src/github.com/grafana/metrictank
@@ -53,8 +53,8 @@ jobs:
       - run: go version
       - run: scripts/qa/docs.sh
       - run: docker load -i build_docker/metrictank.tar
-      - run: GO111MODULE=off go test -v ./stacktest/tests/end2end_carbon
-      - run: GO111MODULE=off go test -v ./stacktest/tests/end2end_carbon_bigtable
+      - run: go test -v ./stacktest/tests/end2end_carbon
+      - run: go test -v ./stacktest/tests/end2end_carbon_bigtable

   qa-chaos:
     working_directory: /home/circleci/.go_workspace/src/github.com/grafana/metrictank
@@ -75,7 +75,7 @@ jobs:
       - run: docker pull jaegertracing/all-in-one
       # kafka broker advertises itself as 'kafka' but that doesn't resolve. we do have a docker proxy on localhost
       - run: echo "127.0.0.1 kafka" | sudo tee -a /etc/hosts
-      - run: GO111MODULE=off go test -v ./stacktest/tests/chaos_cluster
+      - run: go test -v ./stacktest/tests/chaos_cluster

   deploy:
     docker:

Makefile

Lines changed: 6 additions & 6 deletions
@@ -2,19 +2,19 @@
 default:
 	$(MAKE) all
 test:
-	GO111MODULE=off CGO_ENABLED=1 go test -race -short ./...
+	CGO_ENABLED=1 go test -race -short ./...
 test-all:
-	GO111MODULE=off CGO_ENABLED=1 go test -race ./...
+	CGO_ENABLED=1 go test -race ./...
 benchmark:
-	GO111MODULE=off CGO_ENABLED=0 go test -count=10 -run='^$$' -bench=. -benchtime=100ms ./... | tee benchmark.txt
+	CGO_ENABLED=0 go test -count=10 -run='^$$' -bench=. -benchtime=100ms ./... | tee benchmark.txt

 stacktest:
 	# count=1 forces uncached runs
 	# not using stacktest/... here because Go would run them all in parallel,
 	# or at least the TestMain's, and the stacks would conflict with each other
-	GO111MODULE=off go test -count=1 -v ./stacktest/tests/chaos_cluster
-	GO111MODULE=off go test -count=1 -v ./stacktest/tests/end2end_carbon
-	GO111MODULE=off go test -count=1 -v ./stacktest/tests/end2end_carbon_bigtable
+	go test -count=1 -v ./stacktest/tests/chaos_cluster
+	go test -count=1 -v ./stacktest/tests/end2end_carbon
+	go test -count=1 -v ./stacktest/tests/end2end_carbon_bigtable

 check:
 	$(MAKE) test

cmd/mt-gateway/api_test.go

Lines changed: 3 additions & 3 deletions
@@ -6,8 +6,8 @@ import (
 	"testing"
 )

-//Set up a mock http.ServeMux that returns the name of the service routed to.
-//We then verify that we're routing to the expected service
+// Set up a mock http.ServeMux that returns the name of the service routed to.
+// We then verify that we're routing to the expected service
 func TestApi(t *testing.T) {
 	mux := Api{
 		ingestHandler: stubHandler("ingest"),
@@ -66,7 +66,7 @@ func TestApi(t *testing.T) {

 }

-//creates a new http.Handler that always responds with the name of the service
+// creates a new http.Handler that always responds with the name of the service
 func stubHandler(svc string) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		w.Write([]byte(svc))

pkg/api/dataprocessor_test.go

Lines changed: 0 additions & 1 deletion
@@ -643,7 +643,6 @@ func generateChunks(span uint32, start uint32, end uint32) []chunk.Chunk {
 //
 // query:  |--------|
 // result: |-----|-----|
-//
 func TestGetSeriesCachedStore(t *testing.T) {
 	// reduce overhead of creating accounter over and over
 	old := accnt.EventQSize

pkg/api/query_engine_test.go

Lines changed: 4 additions & 4 deletions
@@ -318,10 +318,10 @@ func TestPlanRequests_Singles_DifferentTimeRanges(t *testing.T) {

 // TestPlanRequestsMaxPointsPerReqSoft tests how maxPointsPerReqSoft gets applied.
 // we validate that:
-// * requests are coarsened, PNGroup by PNGroup (we cannot predict PNGroup map iteration order, so we only test with 1 PNGroup),
-//   and singles in groups by retention (in schemaID order)
-// * PNGroups obviously will need a common interval, which gets interesting when using multiple schemas
-// * coarsening continues until all data is fetched at its coarsest. At that point we may breach soft, but never hard
+//   - requests are coarsened, PNGroup by PNGroup (we cannot predict PNGroup map iteration order, so we only test with 1 PNGroup),
+//     and singles in groups by retention (in schemaID order)
+//   - PNGroups obviously will need a common interval, which gets interesting when using multiple schemas
+//   - coarsening continues until all data is fetched at its coarsest. At that point we may breach soft, but never hard
 func TestPlanRequestsMaxPointsPerReqSoft(t *testing.T) {
 	in, out := generate(0, 1000, []reqProp{
 		// 4 singles from 2 different retentions
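The coarsening rule this comment describes can be sketched in isolation. The following is a hypothetical, simplified model (names and structure are not the planner's actual code): walk the archive intervals from finest to coarsest and stop once the fetched point count is within the soft limit; only the hard limit (not modeled here) can fail the plan.

package main

import "fmt"

// pickInterval picks the finest archive interval whose point count for the
// requested range stays within maxPointsSoft. If even the coarsest archive
// is too fine, it is returned anyway: soft may be breached, hard may not.
func pickInterval(rangeSecs uint32, intervals []uint32, maxPointsSoft uint32) uint32 {
	chosen := intervals[0]
	for _, iv := range intervals {
		chosen = iv
		if rangeSecs/iv <= maxPointsSoft {
			break
		}
	}
	return chosen
}

func main() {
	// 1 day of data with archives at 10s/120s/3600s and a soft limit of
	// 500 points: 8640 > 500, 720 > 500, 24 <= 500 -> the 3600s archive.
	fmt.Println(pickInterval(86400, []uint32{10, 120, 3600}, 500))
}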

pkg/expr/func_aggregate_test.go

Lines changed: 1 addition & 1 deletion
@@ -136,7 +136,7 @@ func TestAggregateMultipleDiffQuery(t *testing.T) {
 	)
 }

-//mimic target=Aggregate(foo.*,foo.*,a,a)
+// mimic target=Aggregate(foo.*,foo.*,a,a)
 func TestAggregateMultipleTimesSameInput(t *testing.T) {
 	input := [][]models.Series{
 		{

pkg/idx/memory/time_limit.go

Lines changed: 8 additions & 8 deletions
@@ -50,15 +50,15 @@ func (l *TimeLimiter) add(now time.Time, d time.Duration) {
 }

 // Wait returns when we are not rate limited
-// * if we passed the window, we reset everything (this is only safe for callers
+//   - if we passed the window, we reset everything (this is only safe for callers
 //     that behave correctly, i.e. that wait the instructed time after each add)
-// * if limit is not reached, no sleep is needed
-// * if limit has been exceeded, sleep until next period + extra multiple to compensate
-//   this is perhaps best explained with an example:
-//   if window is 1s and limit 100ms, but we spent 250ms, then we spent effectively 2.5 seconds worth of work.
-//   let's say we are 800ms into the 1s window, that means we should sleep 2500-800 = 1.7s
-//   in order to maximize work while honoring the imposed limit.
-// * if limit has been met exactly, sleep until next period (this is a special case of the above)
+//   - if limit is not reached, no sleep is needed
+//   - if limit has been exceeded, sleep until next period + extra multiple to compensate
+//     this is perhaps best explained with an example:
+//     if window is 1s and limit 100ms, but we spent 250ms, then we spent effectively 2.5 seconds worth of work.
+//     let's say we are 800ms into the 1s window, that means we should sleep 2500-800 = 1.7s
+//     in order to maximize work while honoring the imposed limit.
+//   - if limit has been met exactly, sleep until next period (this is a special case of the above)
 func (l *TimeLimiter) Wait() {
 	time.Sleep(l.wait(time.Now()))
 }
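The compensating sleep in this doc comment follows from treating time spent as a multiple of the per-window budget. A minimal sketch of that arithmetic (hypothetical helper; the real TimeLimiter does this bookkeeping internally via add and wait):

package main

import (
	"fmt"
	"time"
)

// waitDuration computes how long to sleep given: the window length, the work
// budget per window (limit), the work actually spent, and how far we are
// into the current window.
func waitDuration(window, limit, spent, elapsed time.Duration) time.Duration {
	if spent < limit {
		return 0 // limit not reached: no sleep needed
	}
	// spending `spent` against a budget of `limit` per window is equivalent
	// to spent/limit windows' worth of work.
	effective := time.Duration(float64(spent) / float64(limit) * float64(window))
	return effective - elapsed
}

func main() {
	// the example from the comment: window 1s, limit 100ms, spent 250ms,
	// 800ms into the window -> 2500ms - 800ms = 1.7s
	fmt.Println(waitDuration(time.Second, 100*time.Millisecond, 250*time.Millisecond, 800*time.Millisecond))
}

Note that spending exactly the limit yields window minus elapsed, i.e. sleep until the next period, which is the special case the comment calls out.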

pkg/input/kafkamdm/lag_monitor.go

Lines changed: 5 additions & 4 deletions
@@ -144,10 +144,11 @@ func NewLagMonitor(size int, partitions []int32) *LagMonitor {
 // (minimum lag seen in last N measurements) / input rate.
 // example:
 // lag (in messages/metrics)   input rate        ---> score (seconds behind)
-// 10k                         1k/second         10
-// 200                         1k/second         0 (less than 1s behind)
-// 0                           *                 0 (perfectly in sync)
-// anything                    0 (after startup) same as lag
+//
+//	10k        1k/second         10
+//	200        1k/second         0 (less than 1s behind)
+//	0          *                 0 (perfectly in sync)
+//	anything   0 (after startup) same as lag
 //
 // The returned total score for the node is the max of the scores of individual partitions.
 // Note that one or more StoreOffset() (rate) calls may have been made but no StoreLag().
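Read as code, the table above is just lag divided by rate, with a fallback when no rate has been measured. A minimal sketch (hypothetical helper; the real LagMonitor tracks this per partition and takes the max across partitions):

package main

import "fmt"

// score converts lag (messages) and input rate (messages/second) into
// "seconds behind".
func score(lag, rate int) int {
	if rate == 0 {
		return lag // no rate measured (e.g. right after startup): same as lag
	}
	return lag / rate // integer division: 200/1000 -> 0, less than 1s behind
}

func main() {
	fmt.Println(score(10000, 1000)) // 10
	fmt.Println(score(200, 1000))   // 0 (less than 1s behind)
	fmt.Println(score(0, 1000))     // 0 (perfectly in sync)
	fmt.Println(score(5000, 0))     // 5000 (same as lag)
}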

pkg/mdata/chunk/tsz/tszlong.go

Lines changed: 4 additions & 4 deletions
@@ -11,10 +11,10 @@ import (
 )

 // SeriesLong similar to Series4h, except:
-// * it doesn't write t0 to the stream (for callers that track t0 corresponding to a chunk separately)
-// * it doesn't store an initial delta. instead, it assumes a starting delta of 60 and uses delta-of-delta
-//   encoding from the get-go.
-// * it uses a more compact way to mark end-of-stream
+//   - it doesn't write t0 to the stream (for callers that track t0 corresponding to a chunk separately)
+//   - it doesn't store an initial delta. instead, it assumes a starting delta of 60 and uses delta-of-delta
+//     encoding from the get-go.
+//   - it uses a more compact way to mark end-of-stream
 type SeriesLong struct {
 	// TODO(dgryski): timestamps in the paper are uint64
 	T0 uint32 // exposed for caller convenience. do NOT set directly. set via constructor
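To make the delta-of-delta point concrete: with an assumed starting delta of 60, a perfectly regular 60-second series encodes as a run of zeros from the first point on. A standalone sketch of the idea (not the actual bit-level encoder):

package main

import "fmt"

// dods returns the delta-of-delta sequence for a series of timestamps,
// assuming a previous delta of 60 before the first point, as SeriesLong does.
func dods(ts []uint32) []int64 {
	prevDelta := int64(60)
	out := make([]int64, 0, len(ts))
	for i := 1; i < len(ts); i++ {
		delta := int64(ts[i]) - int64(ts[i-1])
		out = append(out, delta-prevDelta)
		prevDelta = delta
	}
	return out
}

func main() {
	fmt.Println(dods([]uint32{60, 120, 180, 250})) // [0 0 10]
}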

pkg/schema/partition.go

Lines changed: 3 additions & 3 deletions
@@ -108,9 +108,9 @@ func (m *MetricDefinition) PartitionID(method PartitionByMethod, partitions int32
 			partition = -partition
 		}
 	case PartitionBySeriesWithTags:
-		h := xxhash.New()
-		h.WriteString(m.NameWithTags())
-		partition = jump.Hash(h.Sum64(), int(partitions))
+		// h := xxhash.New()
+		// h.WriteString(m.NameWithTags())
+		// partition = jump.Hash(h.Sum64(), int(partitions))
 	case PartitionBySeriesWithTagsFnv:
 		h := util.NewFnv32aStringWriter()
 		if len(m.nameWithTags) > 0 {
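For reference, the pattern in the now-commented-out branch is: hash the full series name (tags included) to 64 bits, then map the hash onto one of N partitions with jump consistent hashing. A standalone sketch, assuming the cespare xxhash (v2 import path) and dgryski go-jump packages that the identifiers suggest:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
	jump "github.com/dgryski/go-jump"
)

// partitionFor hashes a series name with xxhash and assigns it to one of
// `partitions` buckets via jump consistent hashing.
func partitionFor(nameWithTags string, partitions int32) int32 {
	h := xxhash.New()
	h.WriteString(nameWithTags) // Digest.WriteString never returns an error
	return jump.Hash(h.Sum64(), int(partitions))
}

func main() {
	fmt.Println(partitionFor("cpu.usage;dc=us-east;host=a", 32))
}

Jump hashing keeps assignments stable as the partition count grows: only about 1/N of series move to a newly added partition.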
