Skip to content

Commit e9e19af

Browse files
committed
add ci test
1 parent 7168f79 commit e9e19af

File tree

13 files changed

+1228
-28
lines changed

13 files changed

+1228
-28
lines changed

Dockerfile.test.gravity

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM golang:1.11.4
1+
FROM golang:1.13.3
22

33
WORKDIR /gravity
44

Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ dev-down:
2727

2828
go-test:
2929
go test -failfast -race ./integration_test
30+
cd pkg/registry/test_data && make build
3031
go test -timeout 10m -coverprofile=cover.out $(TEST_DIRS) && go tool cover -func=cover.out | tail -n 1
3132

3233
test-local:

docker-compose-gravity-test.yml

Lines changed: 89 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
version: '3.2'
22
services:
33
source-db:
4-
image: mysql:5.7.18
4+
image: mysql:5.7
55
container_name: source-db-test
66
environment:
77
- MYSQL_ALLOW_EMPTY_PASSWORD=yes
@@ -14,7 +14,7 @@ services:
1414
- ./mycnf:/etc/mysql/conf.d
1515

1616
target-db:
17-
image: mysql:5.7.18
17+
image: mysql:5.7
1818
container_name: target-db-test
1919
environment:
2020
- MYSQL_ALLOW_EMPTY_PASSWORD=yes
@@ -62,13 +62,99 @@ services:
6262
- "ES_JAVA_OPTS=-Xms750m -Xmx750m"
6363
logging:
6464
driver: none
65+
66+
pd:
67+
image: pingcap/pd:latest
68+
ports:
69+
- "2379"
70+
volumes:
71+
- ./integration_test/config/pd.toml:/pd.toml:ro
72+
command:
73+
- --name=pd
74+
- --client-urls=http://0.0.0.0:2379
75+
- --peer-urls=http://0.0.0.0:2380
76+
- --advertise-client-urls=http://pd:2379
77+
- --advertise-peer-urls=http://pd:2380
78+
- --initial-cluster=pd=http://pd:2380
79+
- --data-dir=/data/pd
80+
- --config=/pd.toml
81+
restart: on-failure
82+
83+
tikv:
84+
image: pingcap/tikv:latest
85+
volumes:
86+
- ./integration_test/config/tikv.toml:/tikv.toml:ro
87+
command:
88+
- --addr=0.0.0.0:20160
89+
- --advertise-addr=tikv:20160
90+
- --data-dir=/data/tikv
91+
- --pd=pd:2379
92+
- --config=/tikv.toml
93+
depends_on:
94+
- "pd"
95+
restart: on-failure
96+
97+
pump:
98+
image: pingcap/tidb-binlog:latest
99+
logging:
100+
driver: none
101+
volumes:
102+
- ./integration_test/config/pump.toml:/pump.toml:ro
103+
command:
104+
- /pump
105+
- --addr=0.0.0.0:8250
106+
- --advertise-addr=pump:8250
107+
- --data-dir=/data/pump
108+
- --node-id=pump
109+
- --pd-urls=http://pd:2379
110+
- --config=/pump.toml
111+
depends_on:
112+
- "pd"
113+
restart: on-failure
114+
115+
drainer:
116+
image: pingcap/tidb-binlog:latest
117+
logging:
118+
driver: none
119+
volumes:
120+
- ./integration_test/config/drainer.toml:/drainer.toml:ro
121+
command:
122+
- /drainer
123+
- --addr=drainer:8249
124+
- --data-dir=/data/data.drainer
125+
- --pd-urls=http://pd:2379
126+
- --config=/drainer.toml
127+
- --initial-commit-ts=0
128+
- --dest-db-type=kafka
129+
depends_on:
130+
- "pd"
131+
- "kafka"
132+
restart: on-failure
133+
134+
tidb:
135+
image: pingcap/tidb:latest
136+
ports:
137+
- "4000:4000"
138+
- "10080:10080"
139+
volumes:
140+
- ./integration_test/config/tidb.toml:/tidb.toml:ro
141+
command:
142+
- --store=tikv
143+
- --path=pd:2379
144+
- --config=/tidb.toml
145+
- --enable-binlog=true
146+
depends_on:
147+
- "tikv"
148+
- "pump"
149+
restart: on-failure
65150

66151
gravity-test:
67152
build:
68153
context: ./
69154
dockerfile: Dockerfile.test.gravity
70155
depends_on:
71156
- mongo
157+
- tidb
72158
environment:
73159
- MONGO_HOST=mongo
74160
- KAFKA_BROKER=kafka:9092
@@ -78,4 +164,5 @@ services:
78164
"--", "./wait-for-it.sh", "mongo:27017", "-t", "0",
79165
"--", "./wait-for-it.sh", "kafka:9092", "-t", "0",
80166
"--", "./wait-for-it.sh", "elasticsearch:9200", "-t", "0",
167+
"--", "./wait-for-it.sh", "tidb:4000", "-t", "0",
81168
"--", "make", "go-test"]

integration_test/config/drainer.toml

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,101 @@
1+
# drainer Configuration.
2+
3+
# addr (i.e. 'host:port') to listen on for drainer connections
4+
# will register this addr into etcd
5+
# addr = "127.0.0.1:8249"
6+
7+
# the interval time (in seconds) of detect pumps' status
8+
detect-interval = 10
9+
10+
# drainer meta data directory path
11+
data-dir = "data.drainer"
12+
13+
# a comma separated list of PD endpoints
14+
pd-urls = "http://127.0.0.1:2379"
15+
16+
# Use the specified compressor to compress payload between pump and drainer
17+
compressor = ""
18+
19+
#[security]
20+
# Path of file that contains list of trusted SSL CAs for connection with cluster components.
21+
# ssl-ca = "/path/to/ca.pem"
22+
# Path of file that contains X509 certificate in PEM format for connection with cluster components.
23+
# ssl-cert = "/path/to/pump.pem"
24+
# Path of file that contains X509 key in PEM format for connection with cluster components.
25+
# ssl-key = "/path/to/pump-key.pem"
26+
27+
# syncer Configuration.
28+
[syncer]
29+
30+
# Assume the upstream sql-mode.
31+
# If this is set, the same sql-mode will be used to parse DDL statements, and the same sql-mode will be set at downstream when db-type is mysql.
32+
# If this is not set, it will not set any sql-mode.
33+
# sql-mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION"
34+
35+
# number of binlog events in a transaction batch
36+
txn-batch = 20
37+
38+
# work count to execute binlogs
39+
# if the latency between drainer and downstream(mysql or tidb) are too high, you might want to increase this
40+
# to get higher throughput by higher concurrent write to the downstream
41+
worker-count = 16
42+
43+
#disable-dispatch = false
44+
45+
# safe mode will split update to delete and insert
46+
safe-mode = false
47+
48+
# downstream storage, equal to --dest-db-type
49+
# valid values are "mysql", "file", "tidb", "flash", "kafka"
50+
db-type = "kafka"
51+
52+
# disable sync these schema
53+
ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
54+
55+
##replicate-do-db priority over replicate-do-table if have same db name
56+
##and we support regex expression , start with '~' declare use regex expression.
57+
#
58+
#replicate-do-db = ["~^b.*","s1"]
59+
60+
#[[syncer.replicate-do-table]]
61+
#db-name ="test"
62+
#tbl-name = "log"
63+
64+
#[[syncer.replicate-do-table]]
65+
#db-name ="test"
66+
#tbl-name = "~^a.*"
67+
68+
# disable sync these table
69+
#[[syncer.ignore-table]]
70+
#db-name = "test"
71+
#tbl-name = "log"
72+
73+
# the downstream mysql protocol database
74+
#[syncer.to]
75+
#host = "127.0.0.1"
76+
#user = "root"
77+
#password = ""
78+
#port = 3306
79+
80+
[syncer.to.checkpoint]
81+
# you can uncomment this to change the database to save checkpoint when the downstream is mysql or tidb
82+
#schema = "tidb_binlog"
83+
84+
# Uncomment this if you want to use file as db-type.
85+
#[syncer.to]
86+
# directory to save binlog file, default same as data-dir(save checkpoint file) if this is not configured.
87+
# dir = "data.drainer"
88+
89+
90+
# when db-type is kafka, you can uncomment this to configure the downstream kafka; it will be the global default kafka config
91+
[syncer.to]
92+
# only need to configure one of zookeeper-addrs and kafka-addrs; the kafka address will be fetched if zookeeper-addrs is configured.
93+
# zookeeper-addrs = "127.0.0.1:2181"
94+
kafka-addrs = "kafka:9092"
95+
kafka-version = "5.1.0"
96+
kafka-max-messages = 1024
97+
#
98+
#
99+
# the topic name drainer will push msg, the default name is <cluster-id>_obinlog
100+
# be careful don't use the same name if run multi drainer instances
101+
topic-name = "obinlog"

integration_test/config/pd.toml

Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
# PD Configuration.
2+
3+
name = "pd"
4+
data-dir = "default.pd"
5+
6+
client-urls = "http://127.0.0.1:2379"
7+
# if not set, use ${client-urls}
8+
advertise-client-urls = ""
9+
10+
peer-urls = "http://127.0.0.1:2380"
11+
# if not set, use ${peer-urls}
12+
advertise-peer-urls = ""
13+
14+
initial-cluster = "pd=http://127.0.0.1:2380"
15+
initial-cluster-state = "new"
16+
17+
lease = 3
18+
tso-save-interval = "3s"
19+
20+
[security]
21+
# Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty
22+
cacert-path = ""
23+
# Path of file that contains X509 certificate in PEM format.
24+
cert-path = ""
25+
# Path of file that contains X509 key in PEM format.
26+
key-path = ""
27+
28+
[log]
29+
level = "error"
30+
31+
# log format, one of json, text, console
32+
#format = "text"
33+
34+
# disable automatic timestamps in output
35+
#disable-timestamp = false
36+
37+
# file logging
38+
[log.file]
39+
#filename = ""
40+
# max log file size in MB
41+
#max-size = 300
42+
# max log file keep days
43+
#max-days = 28
44+
# maximum number of old log files to retain
45+
#max-backups = 7
46+
# rotate log by day
47+
#log-rotate = true
48+
49+
[metric]
50+
# prometheus client push interval, set "0s" to disable prometheus.
51+
interval = "15s"
52+
# prometheus pushgateway address, leaves it empty will disable prometheus.
53+
# address = "pushgateway:9091"
54+
address = ""
55+
56+
[schedule]
57+
max-merge-region-size = 0
58+
split-merge-interval = "1h"
59+
max-snapshot-count = 3
60+
max-pending-peer-count = 16
61+
max-store-down-time = "30m"
62+
leader-schedule-limit = 4
63+
region-schedule-limit = 4
64+
replica-schedule-limit = 8
65+
merge-schedule-limit = 8
66+
tolerant-size-ratio = 5.0
67+
68+
# customized schedulers, the format is as below
69+
# if empty, it will use balance-leader, balance-region, hot-region as default
70+
# [[schedule.schedulers]]
71+
# type = "evict-leader"
72+
# args = ["1"]
73+
74+
[replication]
75+
# The number of replicas for each region.
76+
max-replicas = 3
77+
# The label keys specified the location of a store.
78+
# The placement priorities is implied by the order of label keys.
79+
# For example, ["zone", "rack"] means that we should place replicas to
80+
# different zones first, then to different racks if we don't have enough zones.
81+
location-labels = []
82+
83+
[label-property]
84+
# Do not assign region leaders to stores that have these tags.
85+
# [[label-property.reject-leader]]
86+
# key = "zone"
87+
# value = "cn1"

integration_test/config/pump.toml

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
# pump Configuration.
2+
3+
# addr(i.e. 'host:port') to listen on for client traffic
4+
addr = "127.0.0.1:8250"
5+
6+
# addr(i.e. 'host:port') to advertise to the public
7+
advertise-addr = ""
8+
9+
# an integer value to control the expiry date of the binlog data; indicates for how long (in days) the binlog data would be stored.
10+
# must be bigger than 0
11+
gc = 7
12+
13+
# path to the data directory of pump's data
14+
data-dir = "data.pump"
15+
16+
# number of seconds between heartbeat ticks (in 2 seconds)
17+
heartbeat-interval = 2
18+
19+
# a comma separated list of PD endpoints
20+
pd-urls = "http://127.0.0.1:2379"
21+
22+
#[security]
23+
# Path of file that contains list of trusted SSL CAs for connection with cluster components.
24+
# ssl-ca = "/path/to/ca.pem"
25+
# Path of file that contains X509 certificate in PEM format for connection with cluster components.
26+
# ssl-cert = "/path/to/drainer.pem"
27+
# Path of file that contains X509 key in PEM format for connection with cluster components.
28+
# ssl-key = "/path/to/drainer-key.pem"
29+
#
30+
[storage]
31+
stop-write-at-available-space = 100
32+
# Set to `true` (default) for best reliability, which prevents data loss when there is a power failure.
33+
# sync-log = true
34+
#
35+
# we suggest using the default config of the embedded LSM DB now; do not change it unless you know what you are doing
36+
#[storage.kv]
37+
# block-cache-capacity = 8388608
38+
# block-restart-interval = 16
39+
# block-size = 4096
40+
# compaction-L0-trigger = 8
41+
# compaction-table-size = 67108864
42+
# compaction-total-size = 536870912
43+
# compaction-total-size-multiplier = 8.0
44+
# write-buffer = 67108864
45+
# write-L0-pause-trigger = 24
46+
# write-L0-slowdown-trigger = 17

0 commit comments

Comments
 (0)