Skip to content

Commit 7adf279

Browse files
YuJuncen authored and ti-chi-bot committed
This is an automated cherry-pick of pingcap#58433
Signed-off-by: ti-chi-bot <[email protected]>
1 parent 1bdef59 commit 7adf279

File tree

7 files changed

+215
-5
lines changed

7 files changed

+215
-5
lines changed

br/pkg/task/common_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ func TestUrlNoQuery(t *testing.T) {
4343

4444
func TestTiDBConfigUnchanged(t *testing.T) {
4545
cfg := config.GetGlobalConfig()
46-
restoreConfig := enableTiDBConfig()
46+
restoreConfig := tweakLocalConfForRestore()
4747
require.NotEqual(t, config.GetGlobalConfig(), cfg)
4848
restoreConfig()
4949
require.Equal(t, config.GetGlobalConfig(), cfg)

br/pkg/task/restore.go

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -659,7 +659,7 @@ func runRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf
659659
}
660660

661661
// pre-set TiDB config for restore
662-
restoreDBConfig := enableTiDBConfig()
662+
restoreDBConfig := tweakLocalConfForRestore()
663663
defer restoreDBConfig()
664664

665665
if client.GetSupportPolicy() {
@@ -899,6 +899,7 @@ func filterRestoreFiles(
899899
return
900900
}
901901

902+
<<<<<<< HEAD
902903
// restorePreWork executes some prepare work before restore.
903904
// TODO make this function returns a restore post work.
904905
func restorePreWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr, switchToImport bool) (pdutil.UndoFunc, error) {
@@ -935,8 +936,11 @@ func restorePostWork(
935936
}
936937

937938
// enableTiDBConfig tweaks some of configs of TiDB to make the restore progress go well.
939+
=======
940+
// tweakLocalConfForRestore tweaks some of configs of TiDB to make the restore progress go well.
941+
>>>>>>> 384f858a6c8 (br/stream: allow pitr to create oversized indices (#58433))
938942
// return a function that could restore the config to origin.
939-
func enableTiDBConfig() func() {
943+
func tweakLocalConfForRestore() func() {
940944
restoreConfig := config.RestoreFunc()
941945
config.UpdateGlobal(func(conf *config.Config) {
942946
// set max-index-length before execute DDLs and create tables

br/pkg/task/stream.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1194,6 +1194,9 @@ func restoreStream(
11941194
ctx, cancelFn := context.WithCancel(c)
11951195
defer cancelFn()
11961196

1197+
restoreCfg := tweakLocalConfForRestore()
1198+
defer restoreCfg()
1199+
11971200
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
11981201
span1 := span.Tracer().StartSpan(
11991202
"restoreStream",
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# config of tidb
2+
3+
max-index-length = 12288
4+
5+
[security]
6+
ssl-ca = "/tmp/backup_restore_test/certs/ca.pem"
7+
ssl-cert = "/tmp/backup_restore_test/certs/tidb.pem"
8+
ssl-key = "/tmp/backup_restore_test/certs/tidb.key"
9+
cluster-ssl-ca = "/tmp/backup_restore_test/certs/ca.pem"
10+
cluster-ssl-cert = "/tmp/backup_restore_test/certs/tidb.pem"
11+
cluster-ssl-key = "/tmp/backup_restore_test/certs/tidb.key"
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
-- basic test: [INDEX/UNIQUE], [COMMENT], [INDEXTYPE], [INVISIBLE], [EXPRESSION]
2+
ALTER TABLE test.pairs ADD INDEX i1(y, z) USING HASH COMMENT "edelw;fe?fewfe\nefwe" INVISIBLE;
3+
ALTER TABLE test.pairs ADD UNIQUE KEY u1(x, y) USING RTREE VISIBLE;
4+
ALTER TABLE test.pairs ADD INDEX i2(y, (z + 1)) USING BTREE COMMENT "123";
5+
ALTER TABLE test.pairs ADD UNIQUE KEY u2(x, (y+1)) USING HASH COMMENT "243";
6+
7+
-- test: [COLUMN LENGTH], [EXPRESSION], [PRIMARY]
8+
ALTER TABLE test.pairs2 ADD INDEX i1(y, z(10));
9+
ALTER TABLE test.pairs2 ADD UNIQUE KEY u1(y, z(10), (y * 2)) USING RTREE VISIBLE;
10+
ALTER TABLE test.pairs2 ADD PRIMARY KEY (x) USING HASH;
11+
12+
-- test: [MULTIVALUED]
13+
ALTER TABLE test.pairs3 ADD INDEX zips2((CAST(custinfo->'$.zipcode' AS UNSIGNED ARRAY)));
14+
15+
-- test: DROP operation
16+
ALTER TABLE test.pairs4 ADD INDEX i1(y, z) USING HASH COMMENT "edelw;fe?fewfe\nefwe" INVISIBLE;
17+
ALTER TABLE test.pairs4 ADD UNIQUE KEY u1(x, y) USING RTREE VISIBLE;
18+
ALTER TABLE test.pairs4 ADD INDEX i2(y, (z + 1)) USING BTREE COMMENT "123";
19+
ALTER TABLE test.pairs4 ADD UNIQUE KEY u2(x, (y+1)) USING HASH COMMENT "243";
20+
ALTER TABLE test.pairs4 DROP INDEX i1;
21+
ALTER TABLE test.pairs4 DROP INDEX u1;
22+
ALTER TABLE test.pairs4 DROP INDEX i2;
23+
ALTER TABLE test.pairs4 DROP INDEX u2;
24+
25+
-- test: DROP operation
26+
ALTER TABLE test.pairs5 ADD INDEX i1(y, z(10));
27+
ALTER TABLE test.pairs5 ADD UNIQUE KEY u1(y, z(10), (y * 2)) USING RTREE VISIBLE;
28+
ALTER TABLE test.pairs5 ADD PRIMARY KEY (x) USING HASH;
29+
ALTER TABLE test.pairs5 DROP INDEX i1;
30+
ALTER TABLE test.pairs5 DROP INDEX u1;
31+
ALTER TABLE test.pairs5 DROP INDEX `PRIMARY`;
32+
33+
-- test: [strange string in EXPRESSION], [rename operation]
34+
ALTER TABLE test.pairs6 ADD INDEX zips2((CAST(`cust``;info`->'$.zipcode' AS UNSIGNED ARRAY)));
35+
ALTER TABLE test.pairs6 ADD INDEX i1(`nam``;e`, (`nam``;e` * 2));
36+
RENAME TABLE test.pairs6 TO test.pairs7;
37+
ALTER TABLE test.pairs7 RENAME INDEX i1 to i2;
38+
39+
-- future test: [MODIFY COLUMN operation]
40+
ALTER TABLE test.pairs8 ADD INDEX i1(y);
41+
ALTER TABLE test.pairs8 MODIFY y varchar(20);
42+
43+
-- future test: [CHANGE COLUMN operation]
44+
ALTER TABLE test.pairs9 ADD INDEX i1(y);
45+
ALTER TABLE test.pairs9 CHANGE y y2 varchar(20);
46+
47+
-- test partition
48+
ALTER TABLE test.pairs10 ADD INDEX i1(y);
49+
50+
51+
CREATE INDEX huge ON test.huge_idx(blob1, blob2);

br/tests/br_pitr/prepare_data/ingest_repair.sql

Lines changed: 48 additions & 0 deletions
Large diffs are not rendered by default.

br/tests/br_pitr/run.sh

Lines changed: 95 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,9 +22,15 @@ CUR=$(cd `dirname $0`; pwd)
2222
PREFIX="pitr_backup" # NOTICE: don't start with 'br' because `restart services` would remove file/directory br*.
2323
res_file="$TEST_DIR/sql_res.$TEST_NAME.txt"
2424

25+
restart_services_allowing_huge_index() {
26+
echo "restarting services with huge indices enabled..."
27+
stop_services
28+
start_services --tidb-cfg "$CUR/config/tidb-max-index-length.toml"
29+
echo "restart services done..."
30+
}
31+
2532
# start a new cluster
26-
echo "restart a services"
27-
restart_services
33+
restart_services_allowing_huge_index
2834

2935
# prepare the data
3036
echo "prepare the data"
@@ -91,11 +97,16 @@ done
9197
# ...
9298

9399
# start a new cluster
100+
<<<<<<< HEAD
94101
echo "restart a services"
95102
restart_services
103+
=======
104+
restart_services_allowing_huge_index
105+
>>>>>>> 384f858a6c8 (br/stream: allow pitr to create oversized indices (#58433))
96106

97107
# PITR restore
98108
echo "run pitr"
109+
<<<<<<< HEAD
99110
run_br --pd $PD_ADDR restore point -s "local://$TEST_DIR/$PREFIX/log" --full-backup-storage "local://$TEST_DIR/$PREFIX/full" > $res_file 2>&1
100111

101112
# check something in downstream cluster
@@ -109,3 +120,85 @@ run_sql "select * from mysql.gc_delete_range_done"
109120
run_sql "select count(*) DELETE_RANGE_CNT from (select * from mysql.gc_delete_range union all select * from mysql.gc_delete_range_done) del_range group by ts order by DELETE_RANGE_CNT desc limit 1;"
110121
expect_delete_range=$(($incremental_delete_range_count-$prepare_delete_range_count))
111122
check_contains "DELETE_RANGE_CNT: $expect_delete_range"
123+
=======
124+
run_sql "DROP DATABASE __TiDB_BR_Temporary_Log_Restore_Checkpoint;"
125+
run_sql "DROP DATABASE __TiDB_BR_Temporary_Custom_SST_Restore_Checkpoint;"
126+
run_br --pd $PD_ADDR restore point -s "local://$TEST_DIR/$PREFIX/log" --full-backup-storage "local://$TEST_DIR/$PREFIX/full" > $res_file 2>&1 || ( cat $res_file && exit 1 )
127+
128+
check_result
129+
130+
# start a new cluster for incremental + log
131+
restart_services_allowing_huge_index
132+
133+
echo "run snapshot restore#2"
134+
run_br --pd $PD_ADDR restore full -s "local://$TEST_DIR/$PREFIX/full"
135+
136+
echo "run incremental restore + log restore"
137+
run_br --pd $PD_ADDR restore point -s "local://$TEST_DIR/$PREFIX/log" --full-backup-storage "local://$TEST_DIR/$PREFIX/inc" > $res_file 2>&1
138+
139+
check_result
140+
141+
# start a new cluster for incremental + log
142+
echo "restart services"
143+
restart_services_allowing_huge_index
144+
145+
echo "run snapshot restore#3"
146+
run_br --pd $PD_ADDR restore full -s "local://$TEST_DIR/$PREFIX/full"
147+
148+
echo "run incremental restore but failed"
149+
restore_fail=0
150+
run_br --pd $PD_ADDR restore full -s "local://$TEST_DIR/$PREFIX/inc_fail" || restore_fail=1
151+
if [ $restore_fail -ne 1 ]; then
152+
echo 'pitr success on incremental restore'
153+
exit 1
154+
fi
155+
156+
# start a new cluster for corruption
157+
restart_services_allowing_huge_index
158+
159+
file_corruption() {
160+
echo "corrupt the whole log files"
161+
for filename in $(find $TEST_DIR/$PREFIX/log -regex ".*\.log" | grep -v "schema-meta"); do
162+
echo "corrupt the log file $filename"
163+
filename_temp=$filename"_temp"
164+
echo "corruption" > $filename_temp
165+
cat $filename >> $filename_temp
166+
mv $filename_temp $filename
167+
truncate -s -11 $filename
168+
done
169+
}
170+
171+
# file corruption
172+
file_corruption
173+
export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/utils/set-remaining-attempts-to-one=return(true)"
174+
restore_fail=0
175+
run_br --pd $PD_ADDR restore point -s "local://$TEST_DIR/$PREFIX/log" --full-backup-storage "local://$TEST_DIR/$PREFIX/full" || restore_fail=1
176+
export GO_FAILPOINTS=""
177+
if [ $restore_fail -ne 1 ]; then
178+
echo 'pitr success on file corruption'
179+
exit 1
180+
fi
181+
182+
# start a new cluster for corruption
183+
restart_services_allowing_huge_index
184+
185+
file_lost() {
186+
echo "lost the whole log files"
187+
for filename in $(find $TEST_DIR/$PREFIX/log -regex ".*\.log" | grep -v "schema-meta"); do
188+
echo "lost the log file $filename"
189+
filename_temp=$filename"_temp"
190+
mv $filename $filename_temp
191+
done
192+
}
193+
194+
# file lost
195+
file_lost
196+
export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/utils/set-remaining-attempts-to-one=return(true)"
197+
restore_fail=0
198+
run_br --pd $PD_ADDR restore point -s "local://$TEST_DIR/$PREFIX/log" --full-backup-storage "local://$TEST_DIR/$PREFIX/full" || restore_fail=1
199+
export GO_FAILPOINTS=""
200+
if [ $restore_fail -ne 1 ]; then
201+
echo 'pitr success on file lost'
202+
exit 1
203+
fi
204+
>>>>>>> 384f858a6c8 (br/stream: allow pitr to create oversized indices (#58433))

0 commit comments

Comments (0)